Diffstat (limited to 'numpy')
-rw-r--r--  numpy/__init__.py | 65
-rw-r--r--  numpy/_import_tools.py | 22
-rw-r--r--  numpy/add_newdocs.py | 185
-rw-r--r--  numpy/build_utils/__init__.py | 1
-rw-r--r--  numpy/build_utils/common.py | 7
-rw-r--r--  numpy/build_utils/waf.py | 6
-rw-r--r--  numpy/compat/__init__.py | 10
-rw-r--r--  numpy/compat/_inspect.py | 24
-rw-r--r--  numpy/compat/py3k.py | 21
-rw-r--r--  numpy/compat/setup.py | 2
-rw-r--r--  numpy/compat/setupscons.py | 11
-rw-r--r--  numpy/core/SConscript | 531
-rw-r--r--  numpy/core/SConstruct | 2
-rw-r--r--  numpy/core/__init__.py | 57
-rw-r--r--  numpy/core/_internal.py | 25
-rw-r--r--  numpy/core/_methods.py | 89
-rw-r--r--  numpy/core/arrayprint.py | 53
-rw-r--r--  numpy/core/bento.info | 3
-rw-r--r--  numpy/core/blasdot/_dotblas.c | 23
-rw-r--r--  numpy/core/bscript | 85
-rw-r--r--  numpy/core/code_generators/__init__.py | 1
-rw-r--r--  numpy/core/code_generators/cversions.py | 14
-rw-r--r--  numpy/core/code_generators/cversions.txt | 6
-rw-r--r--  numpy/core/code_generators/genapi.py | 18
-rw-r--r--  numpy/core/code_generators/generate_numpy_api.py | 6
-rw-r--r--  numpy/core/code_generators/generate_ufunc_api.py | 2
-rw-r--r--  numpy/core/code_generators/generate_umath.py | 21
-rw-r--r--  numpy/core/code_generators/numpy_api.py | 10
-rw-r--r--  numpy/core/code_generators/ufunc_docstrings.py | 102
-rw-r--r--  numpy/core/defchararray.py | 603
-rw-r--r--  numpy/core/fromnumeric.py | 383
-rw-r--r--  numpy/core/function_base.py | 6
-rw-r--r--  numpy/core/getlimits.py | 30
-rw-r--r--  numpy/core/include/numpy/arrayobject.h | 11
-rw-r--r--  numpy/core/include/numpy/ndarrayobject.h | 7
-rw-r--r--  numpy/core/include/numpy/ndarraytypes.h | 83
-rw-r--r--  numpy/core/include/numpy/noprefix.h | 14
-rw-r--r--  numpy/core/include/numpy/npy_1_7_deprecated_api.h (renamed from numpy/core/include/numpy/npy_deprecated_api.h) | 21
-rw-r--r--  numpy/core/include/numpy/npy_common.h | 57
-rw-r--r--  numpy/core/include/numpy/npy_cpu.h | 4
-rw-r--r--  numpy/core/include/numpy/npy_endian.h | 4
-rw-r--r--  numpy/core/include/numpy/npy_math.h | 61
-rw-r--r--  numpy/core/include/numpy/numpyconfig.h | 19
-rw-r--r--  numpy/core/include/numpy/ufuncobject.h | 18
-rw-r--r--  numpy/core/info.py | 3
-rw-r--r--  numpy/core/machar.py | 31
-rw-r--r--  numpy/core/memmap.py | 26
-rw-r--r--  numpy/core/numeric.py | 257
-rw-r--r--  numpy/core/numerictypes.py | 28
-rw-r--r--  numpy/core/records.py | 36
-rw-r--r--  numpy/core/scons_support.py | 272
-rw-r--r--  numpy/core/setup.py | 129
-rw-r--r--  numpy/core/setup_common.py | 42
-rw-r--r--  numpy/core/setupscons.py | 111
-rw-r--r--  numpy/core/shape_base.py | 10
-rw-r--r--  numpy/core/src/multiarray/_datetime.h | 2
-rw-r--r--  numpy/core/src/multiarray/array_assign.c | 40
-rw-r--r--  numpy/core/src/multiarray/array_assign_scalar.c | 6
-rw-r--r--  numpy/core/src/multiarray/arrayobject.c | 140
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src | 46
-rw-r--r--  numpy/core/src/multiarray/buffer.c | 38
-rw-r--r--  numpy/core/src/multiarray/calculation.c | 1
-rw-r--r--  numpy/core/src/multiarray/common.c | 211
-rw-r--r--  numpy/core/src/multiarray/common.h | 28
-rw-r--r--  numpy/core/src/multiarray/conversion_utils.c | 436
-rw-r--r--  numpy/core/src/multiarray/conversion_utils.h | 3
-rw-r--r--  numpy/core/src/multiarray/convert.c | 9
-rw-r--r--  numpy/core/src/multiarray/convert.h | 4
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.c | 82
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 227
-rw-r--r--  numpy/core/src/multiarray/datetime_busdaycal.c | 2
-rw-r--r--  numpy/core/src/multiarray/datetime_strings.c | 76
-rw-r--r--  numpy/core/src/multiarray/descriptor.c | 2
-rw-r--r--  numpy/core/src/multiarray/dtype_transfer.c | 18
-rw-r--r--  numpy/core/src/multiarray/einsum.c.src | 5
-rw-r--r--  numpy/core/src/multiarray/flagsobject.c | 62
-rw-r--r--  numpy/core/src/multiarray/getset.c | 27
-rw-r--r--  numpy/core/src/multiarray/item_selection.c | 554
-rw-r--r--  numpy/core/src/multiarray/iterators.c | 53
-rw-r--r--  numpy/core/src/multiarray/iterators.h | 3
-rw-r--r--  numpy/core/src/multiarray/lowlevel_strided_loops.c.src | 62
-rw-r--r--  numpy/core/src/multiarray/mapping.c | 786
-rw-r--r--  numpy/core/src/multiarray/mapping.h | 28
-rw-r--r--  numpy/core/src/multiarray/methods.c | 175
-rw-r--r--  numpy/core/src/multiarray/multiarray_tests.c.src | 39
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c | 135
-rw-r--r--  numpy/core/src/multiarray/nditer_api.c | 55
-rw-r--r--  numpy/core/src/multiarray/nditer_constr.c | 169
-rw-r--r--  numpy/core/src/multiarray/nditer_impl.h | 2
-rw-r--r--  numpy/core/src/multiarray/nditer_pywrap.c | 86
-rw-r--r--  numpy/core/src/multiarray/number.c | 34
-rw-r--r--  numpy/core/src/multiarray/numpymemoryview.c | 4
-rw-r--r--  numpy/core/src/multiarray/numpymemoryview.h | 2
-rw-r--r--  numpy/core/src/multiarray/numpyos.c | 5
-rw-r--r--  numpy/core/src/multiarray/scalarapi.c | 42
-rw-r--r--  numpy/core/src/multiarray/scalartypes.c.src | 79
-rw-r--r--  numpy/core/src/multiarray/sequence.c | 15
-rw-r--r--  numpy/core/src/multiarray/shape.c | 48
-rw-r--r--  numpy/core/src/multiarray/testcalcs.py | 2
-rw-r--r--  numpy/core/src/multiarray/ucsnarrow.c | 1
-rw-r--r--  numpy/core/src/npymath/_signbit.c | 6
-rw-r--r--  numpy/core/src/npymath/halffloat.c | 2
-rw-r--r--  numpy/core/src/npymath/npy_math_private.h | 39
-rw-r--r--  numpy/core/src/npysort/mergesort.c.src | 5
-rw-r--r--  numpy/core/src/npysort/selection.c.src | 459
-rw-r--r--  numpy/core/src/private/lowlevel_strided_loops.h | 116
-rw-r--r--  numpy/core/src/private/npy_config.h | 20
-rw-r--r--  numpy/core/src/private/npy_fpmath.h | 1
-rw-r--r--  numpy/core/src/private/npy_partition.h | 559
-rw-r--r--  numpy/core/src/private/npy_partition.h.src | 131
-rw-r--r--  numpy/core/src/private/npy_pycompat.h | 18
-rw-r--r--  numpy/core/src/scalarmathmodule.c.src | 111
-rw-r--r--  numpy/core/src/umath/loops.c.src | 168
-rw-r--r--  numpy/core/src/umath/loops.h | 356
-rw-r--r--  numpy/core/src/umath/loops.h.src | 6
-rw-r--r--  numpy/core/src/umath/operand_flag_tests.c.src | 105
-rw-r--r--  numpy/core/src/umath/reduction.c | 7
-rw-r--r--  numpy/core/src/umath/simd.inc.src | 848
-rw-r--r--  numpy/core/src/umath/struct_ufunc_test.c.src | 122
-rw-r--r--  numpy/core/src/umath/test_rational.c.src | 1389
-rw-r--r--  numpy/core/src/umath/ufunc_object.c | 595
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c | 71
-rw-r--r--  numpy/core/src/umath/umathmodule.c | 10
-rw-r--r--  numpy/core/tests/test_api.py | 244
-rw-r--r--  numpy/core/tests/test_arrayprint.py | 5
-rw-r--r--  numpy/core/tests/test_blasdot.py | 66
-rw-r--r--  numpy/core/tests/test_datetime.py | 13
-rw-r--r--  numpy/core/tests/test_defchararray.py | 92
-rw-r--r--  numpy/core/tests/test_deprecations.py | 292
-rw-r--r--  numpy/core/tests/test_dtype.py | 46
-rw-r--r--  numpy/core/tests/test_einsum.py | 12
-rw-r--r--  numpy/core/tests/test_errstate.py | 52
-rw-r--r--  numpy/core/tests/test_function_base.py | 1
-rw-r--r--  numpy/core/tests/test_getlimits.py | 6
-rw-r--r--  numpy/core/tests/test_half.py | 23
-rw-r--r--  numpy/core/tests/test_indexerrors.py | 6
-rw-r--r--  numpy/core/tests/test_indexing.py | 521
-rw-r--r--  numpy/core/tests/test_item_selection.py | 65
-rw-r--r--  numpy/core/tests/test_machar.py | 11
-rw-r--r--  numpy/core/tests/test_memmap.py | 22
-rw-r--r--  numpy/core/tests/test_multiarray.py | 1460
-rw-r--r--  numpy/core/tests/test_multiarray_assignment.py | 4
-rw-r--r--  numpy/core/tests/test_nditer.py | 153
-rw-r--r--  numpy/core/tests/test_numeric.py | 456
-rw-r--r--  numpy/core/tests/test_numerictypes.py | 2
-rw-r--r--  numpy/core/tests/test_print.py | 97
-rw-r--r--  numpy/core/tests/test_records.py | 23
-rw-r--r--  numpy/core/tests/test_regression.py | 258
-rw-r--r--  numpy/core/tests/test_scalarmath.py | 61
-rw-r--r--  numpy/core/tests/test_scalarprint.py | 30
-rw-r--r--  numpy/core/tests/test_shape_base.py | 143
-rw-r--r--  numpy/core/tests/test_ufunc.py | 111
-rw-r--r--  numpy/core/tests/test_umath.py | 173
-rw-r--r--  numpy/core/tests/test_umath_complex.py | 76
-rw-r--r--  numpy/core/tests/test_unicode.py | 26
-rw-r--r--  numpy/ctypeslib.py | 41
-rw-r--r--  numpy/distutils/__init__.py | 12
-rw-r--r--  numpy/distutils/__version__.py | 2
-rw-r--r--  numpy/distutils/ccompiler.py | 15
-rw-r--r--  numpy/distutils/command/__init__.py | 12
-rw-r--r--  numpy/distutils/command/autodist.py | 6
-rw-r--r--  numpy/distutils/command/bdist_rpm.py | 2
-rw-r--r--  numpy/distutils/command/build.py | 2
-rw-r--r--  numpy/distutils/command/build_clib.py | 1
-rw-r--r--  numpy/distutils/command/build_ext.py | 11
-rw-r--r--  numpy/distutils/command/build_py.py | 5
-rw-r--r--  numpy/distutils/command/build_scripts.py | 2
-rw-r--r--  numpy/distutils/command/build_src.py | 22
-rw-r--r--  numpy/distutils/command/config.py | 3
-rw-r--r--  numpy/distutils/command/config_compiler.py | 2
-rw-r--r--  numpy/distutils/command/develop.py | 2
-rw-r--r--  numpy/distutils/command/egg_info.py | 2
-rw-r--r--  numpy/distutils/command/install.py | 4
-rw-r--r--  numpy/distutils/command/install_clib.py | 2
-rw-r--r--  numpy/distutils/command/install_data.py | 2
-rw-r--r--  numpy/distutils/command/install_headers.py | 2
-rw-r--r--  numpy/distutils/command/scons.py | 589
-rw-r--r--  numpy/distutils/command/sdist.py | 2
-rw-r--r--  numpy/distutils/compat.py | 3
-rw-r--r--  numpy/distutils/conv_template.py | 12
-rw-r--r--  numpy/distutils/core.py | 21
-rw-r--r--  numpy/distutils/cpuinfo.py | 34
-rw-r--r--  numpy/distutils/environment.py | 2
-rw-r--r--  numpy/distutils/exec_command.py | 60
-rw-r--r--  numpy/distutils/extension.py | 11
-rw-r--r--  numpy/distutils/fcompiler/__init__.py | 6
-rw-r--r--  numpy/distutils/fcompiler/absoft.py | 1
-rw-r--r--  numpy/distutils/fcompiler/compaq.py | 1
-rw-r--r--  numpy/distutils/fcompiler/g95.py | 1
-rw-r--r--  numpy/distutils/fcompiler/gnu.py | 8
-rw-r--r--  numpy/distutils/fcompiler/hpux.py | 2
-rw-r--r--  numpy/distutils/fcompiler/ibm.py | 7
-rw-r--r--  numpy/distutils/fcompiler/intel.py | 20
-rw-r--r--  numpy/distutils/fcompiler/lahey.py | 2
-rw-r--r--  numpy/distutils/fcompiler/mips.py | 2
-rw-r--r--  numpy/distutils/fcompiler/nag.py | 2
-rw-r--r--  numpy/distutils/fcompiler/none.py | 1
-rw-r--r--  numpy/distutils/fcompiler/pathf95.py | 4
-rw-r--r--  numpy/distutils/fcompiler/pg.py | 6
-rw-r--r--  numpy/distutils/fcompiler/sun.py | 2
-rw-r--r--  numpy/distutils/fcompiler/vast.py | 2
-rw-r--r--  numpy/distutils/from_template.py | 18
-rw-r--r--  numpy/distutils/info.py | 1
-rw-r--r--  numpy/distutils/intelccompiler.py | 2
-rw-r--r--  numpy/distutils/interactive.py | 188
-rw-r--r--  numpy/distutils/lib2def.py | 6
-rw-r--r--  numpy/distutils/line_endings.py | 18
-rw-r--r--  numpy/distutils/log.py | 22
-rw-r--r--  numpy/distutils/mingw32ccompiler.py | 36
-rw-r--r--  numpy/distutils/misc_util.py | 313
-rw-r--r--  numpy/distutils/npy_pkg_config.py | 31
-rw-r--r--  numpy/distutils/numpy_distribution.py | 2
-rw-r--r--  numpy/distutils/pathccompiler.py | 2
-rw-r--r--  numpy/distutils/setup.py | 1
-rw-r--r--  numpy/distutils/setupscons.py | 17
-rw-r--r--  numpy/distutils/system_info.py | 50
-rw-r--r--  numpy/distutils/tests/f2py_ext/__init__.py | 1
-rw-r--r--  numpy/distutils/tests/f2py_ext/setup.py | 2
-rw-r--r--  numpy/distutils/tests/f2py_ext/tests/test_fib2.py | 2
-rw-r--r--  numpy/distutils/tests/f2py_f90_ext/__init__.py | 1
-rw-r--r--  numpy/distutils/tests/f2py_f90_ext/setup.py | 2
-rw-r--r--  numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py | 2
-rw-r--r--  numpy/distutils/tests/gen_ext/__init__.py | 1
-rw-r--r--  numpy/distutils/tests/gen_ext/setup.py | 1
-rw-r--r--  numpy/distutils/tests/gen_ext/tests/test_fib3.py | 2
-rw-r--r--  numpy/distutils/tests/pyrex_ext/__init__.py | 1
-rw-r--r--  numpy/distutils/tests/pyrex_ext/setup.py | 2
-rw-r--r--  numpy/distutils/tests/pyrex_ext/tests/test_primes.py | 2
-rw-r--r--  numpy/distutils/tests/setup.py | 2
-rw-r--r--  numpy/distutils/tests/swig_ext/__init__.py | 1
-rw-r--r--  numpy/distutils/tests/swig_ext/setup.py | 2
-rw-r--r--  numpy/distutils/tests/swig_ext/tests/test_example.py | 2
-rw-r--r--  numpy/distutils/tests/swig_ext/tests/test_example2.py | 2
-rw-r--r--  numpy/distutils/tests/test_exec_command.py | 92
-rw-r--r--  numpy/distutils/tests/test_fcompiler_gnu.py | 3
-rw-r--r--  numpy/distutils/tests/test_fcompiler_intel.py | 2
-rw-r--r--  numpy/distutils/tests/test_misc_util.py | 23
-rw-r--r--  numpy/distutils/tests/test_npy_pkg_config.py | 2
-rw-r--r--  numpy/distutils/unixccompiler.py | 4
-rw-r--r--  numpy/doc/__init__.py | 7
-rw-r--r--  numpy/doc/basics.py | 33
-rw-r--r--  numpy/doc/broadcasting.py | 2
-rw-r--r--  numpy/doc/byteswapping.py | 6
-rw-r--r--  numpy/doc/constants.py | 2
-rw-r--r--  numpy/doc/creation.py | 8
-rw-r--r--  numpy/doc/glossary.py | 2
-rw-r--r--  numpy/doc/howtofind.py | 2
-rw-r--r--  numpy/doc/indexing.py | 2
-rw-r--r--  numpy/doc/internals.py | 2
-rw-r--r--  numpy/doc/io.py | 2
-rw-r--r--  numpy/doc/jargon.py | 2
-rw-r--r--  numpy/doc/methods_vs_functions.py | 2
-rw-r--r--  numpy/doc/misc.py | 2
-rw-r--r--  numpy/doc/performance.py | 2
-rw-r--r--  numpy/doc/structured_arrays.py | 9
-rw-r--r--  numpy/doc/subclassing.py | 2
-rw-r--r--  numpy/doc/ufuncs.py | 2
-rw-r--r--  numpy/dual.py | 2
-rw-r--r--  numpy/f2py/__init__.py | 11
-rw-r--r--  numpy/f2py/__version__.py | 2
-rw-r--r--  numpy/f2py/auxfuncs.py | 73
-rw-r--r--  numpy/f2py/capi_maps.py | 58
-rw-r--r--  numpy/f2py/cb_rules.py | 28
-rw-r--r--  numpy/f2py/cfuncs.py | 32
-rw-r--r--  numpy/f2py/common_rules.py | 16
-rwxr-xr-x  numpy/f2py/crackfortran.py | 485
-rw-r--r--  numpy/f2py/diagnose.py | 153
-rwxr-xr-x  numpy/f2py/doc/collectinput.py | 20
-rw-r--r--  numpy/f2py/docs/pytest.py | 2
-rw-r--r--  numpy/f2py/docs/usersguide/setup_example.py | 2
-rwxr-xr-x  numpy/f2py/f2py2e.py | 82
-rw-r--r--  numpy/f2py/f2py_testing.py | 14
-rw-r--r--  numpy/f2py/f90mod_rules.py | 20
-rw-r--r--  numpy/f2py/func2subr.py | 4
-rw-r--r--  numpy/f2py/info.py | 1
-rw-r--r--  numpy/f2py/rules.py | 78
-rw-r--r--  numpy/f2py/setup.py | 6
-rwxr-xr-x  numpy/f2py/setupscons.py | 124
-rw-r--r--  numpy/f2py/src/fortranobject.c | 2
-rw-r--r--  numpy/f2py/src/fortranobject.h | 23
-rw-r--r--  numpy/f2py/tests/test_array_from_pyobj.py | 76
-rw-r--r--  numpy/f2py/tests/test_assumed_shape.py | 12
-rw-r--r--  numpy/f2py/tests/test_callback.py | 55
-rw-r--r--  numpy/f2py/tests/test_kind.py | 2
-rw-r--r--  numpy/f2py/tests/test_mixed.py | 16
-rw-r--r--  numpy/f2py/tests/test_return_character.py | 14
-rw-r--r--  numpy/f2py/tests/test_return_complex.py | 9
-rw-r--r--  numpy/f2py/tests/test_return_integer.py | 9
-rw-r--r--  numpy/f2py/tests/test_return_logical.py | 11
-rw-r--r--  numpy/f2py/tests/test_return_real.py | 9
-rw-r--r--  numpy/f2py/tests/test_size.py | 16
-rw-r--r--  numpy/f2py/tests/util.py | 5
-rw-r--r--  numpy/f2py/use_rules.py | 8
-rw-r--r--  numpy/fft/SConscript | 8
-rw-r--r--  numpy/fft/SConstruct | 2
-rw-r--r--  numpy/fft/__init__.py | 8
-rw-r--r--  numpy/fft/fftpack.py | 26
-rw-r--r--  numpy/fft/fftpack_litemodule.c | 12
-rw-r--r--  numpy/fft/helper.py | 105
-rw-r--r--  numpy/fft/info.py | 4
-rw-r--r--  numpy/fft/setup.py | 1
-rw-r--r--  numpy/fft/setupscons.py | 15
-rw-r--r--  numpy/fft/tests/test_fftpack.py | 2
-rw-r--r--  numpy/fft/tests/test_helper.py | 19
-rw-r--r--  numpy/lib/SConscript | 7
-rw-r--r--  numpy/lib/SConstruct | 2
-rw-r--r--  numpy/lib/__init__.py | 39
-rw-r--r--  numpy/lib/_datasource.py | 31
-rw-r--r--  numpy/lib/_iotools.py | 25
-rw-r--r--  numpy/lib/arraypad.py | 1612
-rw-r--r--  numpy/lib/arraysetops.py | 40
-rw-r--r--  numpy/lib/arrayterator.py | 12
-rw-r--r--  numpy/lib/financial.py | 42
-rw-r--r--  numpy/lib/format.py | 45
-rw-r--r--  numpy/lib/function_base.py | 646
-rw-r--r--  numpy/lib/index_tricks.py | 39
-rw-r--r--  numpy/lib/info.py | 1
-rw-r--r--  numpy/lib/nanfunctions.py | 812
-rw-r--r--  numpy/lib/npyio.py | 121
-rw-r--r--  numpy/lib/polynomial.py | 10
-rw-r--r--  numpy/lib/recfunctions.py | 16
-rw-r--r--  numpy/lib/scimath.py | 1
-rw-r--r--  numpy/lib/setup.py | 2
-rw-r--r--  numpy/lib/setupscons.py | 16
-rw-r--r--  numpy/lib/shape_base.py | 35
-rw-r--r--  numpy/lib/src/_compiled_base.c | 3
-rw-r--r--  numpy/lib/stride_tricks.py | 11
-rw-r--r--  numpy/lib/tests/test__datasource.py | 33
-rw-r--r--  numpy/lib/tests/test__iotools.py | 18
-rw-r--r--  numpy/lib/tests/test_arraypad.py | 45
-rw-r--r--  numpy/lib/tests/test_arraysetops.py | 34
-rw-r--r--  numpy/lib/tests/test_arrayterator.py | 6
-rw-r--r--  numpy/lib/tests/test_financial.py | 10
-rw-r--r--  numpy/lib/tests/test_format.py | 50
-rw-r--r--  numpy/lib/tests/test_function_base.py | 317
-rw-r--r--  numpy/lib/tests/test_index_tricks.py | 16
-rw-r--r--  numpy/lib/tests/test_io.py | 573
-rw-r--r--  numpy/lib/tests/test_nanfunctions.py | 417
-rw-r--r--  numpy/lib/tests/test_polynomial.py | 6
-rw-r--r--  numpy/lib/tests/test_recfunctions.py | 26
-rw-r--r--  numpy/lib/tests/test_regression.py | 22
-rw-r--r--  numpy/lib/tests/test_shape_base.py | 17
-rw-r--r--  numpy/lib/tests/test_stride_tricks.py | 2
-rw-r--r--  numpy/lib/tests/test_twodim_base.py | 5
-rw-r--r--  numpy/lib/tests/test_type_check.py | 105
-rw-r--r--  numpy/lib/tests/test_ufunclike.py | 2
-rw-r--r--  numpy/lib/tests/test_utils.py | 8
-rw-r--r--  numpy/lib/twodim_base.py | 82
-rw-r--r--  numpy/lib/type_check.py | 12
-rw-r--r--  numpy/lib/ufunclike.py | 3
-rw-r--r--  numpy/lib/user_array.py | 31
-rw-r--r--  numpy/lib/utils.py | 147
-rw-r--r--  numpy/linalg/SConscript | 23
-rw-r--r--  numpy/linalg/SConstruct | 2
-rw-r--r--  numpy/linalg/__init__.py | 6
-rw-r--r--  numpy/linalg/_gufuncs_linalg.py | 1454
-rw-r--r--  numpy/linalg/bento.info | 21
-rw-r--r--  numpy/linalg/blas_lite.c | 10660
-rw-r--r--  numpy/linalg/bscript | 10
-rw-r--r--  numpy/linalg/dlapack_lite.c | 36008
-rw-r--r--  numpy/linalg/info.py | 1
-rw-r--r--  numpy/linalg/lapack_lite/blas_lite.c | 21135
-rw-r--r--  numpy/linalg/lapack_lite/clapack_scrub.py | 9
-rw-r--r--  numpy/linalg/lapack_lite/dlamch.c (renamed from numpy/linalg/dlamch.c) | 0
-rw-r--r--  numpy/linalg/lapack_lite/dlapack_lite.c | 100833
-rw-r--r--  numpy/linalg/lapack_lite/f2c.h (renamed from numpy/linalg/f2c.h) | 0
-rw-r--r--  numpy/linalg/lapack_lite/f2c_lite.c (renamed from numpy/linalg/f2c_lite.c) | 179
-rw-r--r--  numpy/linalg/lapack_lite/fortran.py | 18
-rwxr-xr-x  numpy/linalg/lapack_lite/make_lite.py | 22
-rw-r--r--  numpy/linalg/lapack_lite/python_xerbla.c (renamed from numpy/linalg/python_xerbla.c) | 6
-rw-r--r--  numpy/linalg/lapack_lite/zlapack_lite.c (renamed from numpy/linalg/zlapack_lite.c) | 1708
-rw-r--r--  numpy/linalg/lapack_litemodule.c | 74
-rw-r--r--  numpy/linalg/linalg.py | 938
-rw-r--r--  numpy/linalg/setup.py | 29
-rw-r--r--  numpy/linalg/setupscons.py | 19
-rw-r--r--  numpy/linalg/tests/test_build.py | 2
-rw-r--r--  numpy/linalg/tests/test_deprecations.py | 24
-rw-r--r--  numpy/linalg/tests/test_gufuncs_linalg.py | 500
-rw-r--r--  numpy/linalg/tests/test_linalg.py | 556
-rw-r--r--  numpy/linalg/tests/test_regression.py | 2
-rw-r--r--  numpy/linalg/umath_linalg.c.src | 4346
-rw-r--r--  numpy/ma/__init__.py | 10
-rw-r--r--  numpy/ma/bench.py | 41
-rw-r--r--  numpy/ma/core.py | 186
-rw-r--r--  numpy/ma/extras.py | 54
-rw-r--r--  numpy/ma/mrecords.py | 24
-rw-r--r--  numpy/ma/setup.py | 2
-rw-r--r--  numpy/ma/setupscons.py | 18
-rw-r--r--  numpy/ma/tests/test_core.py | 188
-rw-r--r--  numpy/ma/tests/test_extras.py | 183
-rw-r--r--  numpy/ma/tests/test_mrecords.py | 53
-rw-r--r--  numpy/ma/tests/test_old_ma.py | 215
-rw-r--r--  numpy/ma/tests/test_regression.py | 29
-rw-r--r--  numpy/ma/tests/test_subclassing.py | 10
-rw-r--r--  numpy/ma/testutils.py | 5
-rw-r--r--  numpy/ma/timer_comparison.py | 13
-rw-r--r--  numpy/ma/version.py | 9
-rw-r--r--  numpy/matlib.py | 2
-rw-r--r--  numpy/matrixlib/__init__.py | 8
-rw-r--r--  numpy/matrixlib/defmatrix.py | 21
-rw-r--r--  numpy/matrixlib/setup.py | 2
-rw-r--r--  numpy/matrixlib/setupscons.py | 13
-rw-r--r--  numpy/matrixlib/tests/test_defmatrix.py | 18
-rw-r--r--  numpy/matrixlib/tests/test_multiarray.py | 2
-rw-r--r--  numpy/matrixlib/tests/test_numeric.py | 2
-rw-r--r--  numpy/matrixlib/tests/test_regression.py | 2
-rw-r--r--  numpy/numarray/SConscript | 7
-rw-r--r--  numpy/numarray/SConstruct | 2
-rw-r--r--  numpy/numarray/__init__.py | 32
-rw-r--r--  numpy/numarray/_capi.c | 4
-rw-r--r--  numpy/numarray/alter_code1.py | 4
-rw-r--r--  numpy/numarray/alter_code2.py | 3
-rw-r--r--  numpy/numarray/compat.py | 1
-rw-r--r--  numpy/numarray/convolve.py | 2
-rw-r--r--  numpy/numarray/fft.py | 1
-rw-r--r--  numpy/numarray/functions.py | 92
-rw-r--r--  numpy/numarray/image.py | 2
-rw-r--r--  numpy/numarray/linear_algebra.py | 1
-rw-r--r--  numpy/numarray/ma.py | 1
-rw-r--r--  numpy/numarray/matrix.py | 1
-rw-r--r--  numpy/numarray/mlab.py | 1
-rw-r--r--  numpy/numarray/nd_image.py | 2
-rw-r--r--  numpy/numarray/numerictypes.py | 6
-rw-r--r--  numpy/numarray/random_array.py | 1
-rw-r--r--  numpy/numarray/session.py | 17
-rw-r--r--  numpy/numarray/setup.py | 2
-rw-r--r--  numpy/numarray/setupscons.py | 14
-rw-r--r--  numpy/numarray/ufuncs.py | 1
-rw-r--r--  numpy/numarray/util.py | 10
-rw-r--r--  numpy/oldnumeric/__init__.py | 36
-rw-r--r--  numpy/oldnumeric/alter_code1.py | 5
-rw-r--r--  numpy/oldnumeric/alter_code2.py | 4
-rw-r--r--  numpy/oldnumeric/array_printer.py | 1
-rw-r--r--  numpy/oldnumeric/arrayfns.py | 8
-rw-r--r--  numpy/oldnumeric/compat.py | 32
-rw-r--r--  numpy/oldnumeric/fft.py | 1
-rw-r--r--  numpy/oldnumeric/fix_default_axis.py | 7
-rw-r--r--  numpy/oldnumeric/functions.py | 7
-rw-r--r--  numpy/oldnumeric/linear_algebra.py | 12
-rw-r--r--  numpy/oldnumeric/ma.py | 68
-rw-r--r--  numpy/oldnumeric/matrix.py | 9
-rw-r--r--  numpy/oldnumeric/misc.py | 19
-rw-r--r--  numpy/oldnumeric/mlab.py | 29
-rw-r--r--  numpy/oldnumeric/precision.py | 15
-rw-r--r--  numpy/oldnumeric/random_array.py | 73
-rw-r--r--  numpy/oldnumeric/rng.py | 20
-rw-r--r--  numpy/oldnumeric/rng_stats.py | 1
-rw-r--r--  numpy/oldnumeric/setup.py | 1
-rw-r--r--  numpy/oldnumeric/setupscons.py | 8
-rw-r--r--  numpy/oldnumeric/tests/test_oldnumeric.py | 2
-rw-r--r--  numpy/oldnumeric/tests/test_regression.py | 2
-rw-r--r--  numpy/oldnumeric/typeconv.py | 2
-rw-r--r--  numpy/oldnumeric/ufuncs.py | 2
-rw-r--r--  numpy/oldnumeric/user_array.py | 2
-rw-r--r--  numpy/polynomial/__init__.py | 14
-rw-r--r--  numpy/polynomial/chebyshev.py | 26
-rw-r--r--  numpy/polynomial/hermite.py | 24
-rw-r--r--  numpy/polynomial/hermite_e.py | 26
-rw-r--r--  numpy/polynomial/laguerre.py | 26
-rw-r--r--  numpy/polynomial/legendre.py | 24
-rw-r--r--  numpy/polynomial/polynomial.py | 23
-rw-r--r--  numpy/polynomial/polytemplate.py | 27
-rw-r--r--  numpy/polynomial/polyutils.py | 14
-rw-r--r--  numpy/polynomial/setup.py | 2
-rw-r--r--  numpy/polynomial/tests/test_chebyshev.py | 28
-rw-r--r--  numpy/polynomial/tests/test_classes.py | 6
-rw-r--r--  numpy/polynomial/tests/test_hermite.py | 28
-rw-r--r--  numpy/polynomial/tests/test_hermite_e.py | 28
-rw-r--r--  numpy/polynomial/tests/test_laguerre.py | 28
-rw-r--r--  numpy/polynomial/tests/test_legendre.py | 28
-rw-r--r--  numpy/polynomial/tests/test_polynomial.py | 28
-rw-r--r--  numpy/polynomial/tests/test_polyutils.py | 2
-rw-r--r--  numpy/polynomial/tests/test_printing.py | 2
-rw-r--r--  numpy/random/SConscript | 50
-rw-r--r--  numpy/random/SConstruct | 2
-rw-r--r--  numpy/random/__init__.py | 16
-rw-r--r--  numpy/random/info.py | 1
-rw-r--r--  numpy/random/mtrand/generate_mtrand_c.py | 7
-rw-r--r--  numpy/random/mtrand/mtrand.c | 11640
-rw-r--r--  numpy/random/mtrand/mtrand.pyx | 140
-rw-r--r--  numpy/random/setup.py | 2
-rw-r--r--  numpy/random/setupscons.py | 40
-rw-r--r--  numpy/random/tests/test_random.py | 72
-rw-r--r--  numpy/random/tests/test_regression.py | 49
-rw-r--r--  numpy/setup.py | 2
-rw-r--r--  numpy/setupscons.py | 42
-rw-r--r--  numpy/testing/__init__.py | 12
-rw-r--r--  numpy/testing/decorators.py | 28
-rw-r--r--  numpy/testing/noseclasses.py | 11
-rw-r--r--  numpy/testing/nosetester.py | 84
-rw-r--r--  numpy/testing/nulltester.py | 15
-rw-r--r--  numpy/testing/numpytest.py | 38
-rwxr-xr-x  numpy/testing/print_coercion_tables.py | 55
-rwxr-xr-x  numpy/testing/setup.py | 2
-rwxr-xr-x  numpy/testing/setupscons.py | 16
-rw-r--r--  numpy/testing/tests/test_decorators.py | 10
-rw-r--r--  numpy/testing/tests/test_doctesting.py | 3
-rw-r--r--  numpy/testing/tests/test_utils.py | 4
-rw-r--r--  numpy/testing/utils.py | 168
-rw-r--r--  numpy/tests/test_ctypeslib.py | 12
-rw-r--r--  numpy/tests/test_matlib.py | 4
501 files changed, 156987 insertions, 62536 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 8a09d2fec..b4be4d707 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -104,6 +104,22 @@ available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
Exceptions to this rule are documented.
"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+
+class ModuleDeprecationWarning(DeprecationWarning):
+ """Module deprecation warning.
+
+ The nose tester turns ordinary Deprecation warnings into test failures.
+ That makes it hard to deprecate whole modules, because they get
+ imported by default. So this is a special Deprecation warning that the
+ nose tester will let pass without making tests fail.
+
+ """
+ pass
+
# We first need to detect if we're being called as part of the numpy setup
# procedure itself in a reliable manner.
@@ -125,43 +141,48 @@ else:
its source directory; please exit the numpy source tree, and relaunch
your python interpreter from there."""
raise ImportError(msg)
- from version import git_revision as __git_revision__
- from version import version as __version__
+ from .version import git_revision as __git_revision__
+ from .version import version as __version__
- from _import_tools import PackageLoader
+ from ._import_tools import PackageLoader
def pkgload(*packages, **options):
loader = PackageLoader(infunc=True)
return loader(*packages, **options)
- import add_newdocs
- __all__ = ['add_newdocs']
+ from . import add_newdocs
+ __all__ = ['add_newdocs', 'ModuleDeprecationWarning']
pkgload.__doc__ = PackageLoader.__call__.__doc__
- from testing import Tester
+ from .testing import Tester
test = Tester().test
bench = Tester().bench
- import core
- from core import *
- import compat
- import lib
- from lib import *
- import linalg
- import fft
- import polynomial
- import random
- import ctypeslib
- import ma
- import matrixlib as _mat
- from matrixlib import *
+ from . import core
+ from .core import *
+ from . import compat
+ from . import lib
+ from .lib import *
+ from . import linalg
+ from . import fft
+ from . import polynomial
+ from . import random
+ from . import ctypeslib
+ from . import ma
+ from . import matrixlib as _mat
+ from .matrixlib import *
+ from .compat import long
# Make these accessible from numpy name-space
# but not imported in from numpy import *
- from __builtin__ import bool, int, long, float, complex, \
- object, unicode, str
- from core import round, abs, max, min
+ if sys.version_info[0] >= 3:
+ from builtins import bool, int, float, complex, object, str
+ unicode = str
+ else:
+ from __builtin__ import bool, int, float, complex, object, unicode, str
+
+ from .core import round, abs, max, min
__all__.extend(['__version__', 'pkgload', 'PackageLoader',
'show_config'])
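Annotation: a minimal usage sketch of the ModuleDeprecationWarning class
introduced above (the deprecated subpackage name below is hypothetical; the
class itself is re-exported through numpy's __all__ by this patch):

    import warnings
    import numpy as np

    # Emitted at import time by a deprecated submodule; nose's
    # DeprecationWarning-to-error filter is expected to let this
    # subclass pass instead of failing the whole test run.
    warnings.warn("numpy.somepkg is deprecated",
                  np.ModuleDeprecationWarning)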
diff --git a/numpy/_import_tools.py b/numpy/_import_tools.py
index c0a901a8d..ba9e021c7 100644
--- a/numpy/_import_tools.py
+++ b/numpy/_import_tools.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import sys
@@ -66,9 +68,9 @@ class PackageLoader(object):
break
else:
try:
- exec 'import %s.info as info' % (package_name)
+ exec('import %s.info as info' % (package_name))
info_modules[package_name] = info
- except ImportError, msg:
+ except ImportError as msg:
self.warn('No scipy-style subpackage %r found in %s. '\
'Ignoring: %s'\
% (package_name,':'.join(self.parent_path), msg))
@@ -87,7 +89,7 @@ class PackageLoader(object):
open(info_file,filedescriptor[1]),
info_file,
filedescriptor)
- except Exception,msg:
+ except Exception as msg:
self.error(msg)
info_module = None
@@ -207,7 +209,7 @@ class PackageLoader(object):
if symbols is None:
symbols = eval('dir(%s)' % (package_name),
frame.f_globals,frame.f_locals)
- symbols = filter(lambda s:not s.startswith('_'),symbols)
+ symbols = [s for s in symbols if not s.startswith('_')]
else:
symbols = [symbol]
@@ -241,7 +243,7 @@ class PackageLoader(object):
frame = self.parent_frame
try:
exec (cmdstr, frame.f_globals,frame.f_locals)
- except Exception,msg:
+ except Exception as msg:
self.error('%s -> failed: %s' % (cmdstr,msg))
return True
else:
@@ -260,13 +262,13 @@ class PackageLoader(object):
def log(self,mess):
if self.verbose>1:
- print >> sys.stderr, str(mess)
+ print(str(mess), file=sys.stderr)
def warn(self,mess):
if self.verbose>=0:
- print >> sys.stderr, str(mess)
+ print(str(mess), file=sys.stderr)
def error(self,mess):
if self.verbose!=-1:
- print >> sys.stderr, str(mess)
+ print(str(mess), file=sys.stderr)
def _get_doc_title(self, info_module):
""" Get the title from a package info.py file.
@@ -335,10 +337,10 @@ class PackageLoaderDebug(PackageLoader):
def _execcmd(self,cmdstr):
""" Execute command in parent_frame."""
frame = self.parent_frame
- print 'Executing',`cmdstr`,'...',
+ print('Executing',repr(cmdstr),'...', end=' ')
sys.stdout.flush()
exec (cmdstr, frame.f_globals,frame.f_locals)
- print 'ok'
+ print('ok')
sys.stdout.flush()
return
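Annotation: the conversions in this file are the standard 2-to-3
statement-to-function moves, shown in isolation below (Python 3 syntax; a
sketch, not part of the patch, using a stand-in module name):

    import sys

    try:
        exec('import os.path as info')        # exec is a function, not a statement
    except ImportError as msg:                # was: except ImportError, msg
        print('import failed:', msg, file=sys.stderr)  # was: print >> sys.stderr, ...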
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 09effaaf5..5bb800488 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -1,10 +1,14 @@
-# This is only meant to add docs to objects defined in C-extension modules.
-# The purpose is to allow easier editing of the docstrings without
-# requiring a re-compile.
+"""
+This is only meant to add docs to objects defined in C-extension modules.
+The purpose is to allow easier editing of the docstrings without
+requiring a re-compile.
-# NOTE: Many of the methods of ndarray have corresponding functions.
-# If you update these docstrings, please keep also the ones in
-# core/fromnumeric.py, core/defmatrix.py up-to-date.
+NOTE: Many of the methods of ndarray have corresponding functions.
+ If you update these docstrings, please keep also the ones in
+ core/fromnumeric.py, core/defmatrix.py up-to-date.
+
+"""
+from __future__ import division, absolute_import, print_function
from numpy.lib import add_newdoc
@@ -207,7 +211,7 @@ add_newdoc('numpy.core', 'nditer',
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
- order : {'C', 'F', 'A', or 'K'}, optional
+ order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
@@ -1499,7 +1503,7 @@ add_newdoc('numpy.core.multiarray', 'lexsort',
Parameters
----------
- keys : (k,N) array or tuple containing k (N,)-shaped sequences
+ keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
@@ -1594,7 +1598,6 @@ add_newdoc('numpy.core.multiarray', 'can_cast',
Examples
--------
-
Basic examples
>>> np.can_cast(np.int32, np.int64)
@@ -1974,7 +1977,7 @@ add_newdoc('numpy.core', 'einsum',
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions.
- order : {'C', 'F', 'A', or 'K'}, optional
+ order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
@@ -2611,12 +2614,11 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
- The data and strides are aligned appropriately for the hardware.
+ The data and all elements are aligned appropriately for the hardware.
UPDATEIFCOPY (U)
This array is a copy of some other array. When this array is
deallocated, the base array will be updated with the contents of
this array.
-
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
@@ -2646,6 +2648,16 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
+ Arrays can be both C-style and Fortran-style contiguous simultaneously.
+ This is clear for 1-dimensional arrays, but can also be true for higher
+ dimensional arrays.
+
+ Even for contiguous arrays a stride for a given dimension
+ ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
+ or the array has no elements.
+ It does *not* generally hold that ``self.strides[-1] == self.itemsize``
+ for C-style contiguous arrays, or that ``self.strides[0] == self.itemsize``
+ for Fortran-style contiguous arrays.
"""))
@@ -3033,6 +3045,23 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""))
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
+ """
+ a.argpartition(kth, axis=-1, kind='introselect', order=None)
+
+ Returns the indices that would partition this array.
+
+ Refer to `numpy.argpartition` for full documentation.
+
+ .. versionadded:: 1.8.0
+
+ See Also
+ --------
+ numpy.argpartition : equivalent function
+
+ """))
+
+
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
@@ -3043,7 +3072,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
- order : {'C', 'F', 'A', or 'K'}, optional
+ order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
@@ -3079,7 +3108,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
Raises
------
- ComplexWarning :
+ ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
@@ -3106,12 +3135,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
Parameters
----------
- inplace: bool, optional
+ inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
- out: ndarray
+ out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
@@ -3636,6 +3665,32 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""))
+add_newdoc('numpy.core.multiarray', 'may_share_memory',
+ """
+ Determine if two arrays can share memory
+
+ The memory-bounds of a and b are computed. If they overlap then
+ this function returns True. Otherwise, it returns False.
+
+ A return of True does not necessarily mean that the two arrays
+ share any element. It just means that they *might*.
+
+ Parameters
+ ----------
+ a, b : ndarray
+
+ Returns
+ -------
+ out : bool
+
+ Examples
+ --------
+ >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+
+ """)
+
+
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
@@ -4163,6 +4218,58 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""))
+add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
+ """
+ a.partition(kth, axis=-1, kind='introselect', order=None)
+
+ Rearranges the elements in the array in such a way that the value of the
+ element in the kth position is in the position it would be in a sorted array.
+ All elements smaller than the kth element are moved before this element and
+ all equal or greater are moved behind it. The ordering of the elements in
+ the two partitions is undefined.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ kth : int or sequence of ints
+ Element index to partition by. The kth element value will be in its
+ final sorted position and all smaller elements will be moved before it
+ and all equal or greater elements behind it.
+ The order of all elements in the partitions is undefined.
+ If provided with a sequence of kth values, it will partition all of
+ them into their sorted positions at once.
+ axis : int, optional
+ Axis along which to sort. Default is -1, which means sort along the
+ last axis.
+ kind : {'introselect'}, optional
+ Selection algorithm. Default is 'introselect'.
+ order : list, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. Not all fields need be
+ specified.
+
+ See Also
+ --------
+ numpy.partition : Return a partitioned copy of an array.
+ argpartition : Indirect partition.
+
+ Notes
+ -----
+ See ``np.partition`` for notes on the different algorithms.
+
+ Examples
+ --------
+ >>> a = np.array([3, 4, 2, 1])
+ >>> a.partition(3)
+ >>> a
+ array([2, 1, 3, 4])
+
+ >>> a.partition((1, 3))
+ >>> a
+ array([1, 2, 3, 4])
+ """))
+
+
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
@@ -4437,10 +4544,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
Parameters
----------
- dtype : data-type, optional
- Data-type descriptor of the returned view, e.g., float32 or int16.
- The default, None, results in the view having the same data-type
- as `a`.
+ dtype : data-type or ndarray sub-class, optional
+ Data-type descriptor of the returned view, e.g., float32 or int16. The
+ default, None, results in the view having the same data-type as `a`.
+ This argument can also be specified as an ndarray sub-class, which
+ then specifies the type of the returned object (this is equivalent to
+ setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
@@ -4458,6 +4567,15 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
+ For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+ bytes per entry than the previous dtype (for example, converting a
+ regular array to a structured array), then the behavior of the view
+ cannot be predicted just from the superficial appearance of ``a`` (shown
+ by ``print(a)``). It also depends on exactly how ``a`` is stored in
+ memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
+ defined as a slice or transpose, etc., the view may give different
+ results.
+
Examples
--------
@@ -4499,6 +4617,22 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
>>> z[0]
(9, 10)
+ Views that change the dtype size (bytes per entry) should normally be
+ avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
+
+ >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
+ >>> y = x[:, 0:2]
+ >>> y
+ array([[1, 2],
+ [4, 5]], dtype=int16)
+ >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ValueError: new type not compatible with array.
+ >>> z = y.copy()
+ >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
+ array([[(1, 2)],
+ [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
@@ -5027,7 +5161,6 @@ add_newdoc('numpy.lib._compiled_base', 'add_newdoc_ufunc',
Notes
-----
-
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
@@ -5374,7 +5507,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
::
r = op.identity # op = ufunc
- for i in xrange(len(A)):
+ for i in range(len(A)):
r = op(r, A[i])
return r
@@ -5455,7 +5588,7 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate',
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
- for i in xrange(len(A)):
+ for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
@@ -5635,8 +5768,8 @@ add_newdoc('numpy.core', 'ufunc', ('outer',
For `A` and `B` one-dimensional, this is equivalent to::
r = empty(len(A),len(B))
- for i in xrange(len(A)):
- for j in xrange(len(B)):
+ for i in range(len(A)):
+ for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
@@ -5869,7 +6002,6 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
Examples
--------
-
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print dt.fields
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
@@ -5977,7 +6109,6 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('names',
Examples
--------
-
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
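Annotation: the invariant documented by the new partition/argpartition
docstrings above can be checked directly; a minimal sketch (requires the
NumPy 1.8 APIs this patch introduces):

    import numpy as np

    a = np.array([3, 4, 2, 1])
    p = np.partition(a, 2)            # element at index 2 is in sorted position
    assert p[2] == np.sort(a)[2]
    assert (p[:2] <= p[2]).all() and (p[2:] >= p[2]).all()

    idx = np.argpartition(a, 2)       # indirect version: indices, not values
    assert a[idx[2]] == np.sort(a)[2]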
diff --git a/numpy/build_utils/__init__.py b/numpy/build_utils/__init__.py
index e69de29bb..1d0f69b67 100644
--- a/numpy/build_utils/__init__.py
+++ b/numpy/build_utils/__init__.py
@@ -0,0 +1 @@
+from __future__ import division, absolute_import, print_function
diff --git a/numpy/build_utils/common.py b/numpy/build_utils/common.py
index ee93fc404..3592076a1 100644
--- a/numpy/build_utils/common.py
+++ b/numpy/build_utils/common.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
import copy
import binascii
@@ -25,13 +27,14 @@ def pyod(filename):
Parameters
----------
- filename: str
+ filename : str
name of the file to get the dump from.
Returns
-------
- out: seq
+ out : seq
list of lines of od output
+
Note
----
We only implement enough to get the necessary information for long double
diff --git a/numpy/build_utils/waf.py b/numpy/build_utils/waf.py
index 4ef71327c..263640d9e 100644
--- a/numpy/build_utils/waf.py
+++ b/numpy/build_utils/waf.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import re
@@ -5,7 +7,7 @@ import waflib.Configure
import waflib.Tools.c_config
from waflib import Logs, Utils
-from common \
+from .common \
import \
LONG_DOUBLE_REPRESENTATION_SRC, pyod, \
long_double_representation
@@ -166,7 +168,7 @@ int main ()
try:
conf.run_c_code(**kw)
- except conf.errors.ConfigurationError, e:
+ except conf.errors.ConfigurationError as e:
conf.end_msg("failed !")
if waflib.Logs.verbose > 1:
raise
diff --git a/numpy/compat/__init__.py b/numpy/compat/__init__.py
index 9b4261616..5b371f5c0 100644
--- a/numpy/compat/__init__.py
+++ b/numpy/compat/__init__.py
@@ -8,10 +8,12 @@ extensions, which may be included for the following reasons:
* we may only need a small subset of the copied library/module
"""
-import _inspect
-import py3k
-from _inspect import getargspec, formatargspec
-from py3k import *
+from __future__ import division, absolute_import, print_function
+
+from . import _inspect
+from . import py3k
+from ._inspect import getargspec, formatargspec
+from .py3k import *
__all__ = []
__all__.extend(_inspect.__all__)
diff --git a/numpy/compat/_inspect.py b/numpy/compat/_inspect.py
index 612d1e147..6a499e727 100644
--- a/numpy/compat/_inspect.py
+++ b/numpy/compat/_inspect.py
@@ -3,7 +3,9 @@
We use this instead of upstream because upstream inspect is slow to import, and
significantly contributes to numpy import times. Importing this copy has almost
no overhead.
+
"""
+from __future__ import division, absolute_import, print_function
import types
@@ -125,11 +127,11 @@ def getargspec(func):
"""
if ismethod(func):
- func = func.im_func
+ func = func.__func__
if not isfunction(func):
raise TypeError('arg is not a Python function')
- args, varargs, varkw = getargs(func.func_code)
- return args, varargs, varkw, func.func_defaults
+ args, varargs, varkw = getargs(func.__code__)
+ return args, varargs, varkw, func.__defaults__
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
@@ -149,8 +151,8 @@ def joinseq(seq):
def strseq(object, convert, join=joinseq):
"""Recursively walk a sequence, stringifying each element."""
- if type(object) in [types.ListType, types.TupleType]:
- return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object))
+ if type(object) in [list, tuple]:
+ return join([strseq(_o, convert, join) for _o in object])
else:
return convert(object)
@@ -209,11 +211,11 @@ if __name__ == '__main__':
def foo(x, y, z=None):
return None
- print inspect.getargs(foo.func_code)
- print getargs(foo.func_code)
+ print(inspect.getargs(foo.__code__))
+ print(getargs(foo.__code__))
- print inspect.getargspec(foo)
- print getargspec(foo)
+ print(inspect.getargspec(foo))
+ print(getargspec(foo))
- print inspect.formatargspec(*inspect.getargspec(foo))
- print formatargspec(*getargspec(foo))
+ print(inspect.formatargspec(*inspect.getargspec(foo)))
+ print(formatargspec(*getargspec(foo)))
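Annotation: the attribute renames driving this hunk (im_func -> __func__,
func_code -> __code__, func_defaults -> __defaults__) can be observed on any
plain function; a small sketch:

    def foo(x, y, z=None):
        return None

    code = foo.__code__                                  # foo.func_code on 2.x
    assert code.co_varnames[:code.co_argcount] == ('x', 'y', 'z')
    assert foo.__defaults__ == (None,)                   # foo.func_defaults on 2.x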
diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py
index 0a03929be..e7ab9bf0f 100644
--- a/numpy/compat/py3k.py
+++ b/numpy/compat/py3k.py
@@ -2,17 +2,22 @@
Python 3 compatibility tools.
"""
+from __future__ import division, absolute_import, print_function
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
- 'asstr', 'open_latin1']
+ 'asstr', 'open_latin1', 'long', 'basestring', 'sixu']
import sys
if sys.version_info[0] >= 3:
import io
- bytes = bytes
+
+ long = int
+ integer_types = (int,)
+ basestring = str
unicode = str
+ bytes = bytes
def asunicode(s):
if isinstance(s, bytes):
@@ -35,15 +40,23 @@ if sys.version_info[0] >= 3:
def open_latin1(filename, mode='r'):
return open(filename, mode=mode, encoding='iso-8859-1')
+ def sixu(s):
+ return s
+
strchar = 'U'
+
else:
bytes = str
+ long = long
+ basestring = basestring
unicode = unicode
+ integer_types = (int, long)
asbytes = str
asstr = str
strchar = 'S'
+
def isfileobj(f):
return isinstance(f, file)
@@ -55,6 +68,10 @@ else:
def open_latin1(filename, mode='r'):
return open(filename, mode=mode)
+ def sixu(s):
+ return unicode(s, 'unicode_escape')
+
+
def getexception():
return sys.exc_info()[1]
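Annotation: how the new shims read at a call site; a sketch assuming the
post-patch numpy.compat exports, shown running on Python 3:

    from numpy.compat import asbytes, asstr, long, sixu

    assert asbytes('abc') == b'abc'      # str -> bytes (latin-1 on Python 3)
    assert asstr(b'abc') == 'abc'        # bytes -> str
    assert long is int                   # the 'long' name stays importable
    assert sixu('\u0394') == '\u0394'    # identity on 3.x; unicode_escape on 2.x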
diff --git a/numpy/compat/setup.py b/numpy/compat/setup.py
index 4e0781085..ce0ad8c72 100644
--- a/numpy/compat/setup.py
+++ b/numpy/compat/setup.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, print_function
+
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/compat/setupscons.py b/numpy/compat/setupscons.py
deleted file mode 100644
index e518245b2..000000000
--- a/numpy/compat/setupscons.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-import os.path
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('compat',parent_package,top_path)
- return config
-
-if __name__ == '__main__':
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/core/SConscript b/numpy/core/SConscript
deleted file mode 100644
index 952544a25..000000000
--- a/numpy/core/SConscript
+++ /dev/null
@@ -1,531 +0,0 @@
-# Last Change: Sun Apr 26 05:00 PM 2009 J
-# vim:syntax=python
-import os
-import sys
-from os.path import join as pjoin, basename as pbasename, dirname as pdirname
-from copy import deepcopy
-
-from numscons import get_pythonlib_dir
-from numscons import GetNumpyEnvironment
-from numscons import CheckCBLAS
-from numscons import write_info
-
-from code_generators.numpy_api import \
- multiarray_api as multiarray_api_dict, \
- ufunc_api as ufunc_api_dict
-
-from setup_common import *
-from scons_support import CheckBrokenMathlib, define_no_smp, \
- check_mlib, check_mlibs, is_npy_no_signal, CheckInline
-from scons_support import array_api_gen_bld, ufunc_api_gen_bld, template_bld, \
- umath_bld, CheckGCC4, check_api_version, \
- CheckLongDoubleRepresentation
-
-import SCons
-
-# Set to True to enable multiple file compilations (experimental)
-try:
- os.environ['NPY_SEPARATE_COMPILATION']
- ENABLE_SEPARATE_COMPILATION = True
-except KeyError:
- ENABLE_SEPARATE_COMPILATION = False
-try:
- os.environ['NPY_BYPASS_SINGLE_EXTENDED']
- BYPASS_SINGLE_EXTENDED = True
-except KeyError:
- BYPASS_SINGLE_EXTENDED = False
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Append(CPPPATH = env["PYEXTCPPPATH"])
-if os.name == 'nt':
- # NT needs the pythonlib to run any code importing Python.h, including
- # simple code using only typedef and so on, so we need it for configuration
- # checks
- env.AppendUnique(LIBPATH = [get_pythonlib_dir()])
-
-# Check whether we have a mismatch between the set C API VERSION and the
-# actual C API VERSION
-check_api_version(C_API_VERSION)
-
-#=======================
-# Starting Configuration
-#=======================
-config = env.NumpyConfigure(custom_tests = {'CheckBrokenMathlib' : CheckBrokenMathlib,
- 'CheckCBLAS' : CheckCBLAS, 'CheckInline': CheckInline, 'CheckGCC4' : CheckGCC4,
- 'CheckLongDoubleRepresentation': CheckLongDoubleRepresentation},
- config_h = pjoin('config.h'))
-
-# numpyconfig_sym will keep the values of some configuration variables, the one
-# needed for the public numpy API.
-
-# Convention: list of tuples (definition, value). value:
-# - 0: #undef definition
-# - 1: #define definition
-# - string: #define definition value
-numpyconfig_sym = []
-
-#---------------
-# Checking Types
-#---------------
-if not config.CheckHeader("Python.h"):
- errmsg = []
- for line in config.GetLastError():
- errmsg.append("%s " % line)
- print """
-Error: Python.h header cannot be compiled (or cannot be found).
-On linux, check that you have python-dev/python-devel packages. On windows,
-check that you have he platform SDK. You may also use unsupported cflags.
-Configuration error log says: \n\n%s""" % ''.join(errmsg)
- Exit(-1)
-
-st = config.CheckHeader("endian.h")
-if st:
- numpyconfig_sym.append(('DEFINE_NPY_HAVE_ENDIAN_H', '#define NPY_HAVE_ENDIAN_H 1'))
-else:
- numpyconfig_sym.append(('DEFINE_NPY_HAVE_ENDIAN_H', ''))
-
-def check_type(type, include = None):
- st = config.CheckTypeSize(type, includes = include)
- type = type.replace(' ', '_')
- if st:
- numpyconfig_sym.append(('SIZEOF_%s' % type.upper(), '%d' % st))
- else:
- numpyconfig_sym.append(('SIZEOF_%s' % type.upper(), 0))
-
-for type in ('short', 'int', 'long'):
- # SIZEOF_LONG defined on darwin
- if type == "long":
- if not config.CheckDeclaration("SIZEOF_LONG", includes="#include <Python.h>"):
- check_type(type)
- else:
- numpyconfig_sym.append(('SIZEOF_LONG', 'SIZEOF_LONG'))
- else:
- check_type(type)
-
-for type in ('float', 'double', 'long double'):
- sz = config.CheckTypeSize(type)
- numpyconfig_sym.append(('SIZEOF_%s' % type2def(type), str(sz)))
-
- # Compute size of corresponding complex type: used to check that our
- # definition is binary compatible with C99 complex type (check done at
- # build time in npy_common.h)
- complex_def = "struct {%s __x; %s __y;}" % (type, type)
- sz = config.CheckTypeSize(complex_def)
- numpyconfig_sym.append(('SIZEOF_COMPLEX_%s' % type2def(type), str(sz)))
-
-if sys.platform != 'darwin':
- tp = config.CheckLongDoubleRepresentation()
- config.Define("HAVE_LDOUBLE_%s" % tp, 1,
- "Define for arch-specific long double representation")
-
-for type in ('Py_intptr_t',):
- check_type(type, include = "#include <Python.h>\n")
-
-# We check declaration AND type because that's how distutils does it.
-if config.CheckDeclaration('PY_LONG_LONG', includes = '#include <Python.h>\n'):
- st = config.CheckTypeSize('PY_LONG_LONG',
- includes = '#include <Python.h>\n')
- assert not st == 0
- numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_LONGLONG',
- '#define NPY_SIZEOF_LONGLONG %d' % st))
- numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_PY_LONG_LONG',
- '#define NPY_SIZEOF_PY_LONG_LONG %d' % st))
-else:
- numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_LONGLONG', ''))
- numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_PY_LONG_LONG', ''))
-
-if not config.CheckDeclaration('CHAR_BIT', includes= '#include <Python.h>\n'):
- raise RuntimeError(\
-"""Config wo CHAR_BIT is not supported with scons: please contact the
-maintainer (cdavid)""")
-
-#----------------------
-# Checking signal stuff
-#----------------------
-if is_npy_no_signal():
- numpyconfig_sym.append(('DEFINE_NPY_NO_SIGNAL', '#define NPY_NO_SIGNAL\n'))
- config.Define('__NPY_PRIVATE_NO_SIGNAL',
- comment = "define to 1 to disable SMP support ")
-else:
- numpyconfig_sym.append(('DEFINE_NPY_NO_SIGNAL', ''))
-
-#---------------------
-# Checking SMP option
-#---------------------
-if define_no_smp():
- nosmp = 1
-else:
- nosmp = 0
-numpyconfig_sym.append(('NPY_NO_SMP', nosmp))
-
-#----------------------------------------------
-# Check whether we can use C99 printing formats
-#----------------------------------------------
-if config.CheckDeclaration(('PRIdPTR'), includes = '#include <inttypes.h>'):
- numpyconfig_sym.append(('DEFINE_NPY_USE_C99_FORMATS', '#define NPY_USE_C99_FORMATS 1'))
-else:
- numpyconfig_sym.append(('DEFINE_NPY_USE_C99_FORMATS', ''))
-
-#----------------------
-# Checking the mathlib
-#----------------------
-mlibs = [[], ['m'], ['cpml']]
-mathlib = os.environ.get('MATHLIB')
-if mathlib:
- mlibs.insert(0, mathlib)
-
-mlib = check_mlibs(config, mlibs)
-
-# XXX: this is ugly: mathlib has nothing to do in a public header file
-numpyconfig_sym.append(('MATHLIB', ','.join(mlib)))
-
-#----------------------------------
-# Checking the math funcs available
-#----------------------------------
-# Function to check:
-mfuncs = ('expl', 'expf', 'log1p', 'expm1', 'asinh', 'atanhf', 'atanhl',
- 'rint', 'trunc')
-
-# Set value to 1 for each defined function (in math lib)
-mfuncs_defined = dict([(f, 0) for f in mfuncs])
-
-# Check for mandatory funcs: we barf if a single one of those is not there
-if not config.CheckFuncsAtOnce(MANDATORY_FUNCS):
- raise SystemError("One of the required function to build numpy is not"
- " available (the list is %s)." % str(MANDATORY_FUNCS))
-
-# Standard functions which may not be available and for which we have a
-# replacement implementation
-#
-def check_funcs(funcs):
- # Use check_funcs_once first, and if it does not work, test func per
- # func. Return success only if all the functions are available
- st = config.CheckFuncsAtOnce(funcs)
- if not st:
- # Global check failed, check func per func
- for f in funcs:
- st = config.CheckFunc(f, language = 'C')
-
-for f in OPTIONAL_STDFUNCS_MAYBE:
- if config.CheckDeclaration(fname2def(f),
- includes="#include <Python.h>\n#include <math.h>"):
- OPTIONAL_STDFUNCS.remove(f)
-check_funcs(OPTIONAL_STDFUNCS)
-
-# C99 functions: float and long double versions
-if not BYPASS_SINGLE_EXTENDED:
- check_funcs(C99_FUNCS_SINGLE)
- check_funcs(C99_FUNCS_EXTENDED)
-
-# Normally, isnan and isinf are macro (C99), but some platforms only have
-# func, or both func and macro version. Check for macro only, and define
-# replacement ones if not found.
-# Note: including Python.h is necessary because it modifies some math.h
-# definitions
-for f in ["isnan", "isinf", "signbit", "isfinite"]:
- includes = """\
-#include <Python.h>
-#include <math.h>
-"""
- st = config.CheckDeclaration(f, includes=includes)
- if st:
- numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(),
- '#define NPY_HAVE_DECL_%s' % f.upper()))
- else:
- numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(), ''))
-
-inline = config.CheckInline()
-config.Define('inline', inline)
-
-
-if ENABLE_SEPARATE_COMPILATION:
- config.Define("ENABLE_SEPARATE_COMPILATION", 1)
- numpyconfig_sym.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', '#define NPY_ENABLE_SEPARATE_COMPILATION 1'))
-else:
- numpyconfig_sym.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', ''))
-
-#-----------------------------
-# Checking for complex support
-#-----------------------------
-if config.CheckHeader('complex.h'):
- numpyconfig_sym.append(('DEFINE_NPY_USE_C99_COMPLEX', '#define NPY_USE_C99_COMPLEX 1'))
-
- for t in C99_COMPLEX_TYPES:
- st = config.CheckType(t, includes='#include <complex.h>')
- if st:
- numpyconfig_sym.append(('DEFINE_NPY_HAVE_%s' % type2def(t),
- '#define NPY_HAVE_%s' % type2def(t)))
- else:
- numpyconfig_sym.append(('DEFINE_NPY_HAVE_%s' % type2def(t), ''))
-
- def check_prec(prec):
- flist = [f + prec for f in C99_COMPLEX_FUNCS]
- st = config.CheckFuncsAtOnce(flist)
- if not st:
- # Global check failed, check func per func
- for f in flist:
- config.CheckFunc(f, language='C')
-
- check_prec('')
- check_prec('f')
- check_prec('l')
-
-else:
- numpyconfig_sym.append(('DEFINE_NPY_USE_C99_COMPLEX', ''))
- for t in C99_COMPLEX_TYPES:
- numpyconfig_sym.append(('DEFINE_NPY_HAVE_%s' % type2def(t), ''))
-
-def visibility_define():
- if config.CheckGCC4():
- return '__attribute__((visibility("hidden")))'
- else:
- return ''
-
-numpyconfig_sym.append(('VISIBILITY_HIDDEN', visibility_define()))
-
-# Add the C API/ABI versions
-numpyconfig_sym.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
-numpyconfig_sym.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
-
-# Check whether we need our own wide character support
-if not config.CheckDeclaration('Py_UNICODE_WIDE', includes='#include <Python.h>'):
- PYTHON_HAS_UNICODE_WIDE = True
-else:
- PYTHON_HAS_UNICODE_WIDE = False
-
-#-------------------------------------------------------
-# Define the function PyOS_ascii_strod if not available
-#-------------------------------------------------------
-if not config.CheckDeclaration('PyOS_ascii_strtod',
- includes = "#include <Python.h>"):
- if config.CheckFunc('strtod'):
- config.Define('PyOS_ascii_strtod', 'strtod',
- "Define to a function to use as a replacement for "\
- "PyOS_ascii_strtod if not available in python header")
-
-#------------------------------------
-# DISTUTILS Hack on AMD64 on windows
-#------------------------------------
-# XXX: this is ugly
-if sys.platform=='win32' or os.name=='nt':
- from distutils.msvccompiler import get_build_architecture
- a = get_build_architecture()
- print 'BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % \
- (a, os.name, sys.platform)
- if a == 'AMD64':
- distutils_use_sdk = 1
- config.Define('DISTUTILS_USE_SDK', distutils_use_sdk,
- "define to 1 to disable SMP support ")
-
- if a == "Intel":
- config.Define('FORCE_NO_LONG_DOUBLE_FORMATTING', 1,
- "define to 1 to force long double format string to the" \
- " same as double (Lg -> g)")
-#--------------
-# Checking Blas
-#--------------
-if config.CheckCBLAS():
- build_blasdot = 1
-else:
- build_blasdot = 0
-
-config.config_h_text += """
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
-"""
-
-config.Finish()
-write_info(env)
-
-#==========
-# Build
-#==========
-
-# List of headers which need to be "installed " into the build directory for
-# proper in-place build support
-generated_headers = []
-
-#---------------------------------------
-# Generate the public configuration file
-#---------------------------------------
-config_dict = {}
-# XXX: this is ugly, make the API for config.h and numpyconfig.h similar
-for key, value in numpyconfig_sym:
- config_dict['@%s@' % key] = str(value)
-env['SUBST_DICT'] = config_dict
-
-include_dir = 'include/numpy'
-target = env.SubstInFile(pjoin(include_dir, '_numpyconfig.h'),
- pjoin(include_dir, '_numpyconfig.h.in'))
-generated_headers.append(target[0])
-
-env['CONFIG_H_GEN'] = numpyconfig_sym
-
-#---------------------------
-# Builder for generated code
-#---------------------------
-env.Append(BUILDERS = {'GenerateMultiarrayApi' : array_api_gen_bld,
- 'GenerateUfuncApi' : ufunc_api_gen_bld,
- 'GenerateFromTemplate' : template_bld,
- 'GenerateUmath' : umath_bld})
-
-#------------------------
-# Generate generated code
-#------------------------
-scalartypes_src = env.GenerateFromTemplate(
- pjoin('src', 'multiarray', 'scalartypes.c.src'))
-umath_funcs_src = env.GenerateFromTemplate(pjoin('src', 'umath', 'funcs.inc.src'))
-umath_loops_src = env.GenerateFromTemplate(pjoin('src', 'umath', 'loops.c.src'))
-arraytypes_src = env.GenerateFromTemplate(
- pjoin('src', 'multiarray', 'arraytypes.c.src'))
-nditer_src = env.GenerateFromTemplate(
- pjoin('src', 'multiarray', 'nditer_templ.c.src'))
-lowlevel_strided_loops_src = env.GenerateFromTemplate(
- pjoin('src', 'multiarray', 'lowlevel_strided_loops.c.src'))
-einsum_src = env.GenerateFromTemplate(pjoin('src', 'multiarray', 'einsum.c.src'))
-umath_tests_src = env.GenerateFromTemplate(pjoin('src', 'umath',
- 'umath_tests.c.src'))
-multiarray_tests_src = env.GenerateFromTemplate(pjoin('src', 'multiarray',
- 'multiarray_tests.c.src'))
-scalarmathmodule_src = env.GenerateFromTemplate(
- pjoin('src', 'scalarmathmodule.c.src'))
-
-umath = env.GenerateUmath('__umath_generated',
- pjoin('code_generators', 'generate_umath.py'))
-
-multiarray_api = env.GenerateMultiarrayApi('include/numpy/multiarray_api',
- [SCons.Node.Python.Value(d) for d in multiarray_api_dict])
-generated_headers.append(multiarray_api[0])
-
-ufunc_api = env.GenerateUfuncApi('include/numpy/ufunc_api',
- [SCons.Node.Python.Value(d) for d in ufunc_api_dict])
-generated_headers.append(ufunc_api[0])
-
-# include/numpy is added for compatibility reasons with distutils: this is
-# needed for __multiarray_api.c and __ufunc_api.c included from multiarray and
-# ufunc.
-env.Prepend(CPPPATH = ['src/private', 'include', '.', 'include/numpy'])
-
-# npymath core lib
-npymath_src = [env.GenerateFromTemplate(pjoin('src', 'npymath', 'npy_math.c.src')),
- env.GenerateFromTemplate(pjoin('src', 'npymath', 'npy_math_complex.c.src')),
- env.GenerateFromTemplate(pjoin('src', 'npymath', 'ieee754.c.src')),
- pjoin('src', 'npymath', 'halffloat.c')]
-env.DistutilsInstalledStaticExtLibrary("npymath", npymath_src, install_dir='lib')
-env.Prepend(LIBS=["npymath"])
-env.Prepend(LIBPATH=["."])
-
-subst_dict = {'@prefix@': '$distutils_install_prefix',
- '@pkgname@': 'numpy.core', '@sep@': os.path.sep}
-npymath_ini = env.SubstInFile(pjoin('lib', 'npy-pkg-config', 'npymath.ini'),
- 'npymath.ini.in', SUBST_DICT=subst_dict)
-
-subst_dict = {'@posix_mathlib@': " ".join(['-l%s' % l for l in mlib]),
- '@msvc_mathlib@': " ".join(['%s.mlib' % l for l in mlib])}
-mlib_ini = env.SubstInFile(pjoin('lib', 'npy-pkg-config', 'mlib.ini'),
- 'mlib.ini.in', SUBST_DICT=subst_dict)
-env.Install('$distutils_installdir/lib/npy-pkg-config', mlib_ini)
-env.Install('$distutils_installdir/lib/npy-pkg-config', npymath_ini)
-
-# npysort core lib
-npysort_src = [env.GenerateFromTemplate(pjoin('src', 'npysort', 'quicksort.c.src')),
- env.GenerateFromTemplate(pjoin('src', 'npysort', 'mergesort.c.src')),
- env.GenerateFromTemplate(pjoin('src', 'npysort', 'heapsort.c.src'))]
-env.StaticExtLibrary("npysort", npysort_src)
-env.Prepend(LIBS=["npysort"])
-env.Prepend(LIBPATH=["."])
-
-#-----------------
-# Build multiarray
-#-----------------
-if ENABLE_SEPARATE_COMPILATION:
- multiarray_src = [pjoin('src', 'multiarray', 'multiarraymodule.c'),
- pjoin('src', 'multiarray', 'hashdescr.c'),
- pjoin('src', 'multiarray', 'arrayobject.c'),
- pjoin('src', 'multiarray', 'array_assign.c'),
- pjoin('src', 'multiarray', 'array_assign_scalar.c'),
- pjoin('src', 'multiarray', 'array_assign_array.c'),
- pjoin('src', 'multiarray', 'datetime.c'),
- pjoin('src', 'multiarray', 'datetime_strings.c'),
- pjoin('src', 'multiarray', 'datetime_busday.c'),
- pjoin('src', 'multiarray', 'datetime_busdaycal.c'),
- pjoin('src', 'multiarray', 'numpyos.c'),
- pjoin('src', 'multiarray', 'flagsobject.c'),
- pjoin('src', 'multiarray', 'descriptor.c'),
- pjoin('src', 'multiarray', 'iterators.c'),
- pjoin('src', 'multiarray', 'mapping.c'),
- pjoin('src', 'multiarray', 'number.c'),
- pjoin('src', 'multiarray', 'getset.c'),
- pjoin('src', 'multiarray', 'sequence.c'),
- pjoin('src', 'multiarray', 'methods.c'),
- pjoin('src', 'multiarray', 'ctors.c'),
- pjoin('src', 'multiarray', 'convert_datatype.c'),
- pjoin('src', 'multiarray', 'convert.c'),
- pjoin('src', 'multiarray', 'shape.c'),
- pjoin('src', 'multiarray', 'item_selection.c'),
- pjoin('src', 'multiarray', 'calculation.c'),
- pjoin('src', 'multiarray', 'common.c'),
- pjoin('src', 'multiarray', 'refcount.c'),
- pjoin('src', 'multiarray', 'conversion_utils.c'),
- pjoin('src', 'multiarray', 'usertypes.c'),
- pjoin('src', 'multiarray', 'buffer.c'),
- pjoin('src', 'multiarray', 'numpymemoryview.c'),
- pjoin('src', 'multiarray', 'scalarapi.c'),
- pjoin('src', 'multiarray', 'nditer_api.c'),
- pjoin('src', 'multiarray', 'nditer_constr.c'),
- pjoin('src', 'multiarray', 'nditer_pywrap.c'),
- pjoin('src', 'multiarray', 'dtype_transfer.c'),
- pjoin("src", "multiarray", "ucsnarrow.c")]
- multiarray_src.extend(arraytypes_src)
- multiarray_src.extend(scalartypes_src)
- multiarray_src.extend(lowlevel_strided_loops_src)
- multiarray_src.extend(nditer_src)
- multiarray_src.extend(einsum_src)
-else:
- multiarray_src = [pjoin('src', 'multiarray', 'multiarraymodule_onefile.c')]
-multiarray = env.DistutilsPythonExtension('multiarray', source = multiarray_src)
-env.DistutilsPythonExtension('multiarray_tests', source=multiarray_tests_src)
-
-#-------------------
-# Build umath module
-#-------------------
-if ENABLE_SEPARATE_COMPILATION:
- umathmodule_src.extend([pjoin('src', 'umath', 'ufunc_object.c')])
- umathmodule_src.extend([pjoin('src', 'umath', 'ufunc_type_resolution.c')])
- umathmodule_src.extend([pjoin('src', 'multiarray', 'reduction.c')]),
- umathmodule_src.extend(umath_loops_src)
-else:
- umathmodule_src = [pjoin('src', 'umath', 'umathmodule_onefile.c')]
-umathmodule = env.DistutilsPythonExtension('umath', source = umathmodule_src)
-
-#------------------------
-# Build scalarmath module
-#------------------------
-scalarmathmodule = env.DistutilsPythonExtension('scalarmath',
- source = scalarmathmodule_src)
-
-#------------------------
-# Build scalarmath module
-#------------------------
-umath_tests = env.DistutilsPythonExtension('umath_tests',
- source=umath_tests_src)
-
-#----------------------
-# Build _dotblas module
-#----------------------
-if build_blasdot:
- dotblas_src = [pjoin('blasdot', i) for i in ['_dotblas.c']]
- # because _dotblas does #include CBLAS_HEADER instead of #include
- # "cblas.h", scons does not detect the dependency
- # XXX: PythonExtension builder does not take the Depends on extension into
- # account for some reason, so we first build the object, with forced
- # dependency, and then builds the extension. This is more likely a bug in
- # our PythonExtension builder, but I cannot see how to solve it.
- dotblas_o = env.PythonObject('_dotblas', source = dotblas_src)
- env.Depends(dotblas_o, pjoin("blasdot", "cblas.h"))
- dotblas = env.DistutilsPythonExtension('_dotblas', dotblas_o)
-
-# "Install" the header in the build directory, so that in-place build works
-for h in generated_headers:
- env.Install(pjoin('$distutils_installdir', include_dir), h)
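The SConscript deleted above generated _numpyconfig.h by substituting @KEY@
markers collected in numpyconfig_sym into a template header (env.SubstInFile).
A minimal sketch of that substitution step in plain Python; the file names and
keys here are illustrative only:

    # Sketch: expand @KEY@ markers in a config-header template.
    subst = {
        '@DEFINE_NPY_USE_C99_COMPLEX@': '#define NPY_USE_C99_COMPLEX 1',
        '@NPY_ABI_VERSION@': '0x01000009',
    }
    with open('_numpyconfig.h.in') as f:
        text = f.read()
    for key, value in subst.items():
        text = text.replace(key, value)
    with open('_numpyconfig.h', 'w') as f:
        f.write(text)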
diff --git a/numpy/core/SConstruct b/numpy/core/SConstruct
deleted file mode 100644
index a377d8391..000000000
--- a/numpy/core/SConstruct
+++ /dev/null
@@ -1,2 +0,0 @@
-from numscons import GetInitEnvironment
-GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript')
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index 5d1599111..50af6e548 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -1,35 +1,36 @@
+from __future__ import division, absolute_import, print_function
-from info import __doc__
+from .info import __doc__
from numpy.version import version as __version__
-import multiarray
-import umath
-import _internal # for freeze programs
-import numerictypes as nt
+from . import multiarray
+from . import umath
+from . import _internal # for freeze programs
+from . import numerictypes as nt
multiarray.set_typeDict(nt.sctypeDict)
-import numeric
-from numeric import *
-import fromnumeric
-from fromnumeric import *
-import defchararray as char
-import records as rec
-from records import *
-from memmap import *
-from defchararray import chararray
-import scalarmath
-import function_base
-from function_base import *
-import machar
-from machar import *
-import getlimits
-from getlimits import *
-import shape_base
-from shape_base import *
+from . import numeric
+from .numeric import *
+from . import fromnumeric
+from .fromnumeric import *
+from . import defchararray as char
+from . import records as rec
+from .records import *
+from .memmap import *
+from .defchararray import chararray
+from . import scalarmath
+from . import function_base
+from .function_base import *
+from . import machar
+from .machar import *
+from . import getlimits
+from .getlimits import *
+from . import shape_base
+from .shape_base import *
del nt
-from fromnumeric import amax as max, amin as min, \
+from .fromnumeric import amax as max, amin as min, \
round_ as round
-from numeric import absolute as abs
+from .numeric import absolute as abs
__all__ = ['char','rec','memmap']
__all__ += numeric.__all__
@@ -61,10 +62,10 @@ def _ufunc_reduce(func):
import sys
-if sys.version_info[0] < 3:
- import copy_reg as copyreg
-else:
+if sys.version_info[0] >= 3:
import copyreg
+else:
+ import copy_reg as copyreg
copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)
# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)
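The reordered version check keeps ufunc pickling working on both Python lines:
copyreg is the Python 3 name, copy_reg the Python 2 one. The same pattern in
isolation (a minimal sketch, not numpy's exact code):

    import sys
    if sys.version_info[0] >= 3:
        import copyreg
    else:
        import copy_reg as copyreg

    # copyreg.pickle(cls, reduce_func) registers a custom reducer for cls;
    # this is how numpy.core makes ufunc objects picklable.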
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 686b773f2..8046de149 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -1,5 +1,10 @@
-#A place for code to be called from C-code
-# that implements more complicated stuff.
+"""
+A place for code to be called from core C-code.
+
+Some things are more easily handled in Python.
+
+"""
+from __future__ import division, absolute_import, print_function
import re
import sys
@@ -13,9 +18,9 @@ else:
_nbo = asbytes('>')
def _makenames_list(adict, align):
- from multiarray import dtype
+ from .multiarray import dtype
allfields = []
- fnames = adict.keys()
+ fnames = list(adict.keys())
for fname in fnames:
obj = adict[fname]
n = len(obj)
@@ -47,7 +52,7 @@ def _makenames_list(adict, align):
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
- from multiarray import dtype
+ from .multiarray import dtype
try:
names = adict[-1]
except KeyError:
@@ -125,7 +130,7 @@ def _array_descr(descriptor):
# so don't remove the name here, or you'll
# break backward compatibilty.
def _reconstruct(subtype, shape, dtype):
- from multiarray import ndarray
+ from .multiarray import ndarray
return ndarray.__new__(subtype, shape, dtype)
@@ -189,7 +194,7 @@ def _commastring(astr):
return result
def _getintp_ctype():
- from multiarray import dtype
+ from .multiarray import dtype
val = _getintp_ctype.cache
if val is not None:
return val
@@ -285,7 +290,7 @@ def _newnames(datatype, order):
# Given an array with fields and a sequence of field names
# construct a new array with just those fields copied over
def _index_fields(ary, fields):
- from multiarray import empty, dtype, array
+ from .multiarray import empty, dtype, array
dt = ary.dtype
names = [name for name in fields if name in dt.names]
@@ -409,7 +414,7 @@ def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
itemsize = 1
if spec[0].isdigit():
j = 1
- for j in xrange(1, len(spec)):
+ for j in range(1, len(spec)):
if not spec[j].isdigit():
break
itemsize = int(spec[:j])
@@ -500,7 +505,7 @@ def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
offset += extra_offset
# Check if this was a simple 1-item type
- if len(fields.keys()) == 1 and not explicit_name and fields['f0'][1] == 0 \
+ if len(fields) == 1 and not explicit_name and fields['f0'][1] == 0 \
and not is_subdtype:
ret = fields['f0'][0]
else:
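Most of these _internal.py changes exist because Python 3 made dict.keys()
return a view instead of a list and removed xrange. A small illustration of
why the explicit list(...) matters when the result is indexed or sorted:

    d = {'b': 2, 'a': 1}
    keys = list(d.keys())       # a real list on both Python 2 and 3
    keys.sort()                 # a dict view has no sort(); a list does
    for i in range(len(keys)):  # range replaces Python 2's xrange
        print(keys[i], d[keys[i]])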
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index d3c150ac8..27e445a54 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -1,9 +1,16 @@
-# Array methods which are called by the both the C-code for the method
-# and the Python code for the NumPy-namespace function
+"""
+Array methods which are called by both the C-code for the method
+and the Python code for the NumPy-namespace function
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import warnings
from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core.numeric import asanyarray
+from numpy.core import numerictypes as nt
def _amax(a, axis=None, out=None, keepdims=False):
return um.maximum.reduce(a, axis=axis,
@@ -31,7 +38,7 @@ def _all(a, axis=None, dtype=None, out=None, keepdims=False):
def _count_reduce_items(arr, axis):
if axis is None:
- axis = tuple(xrange(arr.ndim))
+ axis = tuple(range(arr.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
items = 1
@@ -42,58 +49,65 @@ def _count_reduce_items(arr, axis):
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
arr = asanyarray(a)
- # Upgrade bool, unsigned int, and int to float64
- if dtype is None and arr.dtype.kind in ['b','u','i']:
- ret = um.add.reduce(arr, axis=axis, dtype='f8',
- out=out, keepdims=keepdims)
- else:
- ret = um.add.reduce(arr, axis=axis, dtype=dtype,
- out=out, keepdims=keepdims)
rcount = _count_reduce_items(arr, axis)
+ # Make this warning show up first
+ if rcount == 0:
+ warnings.warn("Mean of empty slice.", RuntimeWarning)
+
+
+ # Cast bool, unsigned int, and int to float64 by default
+ if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
+ dtype = mu.dtype('f8')
+
+ ret = um.add.reduce(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if isinstance(ret, mu.ndarray):
- ret = um.true_divide(ret, rcount,
- out=ret, casting='unsafe', subok=False)
+ ret = um.true_divide(
+ ret, rcount, out=ret, casting='unsafe', subok=False)
else:
- ret = ret / float(rcount)
+ ret = ret.dtype.type(ret / rcount)
+
return ret
-def _var(a, axis=None, dtype=None, out=None, ddof=0,
- keepdims=False):
+def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
arr = asanyarray(a)
- # First compute the mean, saving 'rcount' for reuse later
- if dtype is None and arr.dtype.kind in ['b','u','i']:
- arrmean = um.add.reduce(arr, axis=axis, dtype='f8', keepdims=True)
- else:
- arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True)
rcount = _count_reduce_items(arr, axis)
+ # Make this warning show up on top.
+ if ddof >= rcount:
+ warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
+
+ # Cast bool, unsigned int, and int to float64 by default
+ if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
+ dtype = mu.dtype('f8')
+
+ # Compute the mean.
+ # Note that if dtype is not of inexact type then arraymean will
+ # not be either.
+ arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True)
if isinstance(arrmean, mu.ndarray):
- arrmean = um.true_divide(arrmean, rcount,
- out=arrmean, casting='unsafe', subok=False)
+ arrmean = um.true_divide(
+ arrmean, rcount, out=arrmean, casting='unsafe', subok=False)
else:
- arrmean = arrmean / float(rcount)
+ arrmean = arrmean.dtype.type(arrmean / rcount)
- # arr - arrmean
+ # Compute sum of squared deviations from mean
+ # Note that x may not be inexact
x = arr - arrmean
-
- # (arr - arrmean) ** 2
- if arr.dtype.kind == 'c':
+ if issubclass(arr.dtype.type, nt.complexfloating):
x = um.multiply(x, um.conjugate(x), out=x).real
else:
x = um.multiply(x, x, out=x)
-
- # add.reduce((arr - arrmean) ** 2, axis)
ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
- # add.reduce((arr - arrmean) ** 2, axis) / (n - ddof)
- if not keepdims and isinstance(rcount, mu.ndarray):
- rcount = rcount.squeeze(axis=axis)
- rcount -= ddof
+ # Compute degrees of freedom and make sure it is not negative.
+ rcount = max([rcount - ddof, 0])
+
+ # divide by degrees of freedom
if isinstance(ret, mu.ndarray):
- ret = um.true_divide(ret, rcount,
- out=ret, casting='unsafe', subok=False)
+ ret = um.true_divide(
+ ret, rcount, out=ret, casting='unsafe', subok=False)
else:
- ret = ret / float(rcount)
+ ret = ret.dtype.type(ret / rcount)
return ret
@@ -104,6 +118,7 @@ def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if isinstance(ret, mu.ndarray):
ret = um.sqrt(ret, out=ret)
else:
- ret = um.sqrt(ret)
+ ret = ret.dtype.type(um.sqrt(ret))
return ret
+
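The rewritten _var picks the accumulation dtype up front, warns before doing
any work when ddof >= rcount, and clamps the divisor at zero instead of
letting it go negative. A simplified 1-D sketch of the same flow (a
hypothetical helper, not the code above):

    import warnings
    import numpy as np

    def var_sketch(a, ddof=0):
        a = np.asanyarray(a, dtype='f8')
        rcount = a.size
        if ddof >= rcount:
            warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
        mean = np.add.reduce(a) / rcount
        x = a - mean
        x = np.multiply(x, x, out=x)      # squared deviations, in place
        ssd = np.add.reduce(x)
        dof = max(rcount - ddof, 0)       # clamp, as the new code does
        return ssd / dof

    print(var_sketch([1.0, 2.0, 3.0], ddof=1))  # 1.0, same as np.var(..., ddof=1)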
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index f3add4463..ad6a5d074 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -1,7 +1,10 @@
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
+
"""
+from __future__ import division, absolute_import, print_function
+
__all__ = ["array2string", "set_printoptions", "get_printoptions"]
__docformat__ = 'restructuredtext'
@@ -13,11 +16,18 @@ __docformat__ = 'restructuredtext'
# and by Travis Oliphant 2005-8-22 for numpy
import sys
-import numerictypes as _nt
-from umath import maximum, minimum, absolute, not_equal, isnan, isinf
-from multiarray import format_longfloat, datetime_as_string, datetime_data
-from fromnumeric import ravel
+from functools import reduce
+from . import numerictypes as _nt
+from .umath import maximum, minimum, absolute, not_equal, isnan, isinf
+from .multiarray import format_longfloat, datetime_as_string, datetime_data
+from .fromnumeric import ravel
+if sys.version_info[0] >= 3:
+ _MAXINT = sys.maxsize
+ _MININT = -sys.maxsize - 1
+else:
+ _MAXINT = sys.maxint
+ _MININT = -sys.maxint - 1
def product(x, y): return x*y
@@ -31,8 +41,6 @@ _nan_str = 'nan'
_inf_str = 'inf'
_formatter = None # formatting function for array elements
-if sys.version_info[0] >= 3:
- from functools import reduce
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None,
@@ -194,7 +202,7 @@ def get_printoptions():
return d
def _leading_trailing(a):
- import numeric as _nc
+ from . import numeric as _nc
if a.ndim == 1:
if len(a) > 2*_summaryEdgeItems:
b = _nc.concatenate((a[:_summaryEdgeItems],
@@ -321,7 +329,7 @@ def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
return lst
def _convert_arrays(obj):
- import numeric as _nc
+ from . import numeric as _nc
newtup = []
for k in obj:
if isinstance(k, _nc.ndarray):
@@ -396,7 +404,8 @@ def array2string(a, max_line_width=None, precision=None,
Raises
------
- TypeError : if a callable in `formatter` does not return a string.
+ TypeError
+ if a callable in `formatter` does not return a string.
See Also
--------
@@ -477,14 +486,14 @@ def _formatArray(a, format_function, rank, max_line_len,
if rank == 1:
s = ""
line = next_line_prefix
- for i in xrange(leading_items):
+ for i in range(leading_items):
word = format_function(a[i]) + separator
s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
if summary_insert1:
s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix)
- for i in xrange(trailing_items, 1, -1):
+ for i in range(trailing_items, 1, -1):
word = format_function(a[-i]) + separator
s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
@@ -495,7 +504,7 @@ def _formatArray(a, format_function, rank, max_line_len,
else:
s = '['
sep = separator.rstrip()
- for i in xrange(leading_items):
+ for i in range(leading_items):
if i > 0:
s += next_line_prefix
s += _formatArray(a[i], format_function, rank-1, max_line_len,
@@ -506,7 +515,7 @@ def _formatArray(a, format_function, rank, max_line_len,
if summary_insert1:
s += next_line_prefix + summary_insert1 + "\n"
- for i in xrange(trailing_items, 1, -1):
+ for i in range(trailing_items, 1, -1):
if leading_items or i != trailing_items:
s += next_line_prefix
s += _formatArray(a[-i], format_function, rank-1, max_line_len,
@@ -536,9 +545,9 @@ class FloatFormat(object):
pass
def fillFormat(self, data):
- import numeric as _nc
- errstate = _nc.seterr(all='ignore')
- try:
+ from . import numeric as _nc
+
+ with _nc.errstate(all='ignore'):
special = isnan(data) | isinf(data)
valid = not_equal(data, 0) & ~special
non_zero = absolute(data.compress(valid))
@@ -553,8 +562,6 @@ class FloatFormat(object):
if not self.suppress_small and (min_val < 0.0001
or max_val/min_val > 1000.):
self.exp_format = True
- finally:
- _nc.seterr(**errstate)
if self.exp_format:
self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
@@ -589,9 +596,9 @@ class FloatFormat(object):
self.format = format
def __call__(self, x, strip_zeros=True):
- import numeric as _nc
- err = _nc.seterr(invalid='ignore')
- try:
+ from . import numeric as _nc
+
+ with _nc.errstate(invalid='ignore'):
if isnan(x):
if self.sign:
return self.special_fmt % ('+' + _nan_str,)
@@ -605,8 +612,6 @@ class FloatFormat(object):
return self.special_fmt % (_inf_str,)
else:
return self.special_fmt % ('-' + _inf_str,)
- finally:
- _nc.seterr(**err)
s = self.format % x
if self.large_exponent:
@@ -630,8 +635,6 @@ def _digits(x, precision, format):
return precision - len(s) + len(z)
-_MAXINT = sys.maxint
-_MININT = -sys.maxint-1
class IntegerFormat(object):
def __init__(self, data):
try:
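The seterr/try/finally pairs above are replaced by errstate, which restores
the previous floating-point error state on exit from the with block. The two
forms are equivalent; the context manager simply cannot leak state on an early
return or exception:

    import numpy as np

    # Old style: save, change and restore the error state by hand.
    old = np.seterr(all='ignore')
    try:
        np.float64(1.0) / np.float64(0.0)
    finally:
        np.seterr(**old)

    # New style: errstate handles the restore.
    with np.errstate(all='ignore'):
        np.float64(1.0) / np.float64(0.0)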
diff --git a/numpy/core/bento.info b/numpy/core/bento.info
index 9c8476eee..bda64d474 100644
--- a/numpy/core/bento.info
+++ b/numpy/core/bento.info
@@ -12,7 +12,8 @@ Library:
Sources:
src/npysort/quicksort.c.src,
src/npysort/mergesort.c.src,
- src/npysort/heapsort.c.src
+ src/npysort/heapsort.c.src,
+ src/npysort/selection.c.src
Extension: multiarray
Sources:
src/multiarray/multiarraymodule_onefile.c
diff --git a/numpy/core/blasdot/_dotblas.c b/numpy/core/blasdot/_dotblas.c
index c73dd6a47..ae6b1b1e5 100644
--- a/numpy/core/blasdot/_dotblas.c
+++ b/numpy/core/blasdot/_dotblas.c
@@ -14,12 +14,6 @@
#include <stdio.h>
-#if (PY_VERSION_HEX < 0x02060000)
-#define Py_TYPE(o) (((PyObject*)(o))->ob_type)
-#define Py_REFCNT(o) (((PyObject*)(o))->ob_refcnt)
-#define Py_SIZE(o) (((PyVarObject*)(o))->ob_size)
-#endif
-
static char module_doc[] =
"This module provides a BLAS optimized\nmatrix multiply, inner product and dot for numpy arrays";
@@ -769,8 +763,7 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa
* We may be able to handle single-segment arrays here
* using appropriate values of Order, Trans1, and Trans2.
*/
-
- if (!PyArray_ISCONTIGUOUS(ap2)) {
+ if (!PyArray_IS_C_CONTIGUOUS(ap2) && !PyArray_IS_F_CONTIGUOUS(ap2)) {
PyObject *new = PyArray_Copy(ap2);
Py_DECREF(ap2);
@@ -779,7 +772,7 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa
goto fail;
}
}
- if (!PyArray_ISCONTIGUOUS(ap1)) {
+ if (!PyArray_IS_C_CONTIGUOUS(ap1) && !PyArray_IS_F_CONTIGUOUS(ap1)) {
PyObject *new = PyArray_Copy(ap1);
Py_DECREF(ap1);
@@ -800,6 +793,18 @@ dotblas_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject* kwa
lda = (PyArray_DIM(ap1, 1) > 1 ? PyArray_DIM(ap1, 1) : 1);
ldb = (PyArray_DIM(ap2, 1) > 1 ? PyArray_DIM(ap2, 1) : 1);
ldc = (PyArray_DIM(ret, 1) > 1 ? PyArray_DIM(ret, 1) : 1);
+
+ /*
+ * Avoid temporary copies for arrays in Fortran order
+ */
+ if (PyArray_IS_F_CONTIGUOUS(ap1)) {
+ Trans1 = CblasTrans;
+ lda = (PyArray_DIM(ap1, 0) > 1 ? PyArray_DIM(ap1, 0) : 1);
+ }
+ if (PyArray_IS_F_CONTIGUOUS(ap2)) {
+ Trans2 = CblasTrans;
+ ldb = (PyArray_DIM(ap2, 0) > 1 ? PyArray_DIM(ap2, 0) : 1);
+ }
if (typenum == NPY_DOUBLE) {
cblas_dgemm(Order, Trans1, Trans2,
L, N, M,
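The new Fortran-order branch in _dotblas avoids the temporary copy by telling
BLAS to read the operand as a transpose with a swapped leading dimension. The
layout fact it relies on, shown from Python:

    import numpy as np

    a = np.arange(6.0).reshape(2, 3)   # C-contiguous
    f = np.asfortranarray(a)           # same values, column-major memory
    # f's buffer read row-major is exactly a.T, so GEMM can be passed
    # CblasTrans instead of a contiguous copy of f.
    assert f.T.flags['C_CONTIGUOUS']
    assert f.T.base is f               # a view; no data was copied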
diff --git a/numpy/core/bscript b/numpy/core/bscript
index d31faa186..ee5543354 100644
--- a/numpy/core/bscript
+++ b/numpy/core/bscript
@@ -15,9 +15,6 @@ waflib.Logs.verbose = 1
# context.
import numpy.build_utils.waf
-from numpy.distutils.conv_template \
- import \
- process_str as process_c_str
from code_generators.numpy_api \
import \
multiarray_api, ufunc_api
@@ -30,11 +27,8 @@ from setup_common \
C99_FUNCS_SINGLE, C99_COMPLEX_TYPES, C99_COMPLEX_FUNCS, \
MANDATORY_FUNCS, C_ABI_VERSION, C_API_VERSION
-try:
- val = os.environ['NPY_SEPARATE_COMPILATION']
- ENABLE_SEPARATE_COMPILATION = (val != "0")
-except KeyError:
- ENABLE_SEPARATE_COMPILATION = False
+ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
+NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0")
NUMPYCONFIG_SYM = []
@@ -43,6 +37,12 @@ if ENABLE_SEPARATE_COMPILATION:
NUMPYCONFIG_SYM.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', '#define NPY_ENABLE_SEPARATE_COMPILATION 1'))
else:
NUMPYCONFIG_SYM.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', ''))
+
+if NPY_RELAXED_STRIDES_CHECKING:
+ NUMPYCONFIG_SYM.append(('DEFINE_NPY_RELAXED_STRIDES_CHECKING', '#define NPY_RELAXED_STRIDES_CHECKING 1'))
+else:
+ NUMPYCONFIG_SYM.append(('DEFINE_NPY_RELAXED_STRIDES_CHECKING', ''))
+
NUMPYCONFIG_SYM.append(('VISIBILITY_HIDDEN', '__attribute__((visibility("hidden")))'))
NUMPYCONFIG_SYM.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
@@ -94,6 +94,19 @@ def write_numpy_config(conf):
onode = conf.bldnode.make_node(node.path_from(conf.srcnode)).change_ext("")
onode.write(cnt)
+def write_numpy_inifiles(conf):
+ subst_dict = dict([("@sep@", os.path.sep), ("@pkgname@", "numpy.core")])
+ for inifile in ["mlib.ini.in", "npymath.ini.in"]:
+ node = conf.path.find_node(inifile)
+ assert node is not None
+ cnt = node.read()
+ for k, v in subst_dict.items():
+ cnt = cnt.replace(k, v)
+ outfile = os.path.join("lib", "npy-pkg-config", inifile)
+ onode = conf.bldnode.make_node(node.path_from(conf.srcnode).replace(
+ inifile, outfile)).change_ext("")
+ onode.write(cnt)
+
def type_checks(conf):
header_name = "Python.h"
features = "c pyext"
@@ -302,30 +315,13 @@ def post_configure(context):
write_numpy_config(conf)
+ write_numpy_inifiles(conf)
+
conf.env.INCLUDES = [".", "include", "include/numpy"]
# FIXME: Should be handled in bento context
conf.store()
-class CTemplateTask(waflib.Task.Task):
- color = 'BLUE'
- before = ['c']
- def run(self):
- s = self.inputs[0]
- cnt = s.read()
- writestr = process_c_str(cnt)
- o = self.outputs[0]
- o.write(writestr)
-
-@waflib.TaskGen.extension(".src")
-def c_template(self, node):
- outs = []
- outs.append(node.change_ext(""))
-
- tsk = self.create_task('CTemplateTask', node, outs)
- if "c" in self.features:
- self.source.append(outs[0])
-
class numpy_api_generator(Task):
vars = ["API_TUPLE"]
color = "BLUE"
@@ -361,6 +357,7 @@ def process_multiarray_api_generator(self):
self.bld.register_outputs("numpy_gen_headers", "multiarray",
[output for output in tsk.outputs if output.suffix() == ".h"],
target_dir="$sitedir/numpy/core/include/numpy")
+
return tsk
@waflib.TaskGen.feature("ufunc_api_gen")
@@ -406,6 +403,14 @@ from os.path import join as pjoin
def pre_build(context):
bld = context.waf_context
+ context.register_category("numpy_gen_inifiles")
+ inifile_mlib = context.local_node.declare(os.path.join(
+ "lib", "npy-pkg-config", "mlib.ini"))
+ inifile_npymath = context.local_node.declare(os.path.join(
+ "lib", "npy-pkg-config", "npymath.ini"))
+ context.register_outputs("numpy_gen_inifiles", "numpyconfig",
+ [inifile_mlib, inifile_npymath])
+
context.register_category("numpy_gen_headers")
numpyconfig_h = context.local_node.declare(os.path.join("include", "numpy", "_numpyconfig.h"))
@@ -432,6 +437,9 @@ def pre_build(context):
if ENABLE_SEPARATE_COMPILATION:
sources = [pjoin('src', 'multiarray', 'arrayobject.c'),
pjoin('src', 'multiarray', 'arraytypes.c.src'),
+ pjoin('src', 'multiarray', 'array_assign.c'),
+ pjoin('src', 'multiarray', 'array_assign_array.c'),
+ pjoin('src', 'multiarray', 'array_assign_scalar.c'),
pjoin('src', 'multiarray', 'buffer.c'),
pjoin('src', 'multiarray', 'calculation.c'),
pjoin('src', 'multiarray', 'common.c'),
@@ -440,6 +448,9 @@ def pre_build(context):
pjoin('src', 'multiarray', 'convert_datatype.c'),
pjoin('src', 'multiarray', 'ctors.c'),
pjoin('src', 'multiarray', 'datetime.c'),
+ pjoin('src', 'multiarray', 'datetime_busday.c'),
+ pjoin('src', 'multiarray', 'datetime_busdaycal.c'),
+ pjoin('src', 'multiarray', 'datetime_strings.c'),
pjoin('src', 'multiarray', 'descriptor.c'),
pjoin('src', 'multiarray', 'dtype_transfer.c'),
pjoin('src', 'multiarray', 'einsum.c.src'),
@@ -452,8 +463,10 @@ def pre_build(context):
pjoin('src', 'multiarray', 'mapping.c'),
pjoin('src', 'multiarray', 'methods.c'),
pjoin('src', 'multiarray', 'multiarraymodule.c'),
- pjoin('src', 'multiarray', 'nditer_pywrap.c'),
pjoin('src', 'multiarray', 'nditer_templ.c.src'),
+ pjoin('src', 'multiarray', 'nditer_api.c'),
+ pjoin('src', 'multiarray', 'nditer_constr.c'),
+ pjoin('src', 'multiarray', 'nditer_pywrap.c'),
pjoin('src', 'multiarray', 'number.c'),
pjoin('src', 'multiarray', 'numpymemoryview.c'),
pjoin('src', 'multiarray', 'numpyos.c'),
@@ -479,8 +492,10 @@ def pre_build(context):
pattern="ufunc_api",
name="ufunc_api")
- ufunc_templates = ["src/umath/loops.c.src",
- "src/umath/funcs.inc.src"]
+ ufunc_templates = [
+ "src/umath/loops.c.src",
+ "src/umath/funcs.inc.src",
+ "src/umath/simd.inc.src"]
bld(target="ufunc_templates", source=ufunc_templates)
bld(features="umath_gen",
@@ -489,9 +504,13 @@ def pre_build(context):
includes = ["src/umath", "src/private"]
if ENABLE_SEPARATE_COMPILATION:
- sources = [pjoin("src", "umath", "umathmodule.c"),
- pjoin("src", "umath", "ufunc_object.c"),
- pjoin("src", "umath", "loops.c.src")]
+ sources = [
+ pjoin("src", "umath", "loops.c.src"),
+ pjoin('src', 'umath', 'reduction.c'),
+ pjoin('src', 'umath', 'ufunc_object.c'),
+ pjoin('src', 'umath', 'ufunc_type_resolution.c'),
+ pjoin("src", "umath", "umathmodule.c"),
+ ]
else:
sources = extension.sources
return context.default_builder(extension,
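bscript now reads both build toggles straight from the environment, with
separate compilation defaulting to on and relaxed strides checking to off.
Note that the test is literally "anything but the string 0", so even
NPY_RELAXED_STRIDES_CHECKING=false enables the flag:

    import os

    # On unless the variable is explicitly set to "0".
    ENABLE_SEPARATE_COMPILATION = (
        os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
    # Off unless set to any value other than "0".
    NPY_RELAXED_STRIDES_CHECKING = (
        os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0")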
diff --git a/numpy/core/code_generators/__init__.py b/numpy/core/code_generators/__init__.py
index e69de29bb..1d0f69b67 100644
--- a/numpy/core/code_generators/__init__.py
+++ b/numpy/core/code_generators/__init__.py
@@ -0,0 +1 @@
+from __future__ import division, absolute_import, print_function
diff --git a/numpy/core/code_generators/cversions.py b/numpy/core/code_generators/cversions.py
index 036e923ec..840251aa8 100644
--- a/numpy/core/code_generators/cversions.py
+++ b/numpy/core/code_generators/cversions.py
@@ -1,10 +1,16 @@
-"""Simple script to compute the api hash of the current API as defined by
-numpy_api_order and ufunc_api_order."""
-from os.path import join, dirname
+"""Simple script to compute the api hash of the current API.
+
+The API hash is defined by numpy_api_order and ufunc_api_order.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+from os.path import dirname
from genapi import fullapi_hash
import numpy_api
+
if __name__ == '__main__':
curdir = dirname(__file__)
- print fullapi_hash(numpy_api.full_api)
+ print(fullapi_hash(numpy_api.full_api))
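fullapi_hash reduces the complete, ordered C API to one checksum so that an
accidental reordering or renumbering is caught at build time. A toy version
of the idea (the real function lives in genapi.py and hashes more detail than
this):

    import hashlib

    def toy_api_hash(api):
        # api maps a function name to its fixed slot index.
        items = sorted(api.items(), key=lambda kv: (kv[1], kv[0]))
        blob = ''.join('%s:%d' % (name, index) for name, index in items)
        return hashlib.md5(blob.encode('ascii')).hexdigest()

    print(toy_api_hash({'PyArray_Partition': 296, 'PyArray_ArgPartition': 297}))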
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index 7d3b43d3b..24d376ac6 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -1,4 +1,6 @@
-# hash below were defined from numpy_api_order.txt and ufunc_api_order.txt
+# Hashes below were defined from numpy_api_order.txt and ufunc_api_order.txt
+# When adding a new version here for a new minor release, also add the same
+# version as NPY_x_y_API_VERSION in numpyconfig.h
0x00000001 = 603580d224763e58c5e7147f804dc0f5
0x00000002 = 8ecb29306758515ae69749c803a75da1
0x00000003 = bf22c0d05b31625d2a7015988d61ce5a
@@ -13,3 +15,5 @@
0x00000007 = e396ba3912dcf052eaee1b0b203a7724
# Version 8 Added interface to MapIterObject
0x00000008 = 17321775fc884de0b1eda478cd61c74b
+# Version 9 Added interface for partition functions.
+0x00000009 = f99a02b75bd60205d1afe1eed080fd53
diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py
index 32b0972a7..62e4f9fc8 100644
--- a/numpy/core/code_generators/genapi.py
+++ b/numpy/core/code_generators/genapi.py
@@ -4,7 +4,10 @@ Get API information encoded in C files.
See ``find_function`` for how functions should be formatted, and
``read_order`` for how the order of the functions should be
specified.
+
"""
+from __future__ import division, absolute_import, print_function
+
import sys, os, re
try:
import hashlib
@@ -12,8 +15,7 @@ try:
except ImportError:
import md5
md5new = md5.new
-if sys.version_info[:2] < (2, 6):
- from sets import Set as set
+
import textwrap
from os.path import join
@@ -200,7 +202,7 @@ def find_functions(filename, tag='API'):
function_name = None
function_args = []
doclist = []
- SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = range(5)
+ SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = list(range(5))
state = SCANNING
tagcomment = '/*' + tag
for lineno, line in enumerate(fo):
@@ -251,7 +253,7 @@ def find_functions(filename, tag='API'):
else:
function_args.append(line)
except:
- print(filename, lineno+1)
+ print(filename, lineno + 1)
raise
fo.close()
return functions
@@ -383,7 +385,7 @@ NPY_NO_EXPORT %s %s \\\n (%s);""" % (self.return_type,
def order_dict(d):
"""Order dict by its values."""
- o = d.items()
+ o = list(d.items())
def _key(x):
return (x[1], x[0])
return sorted(o, key=_key)
@@ -462,7 +464,7 @@ def get_versions_hash():
file = os.path.join(os.path.dirname(__file__), 'cversions.txt')
fid = open(file, 'r')
try:
- for line in fid.readlines():
+ for line in fid:
m = VERRE.match(line)
if m:
d.append((int(m.group(1), 16), m.group(2)))
@@ -480,8 +482,8 @@ def main():
print(func)
ah = func.api_hash()
m.update(ah)
- print(hex(int(ah,16)))
- print(hex(int(m.hexdigest()[:8],16)))
+ print(hex(int(ah, 16)))
+ print(hex(int(m.hexdigest()[:8], 16)))
if __name__ == '__main__':
main()
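Two of the genapi.py cleanups are pure Python 3 hygiene: iterating the open
file directly instead of materializing readlines(), and wrapping d.items() in
list() before sorting. A sketch of the streaming read, with an assumed VERRE
pattern since the real one is not shown in this hunk:

    import re

    VERRE = re.compile(r'^0x([0-9a-f]+)\s*=\s*([0-9a-f]+)', re.I)  # assumed
    versions = []
    with open('cversions.txt') as fid:
        for line in fid:                # streams lines; no intermediate list
            m = VERRE.match(line)
            if m:
                versions.append((int(m.group(1), 16), m.group(2)))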
diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py
index 7cd4b9f6a..ce270a6a0 100644
--- a/numpy/core/code_generators/generate_numpy_api.py
+++ b/numpy/core/code_generators/generate_numpy_api.py
@@ -1,3 +1,5 @@
+from __future__ import division, print_function
+
import os
import genapi
@@ -221,8 +223,8 @@ def do_generate_api(targets, sources):
multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
if len(multiarray_api_dict) != len(multiarray_api_index):
- raise AssertionError, "Multiarray API size mismatch %d %d" % \
- (len(multiarray_api_dict), len(multiarray_api_index))
+ raise AssertionError("Multiarray API size mismatch %d %d" %
+ (len(multiarray_api_dict), len(multiarray_api_index)))
extension_list = []
for name, index in genapi.order_dict(multiarray_api_index):
diff --git a/numpy/core/code_generators/generate_ufunc_api.py b/numpy/core/code_generators/generate_ufunc_api.py
index e10b9cd38..6305385af 100644
--- a/numpy/core/code_generators/generate_ufunc_api.py
+++ b/numpy/core/code_generators/generate_ufunc_api.py
@@ -1,3 +1,5 @@
+from __future__ import division, print_function
+
import os
import genapi
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 2d8e4360e..1bc22d777 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -1,3 +1,5 @@
+from __future__ import division, print_function
+
import os
import re
import struct
@@ -94,12 +96,11 @@ class Ufunc(object):
Attributes
----------
-
- nin: number of input arguments
- nout: number of output arguments
- identity: identity element for a two-argument function
- docstring: docstring for the ufunc
- type_descriptions: list of TypeDescription objects
+ nin : number of input arguments
+ nout : number of output arguments
+ identity : identity element for a two-argument function
+ docstring : docstring for the ufunc
+ type_descriptions : list of TypeDescription objects
"""
def __init__(self, nin, nout, identity, docstring, typereso,
*type_descriptions):
@@ -206,6 +207,7 @@ cmplx = 'FDG'
cmplxO = cmplx + O
cmplxP = cmplx + P
inexact = flts + cmplx
+inexactvec = 'fd'
noint = inexact+O
nointP = inexact+P
allP = bints+times+flts+cmplxP
@@ -685,6 +687,7 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sqrt'),
None,
+ TD(inexactvec),
TD(inexact, f='sqrt', astype={'e':'f'}),
TD(P, f='sqrt'),
),
@@ -835,8 +838,7 @@ def make_arrays(funcdict):
#
code1list = []
code2list = []
- names = list(funcdict.keys())
- names.sort()
+ names = sorted(funcdict.keys())
for name in names:
uf = funcdict[name]
funclist = []
@@ -901,8 +903,7 @@ def make_arrays(funcdict):
def make_ufuncs(funcdict):
code3list = []
- names = list(funcdict.keys())
- names.sort()
+ names = sorted(funcdict.keys())
for name in names:
uf = funcdict[name]
mlist = []
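The TD(inexactvec) entry added to sqrt registers dedicated float and double
inner loops ahead of the generic inexact ones; ufunc dispatch walks the
registered signatures in order and takes the first match, so those loops win
for f and d inputs. The registered order is visible from Python (the exact
contents vary by numpy version and build):

    import numpy as np

    # The first entry whose types can hold the inputs is the one used.
    print(np.sqrt.types)   # e.g. ['e->e', 'f->f', 'd->d', 'F->F', ...]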
diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py
index 16278f1a2..ad9ee3eec 100644
--- a/numpy/core/code_generators/numpy_api.py
+++ b/numpy/core/code_generators/numpy_api.py
@@ -10,7 +10,9 @@ needs to be updated.
When adding a function, make sure to use the next integer not used as an index
(in case you use an existing index or jump, the build will stop and raise an
exception, so it should hopefully not get unnoticed).
+
"""
+from __future__ import division, absolute_import, print_function
multiarray_global_vars = {
'NPY_NUMUSERTYPES': 7,
@@ -333,6 +335,11 @@ multiarray_funcs_api = {
'PyArray_MapIterSwapAxes': 293,
'PyArray_MapIterArray': 294,
'PyArray_MapIterNext': 295,
+ # End 1.7 API
+ 'PyArray_Partition': 296,
+ 'PyArray_ArgPartition': 297,
+ 'PyArray_SelectkindConverter': 298,
+ # End 1.8 API
}
ufunc_types_api = {
@@ -382,6 +389,9 @@ ufunc_funcs_api = {
# End 1.6 API
'PyUFunc_DefaultTypeResolver': 39,
'PyUFunc_ValidateCasting': 40,
+ # End 1.7 API
+ 'PyUFunc_RegisterLoopForDescr': 41,
+ # End 1.8 API
}
# List of all the dicts which define the C API
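These dicts map each exported symbol to a permanent slot number; the generated
C API is an array of pointers indexed by exactly these integers, so a released
index can never be reused or renumbered without breaking binaries compiled
against an older numpy. A toy version of the uniqueness check the generator
performs (hypothetical helper):

    def check_api_dict(d):
        # A duplicate slot would silently break the binary API.
        indexes = sorted(d.values())
        if len(indexes) != len(set(indexes)):
            raise ValueError("Duplicate API indexes: %r" % indexes)

    check_api_dict({'PyArray_Partition': 296, 'PyArray_ArgPartition': 297})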
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 678303a2e..53ccfcfda 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -1,13 +1,15 @@
-# Docstrings for generated ufuncs
-#
-# The syntax is designed to look like the function add_newdoc is being
-# called from numpy.lib, but in this file add_newdoc puts the docstrings
-# in a dictionary. This dictionary is used in
-# numpy/core/code_generators/generate_umath.py to generate the docstrings
-# for the ufuncs in numpy.core at the C level when the ufuncs are created
-# at compile time.
+"""
+Docstrings for generated ufuncs
+The syntax is designed to look like the function add_newdoc is being
+called from numpy.lib, but in this file add_newdoc puts the docstrings
+in a dictionary. This dictionary is used in
+numpy/core/code_generators/generate_umath.py to generate the docstrings
+for the ufuncs in numpy.core at the C level when the ufuncs are created
+at compile time.
+"""
+from __future__ import division, absolute_import, print_function
docdict = {}
@@ -1099,7 +1101,7 @@ add_newdoc('numpy.core.umath', 'exp2',
See Also
--------
- exp : calculate x**p.
+ power
Notes
-----
@@ -2168,14 +2170,12 @@ add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
- Compare two arrays and returns a new array containing
- the element-wise maxima. If one of the elements being
- compared is a nan, then that element is returned. If
- both elements are nans then the first is returned. The
- latter distinction is important for complex nans,
- which are defined as at least one of the real or
- imaginary parts being a nan. The net effect is that
- nans are propagated.
+ Compare two arrays and return a new array containing the element-wise
+ maxima. If one of the elements being compared is a nan, then that element
+ is returned. If both elements are nans then the first is returned. The
+ latter distinction is important for complex nans, which are defined as at
+ least one of the real or imaginary parts being a nan. The net effect is
+ that nans are propagated.
Parameters
----------
@@ -2192,25 +2192,27 @@ add_newdoc('numpy.core.umath', 'maximum',
See Also
--------
minimum :
- element-wise minimum
-
+ Element-wise minimum of two arrays, propagating any NaNs.
fmax :
- element-wise maximum that ignores nans unless both inputs are nans.
-
- fmin :
- element-wise minimum that ignores nans unless both inputs are nans.
+ Element-wise maximum of two arrays, ignoring any NaNs.
+ amax :
+ The maximum value of an array along a given axis, propagating any NaNs.
+ nanmax :
+ The maximum value of an array along a given axis, ignoring any NaNs.
+
+ fmin, amin, nanmin
Notes
-----
- Equivalent to ``np.where(x1 > x2, x1, x2)`` but faster and does proper
- broadcasting.
+ The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
+ x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
- >>> np.maximum(np.eye(2), [0.5, 2])
+ >>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
@@ -2247,11 +2249,15 @@ add_newdoc('numpy.core.umath', 'minimum',
See Also
--------
maximum :
- element-wise minimum that propagates nans.
- fmax :
- element-wise maximum that ignores nans unless both inputs are nans.
+ Element-wise maximum of two arrays, propagating any NaNs.
fmin :
- element-wise minimum that ignores nans unless both inputs are nans.
+ Element-wise minimum of two arrays, ignoring any NaNs.
+ amin :
+ The minimum value of an array along a given axis, propagating any NaNs.
+ nanmin :
+ The minimum value of an array along a given axis, ignoring any NaNs.
+
+ fmax, amax, nanmax
Notes
-----
@@ -2269,6 +2275,8 @@ add_newdoc('numpy.core.umath', 'minimum',
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
+ >>> np.minimum(-np.Inf, 1)
+ -inf
""")
@@ -2298,11 +2306,15 @@ add_newdoc('numpy.core.umath', 'fmax',
See Also
--------
fmin :
- element-wise minimum that ignores nans unless both inputs are nans.
+ Element-wise minimum of two arrays, ignoring any NaNs.
maximum :
- element-wise maximum that propagates nans.
- minimum :
- element-wise minimum that propagates nans.
+ Element-wise maximum of two arrays, propagating any NaNs.
+ amax :
+ The maximum value of an array along a given axis, propagating any NaNs.
+ nanmax :
+ The maximum value of an array along a given axis, ignoring any NaNs.
+
+ minimum, amin, nanmin
Notes
-----
@@ -2327,8 +2339,6 @@ add_newdoc('numpy.core.umath', 'fmax',
add_newdoc('numpy.core.umath', 'fmin',
"""
- fmin(x1, x2[, out])
-
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
@@ -2353,11 +2363,15 @@ add_newdoc('numpy.core.umath', 'fmin',
See Also
--------
fmax :
- element-wise maximum that ignores nans unless both inputs are nans.
- maximum :
- element-wise maximum that propagates nans.
+ Element-wise maximum of two arrays, ignoring any NaNs.
minimum :
- element-wise minimum that propagates nans.
+ Element-wise minimum of two arrays, propagating any NaNs.
+ amin :
+ The minimum value of an array along a given axis, propagating any NaNs.
+ nanmin :
+ The minimum value of an array along a given axis, ignoring any NaNs.
+
+ maximum, amax, nanmax
Notes
-----
@@ -2798,7 +2812,7 @@ add_newdoc('numpy.core.umath', 'signbit',
Parameters
----------
- x: array_like
+ x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved
@@ -2829,9 +2843,9 @@ add_newdoc('numpy.core.umath', 'copysign',
Parameters
----------
- x1: array_like
+ x1 : array_like
Values to change the sign of.
- x2: array_like
+ x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
@@ -2894,7 +2908,7 @@ add_newdoc('numpy.core.umath', 'spacing',
Parameters
----------
- x1: array_like
+ x1 : array_like
Values to find the spacing of.
Returns
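The rewritten See Also blocks draw one consistent distinction: maximum and
minimum propagate NaNs, while fmax and fmin ignore a NaN unless both inputs
are NaN. A quick demonstration:

    import numpy as np

    print(np.maximum([np.nan, 0.0], [0.0, np.nan]))  # [nan nan]
    print(np.fmax([np.nan, 0.0], [0.0, np.nan]))     # [ 0.  0.]
    print(np.fmax([np.nan], [np.nan]))               # [nan]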
diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py
index 1f4b3bef8..121e32314 100644
--- a/numpy/core/defchararray.py
+++ b/numpy/core/defchararray.py
@@ -15,13 +15,14 @@ available in your version of Python.
The preferred alias for `defchararray` is `numpy.char`.
"""
+from __future__ import division, absolute_import, print_function
import sys
-from numerictypes import string_, unicode_, integer, object_, bool_, character
-from numeric import ndarray, compare_chararrays
-from numeric import array as narray
+from .numerictypes import string_, unicode_, integer, object_, bool_, character
+from .numeric import ndarray, compare_chararrays
+from .numeric import array as narray
from numpy.core.multiarray import _vec_string
-from numpy.compat import asbytes
+from numpy.compat import asbytes, long
import numpy
__all__ = ['chararray',
@@ -89,7 +90,7 @@ def _get_num_chars(a):
for a unicode array this is itemsize / 4.
"""
if issubclass(a.dtype.type, unicode_):
- return a.itemsize / 4
+ return a.itemsize // 4
return a.itemsize
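With division imported from __future__ (or on Python 3), / is true division,
so itemsize / 4 would turn the character count into a float; // keeps it an
int. A quick check:

    import numpy as np

    a = np.array([u'abc'])         # UCS4 storage, 4 bytes per character
    print(a.dtype.itemsize)        # 12
    print(a.dtype.itemsize // 4)   # 3, an int; / 4 would give 3.0 on Py3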
@@ -374,69 +375,42 @@ def capitalize(a):
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'capitalize')
-if sys.version_info >= (2, 4):
- def center(a, width, fillchar=' '):
- """
- Return a copy of `a` with its elements centered in a string of
- length `width`.
-
- Calls `str.center` element-wise.
-
- Parameters
- ----------
- a : array_like of str or unicode
- width : int
- The length of the resulting strings
- fillchar : str or unicode, optional
- The padding character to use (default is space).
+def center(a, width, fillchar=' '):
+ """
+ Return a copy of `a` with its elements centered in a string of
+ length `width`.
- Returns
- -------
- out : ndarray
- Output array of str or unicode, depending on input
- types
+ Calls `str.center` element-wise.
- See also
- --------
- str.center
+ Parameters
+ ----------
+ a : array_like of str or unicode
- """
- a_arr = numpy.asarray(a)
- width_arr = numpy.asarray(width)
- size = long(numpy.max(width_arr.flat))
- if numpy.issubdtype(a_arr.dtype, numpy.string_):
- fillchar = asbytes(fillchar)
- return _vec_string(
- a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))
-else:
- def center(a, width):
- """
- Return an array with the elements of `a` centered in a string
- of length width.
+ width : int
+ The length of the resulting strings
+ fillchar : str or unicode, optional
+ The padding character to use (default is space).
- Calls `str.center` element-wise.
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input
+ types
- Parameters
- ----------
- a : array_like of str or unicode
- width : int
- The length of the resulting strings
+ See also
+ --------
+ str.center
- Returns
- -------
- out : ndarray, str or unicode
- Output array of str or unicode, depending on input types
+ """
+ a_arr = numpy.asarray(a)
+ width_arr = numpy.asarray(width)
+ size = long(numpy.max(width_arr.flat))
+ if numpy.issubdtype(a_arr.dtype, numpy.string_):
+ fillchar = asbytes(fillchar)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))
- See also
- --------
- str.center
- """
- a_arr = numpy.asarray(a)
- width_arr = numpy.asarray(width)
- size = long(numpy.max(width_arr.flat))
- return _vec_string(
- a_arr, (a_arr.dtype.type, size), 'center', (width_arr,))
def count(a, sub, start=0, end=None):
"""
@@ -483,6 +457,7 @@ def count(a, sub, start=0, end=None):
"""
return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end))
+
def decode(a, encoding=None, errors=None):
"""
Calls `str.decode` element-wise.
@@ -528,6 +503,7 @@ def decode(a, encoding=None, errors=None):
return _to_string_or_unicode_array(
_vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
+
def encode(a, encoding=None, errors=None):
"""
Calls `str.encode` element-wise.
@@ -562,6 +538,7 @@ def encode(a, encoding=None, errors=None):
return _to_string_or_unicode_array(
_vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
+
def endswith(a, suffix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
@@ -605,6 +582,7 @@ def endswith(a, suffix, start=0, end=None):
return _vec_string(
a, bool_, 'endswith', [suffix, start] + _clean_args(end))
+
def expandtabs(a, tabsize=8):
"""
Return a copy of each string element where all tab characters are
@@ -639,6 +617,7 @@ def expandtabs(a, tabsize=8):
return _to_string_or_unicode_array(
_vec_string(a, object_, 'expandtabs', (tabsize,)))
+
def find(a, sub, start=0, end=None):
"""
For each element, return the lowest index in the string where
@@ -673,10 +652,6 @@ def find(a, sub, start=0, end=None):
return _vec_string(
a, integer, 'find', [sub, start] + _clean_args(end))
-# if sys.version_info >= (2.6):
-# def format(a, *args, **kwargs):
-# # _vec_string doesn't support kwargs at present
-# raise NotImplementedError
def index(a, sub, start=0, end=None):
"""
@@ -900,68 +875,41 @@ def join(sep, seq):
return _to_string_or_unicode_array(
_vec_string(sep, object_, 'join', (seq,)))
-if sys.version_info >= (2, 4):
- def ljust(a, width, fillchar=' '):
- """
- Return an array with the elements of `a` left-justified in a
- string of length `width`.
-
- Calls `str.ljust` element-wise.
- Parameters
- ----------
- a : array_like of str or unicode
-
- width : int
- The length of the resulting strings
- fillchar : str or unicode, optional
- The character to use for padding
+def ljust(a, width, fillchar=' '):
+ """
+ Return an array with the elements of `a` left-justified in a
+ string of length `width`.
- Returns
- -------
- out : ndarray
- Output array of str or unicode, depending on input type
+ Calls `str.ljust` element-wise.
- See also
- --------
- str.ljust
+ Parameters
+ ----------
+ a : array_like of str or unicode
- """
- a_arr = numpy.asarray(a)
- width_arr = numpy.asarray(width)
- size = long(numpy.max(width_arr.flat))
- if numpy.issubdtype(a_arr.dtype, numpy.string_):
- fillchar = asbytes(fillchar)
- return _vec_string(
- a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
-else:
- def ljust(a, width):
- """
- Return an array with the elements of `a` left-justified in a
- string of length `width`.
+ width : int
+ The length of the resulting strings
+ fillchar : str or unicode, optional
+ The character to use for padding
- Calls `str.ljust` element-wise.
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
- Parameters
- ----------
- a : array_like of str or unicode
- width : int
- The length of the resulting strings
+ See also
+ --------
+ str.ljust
- Returns
- -------
- out : ndarray
- Output array of str or unicode, depending on input type
+ """
+ a_arr = numpy.asarray(a)
+ width_arr = numpy.asarray(width)
+ size = long(numpy.max(width_arr.flat))
+ if numpy.issubdtype(a_arr.dtype, numpy.string_):
+ fillchar = asbytes(fillchar)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
- See also
- --------
- str.ljust
- """
- a_arr = numpy.asarray(a)
- width_arr = numpy.asarray(width)
- size = long(numpy.max(width_arr.flat))
- return _vec_string(
- a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr,))
def lower(a):
"""
@@ -998,6 +946,7 @@ def lower(a):
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'lower')
+
def lstrip(a, chars=None):
"""
For each element in `a`, return a copy with the leading characters
@@ -1054,40 +1003,41 @@ def lstrip(a, chars=None):
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
-if sys.version_info >= (2, 5):
- def partition(a, sep):
- """
- Partition each element in `a` around `sep`.
- Calls `str.partition` element-wise.
+def partition(a, sep):
+ """
+ Partition each element in `a` around `sep`.
- For each element in `a`, split the element as the first
- occurrence of `sep`, and return 3 strings containing the part
- before the separator, the separator itself, and the part after
- the separator. If the separator is not found, return 3 strings
- containing the string itself, followed by two empty strings.
+ Calls `str.partition` element-wise.
- Parameters
- ----------
- a : array_like, {str, unicode}
- Input array
- sep : {str, unicode}
- Separator to split each string element in `a`.
+ For each element in `a`, split the element as the first
+ occurrence of `sep`, and return 3 strings containing the part
+ before the separator, the separator itself, and the part after
+ the separator. If the separator is not found, return 3 strings
+ containing the string itself, followed by two empty strings.
- Returns
- -------
- out : ndarray, {str, unicode}
- Output array of str or unicode, depending on input type.
- The output array will have an extra dimension with 3
- elements per input element.
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array
+ sep : {str, unicode}
+ Separator to split each string element in `a`.
- See also
- --------
- str.partition
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type.
+ The output array will have an extra dimension with 3
+ elements per input element.
+
+ See also
+ --------
+ str.partition
+
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'partition', (sep,)))
- """
- return _to_string_or_unicode_array(
- _vec_string(a, object_, 'partition', (sep,)))
def replace(a, old, new, count=None):
"""
@@ -1120,6 +1070,7 @@ def replace(a, old, new, count=None):
_vec_string(
a, object_, 'replace', [old, new] +_clean_args(count)))
+
def rfind(a, sub, start=0, end=None):
"""
For each element in `a`, return the highest index in the string
@@ -1151,6 +1102,7 @@ def rfind(a, sub, start=0, end=None):
return _vec_string(
a, integer, 'rfind', [sub, start] + _clean_args(end))
+
def rindex(a, sub, start=0, end=None):
"""
Like `rfind`, but raises `ValueError` when the substring `sub` is
@@ -1179,140 +1131,113 @@ def rindex(a, sub, start=0, end=None):
return _vec_string(
a, integer, 'rindex', [sub, start] + _clean_args(end))
-if sys.version_info >= (2, 4):
- def rjust(a, width, fillchar=' '):
- """
- Return an array with the elements of `a` right-justified in a
- string of length `width`.
- Calls `str.rjust` element-wise.
+def rjust(a, width, fillchar=' '):
+ """
+ Return an array with the elements of `a` right-justified in a
+ string of length `width`.
- Parameters
- ----------
- a : array_like of str or unicode
+ Calls `str.rjust` element-wise.
- width : int
- The length of the resulting strings
- fillchar : str or unicode, optional
- The character to use for padding
+ Parameters
+ ----------
+ a : array_like of str or unicode
- Returns
- -------
- out : ndarray
- Output array of str or unicode, depending on input type
+ width : int
+ The length of the resulting strings
+ fillchar : str or unicode, optional
+ The character to use for padding
- See also
- --------
- str.rjust
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
- """
- a_arr = numpy.asarray(a)
- width_arr = numpy.asarray(width)
- size = long(numpy.max(width_arr.flat))
- if numpy.issubdtype(a_arr.dtype, numpy.string_):
- fillchar = asbytes(fillchar)
- return _vec_string(
- a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar))
-else:
- def rjust(a, width):
- """
- Return an array with the elements of `a` right-justified in a
- string of length `width`.
+ See also
+ --------
+ str.rjust
- Calls `str.rjust` element-wise.
+ """
+ a_arr = numpy.asarray(a)
+ width_arr = numpy.asarray(width)
+ size = long(numpy.max(width_arr.flat))
+ if numpy.issubdtype(a_arr.dtype, numpy.string_):
+ fillchar = asbytes(fillchar)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar))
- Parameters
- ----------
- a : array_like of str or unicode
- width : int
- The length of the resulting strings
- Returns
- -------
- out : ndarray
- Output array of str or unicode, depending on input type
+def rpartition(a, sep):
+ """
+ Partition (split) each element around the right-most separator.
- See also
- --------
- str.rjust
- """
- a_arr = numpy.asarray(a)
- width_arr = numpy.asarray(width)
- size = long(numpy.max(width_arr.flat))
- return _vec_string(
- a_arr, (a_arr.dtype.type, size), 'rjust', (width,))
+ Calls `str.rpartition` element-wise.
-if sys.version_info >= (2, 5):
- def rpartition(a, sep):
- """
- Partition (split) each element around the right-most separator.
+ For each element in `a`, split the element as the last
+ occurrence of `sep`, and return 3 strings containing the part
+ before the separator, the separator itself, and the part after
+ the separator. If the separator is not found, return 3 strings
+ containing the string itself, followed by two empty strings.
- Calls `str.rpartition` element-wise.
+ Parameters
+ ----------
+ a : array_like of str or unicode
+ Input array.
+ sep : str or unicode
+ Right-most separator to split each element in array.
- For each element in `a`, split the element as the last
- occurrence of `sep`, and return 3 strings containing the part
- before the separator, the separator itself, and the part after
- the separator. If the separator is not found, return 3 strings
- containing the string itself, followed by two empty strings.
+ Returns
+ -------
+ out : ndarray
+ Output array of string or unicode, depending on input
+ type. The output array will have an extra dimension with
+ 3 elements per input element.
- Parameters
- ----------
- a : array_like of str or unicode
- Input array
- sep : str or unicode
- Right-most separator to split each element in array.
+ See also
+ --------
+ str.rpartition
- Returns
- -------
- out : ndarray
- Output array of string or unicode, depending on input
- type. The output array will have an extra dimension with
- 3 elements per input element.
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'rpartition', (sep,)))
- See also
- --------
- str.rpartition
- """
- return _to_string_or_unicode_array(
- _vec_string(a, object_, 'rpartition', (sep,)))
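A matching sketch for `rpartition`, which splits around the right-most occurrence of the separator (same assumptions as the `partition` example above):

>>> np.char.rpartition(np.array(['a-b-c']), '-')
array([['a-b', '-', 'c']], dtype='<U3')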
+def rsplit(a, sep=None, maxsplit=None):
+ """
+ For each element in `a`, return a list of the words in the
+ string, using `sep` as the delimiter string.
-if sys.version_info >= (2, 4):
- def rsplit(a, sep=None, maxsplit=None):
- """
- For each element in `a`, return a list of the words in the
- string, using `sep` as the delimiter string.
+ Calls `str.rsplit` element-wise.
- Calls `str.rsplit` element-wise.
+ Except for splitting from the right, `rsplit`
+ behaves like `split`.
- Except for splitting from the right, `rsplit`
- behaves like `split`.
+ Parameters
+ ----------
+ a : array_like of str or unicode
- Parameters
- ----------
- a : array_like of str or unicode
+ sep : str or unicode, optional
+ If `sep` is not specified or `None`, any whitespace string
+ is a separator.
+ maxsplit : int, optional
+ If `maxsplit` is given, at most `maxsplit` splits are done,
+ the rightmost ones.
- sep : str or unicode, optional
- If `sep` is not specified or `None`, any whitespace string
- is a separator.
- maxsplit : int, optional
- If `maxsplit` is given, at most `maxsplit` splits are done,
- the rightmost ones.
+ Returns
+ -------
+ out : ndarray
+ Array of list objects
- Returns
- -------
- out : ndarray
- Array of list objects
+ See also
+ --------
+ str.rsplit, split
- See also
- --------
- str.rsplit, split
+ """
+ # This will return an array of lists of different sizes, so we
+ # leave it as an object array
+ return _vec_string(
+ a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
- """
- # This will return an array of lists of different sizes, so we
- # leave it as an object array
- return _vec_string(
- a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
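Because per-element results can have different lengths, `rsplit` returns an object array of lists; a sketch (the object-array repr varies across NumPy versions):

>>> np.char.rsplit(np.array(['a b c']), maxsplit=1)
array([['a b', 'c']], dtype=object)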
def rstrip(a, chars=None):
"""
@@ -1357,6 +1282,7 @@ def rstrip(a, chars=None):
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
+
def split(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
@@ -1390,6 +1316,7 @@ def split(a, sep=None, maxsplit=None):
return _vec_string(
a, object_, 'split', [sep] + _clean_args(maxsplit))
+
def splitlines(a, keepends=None):
"""
For each element in `a`, return a list of the lines in the
@@ -1418,6 +1345,7 @@ def splitlines(a, keepends=None):
return _vec_string(
a, object_, 'splitlines', _clean_args(keepends))
+
def startswith(a, prefix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
@@ -1429,7 +1357,7 @@ def startswith(a, prefix, start=0, end=None):
----------
a : array_like of str or unicode
- suffix : str
+ prefix : str
start, end : int, optional
With optional `start`, test beginning at that position. With
@@ -1448,6 +1376,7 @@ def startswith(a, prefix, start=0, end=None):
return _vec_string(
a, bool_, 'startswith', [prefix, start] + _clean_args(end))
+
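The docstring fix above (`suffix` -> `prefix`) matches the actual semantics; a sketch (boolean repr shown as NumPy of this era prints it):

>>> np.char.startswith(np.array(['apple', 'banana']), 'ap')
array([ True, False], dtype=bool)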
def strip(a, chars=None):
"""
For each element in `a`, return a copy with the leading and
@@ -1495,6 +1424,7 @@ def strip(a, chars=None):
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
+
def swapcase(a):
"""
Return element-wise a copy of the string with
@@ -1531,6 +1461,7 @@ def swapcase(a):
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'swapcase')
+
def title(a):
"""
Return element-wise title cased version of string or unicode.
@@ -1569,6 +1500,7 @@ def title(a):
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'title')
+
def translate(a, table, deletechars=None):
"""
For each element in `a`, return a copy of the string where all
@@ -1604,6 +1536,7 @@ def translate(a, table, deletechars=None):
return _vec_string(
a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
+
def upper(a):
"""
Return an array with the elements converted to uppercase.
@@ -1639,6 +1572,7 @@ def upper(a):
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'upper')
+
def zfill(a, width):
"""
Return the numeric string left-filled with zeros
@@ -1668,6 +1602,7 @@ def zfill(a, width):
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
+
def isnumeric(a):
"""
For each element, return True if there are only numeric
@@ -1698,6 +1633,7 @@ def isnumeric(a):
raise TypeError("isnumeric is only available for Unicode strings and arrays")
return _vec_string(a, bool_, 'isnumeric')
+
def isdecimal(a):
"""
For each element, return True if there are only decimal
@@ -2078,28 +2014,16 @@ class chararray(ndarray):
"""
return asarray(capitalize(self))
- if sys.version_info >= (2, 4):
- def center(self, width, fillchar=' '):
- """
- Return a copy of `self` with its elements centered in a
- string of length `width`.
-
- See also
- --------
- center
- """
- return asarray(center(self, width, fillchar))
- else:
- def center(self, width):
- """
- Return a copy of `self` with its elements centered in a
- string of length `width`.
+ def center(self, width, fillchar=' '):
+ """
+ Return a copy of `self` with its elements centered in a
+ string of length `width`.
- See also
- --------
- center
- """
- return asarray(center(self, width))
+ See also
+ --------
+ center
+ """
+ return asarray(center(self, width, fillchar))
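A sketch of the now-unconditional `fillchar` support on the method (assuming Python 3 str input):

>>> np.char.array(['ab']).center(6, '-')
chararray(['--ab--'], dtype='<U6')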
def count(self, sub, start=0, end=None):
"""
@@ -2284,29 +2208,17 @@ class chararray(ndarray):
"""
return join(self, seq)
- if sys.version_info >= (2, 4):
- def ljust(self, width, fillchar=' '):
- """
- Return an array with the elements of `self` left-justified in a
- string of length `width`.
-
- See also
- --------
- char.ljust
+ def ljust(self, width, fillchar=' '):
+ """
+ Return an array with the elements of `self` left-justified in a
+ string of length `width`.
- """
- return asarray(ljust(self, width, fillchar))
- else:
- def ljust(self, width):
- """
- Return an array with the elements of `self` left-justified in a
- string of length `width`.
+ See also
+ --------
+ char.ljust
- See also
- --------
- ljust
- """
- return asarray(ljust(self, width))
+ """
+ return asarray(ljust(self, width, fillchar))
def lower(self):
"""
@@ -2332,16 +2244,15 @@ class chararray(ndarray):
"""
return asarray(lstrip(self, chars))
- if sys.version_info >= (2, 5):
- def partition(self, sep):
- """
- Partition each element in `self` around `sep`.
+ def partition(self, sep):
+ """
+ Partition each element in `self` around `sep`.
- See also
- --------
- partition
- """
- return asarray(partition(self, sep))
+ See also
+ --------
+ partition
+ """
+ return asarray(partition(self, sep))
def replace(self, old, new, count=None):
"""
@@ -2380,53 +2291,39 @@ class chararray(ndarray):
"""
return rindex(self, sub, start, end)
- if sys.version_info >= (2, 4):
- def rjust(self, width, fillchar=' '):
- """
- Return an array with the elements of `self`
- right-justified in a string of length `width`.
+ def rjust(self, width, fillchar=' '):
+ """
+ Return an array with the elements of `self`
+ right-justified in a string of length `width`.
+
+ See also
+ --------
+ char.rjust
- See also
- --------
- char.rjust
+ """
+ return asarray(rjust(self, width, fillchar))
- """
- return asarray(rjust(self, width, fillchar))
- else:
- def rjust(self, width):
- """
- Return an array with the elements of `self`
- right-justified in a string of length `width`.
-
- See also
- --------
- rjust
- """
- return asarray(rjust(self, width))
-
- if sys.version_info >= (2, 5):
- def rpartition(self, sep):
- """
- Partition each element in `self` around `sep`.
-
- See also
- --------
- rpartition
- """
- return asarray(rpartition(self, sep))
-
- if sys.version_info >= (2, 4):
- def rsplit(self, sep=None, maxsplit=None):
- """
- For each element in `self`, return a list of the words in
- the string, using `sep` as the delimiter string.
-
- See also
- --------
- char.rsplit
-
- """
- return rsplit(self, sep, maxsplit)
+ def rpartition(self, sep):
+ """
+ Partition each element in `self` around `sep`.
+
+ See also
+ --------
+ rpartition
+ """
+ return asarray(rpartition(self, sep))
+
+ def rsplit(self, sep=None, maxsplit=None):
+ """
+ For each element in `self`, return a list of the words in
+ the string, using `sep` as the delimiter string.
+
+ See also
+ --------
+ char.rsplit
+
+ """
+ return rsplit(self, sep, maxsplit)
def rstrip(self, chars=None):
"""
@@ -2696,7 +2593,7 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None):
# to divide by the size of a single Unicode character,
# which for Numpy is always 4
if issubclass(obj.dtype.type, unicode_):
- itemsize /= 4
+ itemsize //= 4
if unicode is None:
if issubclass(obj.dtype.type, unicode_):
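The `//=` change is needed because this migration runs the module under true division (``from __future__ import division``), where ``/`` on two ints yields a float while `itemsize` must remain an integer. A minimal illustration:

>>> 8 / 4    # true division: float
2.0
>>> 8 // 4   # floor division: int, as itemsize requires
2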
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index d73f1313c..cb1c4ff05 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -1,30 +1,35 @@
-# Module containing non-deprecated functions borrowed from Numeric.
-__docformat__ = "restructuredtext en"
+"""Module containing non-deprecated functions borrowed from Numeric.
-# functions that are now methods
-__all__ = ['take', 'reshape', 'choose', 'repeat', 'put',
- 'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin',
- 'searchsorted', 'alen',
- 'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape',
- 'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue',
- 'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim',
- 'rank', 'size', 'around', 'round_', 'mean', 'std', 'var', 'squeeze',
- 'amax', 'amin',
- ]
-
-import multiarray as mu
-import umath as um
-import numerictypes as nt
-from numeric import asarray, array, asanyarray, concatenate
-import _methods
-_dt_ = nt.sctype2char
+"""
+from __future__ import division, absolute_import, print_function
import types
+from . import multiarray as mu
+from . import umath as um
+from . import numerictypes as nt
+from .numeric import asarray, array, asanyarray, concatenate
+from . import _methods
+
+_dt_ = nt.sctype2char
+
+
+# functions that are methods
+__all__ = [
+ 'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
+ 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
+ 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
+ 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
+ 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
+ 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
+ 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
+ ]
+
+
try:
_gentype = types.GeneratorType
except AttributeError:
- _gentype = types.NoneType
+ _gentype = type(None)
# save away Python sum
_sum_ = sum
@@ -57,6 +62,10 @@ def take(a, indices, axis=None, out=None, mode='raise'):
The source array.
indices : array_like
The indices of the values to extract.
+
+ .. versionadded:: 1.8.0
+
+ Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
@@ -96,6 +105,11 @@ def take(a, indices, axis=None, out=None, mode='raise'):
>>> a[indices]
array([4, 3, 6])
+ If `indices` is not one dimensional, the output also has these dimensions.
+
+ >>> np.take(a, [[0, 1], [2, 3]])
+ array([[4, 3],
+ [5, 7]])
"""
try:
take = a.take
@@ -119,16 +133,23 @@ def reshape(a, newshape, order='C'):
One shape dimension can be -1. In this case, the value is inferred
from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
- Determines whether the array data should be viewed as in C
- (row-major) order, FORTRAN (column-major) order, or the C/FORTRAN
- order should be preserved.
+ Read the elements of `a` using this index order, and place the elements
+ into the reshaped array using this index order. 'C' means to
+ read / write the elements using C-like index order, with the last axis index
+ changing fastest, back to the first axis index changing slowest. 'F'
+ means to read / write the elements using Fortran-like index order, with
+ the first index changing fastest, and the last index changing slowest.
+ Note that the 'C' and 'F' options take no account of the memory layout
+ of the underlying array, and only refer to the order of indexing. 'A'
+ means to read / write the elements in Fortran-like index order if `a` is
+ Fortran *contiguous* in memory, C-like order otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
- be a copy.
-
+ be a copy. Note there is no guarantee of the *memory layout* (C- or
+ Fortran-contiguous) of the returned array.
See Also
--------
@@ -136,7 +157,6 @@ def reshape(a, newshape, order='C'):
Notes
-----
-
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raised if the data is copied,
you should assign the new shape to the shape attribute of the array::
@@ -144,12 +164,39 @@ def reshape(a, newshape, order='C'):
>>> a = np.zeros((10, 2))
# A transpose make the array non-contiguous
>>> b = a.T
- # Taking a view makes it possible to modify the shape without modiying the
+ # Taking a view makes it possible to modify the shape without modifying the
# initial object.
>>> c = b.view()
>>> c.shape = (20)
AttributeError: incompatible shape for a non-contiguous array
+ The `order` keyword gives the index ordering both for *fetching* the values
+ from `a`, and then *placing* the values into the output array. For example,
+ let's say you have an array:
+
+ >>> a = np.arange(6).reshape((3, 2))
+ >>> a
+ array([[0, 1],
+ [2, 3],
+ [4, 5]])
+
+ You can think of reshaping as first raveling the array (using the given
+ index order), then inserting the elements from the raveled array into the
+ new array using the same kind of index ordering as was used for the
+ raveling.
+
+ >>> np.reshape(a, (2, 3)) # C-like index ordering
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
+ array([[0, 4, 3],
+ [2, 1, 5]])
+ >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
+ array([[0, 4, 3],
+ [2, 1, 5]])
Examples
--------
@@ -163,7 +210,6 @@ def reshape(a, newshape, order='C'):
array([[1, 2],
[3, 4],
[5, 6]])
-
"""
try:
reshape = a.reshape
@@ -488,6 +534,150 @@ def transpose(a, axes=None):
return transpose(axes)
+def partition(a, kth, axis=-1, kind='introselect', order=None):
+ """
+ Return a partitioned copy of an array.
+
+ Creates a copy of the array with its elements rearranged so that the value
+ in the kth position is the one that would occupy that position in a sorted
+ array. All elements smaller than the kth element are moved before it and
+ all equal or greater elements are moved behind it. The ordering of the
+ elements within the two partitions is undefined.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be sorted.
+ kth : int or sequence of ints
+ Element index to partition by. The value in the kth position will be in
+ its final sorted position, with all smaller elements moved before it and
+ all equal or greater elements behind it. The order of all elements within
+ the two partitions is undefined. If provided with a sequence of kth
+ values, all elements indexed by them are partitioned into their sorted
+ positions at once.
+ axis : int or None, optional
+ Axis along which to sort. If None, the array is flattened before
+ sorting. The default is -1, which sorts along the last axis.
+ kind : {'introselect'}, optional
+ Selection algorithm. Default is 'introselect'.
+ order : list, optional
+ When `a` is a structured array, this argument specifies which fields
+ to compare first, second, and so on. This list does not need to
+ include all of the fields.
+
+ Returns
+ -------
+ partitioned_array : ndarray
+ Array of the same type and shape as `a`.
+
+ See Also
+ --------
+ ndarray.partition : Method to partition an array in-place.
+ argpartition : Indirect partition.
+
+ Notes
+ -----
+ The various selection algorithms are characterized by their average speed,
+ worst case performance, work space size, and whether they are stable. A
+ stable sort keeps items with the same key in the same relative order. The
+ three available algorithms have the following properties:
+
+ ================= ======= ============= ============ =======
+ kind speed worst case work space stable
+ ================= ======= ============= ============ =======
+ 'introselect' 1 O(n) 0 no
+ ================= ======= ============= ============ =======
+
+ All the partition algorithms make temporary copies of the data when
+ partitioning along any but the last axis. Consequently, partitioning
+ along the last axis is faster and uses less space than partitioning
+ along any other axis.
+
+ The sort order for complex numbers is lexicographic. If both the real
+ and imaginary parts are non-nan then the order is determined by the
+ real parts except when they are equal, in which case the order is
+ determined by the imaginary parts.
+
+ Examples
+ --------
+ >>> a = np.array([3, 4, 2, 1])
+ >>> np.partition(a, 3)
+ array([2, 1, 3, 4])
+
+ >>> np.partition(a, (1, 3))
+ array([1, 2, 3, 4])
+
+ """
+ if axis is None:
+ a = asanyarray(a).flatten()
+ axis = 0
+ else:
+ a = asanyarray(a).copy()
+ a.partition(kth, axis=axis, kind=kind, order=order)
+ return a
+
+
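Beyond the docstring examples, a common pattern the new `partition` enables is O(n) average-time selection, e.g. picking a median without a full sort (a sketch):

>>> a = np.array([7, 1, 5, 3, 9])
>>> np.partition(a, 2)[2]   # the value that would sit at index 2 if sorted
5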
+def argpartition(a, kth, axis=-1, kind='introselect', order=None):
+ """
+ Perform an indirect partition along the given axis using the algorithm
+ specified by the `kind` keyword. It returns an array of indices of the
+ same shape as `a` that index data along the given axis in partitioned
+ order.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Array to sort.
+ kth : int or sequence of ints
+ Element index to partition by. The kth element will be in its final
+ sorted position and all smaller elements will be moved before it and
+ all larger elements behind it. The order of all elements within the
+ partitions is undefined. If provided with a sequence of kth values,
+ all of them are partitioned into their sorted positions at once.
+ axis : int or None, optional
+ Axis along which to sort. The default is -1 (the last axis). If None,
+ the flattened array is used.
+ kind : {'introselect'}, optional
+ Selection algorithm. Default is 'introselect'.
+ order : list, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. Not all fields need be
+ specified.
+
+ Returns
+ -------
+ index_array : ndarray, int
+ Array of indices that partition `a` along the specified axis.
+ In other words, ``a[index_array]`` yields a partitioned `a`.
+
+ See Also
+ --------
+ partition : Describes partition algorithms used.
+ ndarray.partition : In-place partition.
+
+ Notes
+ -----
+ See `partition` for notes on the different selection algorithms.
+
+ Examples
+ --------
+ One dimensional array:
+
+ >>> x = np.array([3, 4, 2, 1])
+ >>> x[np.argpartition(x, 3)]
+ array([2, 1, 3, 4])
+ >>> x[np.argpartition(x, (1, 3))]
+ array([1, 2, 3, 4])
+
+ """
+ return a.argpartition(kth, axis, kind=kind, order=order)
+
+
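A typical use of `argpartition` is a cheap top-k; the order inside the selected block is unspecified in general (a sketch; here position 3 is the pivot, so the result happens to come out ordered):

>>> x = np.array([9, 1, 7, 3, 5])
>>> idx = np.argpartition(x, 3)[3:]   # indices of the two largest values
>>> x[idx]
array([7, 9])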
def sort(a, axis=-1, kind='quicksort', order=None):
"""
Return a sorted copy of an array.
@@ -939,10 +1129,10 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
on this fact is deprecated. Writing to the resulting array continues to
work as it used to, but a FutureWarning will be issued.
- In NumPy 1.8, it will switch to returning a read-only view on the original
+ In NumPy 1.9, it will switch to returning a read-only view on the original
array. Attempting to write to the resulting array will produce an error.
- In NumPy 1.9, it will still return a view, but this view will no longer be
+ In NumPy 1.10, it will still return a view, but this view will no longer be
marked read-only. Writing to the returned array will alter your original
array as well.
@@ -1094,21 +1284,25 @@ def ravel(a, order='C'):
Parameters
----------
a : array_like
- Input array. The elements in ``a`` are read in the order specified by
+ Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
- The elements of ``a`` are read in this order. 'C' means to view
- the elements in C (row-major) order. 'F' means to view the elements
- in Fortran (column-major) order. 'A' means to view the elements
- in 'F' order if a is Fortran contiguous, 'C' order otherwise.
- 'K' means to view the elements in the order they occur in memory,
- except for reversing the data when strides are negative.
- By default, 'C' order is used.
+ The elements of `a` are read using this index order. 'C' means to
+ index the elements in C-like order, with the last axis index changing
+ fastest, back to the first axis index changing slowest. 'F' means to
+ index the elements in Fortran-like index order, with the first index
+ changing fastest, and the last index changing slowest. Note that the 'C'
+ and 'F' options take no account of the memory layout of the underlying
+ array, and only refer to the order of axis indexing. 'A' means to read
+ the elements in Fortran-like index order if `a` is Fortran *contiguous*
+ in memory, C-like order otherwise. 'K' means to read the elements in
+ the order they occur in memory, except for reversing the data when
+ strides are negative. By default, 'C' index order is used.
Returns
-------
1d_array : ndarray
- Output of the same dtype as `a`, and of shape ``(a.size(),)``.
+ Output of the same dtype as `a`, and of shape ``(a.size,)``.
See Also
--------
@@ -1118,11 +1312,11 @@ def ravel(a, order='C'):
Notes
-----
- In row-major order, the row index varies the slowest, and the column
- index the quickest. This can be generalized to multiple dimensions,
- where row-major order implies that the index along the first axis
- varies slowest, and the index along the last quickest. The opposite holds
- for Fortran-, or column-major, mode.
+ In C-like (row-major) order, in two dimensions, the row index varies the
+ slowest, and the column index the quickest. This can be generalized to
+ multiple dimensions, where row-major order implies that the index along the
+ first axis varies slowest, and the index along the last quickest. The
+ opposite holds for Fortran-like, or column-major, index ordering.
Examples
--------
@@ -1328,7 +1522,8 @@ def compress(condition, a, axis=None, out=None):
See Also
--------
take, choose, diag, diagonal, select
- ndarray.compress : Equivalent method.
+ ndarray.compress : Equivalent method in ndarray
+ np.extract : Equivalent function when working on 1-D arrays
numpy.doc.ufuncs : Section "Output arguments"
Examples
@@ -1502,7 +1697,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=False):
out[...] = res
return out
return res
- elif not (type(a) is mu.ndarray):
+ elif type(a) is not mu.ndarray:
try:
sum = a.sum
except AttributeError:
@@ -1751,6 +1946,8 @@ def cumsum (a, axis=None, dtype=None, out=None):
trapz : Integration of array values using the composite trapezoidal rule.
+ diff : Calculate the n-th order discrete difference along given axis.
+
Notes
-----
Arithmetic is modular when using integer types, and no error is
@@ -1853,11 +2050,11 @@ def amax(a, axis=None, out=None, keepdims=False):
a : array_like
Input data.
axis : int, optional
- Axis along which to operate. By default flattened input is used.
+ Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
- Alternate output array in which to place the result. Must be of
- the same shape and buffer length as the expected output. See
- `doc.ufuncs` (Section "Output arguments") for more details.
+ Alternative output array in which to place the result. Must
+ be of the same shape and buffer length as the expected output.
+ See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
@@ -1872,15 +2069,28 @@ def amax(a, axis=None, out=None, keepdims=False):
See Also
--------
- nanmax : NaN values are ignored instead of being propagated.
- fmax : same behavior as the C99 fmax function.
- argmax : indices of the maximum values.
+ amin :
+ The minimum value of an array along a given axis, propagating any NaNs.
+ nanmax :
+ The maximum value of an array along a given axis, ignoring any NaNs.
+ maximum :
+ Element-wise maximum of two arrays, propagating any NaNs.
+ fmax :
+ Element-wise maximum of two arrays, ignoring any NaNs.
+ argmax :
+ Return the indices of the maximum values.
+
+ nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
- corresponding max value will be NaN as well. To ignore NaN values
+ corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
+
+ Don't use `amax` for element-wise comparison of 2 arrays; when
+ ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
+ ``amax(a, axis=0)``.
Examples
--------
@@ -1888,11 +2098,11 @@ def amax(a, axis=None, out=None, keepdims=False):
>>> a
array([[0, 1],
[2, 3]])
- >>> np.amax(a)
+ >>> np.amax(a) # Maximum of the flattened array
3
- >>> np.amax(a, axis=0)
+ >>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
- >>> np.amax(a, axis=1)
+ >>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> b = np.arange(5, dtype=np.float)
@@ -1903,13 +2113,13 @@ def amax(a, axis=None, out=None, keepdims=False):
4.0
"""
- if not (type(a) is mu.ndarray):
+ if type(a) is not mu.ndarray:
try:
amax = a.max
except AttributeError:
return _methods._amax(a, axis=axis,
out=out, keepdims=keepdims)
- # NOTE: Dropping and keepdims parameter
+ # NOTE: Dropping the keepdims parameter
return amax(axis=axis, out=out)
else:
return _methods._amax(a, axis=axis,
@@ -1924,7 +2134,7 @@ def amin(a, axis=None, out=None, keepdims=False):
a : array_like
Input data.
axis : int, optional
- Axis along which to operate. By default a flattened input is used.
+ Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
@@ -1936,22 +2146,35 @@ def amin(a, axis=None, out=None, keepdims=False):
Returns
-------
- amin : ndarray
- A new array or a scalar array with the result.
+ amin : ndarray or scalar
+ Minimum of `a`. If `axis` is None, the result is a scalar value.
+ If `axis` is given, the result is an array of dimension
+ ``a.ndim - 1``.
See Also
--------
- nanmin: nan values are ignored instead of being propagated
- fmin: same behavior as the C99 fmin function
- argmin: Return the indices of the minimum values.
+ amax :
+ The maximum value of an array along a given axis, propagating any NaNs.
+ nanmin :
+ The minimum value of an array along a given axis, ignoring any NaNs.
+ minimum :
+ Element-wise minimum of two arrays, propagating any NaNs.
+ fmin :
+ Element-wise minimum of two arrays, ignoring any NaNs.
+ argmin :
+ Return the indices of the minimum values.
- amax, nanmax, fmax
+ nanmax, maximum, fmax
Notes
-----
- NaN values are propagated, that is if at least one item is nan, the
- corresponding min value will be nan as well. To ignore NaN values (matlab
- behavior), please use nanmin.
+ NaN values are propagated, that is if at least one item is NaN, the
+ corresponding min value will be NaN as well. To ignore NaN values
+ (MATLAB behavior), please use nanmin.
+
+ Don't use `amin` for element-wise comparison of 2 arrays; when
+ ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
+ ``amin(a, axis=0)``.
Examples
--------
@@ -1961,9 +2184,9 @@ def amin(a, axis=None, out=None, keepdims=False):
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
- >>> np.amin(a, axis=0) # Minima along the first axis
+ >>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
- >>> np.amin(a, axis=1) # Minima along the second axis
+ >>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> b = np.arange(5, dtype=np.float)
@@ -1974,7 +2197,7 @@ def amin(a, axis=None, out=None, keepdims=False):
0.0
"""
- if not (type(a) is mu.ndarray):
+ if type(a) is not mu.ndarray:
try:
amin = a.min
except AttributeError:
@@ -2104,7 +2327,7 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=False):
True
"""
- if not (type(a) is mu.ndarray):
+ if type(a) is not mu.ndarray:
try:
prod = a.prod
except AttributeError:
@@ -2441,6 +2664,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False):
See Also
--------
average : Weighted average
+ std, var, nanmean, nanstd, nanvar
Notes
-----
@@ -2477,7 +2701,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False):
0.55000000074505806
"""
- if not (type(a) is mu.ndarray):
+ if type(a) is not mu.ndarray:
try:
mean = a.mean
return mean(axis=axis, dtype=dtype, out=out)
@@ -2487,7 +2711,6 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False):
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
-
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
Compute the standard deviation along the specified axis.
@@ -2528,7 +2751,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
See Also
--------
- var, mean
+ var, mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
@@ -2579,7 +2802,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
0.44999999925552653
"""
- if not (type(a) is mu.ndarray):
+ if type(a) is not mu.ndarray:
try:
std = a.std
return std(axis=axis, dtype=dtype, out=out, ddof=ddof)
@@ -2631,8 +2854,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0,
See Also
--------
- std : Standard deviation
- mean : Average
+ std, mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
@@ -2682,7 +2904,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0,
0.20250000000000001
"""
- if not (type(a) is mu.ndarray):
+ if type(a) is not mu.ndarray:
try:
var = a.var
return var(axis=axis, dtype=dtype, out=out, ddof=ddof)
@@ -2691,3 +2913,4 @@ def var(a, axis=None, dtype=None, out=None, ddof=0,
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
+
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index 82b524604..686425623 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -1,7 +1,9 @@
+from __future__ import division, absolute_import, print_function
+
__all__ = ['logspace', 'linspace']
-import numeric as _nx
-from numeric import array
+from . import numeric as _nx
+from .numeric import array
def linspace(start, stop, num=50, endpoint=True, retstep=False):
"""
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index 4754975e2..93210a23b 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -1,12 +1,14 @@
-""" Machine limits for Float32 and Float64 and (long double) if available...
+"""Machine limits for Float32 and Float64 and (long double) if available...
+
"""
+from __future__ import division, absolute_import, print_function
__all__ = ['finfo','iinfo']
-from machar import MachAr
-import numeric
-import numerictypes as ntypes
-from numeric import array
+from .machar import MachAr
+from . import numeric
+from . import numerictypes as ntypes
+from .numeric import array
def _frz(a):
"""fix rank-0 --> rank-1"""
@@ -258,7 +260,7 @@ class iinfo(object):
try:
val = iinfo._min_vals[self.key]
except KeyError:
- val = int(-(1L << (self.bits-1)))
+ val = int(-(1 << (self.bits-1)))
iinfo._min_vals[self.key] = val
return val
@@ -270,9 +272,9 @@ class iinfo(object):
val = iinfo._max_vals[self.key]
except KeyError:
if self.kind == 'u':
- val = int((1L << self.bits) - 1)
+ val = int((1 << self.bits) - 1)
else:
- val = int((1L << (self.bits-1)) - 1)
+ val = int((1 << (self.bits-1)) - 1)
iinfo._max_vals[self.key] = val
return val
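The ``1 << bits`` expressions compute the usual two's-complement bounds; dropping the ``L`` suffix changes nothing numerically under Python's unified int. For example, for an 8-bit type:

>>> bits = 8
>>> int(-(1 << (bits - 1))), int((1 << (bits - 1)) - 1), int((1 << bits) - 1)
(-128, 127, 255)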
@@ -294,11 +296,11 @@ max = %(max)s
if __name__ == '__main__':
f = finfo(ntypes.single)
- print 'single epsilon:',f.eps
- print 'single tiny:',f.tiny
+ print('single epsilon:',f.eps)
+ print('single tiny:',f.tiny)
f = finfo(ntypes.float)
- print 'float epsilon:',f.eps
- print 'float tiny:',f.tiny
+ print('float epsilon:',f.eps)
+ print('float tiny:',f.tiny)
f = finfo(ntypes.longfloat)
- print 'longfloat epsilon:',f.eps
- print 'longfloat tiny:',f.tiny
+ print('longfloat epsilon:',f.eps)
+ print('longfloat tiny:',f.tiny)
diff --git a/numpy/core/include/numpy/arrayobject.h b/numpy/core/include/numpy/arrayobject.h
index a84766f63..4f46d6b1a 100644
--- a/numpy/core/include/numpy/arrayobject.h
+++ b/numpy/core/include/numpy/arrayobject.h
@@ -1,14 +1,3 @@
-
-/* This expects the following variables to be defined (besides
- the usual ones from pyconfig.h
-
- SIZEOF_LONG_DOUBLE -- sizeof(long double) or sizeof(double) if no
- long double is present on platform.
- CHAR_BIT -- number of bits in a char (usually 8)
- (should be in limits.h)
-
-*/
-
#ifndef Py_ARRAYOBJECT_H
#define Py_ARRAYOBJECT_H
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index f00dd7744..b8c7c3a2d 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -225,15 +225,8 @@ PyArray_XDECREF_ERR(PyArrayObject *arr)
(PyTuple_GET_ITEM((value), 2) == (key)))
-/* Define python version independent deprecation macro */
-
-#if PY_VERSION_HEX >= 0x02050000
#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)
#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)
-#else
-#define DEPRECATE(msg) PyErr_Warn(PyExc_DeprecationWarning,msg)
-#define DEPRECATE_FUTUREWARNING(msg) PyErr_Warn(PyExc_FutureWarning,msg)
-#endif
#ifdef __cplusplus
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index c84eb6277..fd64cd75f 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -1,9 +1,6 @@
#ifndef NDARRAYTYPES_H
#define NDARRAYTYPES_H
-/* numpyconfig.h is auto-generated by the installer */
-#include "numpyconfig.h"
-
#include "npy_common.h"
#include "npy_endian.h"
#include "npy_cpu.h"
@@ -159,6 +156,12 @@ typedef enum {
typedef enum {
+ NPY_INTROSELECT=0,
+} NPY_SELECTKIND;
+#define NPY_NSELECTS (NPY_INTROSELECT + 1)
+
+
+typedef enum {
NPY_SEARCHLEFT=0,
NPY_SEARCHRIGHT=1
} NPY_SEARCHSIDE;
@@ -394,6 +397,12 @@ typedef int (PyArray_FillFunc)(void *, npy_intp, void *);
typedef int (PyArray_SortFunc)(void *, npy_intp, void *);
typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *);
+typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp,
+ npy_intp *, npy_intp *,
+ void *);
+typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp,
+ npy_intp *, npy_intp *,
+ void *);
typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *);
@@ -630,8 +639,8 @@ typedef struct _arr_descr {
* (PyArray_DATA and friends) to access fields here for a number of
* releases. Direct access to the members themselves is deprecated.
* To ensure that your code does not use deprecated access,
- * #define NPY_NO_DEPRECATED_API NPY_1_7_VERSION
- * (or NPY_1_8_VERSION or higher as required).
+ * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ * (or NPY_1_8_API_VERSION or higher as required).
*/
/* This struct will be moved to a private header in a future release */
typedef struct tagPyArrayObject_fields {
@@ -678,7 +687,8 @@ typedef struct tagPyArrayObject_fields {
* To hide the implementation details, we only expose
* the Python struct HEAD.
*/
-#if !(defined(NPY_NO_DEPRECATED_API) && (NPY_API_VERSION <= NPY_NO_DEPRECATED_API))
+#if !defined(NPY_NO_DEPRECATED_API) || \
+ (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION)
/*
* Can't put this in npy_deprecated_api.h like the others.
* PyArrayObject field access is deprecated as of NumPy 1.7.
@@ -756,9 +766,15 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
#define NPY_ARRAY_F_CONTIGUOUS 0x0002
/*
- * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. An N-d
- * array that is C_CONTIGUOUS is also F_CONTIGUOUS if only
- * one axis has a dimension different from one (ie. a 1x3x1 array).
+ * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a
+ * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with
+ * more than one dimension can be C_CONTIGUOUS and F_CONTIGUOUS
+ * at the same time if they have either zero or one element.
+ * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional
+ * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements
+ * and the array is contiguous if ndarray.squeeze() is contiguous.
+ * I.e. dimensions for which `ndarray.shape[dimension] == 1` are
+ * ignored.
*/
/*
@@ -918,6 +934,8 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL;
#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0);
#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0);
+#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \
+ { _save = PyEval_SaveThread();} } while (0);
#define NPY_BEGIN_THREADS_DESCR(dtype) \
do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \
@@ -1272,6 +1290,10 @@ typedef struct {
npy_intp bscoord[NPY_MAXDIMS];
PyObject *indexobj; /* creating obj */
+ /*
+ * consec is first used to indicate whether fancy indices are
+ * consecutive and then denotes at which axis they are inserted
+ */
int consec;
char *dataptr;
@@ -1376,7 +1398,7 @@ PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter);
#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? \
NPY_ARRAY_F_CONTIGUOUS : 0))
-#if (defined(NPY_NO_DEPRECATED_API) && (NPY_API_VERSION <= NPY_NO_DEPRECATED_API))
+#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API))
/*
* Changing access macros into functions, to allow for future hiding
* of the internal memory layout. This later hiding will allow the 2.x series
@@ -1390,12 +1412,18 @@ PyArray_NDIM(const PyArrayObject *arr)
return ((PyArrayObject_fields *)arr)->nd;
}
-static NPY_INLINE char *
+static NPY_INLINE void *
PyArray_DATA(PyArrayObject *arr)
{
return ((PyArrayObject_fields *)arr)->data;
}
+static NPY_INLINE char *
+PyArray_BYTES(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->data;
+}
+
static NPY_INLINE npy_intp *
PyArray_DIMS(PyArrayObject *arr)
{
@@ -1470,15 +1498,12 @@ PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v)
v, itemptr, arr);
}
-/* Same as PyArray_DATA */
-#define PyArray_BYTES(arr) PyArray_DATA(arr)
-
#else
/* These macros are deprecated as of NumPy 1.7. */
#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd)
#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data)
-#define PyArray_DATA(obj) (((PyArrayObject_fields *)(obj))->data)
+#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data)
#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions)
#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides)
#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n])
@@ -1673,7 +1698,7 @@ typedef struct {
/************************************************************
* This is the form of the struct that's returned pointed by the
* PyCObject attribute of an array __array_struct__. See
- * http://numpy.scipy.org/array_interface.shtml for the full
+ * http://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full
* documentation.
************************************************************/
typedef struct {
@@ -1722,8 +1747,30 @@ typedef struct {
typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size,
void *user_data);
-#if !(defined(NPY_NO_DEPRECATED_API) && (NPY_API_VERSION <= NPY_NO_DEPRECATED_API))
-#include "npy_deprecated_api.h"
+/*
+ * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files
+ * npy_*_*_deprecated_api.h are only included from here and nowhere else.
+ */
+#ifdef NPY_DEPRECATED_INCLUDES
+#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES."
#endif
+#define NPY_DEPRECATED_INCLUDES
+#if !defined(NPY_NO_DEPRECATED_API) || \
+ (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION)
+#include "npy_1_7_deprecated_api.h"
+#endif
+/*
+ * There is no file npy_1_8_deprecated_api.h since there are no additional
+ * deprecated API features in NumPy 1.8.
+ *
+ * Note to maintainers: insert code like the following in future NumPy
+ * versions.
+ *
+ * #if !defined(NPY_NO_DEPRECATED_API) || \
+ * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION)
+ * #include "npy_1_9_deprecated_api.h"
+ * #endif
+ */
+#undef NPY_DEPRECATED_INCLUDES
#endif /* NPY_ARRAYTYPES_H */
diff --git a/numpy/core/include/numpy/noprefix.h b/numpy/core/include/numpy/noprefix.h
index b3e57480e..830617087 100644
--- a/numpy/core/include/numpy/noprefix.h
+++ b/numpy/core/include/numpy/noprefix.h
@@ -64,10 +64,13 @@
#define datetime npy_datetime
#define timedelta npy_timedelta
-#define SIZEOF_INTP NPY_SIZEOF_INTP
-#define SIZEOF_UINTP NPY_SIZEOF_UINTP
-#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME
-#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA
+#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG
+#define SIZEOF_INTP NPY_SIZEOF_INTP
+#define SIZEOF_UINTP NPY_SIZEOF_UINTP
+#define SIZEOF_HALF NPY_SIZEOF_HALF
+#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE
+#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME
+#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA
#define LONGLONG_FMT NPY_LONGLONG_FMT
#define ULONGLONG_FMT NPY_ULONGLONG_FMT
@@ -113,9 +116,6 @@
#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA
#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA
-#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE
-#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG
-#define SIZEOF_HALF NPY_SIZEOF_HALF
#define BITSOF_BOOL NPY_BITSOF_BOOL
#define BITSOF_CHAR NPY_BITSOF_CHAR
#define BITSOF_SHORT NPY_BITSOF_SHORT
diff --git a/numpy/core/include/numpy/npy_deprecated_api.h b/numpy/core/include/numpy/npy_1_7_deprecated_api.h
index f082d0827..6c576d138 100644
--- a/numpy/core/include/numpy/npy_deprecated_api.h
+++ b/numpy/core/include/numpy/npy_1_7_deprecated_api.h
@@ -1,28 +1,30 @@
-#ifndef _NPY_DEPRECATED_API_H
-#define _NPY_DEPRECATED_API_H
+#ifndef _NPY_1_7_DEPRECATED_API_H
+#define _NPY_1_7_DEPRECATED_API_H
+
+#ifndef NPY_DEPRECATED_INCLUDES
+#error "Should never include npy_*_*_deprecated_api directly."
+#endif
#if defined(_WIN32)
#define _WARN___STR2__(x) #x
#define _WARN___STR1__(x) _WARN___STR2__(x)
#define _WARN___LOC__ __FILE__ "("_WARN___STR1__(__LINE__)") : Warning Msg: "
#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it by " \
- "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION")
+ "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION")
#elif defined(__GNUC__)
-#warning "Using deprecated NumPy API, disable it by #defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"
+#warning "Using deprecated NumPy API, disable it by " \
+ "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"
#endif
/* TODO: How to do this warning message for other compilers? */
/*
- * This header exists to collect all dangerous/deprecated NumPy API.
+ * This header exists to collect all dangerous/deprecated NumPy API
+ * as of NumPy 1.7.
*
* This is an attempt to remove bad API, the proliferation of macros,
* and namespace pollution currently produced by the NumPy headers.
*/
-#if defined(NPY_NO_DEPRECATED_API)
-#error Should never include npy_deprecated_api directly.
-#endif
-
/* These array flags are deprecated as of NumPy 1.7 */
#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS
#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS
@@ -125,5 +127,4 @@
*/
#include "old_defines.h"
-
#endif
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index 7fca7e220..e5fd09e91 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -3,6 +3,38 @@
/* numpyconfig.h is auto-generated */
#include "numpyconfig.h"
+#ifdef HAVE_NPY_CONFIG_H
+#include <npy_config.h>
+#endif
+
+/*
+ * gcc does not unroll even with -O3
+ * use with care, unrolling on modern CPUs rarely speeds things up
+ */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS
+#define NPY_GCC_UNROLL_LOOPS \
+ __attribute__((optimize("unroll-loops")))
+#else
+#define NPY_GCC_UNROLL_LOOPS
+#endif
+
+/*
+ * give a hint to the compiler which branch is more likely or unlikely
+ * to occur, e.g. rare error cases:
+ *
+ * if (NPY_UNLIKELY(failure == 0))
+ * return NULL;
+ *
+ * the double !! is to cast the expression (e.g. NULL) to a boolean required by
+ * the intrinsic
+ */
+#ifdef HAVE___BUILTIN_EXPECT
+#define NPY_LIKELY(x) __builtin_expect(!!(x), 1)
+#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define NPY_LIKELY(x) (x)
+#define NPY_UNLIKELY(x) (x)
+#endif
#if defined(_MSC_VER)
#define NPY_INLINE __inline
@@ -33,15 +65,22 @@ enum {
};
/*
- * This is to typedef npy_intp to the appropriate pointer size for
- * this platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h.
+ * This is to typedef npy_intp to the appropriate pointer size for this
+ * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h.
*/
typedef Py_intptr_t npy_intp;
typedef Py_uintptr_t npy_uintp;
+
+/*
+ * Define sizes that were not defined in numpyconfig.h.
+ */
#define NPY_SIZEOF_CHAR 1
#define NPY_SIZEOF_BYTE 1
+#define NPY_SIZEOF_DATETIME 8
+#define NPY_SIZEOF_TIMEDELTA 8
#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T
#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T
+#define NPY_SIZEOF_HALF 2
#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT
#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE
#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE
@@ -50,18 +89,8 @@ typedef Py_uintptr_t npy_uintp;
#undef constchar
#endif
-#if (PY_VERSION_HEX < 0x02050000)
- #ifndef PY_SSIZE_T_MIN
- typedef int Py_ssize_t;
- #define PY_SSIZE_T_MAX INT_MAX
- #define PY_SSIZE_T_MIN INT_MIN
- #endif
-#define NPY_SSIZE_T_PYFMT "i"
-#define constchar const char
-#else
#define NPY_SSIZE_T_PYFMT "n"
#define constchar char
-#endif
/* NPY_INTP_FMT Note:
* Unlike the other NPY_*_FMT macros which are used with
@@ -305,10 +334,6 @@ typedef struct { npy_longdouble real, imag; } npy_clongdouble;
#define NPY_MIN_LONG LONG_MIN
#define NPY_MAX_ULONG ULONG_MAX
-#define NPY_SIZEOF_HALF 2
-#define NPY_SIZEOF_DATETIME 8
-#define NPY_SIZEOF_TIMEDELTA 8
-
#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT)
#define NPY_BITSOF_CHAR CHAR_BIT
#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT)
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 8a2978806..ab14731b8 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -66,6 +66,10 @@
#define NPY_CPU_MIPSEL
#elif defined(__MIPSEB__)
#define NPY_CPU_MIPSEB
+#elif defined(__aarch64__)
+ #define NPY_CPU_AARCH64
+#elif defined(__mc68000__)
+ #define NPY_CPU_M68K
#else
#error Unknown CPU, please report this to numpy maintainers with \
information about your platform (OS, CPU and compiler)
diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h
index aa5ed8b2b..36d09232f 100644
--- a/numpy/core/include/numpy/npy_endian.h
+++ b/numpy/core/include/numpy/npy_endian.h
@@ -25,6 +25,7 @@
|| defined(NPY_CPU_IA64) \
|| defined(NPY_CPU_ALPHA) \
|| defined(NPY_CPU_ARMEL) \
+ || defined(NPY_CPU_AARCH64) \
|| defined(NPY_CPU_SH_LE) \
|| defined(NPY_CPU_MIPSEL)
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
@@ -35,7 +36,8 @@
|| defined(NPY_CPU_PPC64) \
|| defined(NPY_CPU_ARMEB) \
|| defined(NPY_CPU_SH_BE) \
- || defined(NPY_CPU_MIPSEB)
+ || defined(NPY_CPU_MIPSEB) \
+ || defined(NPY_CPU_M68K)
#define NPY_BYTE_ORDER NPY_BIG_ENDIAN
#else
#error Unknown CPU: can not set endianness
diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h
index 56c1c2b28..537a63ee4 100644
--- a/numpy/core/include/numpy/npy_math.h
+++ b/numpy/core/include/numpy/npy_math.h
@@ -1,7 +1,14 @@
#ifndef __NPY_MATH_C99_H_
#define __NPY_MATH_C99_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <math.h>
+#ifdef HAVE_NPY_CONFIG_H
+#include <npy_config.h>
+#endif
#include <numpy/npy_common.h>
/*
@@ -144,33 +151,51 @@ double npy_spacing(double x);
/*
* IEEE 754 fpu handling. Those are guaranteed to be macros
*/
-#ifndef NPY_HAVE_DECL_ISNAN
- #define npy_isnan(x) ((x) != (x))
+
+/* use builtins to avoid function calls in tight loops
+ * only available if npy_config.h is available (= numpy's own build) */
+#if HAVE___BUILTIN_ISNAN
+ #define npy_isnan(x) __builtin_isnan(x)
#else
- #ifdef _MSC_VER
- #define npy_isnan(x) _isnan((x))
+ #ifndef NPY_HAVE_DECL_ISNAN
+ #define npy_isnan(x) ((x) != (x))
#else
- #define npy_isnan(x) isnan((x))
+ #ifdef _MSC_VER
+ #define npy_isnan(x) _isnan((x))
+ #else
+ #define npy_isnan(x) isnan(x)
+ #endif
#endif
#endif
-#ifndef NPY_HAVE_DECL_ISFINITE
- #ifdef _MSC_VER
- #define npy_isfinite(x) _finite((x))
+
+/* only available if npy_config.h is available (= numpy's own build) */
+#if HAVE___BUILTIN_ISFINITE
+ #define npy_isfinite(x) __builtin_isfinite(x)
+#else
+ #ifndef NPY_HAVE_DECL_ISFINITE
+ #ifdef _MSC_VER
+ #define npy_isfinite(x) _finite((x))
+ #else
+ #define npy_isfinite(x) !npy_isnan((x) + (-x))
+ #endif
#else
- #define npy_isfinite(x) !npy_isnan((x) + (-x))
+ #define npy_isfinite(x) isfinite((x))
#endif
-#else
- #define npy_isfinite(x) isfinite((x))
#endif
-#ifndef NPY_HAVE_DECL_ISINF
- #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x))
+/* only available if npy_config.h is available (= numpy's own build) */
+#if HAVE___BUILTIN_ISINF
+ #define npy_isinf(x) __builtin_isinf(x)
#else
- #ifdef _MSC_VER
- #define npy_isinf(x) (!_finite((x)) && !_isnan((x)))
+ #ifndef NPY_HAVE_DECL_ISINF
+ #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x))
#else
- #define npy_isinf(x) isinf((x))
+ #ifdef _MSC_VER
+ #define npy_isinf(x) (!_finite((x)) && !_isnan((x)))
+ #else
+ #define npy_isinf(x) isinf((x))
+ #endif
#endif
#endif
@@ -432,4 +457,8 @@ void npy_set_floatstatus_overflow(void);
void npy_set_floatstatus_underflow(void);
void npy_set_floatstatus_invalid(void);
+#ifdef __cplusplus
+}
+#endif
+
#endif
diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h
index 401d19fd7..5ca171f21 100644
--- a/numpy/core/include/numpy/numpyconfig.h
+++ b/numpy/core/include/numpy/numpyconfig.h
@@ -9,16 +9,16 @@
* hardcoded
*/
#ifdef __APPLE__
- #undef NPY_SIZEOF_LONG
- #undef NPY_SIZEOF_PY_INTPTR_T
+ #undef NPY_SIZEOF_LONG
+ #undef NPY_SIZEOF_PY_INTPTR_T
- #ifdef __LP64__
- #define NPY_SIZEOF_LONG 8
- #define NPY_SIZEOF_PY_INTPTR_T 8
- #else
- #define NPY_SIZEOF_LONG 4
- #define NPY_SIZEOF_PY_INTPTR_T 4
- #endif
+ #ifdef __LP64__
+ #define NPY_SIZEOF_LONG 8
+ #define NPY_SIZEOF_PY_INTPTR_T 8
+ #else
+ #define NPY_SIZEOF_LONG 4
+ #define NPY_SIZEOF_PY_INTPTR_T 4
+ #endif
#endif
/**
@@ -29,5 +29,6 @@
* #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
*/
#define NPY_1_7_API_VERSION 0x00000007
+#define NPY_1_8_API_VERSION 0x00000008
#endif
diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h
index 076dd880c..8df96a44f 100644
--- a/numpy/core/include/numpy/ufuncobject.h
+++ b/numpy/core/include/numpy/ufuncobject.h
@@ -212,6 +212,20 @@ typedef struct _tagPyUFuncObject {
* A function which returns a masked inner loop for the ufunc.
*/
PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector;
+
+ /*
+ * List of flags for each operand when the ufunc is called by the nditer
+ * object. These flags will be used in addition to the default flags for
+ * each operand set by the nditer object.
+ */
+ npy_uint32 *op_flags;
+
+ /*
+ * List of global flags used when the ufunc is called by the nditer
+ * object. These flags will be used in addition to the default global
+ * flags set by the nditer object.
+ */
+ npy_uint32 iter_flags;
} PyUFuncObject;
#include "arrayobject.h"
@@ -305,6 +319,8 @@ typedef struct _loop1d_info {
void *data;
int *arg_types;
struct _loop1d_info *next;
+ int nargs;
+ PyArray_Descr **arg_dtypes;
} PyUFunc_Loop1d;
@@ -390,7 +406,7 @@ typedef struct _loop1d_info {
defined(__MINGW32__) || defined(__FreeBSD__)
#include <fenv.h>
#elif defined(__CYGWIN__)
-#include "fenv/fenv.c"
+#include "numpy/fenv/fenv.h"
#endif
#define UFUNC_CHECK_STATUS(ret) { \
diff --git a/numpy/core/info.py b/numpy/core/info.py
index 561e171b0..241f209b5 100644
--- a/numpy/core/info.py
+++ b/numpy/core/info.py
@@ -1,4 +1,4 @@
-__doc__ = """Defines a multi-dimensional array and useful procedures for Numerical computation.
+"""Defines a multi-dimensional array and useful procedures for Numerical computation.
Functions
@@ -81,6 +81,7 @@ More Functions:
arccosh arcsinh arctanh
"""
+from __future__ import division, absolute_import, print_function
depends = ['testing']
global_symbols = ['*']
diff --git a/numpy/core/machar.py b/numpy/core/machar.py
index ea9174017..9eb4430a6 100644
--- a/numpy/core/machar.py
+++ b/numpy/core/machar.py
@@ -1,14 +1,16 @@
"""
Machine arithmetics - determine the parameters of the
floating-point arithmetic system
-"""
-# Author: Pearu Peterson, September 2003
+Author: Pearu Peterson, September 2003
+
+"""
+from __future__ import division, absolute_import, print_function
__all__ = ['MachAr']
from numpy.core.fromnumeric import any
-from numpy.core.numeric import seterr
+from numpy.core.numeric import errstate
# Need to speed this up...especially for longfloat
@@ -105,11 +107,8 @@ class MachAr(object):
"""
# We ignore all errors here because we are purposely triggering
# underflow to detect the properties of the running arch.
- saverrstate = seterr(under='ignore')
- try:
+ with errstate(under='ignore'):
self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
- finally:
- seterr(**saverrstate)
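The context manager is equivalent to the old save/restore pair but restores the error state even if an exception propagates. A sketch of the pattern (float32 underflow shown; exact repr may vary by version):

>>> import numpy as np
>>> with np.errstate(under='ignore'):
...     np.float32(1e-30) * np.float32(1e-30)   # silently underflows to zero
0.0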
def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
max_iterN = 10000
@@ -121,7 +120,7 @@ class MachAr(object):
# Do we really need to do this? Aren't they 2 and 2.0?
# Determine ibeta and beta
a = one
- for _ in xrange(max_iterN):
+ for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
@@ -130,7 +129,7 @@ class MachAr(object):
else:
raise RuntimeError(msg % (_, one.dtype))
b = one
- for _ in xrange(max_iterN):
+ for _ in range(max_iterN):
b = b + b
temp = a + b
itemp = int_conv(temp-a)
@@ -144,7 +143,7 @@ class MachAr(object):
# Determine it and irnd
it = -1
b = one
- for _ in xrange(max_iterN):
+ for _ in range(max_iterN):
it = it + 1
b = b * beta
temp = b + one
@@ -156,7 +155,7 @@ class MachAr(object):
betah = beta / two
a = one
- for _ in xrange(max_iterN):
+ for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
@@ -180,7 +179,7 @@ class MachAr(object):
for i in range(negep):
a = a * betain
b = a
- for _ in xrange(max_iterN):
+ for _ in range(max_iterN):
temp = one - a
if any(temp-one != zero):
break
@@ -199,7 +198,7 @@ class MachAr(object):
machep = - it - 3
a = b
- for _ in xrange(max_iterN):
+ for _ in range(max_iterN):
temp = one + a
if any(temp-one != zero):
break
@@ -221,7 +220,7 @@ class MachAr(object):
z = betain
t = one + eps
nxres = 0
- for _ in xrange(max_iterN):
+ for _ in range(max_iterN):
y = z
z = y*y
a = z*one # Check here for underflow
@@ -247,7 +246,7 @@ class MachAr(object):
mx = iz + iz - 1
# Determine minexp and xmin
- for _ in xrange(max_iterN):
+ for _ in range(max_iterN):
xmin = y
y = y * betain
a = y * one
@@ -336,4 +335,4 @@ maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)
if __name__ == '__main__':
- print MachAr()
+ print(MachAr())
diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py
index f068c7b6c..53662ce89 100644
--- a/numpy/core/memmap.py
+++ b/numpy/core/memmap.py
@@ -1,10 +1,13 @@
+from __future__ import division, absolute_import, print_function
+
__all__ = ['memmap']
import warnings
-from numeric import uint8, ndarray, dtype
import sys
import numpy as np
+from .numeric import uint8, ndarray, dtype
+from numpy.compat import long, basestring
dtypedescr = dtype
valid_filemodes = ["r", "c", "r+", "w+"]
@@ -199,7 +202,7 @@ class memmap(ndarray):
except KeyError:
if mode not in valid_filemodes:
raise ValueError("mode must be one of %s" %
- (valid_filemodes + mode_equivalents.keys()))
+ (valid_filemodes + list(mode_equivalents.keys())))
if hasattr(filename,'read'):
fid = filename
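dict.keys() returns a view object on Python 3, and a list cannot be
concatenated with a view; wrapping it in list() restores the old
error-message formatting. A minimal sketch (the mode table shown is
assumed from context):

    valid_filemodes = ["r", "c", "r+", "w+"]
    mode_equivalents = {"readonly": "r", "copyonwrite": "c",
                        "readwrite": "r+", "write": "w+"}

    # Python 2: dict.keys() is a list, so `+` worked directly.
    # Python 3: it is a view; list + view raises TypeError without list().
    all_modes = valid_filemodes + list(mode_equivalents.keys())
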
@@ -245,14 +248,10 @@ class memmap(ndarray):
else:
acc = mmap.ACCESS_WRITE
- if sys.version_info[:2] >= (2,6):
- # The offset keyword in mmap.mmap needs Python >= 2.6
- start = offset - offset % mmap.ALLOCATIONGRANULARITY
- bytes -= start
- offset -= start
- mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
- else:
- mm = mmap.mmap(fid.fileno(), bytes, access=acc)
+ start = offset - offset % mmap.ALLOCATIONGRANULARITY
+ bytes -= start
+ offset -= start
+ mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
offset=offset, order=order)
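mmap.mmap only accepts offsets that are multiples of
mmap.ALLOCATIONGRANULARITY, so the code rounds the requested offset down
to a granule boundary and lets the ndarray's own offset argument cover the
remainder. A worked sketch with a hypothetical offset:

    import mmap

    offset = 70000                        # hypothetical byte offset into the file
    gran = mmap.ALLOCATIONGRANULARITY     # 4096 on many systems

    start = offset - offset % gran        # nearest granule boundary below offset
    remainder = offset - start            # handled by the ndarray `offset` arg

    print(start, remainder)               # 69632 368 when gran == 4096
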
@@ -262,8 +261,13 @@ class memmap(ndarray):
if isinstance(filename, basestring):
self.filename = os.path.abspath(filename)
- elif hasattr(filename, "name"):
+ # py3 returns int for TemporaryFile().name
+ elif (hasattr(filename, "name") and
+ isinstance(filename.name, basestring)):
self.filename = os.path.abspath(filename.name)
+ # same as memmap copies (e.g. memmap + 1)
+ else:
+ self.filename = None
if own_file:
fid.close()
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 0d0babbac..e2c020ced 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1,3 +1,23 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import warnings
+import collections
+from . import multiarray
+from . import umath
+from .umath import *
+from . import numerictypes
+from .numerictypes import *
+
+if sys.version_info[0] >= 3:
+ import pickle
+ basestring = str
+else:
+ import cPickle as pickle
+
+loads = pickle.loads
+
+
__all__ = ['newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'arange', 'array', 'zeros', 'count_nonzero',
'empty', 'broadcast', 'dtype', 'fromstring', 'fromfile',
@@ -20,20 +40,12 @@ __all__ = ['newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'Inf', 'inf', 'infty', 'Infinity',
'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS',
- 'ComplexWarning']
-
-import sys
-import warnings
-import multiarray
-import umath
-from umath import *
-import numerictypes
-from numerictypes import *
-
+ 'ComplexWarning', 'may_share_memory', 'full', 'full_like']
if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
+
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
@@ -67,8 +79,6 @@ def zeros_like(a, dtype=None, order='K', subok=True):
"""
Return an array of zeros with the same shape and type as a given array.
- With default parameters, is equivalent to ``a.copy().fill(0)``.
-
Parameters
----------
a : array_like
@@ -83,6 +93,10 @@ def zeros_like(a, dtype=None, order='K', subok=True):
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
+    subok : bool, optional
+        If True, then the newly created array will use the sub-class
+        type of `a`, otherwise it will be a base-class array. Defaults
+        to True.
Returns
-------
@@ -123,7 +137,21 @@ def ones(shape, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with ones.
- Please refer to the documentation for `zeros` for further details.
+ Parameters
+ ----------
+ shape : int or sequence of ints
+ Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+ dtype : data-type, optional
+ The desired data-type for the array, e.g., `numpy.int8`. Default is
+ `numpy.float64`.
+ order : {'C', 'F'}, optional
+ Whether to store multidimensional data in C- or Fortran-contiguous
+ (row- or column-wise) order in memory.
+
+ Returns
+ -------
+ out : ndarray
+ Array of ones with the given shape, dtype, and order.
See Also
--------
@@ -155,8 +183,6 @@ def ones_like(a, dtype=None, order='K', subok=True):
"""
Return an array of ones with the same shape and type as a given array.
- With default parameters, is equivalent to ``a.copy().fill(1)``.
-
Parameters
----------
a : array_like
@@ -171,6 +197,10 @@ def ones_like(a, dtype=None, order='K', subok=True):
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
+    subok : bool, optional
+        If True, then the newly created array will use the sub-class
+        type of `a`, otherwise it will be a base-class array. Defaults
+        to True.
Returns
-------
@@ -207,6 +237,111 @@ def ones_like(a, dtype=None, order='K', subok=True):
multiarray.copyto(res, 1, casting='unsafe')
return res
+def full(shape, fill_value, dtype=None, order='C'):
+ """
+ Return a new array of given shape and type, filled with `fill_value`.
+
+ Parameters
+ ----------
+ shape : int or sequence of ints
+ Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+ fill_value : scalar
+ Fill value.
+ dtype : data-type, optional
+        The desired data-type for the array, e.g., `numpy.int8`. Default
+        is `np.array(fill_value).dtype`.
+ order : {'C', 'F'}, optional
+ Whether to store multidimensional data in C- or Fortran-contiguous
+ (row- or column-wise) order in memory.
+
+ Returns
+ -------
+ out : ndarray
+ Array of `fill_value` with the given shape, dtype, and order.
+
+ See Also
+ --------
+ zeros_like : Return an array of zeros with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ empty_like : Return an empty array with shape and type of input.
+ full_like : Fill an array with shape and type of input.
+ zeros : Return a new array setting values to zero.
+ ones : Return a new array setting values to one.
+ empty : Return a new uninitialized array.
+
+ Examples
+ --------
+ >>> np.full((2, 2), np.inf)
+ array([[ inf, inf],
+ [ inf, inf]])
+ >>> np.full((2, 2), 10, dtype=np.int)
+ array([[10, 10],
+ [10, 10]])
+
+ """
+ a = empty(shape, dtype, order)
+ multiarray.copyto(a, fill_value, casting='unsafe')
+ return a
+
+def full_like(a, fill_value, dtype=None, order='K', subok=True):
+ """
+ Return a full array with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ a : array_like
+ The shape and data-type of `a` define these same attributes of
+ the returned array.
+ fill_value : scalar
+ Fill value.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible.
+    subok : bool, optional
+        If True, then the newly created array will use the sub-class
+        type of `a`, otherwise it will be a base-class array. Defaults
+        to True.
+
+ Returns
+ -------
+ out : ndarray
+ Array of `fill_value` with the same shape and type as `a`.
+
+ See Also
+ --------
+ zeros_like : Return an array of zeros with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ empty_like : Return an empty array with shape and type of input.
+ zeros : Return a new array setting values to zero.
+ ones : Return a new array setting values to one.
+ empty : Return a new uninitialized array.
+ full : Fill a new array.
+
+ Examples
+ --------
+ >>> x = np.arange(6, dtype=np.int)
+ >>> np.full_like(x, 1)
+ array([1, 1, 1, 1, 1, 1])
+ >>> np.full_like(x, 0.1)
+ array([0, 0, 0, 0, 0, 0])
+ >>> np.full_like(x, 0.1, dtype=np.double)
+ array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+ >>> np.full_like(x, np.nan, dtype=np.double)
+ array([ nan, nan, nan, nan, nan, nan])
+
+ >>> y = np.arange(6, dtype=np.double)
+ >>> np.full_like(y, 0.1)
+ array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+
+ """
+ res = empty_like(a, dtype=dtype, order=order, subok=subok)
+ multiarray.copyto(res, fill_value, casting='unsafe')
+ return res
+
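Both new functions allocate with empty/empty_like and then fill via
copyto(..., casting='unsafe'), which is why the docstring example silently
truncates 0.1 in an integer array. A short usage sketch:

    import numpy as np

    x = np.arange(6, dtype=np.int64)

    np.full_like(x, 0.1)                    # array([0, 0, 0, 0, 0, 0]) -- truncated
    np.full_like(x, 0.1, dtype=np.double)   # keeps the fractional fill value
    np.full((2, 2), np.inf)                 # float64 array of inf
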
def extend_all(module):
adict = {}
@@ -236,6 +371,7 @@ fromstring = multiarray.fromstring
fromiter = multiarray.fromiter
fromfile = multiarray.fromfile
frombuffer = multiarray.frombuffer
+may_share_memory = multiarray.may_share_memory
if sys.version_info[0] < 3:
newbuffer = multiarray.newbuffer
getbuffer = multiarray.getbuffer
@@ -842,13 +978,16 @@ def outer(a,b):
Parameters
----------
- a, b : array_like, shape (M,), (N,)
- First and second input vectors. Inputs are flattened if they
- are not already 1-dimensional.
+ a : (M,) array_like
+ First input vector. Input is flattened if
+ not already 1-dimensional.
+ b : (N,) array_like
+ Second input vector. Input is flattened if
+ not already 1-dimensional.
Returns
-------
- out : ndarray, shape (M, N)
+ out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
@@ -904,7 +1043,7 @@ def outer(a,b):
try:
# importing this changes the dot function for basic 4 types
# to blas-optimized versions.
- from _dotblas import dot, vdot, inner, alterdot, restoredot
+ from ._dotblas import dot, vdot, inner, alterdot, restoredot
except ImportError:
# docstrings are in add_newdocs.py
inner = multiarray.inner
@@ -921,27 +1060,24 @@ def tensordot(a, b, axes=2):
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
- ``a`` and ``b``, and an array_like object containing two array_like
- objects, ``(a_axes, b_axes)``, sum the products of ``a``'s and ``b``'s
+ `a` and `b`, and an array_like object containing two array_like
+ objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
- dimensions of ``a`` and the first ``N`` dimensions of ``b`` are summed
+ dimensions of `a` and the first ``N`` dimensions of `b` are summed
over.
Parameters
----------
a, b : array_like, len(shape) >= 1
Tensors to "dot".
-
axes : variable type
-
- * integer_like scalar
- Number of axes to sum over (applies to both arrays); or
-
- * array_like, shape = (2,), both elements array_like
- Axes to be summed over, first sequence applying to ``a``, second
- to ``b``.
+ * integer_like scalar
+ Number of axes to sum over (applies to both arrays); or
+ * (2,) array_like, both elements array_like of the same length
+ List of axes to be summed over, first sequence applying to `a`,
+ second to `b`.
See Also
--------
@@ -950,7 +1086,7 @@ def tensordot(a, b, axes=2):
Notes
-----
When there is more than one axis to sum over - and they are not the last
- (first) axes of ``a`` (``b``) - the argument ``axes`` should consist of
+ (first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
@@ -1033,8 +1169,8 @@ def tensordot(a, b, axes=2):
try:
iter(axes)
except:
- axes_a = range(-axes,0)
- axes_b = range(0,axes)
+ axes_a = list(range(-axes,0))
+ axes_b = list(range(0,axes))
else:
axes_a, axes_b = axes
try:
@@ -1058,7 +1194,7 @@ def tensordot(a, b, axes=2):
equal = True
if (na != nb): equal = False
else:
- for k in xrange(na):
+ for k in range(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
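On Python 3, range() returns a lazy sequence that cannot be concatenated
or mutated, so the axis bookkeeping wraps it in list(). A small sketch of
why:

    axes = 2
    axes_a = list(range(-axes, 0))    # [-2, -1]
    axes_b = list(range(0, axes))     # [0, 1]

    axes_a + axes_b                   # fine: [-2, -1, 0, 1]
    # range(-axes, 0) + range(0, axes) raises TypeError on Python 3
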
@@ -1146,15 +1282,19 @@ def roll(a, shift, axis=None):
n = a.size
reshape = True
else:
- n = a.shape[axis]
+ try:
+ n = a.shape[axis]
+ except IndexError:
+ raise ValueError('axis must be >= 0 and < %d' % a.ndim)
reshape = False
+ if n == 0:
+ return a
shift %= n
- indexes = concatenate((arange(n-shift,n),arange(n-shift)))
+ indexes = concatenate((arange(n - shift, n), arange(n - shift)))
res = a.take(indexes, axis)
if reshape:
- return res.reshape(a.shape)
- else:
- return res
+ res = res.reshape(a.shape)
+ return res
def rollaxis(a, axis, start=0):
"""
@@ -1206,7 +1346,7 @@ def rollaxis(a, axis, start=0):
start -= 1
if axis==start:
return a
- axes = range(0,n)
+ axes = list(range(0,n))
axes.remove(axis)
axes.insert(start, axis)
return a.transpose(axes)
@@ -1357,7 +1497,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
#Use numarray's printing function
-from arrayprint import array2string, get_printoptions, set_printoptions
+from .arrayprint import array2string, get_printoptions, set_printoptions
_typelessdata = [int_, float_, complex_]
if issubclass(intc, int):
@@ -1850,9 +1990,6 @@ def base_repr(number, base=2, padding=0):
res.append('-')
return ''.join(reversed(res or '0'))
-from cPickle import load, loads
-_cload = load
-_file = open
def load(file):
"""
@@ -1869,8 +2006,8 @@ def load(file):
"""
if isinstance(file, type("")):
- file = _file(file,"rb")
- return _cload(file)
+ file = open(file, "rb")
+ return pickle.load(file)
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
@@ -1987,7 +2124,11 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8):
x = x[~xinf]
y = y[~xinf]
- return all(less_equal(abs(x-y), atol + rtol * abs(y)))
+ # ignore invalid fpe's
+ with errstate(invalid='ignore'):
+ r = all(less_equal(abs(x-y), atol + rtol * abs(y)))
+
+ return r
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
@@ -2049,11 +2190,8 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
array([True, True])
"""
def within_tol(x, y, atol, rtol):
- err = seterr(invalid='ignore')
- try:
+ with errstate(invalid='ignore'):
result = less_equal(abs(x-y), atol + rtol * abs(y))
- finally:
- seterr(**err)
if isscalar(a) and isscalar(b):
result = bool(result)
return result
@@ -2120,7 +2258,7 @@ def array_equal(a1, a2):
return False
if a1.shape != a2.shape:
return False
- return bool(equal(a1,a2).all())
+ return bool(asarray(a1 == a2).all())
def array_equiv(a1, a2):
"""
@@ -2162,7 +2300,7 @@ def array_equiv(a1, a2):
except:
return False
try:
- return bool(equal(a1,a2).all())
+ return bool(asarray(a1 == a2).all())
except ValueError:
return False
@@ -2441,8 +2579,8 @@ def seterrcall(func):
{'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'}
"""
- if func is not None and not callable(func):
- if not hasattr(func, 'write') or not callable(func.write):
+ if func is not None and not isinstance(func, collections.Callable):
+ if not hasattr(func, 'write') or not isinstance(func.write, collections.Callable):
raise ValueError("Only callable can be used as callback")
pyvals = umath.geterrobj()
old = geterrcall()
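The builtin callable() was removed in Python 3.0 and only restored in 3.2,
so the check is rewritten against collections.Callable, which both lines
of development understand. A quick sketch:

    import collections

    def handler(kind, flags):
        print("fpe:", kind)

    isinstance(handler, collections.Callable)         # True
    isinstance("not callable", collections.Callable)  # False

    class Log(object):
        def write(self, msg): pass

    isinstance(Log().write, collections.Callable)     # True: usable as callback
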
@@ -2563,15 +2701,18 @@ class errstate(object):
def __init__(self, **kwargs):
self.call = kwargs.pop('call',_Unspecified)
self.kwargs = kwargs
+
def __enter__(self):
self.oldstate = seterr(**self.kwargs)
if self.call is not _Unspecified:
self.oldcall = seterrcall(self.call)
+
def __exit__(self, *exc_info):
seterr(**self.oldstate)
if self.call is not _Unspecified:
seterrcall(self.oldcall)
+
def _setdef():
defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
umath.seterrobj(defval)
@@ -2584,6 +2725,6 @@ nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
-import fromnumeric
-from fromnumeric import *
+from . import fromnumeric
+from .fromnumeric import *
extend_all(fromnumeric)
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index e39eb2232..1a5f31e4e 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -80,6 +80,7 @@ Exported symbols include:
\\-> object_ (not used much) (kind=O)
"""
+from __future__ import division, absolute_import, print_function
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
@@ -89,29 +90,30 @@ __all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
]
-from numpy.core.multiarray import typeinfo, ndarray, array, \
- empty, dtype, datetime_data, datetime_as_string, \
- busday_offset, busday_count, is_busday, busdaycalendar
+from numpy.core.multiarray import (
+ typeinfo, ndarray, array, empty, dtype, datetime_data,
+ datetime_as_string, busday_offset, busday_count, is_busday,
+ busdaycalendar
+ )
import types as _types
import sys
+from numpy.compat import bytes, long
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
-from __builtin__ import bool, int, long, float, complex, object, unicode, str
-from numpy.compat import bytes
-
if sys.version_info[0] >= 3:
- # Py3K
- class long(int):
- # Placeholder class -- this will not escape outside numerictypes.py
- pass
+ from builtins import bool, int, float, complex, object, str
+ unicode = str
+else:
+ from __builtin__ import bool, int, float, complex, object, unicode, str
+
# String-handling utilities to avoid locale-dependence.
# "import string" is costly to import!
# Construct the translation tables directly
# "A" = chr(65), "a" = chr(97)
-_all_chars = map(chr, range(256))
+_all_chars = [chr(_m) for _m in range(256)]
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
LOWER_TABLE="".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
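map() returns a one-shot iterator on Python 3, so the character table can
no longer be sliced; the list comprehension keeps it a real list. A sketch:

    chars_iter = map(chr, range(256))        # iterator on Python 3
    # chars_iter[65:91]                      # TypeError: not subscriptable

    chars = [chr(m) for m in range(256)]     # a real list on both versions
    chars[65:65 + 26]                        # ['A', ..., 'Z'] -- slicing works
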
@@ -783,7 +785,7 @@ _alignment = _typedict()
_maxvals = _typedict()
_minvals = _typedict()
def _construct_lookups():
- for name, val in typeinfo.iteritems():
+ for name, val in typeinfo.items():
if not isinstance(val, tuple):
continue
obj = val[-1]
@@ -856,7 +858,7 @@ try:
_types.StringType, _types.UnicodeType, _types.BufferType]
except AttributeError:
# Py3K
- ScalarType = [int, float, complex, long, bool, bytes, str, memoryview]
+        ScalarType = [int, float, complex, bool, bytes, str, memoryview]
ScalarType.extend(_sctype2char_dict.keys())
ScalarType = tuple(ScalarType)
diff --git a/numpy/core/records.py b/numpy/core/records.py
index 964b4a54e..d0f82a25c 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -34,17 +34,19 @@ Record arrays allow us to access fields as properties::
array([ 2., 2.])
"""
-# All of the functions allow formats to be a dtype
-__all__ = ['record', 'recarray', 'format_parser']
+from __future__ import division, absolute_import, print_function
-import numeric as sb
-from defchararray import chararray
-import numerictypes as nt
-import types
-import os
import sys
+import os
+
+from . import numeric as sb
+from .defchararray import chararray
+from . import numerictypes as nt
+from numpy.compat import isfileobj, bytes, long
+
+# All of the functions allow formats to be a dtype
+__all__ = ['record', 'recarray', 'format_parser']
-from numpy.compat import isfileobj, bytes
ndarray = sb.ndarray
@@ -169,12 +171,12 @@ class format_parser:
attribute """
if (names):
- if (type(names) in [types.ListType, types.TupleType]):
+ if (type(names) in [list, tuple]):
pass
- elif (type(names) == str):
+ elif isinstance(names, str):
names = names.split(',')
else:
- raise NameError("illegal input names %s" % `names`)
+ raise NameError("illegal input names %s" % repr(names))
self._names = [n.strip() for n in names[:self._nfields]]
else:
@@ -436,7 +438,7 @@ class recarray(ndarray):
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
exctype, value = sys.exc_info()[:2]
- raise exctype, value
+ raise exctype(value)
else:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
@@ -531,7 +533,7 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None,
raise ValueError("item in the array list must be an ndarray.")
formats += _typestr[obj.dtype.type]
if issubclass(obj.dtype.type, nt.flexible):
- formats += `obj.itemsize`
+ formats += repr(obj.itemsize)
formats += ','
formats = formats[:-1]
@@ -593,15 +595,15 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
>>> r.col2
chararray(['dbe', 'de'],
dtype='|S3')
- >>> import cPickle
- >>> print cPickle.loads(cPickle.dumps(r))
+ >>> import pickle
+    >>> print(pickle.loads(pickle.dumps(r)))
[(456, 'dbe', 1.2) (2, 'de', 1.3)]
"""
nfields = len(recList[0])
if formats is None and dtype is None: # slower
obj = sb.array(recList, dtype=object)
- arrlist = [sb.array(obj[..., i].tolist()) for i in xrange(nfields)]
+ arrlist = [sb.array(obj[..., i].tolist()) for i in range(nfields)]
return fromarrays(arrlist, formats=formats, shape=shape, names=names,
titles=titles, aligned=aligned, byteorder=byteorder)
@@ -620,7 +622,7 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
if len(shape) > 1:
raise ValueError("Can only deal with 1-d array.")
_array = recarray(shape, descr)
- for k in xrange(_array.size):
+ for k in range(_array.size):
_array[k] = tuple(recList[k])
return _array
else:
diff --git a/numpy/core/scons_support.py b/numpy/core/scons_support.py
deleted file mode 100644
index 048f85db6..000000000
--- a/numpy/core/scons_support.py
+++ /dev/null
@@ -1,272 +0,0 @@
-#! Last Change: Sun Apr 26 05:00 PM 2009 J
-
-"""Code to support special facilities to scons which are only useful for
-numpy.core, hence not put into numpy.distutils.scons"""
-
-import sys
-import os
-
-from os.path import join as pjoin, dirname as pdirname, basename as pbasename
-from copy import deepcopy
-
-import code_generators
-from code_generators.generate_numpy_api import \
- do_generate_api as nowrap_do_generate_numpy_api
-from code_generators.generate_ufunc_api import \
- do_generate_api as nowrap_do_generate_ufunc_api
-from setup_common import check_api_version as _check_api_version
-from setup_common import \
- LONG_DOUBLE_REPRESENTATION_SRC, pyod, long_double_representation
-
-from numscons.numdist import process_c_str as process_str
-
-import SCons.Node
-import SCons
-from SCons.Builder import Builder
-from SCons.Action import Action
-
-def check_api_version(apiversion):
- return _check_api_version(apiversion, pdirname(code_generators.__file__))
-
-def split_ext(string):
- sp = string.rsplit( '.', 1)
- if len(sp) == 1:
- return (sp[0], '')
- else:
- return sp
-#------------------------------------
-# Ufunc and multiarray API generators
-#------------------------------------
-def do_generate_numpy_api(target, source, env):
- nowrap_do_generate_numpy_api([str(i) for i in target],
- [s.value for s in source])
- return 0
-
-def do_generate_ufunc_api(target, source, env):
- nowrap_do_generate_ufunc_api([str(i) for i in target],
- [s.value for s in source])
- return 0
-
-def generate_api_emitter(target, source, env):
- """Returns the list of targets generated by the code generator for array
- api and ufunc api."""
- base, ext = split_ext(str(target[0]))
- dir = pdirname(base)
- ba = pbasename(base)
- h = pjoin(dir, '__' + ba + '.h')
- c = pjoin(dir, '__' + ba + '.c')
- txt = base + '.txt'
- #print h, c, txt
- t = [h, c, txt]
- return (t, source)
-
-#-------------------------
-# From template generators
-#-------------------------
-# XXX: this is general and can be used outside numpy.core.
-def do_generate_from_template(targetfile, sourcefile, env):
- t = open(targetfile, 'w')
- s = open(sourcefile, 'r')
- allstr = s.read()
- s.close()
- writestr = process_str(allstr)
- t.write(writestr)
- t.close()
- return 0
-
-def generate_from_template(target, source, env):
- for t, s in zip(target, source):
- do_generate_from_template(str(t), str(s), env)
-
-def generate_from_template_emitter(target, source, env):
- base, ext = split_ext(pbasename(str(source[0])))
- t = pjoin(pdirname(str(target[0])), base)
- return ([t], source)
-
-#----------------
-# umath generator
-#----------------
-def do_generate_umath(targetfile, sourcefile, env):
- t = open(targetfile, 'w')
- from code_generators import generate_umath
- code = generate_umath.make_code(generate_umath.defdict, generate_umath.__file__)
- t.write(code)
- t.close()
-
-def generate_umath(target, source, env):
- for t, s in zip(target, source):
- do_generate_umath(str(t), str(s), env)
-
-def generate_umath_emitter(target, source, env):
- t = str(target[0]) + '.c'
- return ([t], source)
-
-#-----------------------------------------
-# Other functions related to configuration
-#-----------------------------------------
-def CheckGCC4(context):
- src = """
-int
-main()
-{
-#if !(defined __GNUC__ && (__GNUC__ >= 4))
-die from an horrible death
-#endif
-}
-"""
-
- context.Message("Checking if compiled with gcc 4.x or above ... ")
- st = context.TryCompile(src, '.c')
-
- if st:
- context.Result(' yes')
- else:
- context.Result(' no')
- return st == 1
-
-def CheckBrokenMathlib(context, mathlib):
- src = """
-/* check whether libm is broken */
-#include <math.h>
-int main(int argc, char *argv[])
-{
- return exp(-720.) > 1.0; /* typically an IEEE denormal */
-}
-"""
-
- try:
- oldLIBS = deepcopy(context.env['LIBS'])
- except:
- oldLIBS = []
-
- try:
- context.Message("Checking if math lib %s is usable for numpy ... " % mathlib)
- context.env.AppendUnique(LIBS = mathlib)
- st = context.TryRun(src, '.c')
- finally:
- context.env['LIBS'] = oldLIBS
-
- if st[0]:
- context.Result(' Yes !')
- else:
- context.Result(' No !')
- return st[0]
-
-def check_mlib(config, mlib):
- """Return 1 if mlib is available and usable by numpy, 0 otherwise.
-
- mlib can be a string (one library), or a list of libraries."""
- # Check the libraries in mlib are linkable
- if len(mlib) > 0:
- # XXX: put an autoadd argument to 0 here and add an autoadd argument to
- # CheckBroekenMathlib (otherwise we may add bogus libraries, the ones
- # which do not path the CheckBrokenMathlib test).
- st = config.CheckLib(mlib)
- if not st:
- return 0
- # Check the mlib is usable by numpy
- return config.CheckBrokenMathlib(mlib)
-
-def check_mlibs(config, mlibs):
- for mlib in mlibs:
- if check_mlib(config, mlib):
- return mlib
-
- # No mlib was found.
- raise SCons.Errors.UserError("No usable mathlib was found: chose another "\
- "one using the MATHLIB env variable, eg "\
- "'MATHLIB=m python setup.py build'")
-
-
-def is_npy_no_signal():
- """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
- header."""
- return sys.platform == 'win32'
-
-def define_no_smp():
- """Returns True if we should define NPY_NOSMP, False otherwise."""
- #--------------------------------
- # Checking SMP and thread options
- #--------------------------------
- # Python 2.3 causes a segfault when
- # trying to re-acquire the thread-state
- # which is done in error-handling
- # ufunc code. NPY_ALLOW_C_API and friends
- # cause the segfault. So, we disable threading
- # for now.
- if sys.version[:5] < '2.4.2':
- nosmp = 1
- else:
- # Perhaps a fancier check is in order here.
- # so that threads are only enabled if there
- # are actually multiple CPUS? -- but
- # threaded code can be nice even on a single
- # CPU so that long-calculating code doesn't
- # block.
- try:
- nosmp = os.environ['NPY_NOSMP']
- nosmp = 1
- except KeyError:
- nosmp = 0
- return nosmp == 1
-
-# Inline check
-def CheckInline(context):
- context.Message("Checking for inline keyword... ")
- body = """
-#ifndef __cplusplus
-static %(inline)s int static_func (void)
-{
- return 0;
-}
-%(inline)s int nostatic_func (void)
-{
- return 0;
-}
-#endif"""
- inline = None
- for kw in ['inline', '__inline__', '__inline']:
- st = context.TryCompile(body % {'inline': kw}, '.c')
- if st:
- inline = kw
- break
-
- if inline:
- context.Result(inline)
- else:
- context.Result(0)
- return inline
-
-def CheckLongDoubleRepresentation(context):
- msg = {
- 'INTEL_EXTENDED_12_BYTES_LE': "Intel extended, little endian",
- 'INTEL_EXTENDED_16_BYTES_LE': "Intel extended, little endian",
- 'IEEE_QUAD_BE': "IEEE Quad precision, big endian",
- 'IEEE_QUAD_LE': "IEEE Quad precision, little endian",
- 'IEEE_DOUBLE_LE': "IEEE Double precision, little endian",
- 'IEEE_DOUBLE_BE': "IEEE Double precision, big endian"
- }
-
- context.Message("Checking for long double representation... ")
- body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}
- st = context.TryCompile(body, '.c')
- if st:
- obj = str(context.sconf.lastTarget)
- tp = long_double_representation(pyod(obj))
- context.Result(msg[tp])
- return tp
- if not st:
- context.Result(0)
-
-array_api_gen_bld = Builder(action = Action(do_generate_numpy_api, '$ARRAYPIGENCOMSTR'),
- emitter = generate_api_emitter)
-
-
-ufunc_api_gen_bld = Builder(action = Action(do_generate_ufunc_api, '$UFUNCAPIGENCOMSTR'),
- emitter = generate_api_emitter)
-
-template_bld = Builder(action = Action(generate_from_template, '$TEMPLATECOMSTR'),
- emitter = generate_from_template_emitter)
-
-umath_bld = Builder(action = Action(generate_umath, '$UMATHCOMSTR'),
- emitter = generate_umath_emitter)
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index a1000ae0b..03d26c2b6 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -1,22 +1,25 @@
+from __future__ import division, print_function
+
import imp
import os
import sys
import shutil
+import pickle
+import copy
+import warnings
+import re
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
-import warnings
-import re
from setup_common import *
# Set to True to enable multiple file compilations (experimental)
-try:
- val = os.environ['NPY_SEPARATE_COMPILATION']
- ENABLE_SEPARATE_COMPILATION = (val != "0")
-except KeyError:
- ENABLE_SEPARATE_COMPILATION = False
+ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
+# Set to True to enable relaxed strides checking. This (mostly) means
+# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
+NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0")
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils force
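Both build switches now read the environment with a default instead of
catching KeyError; anything but the literal string "0" enables the
feature. A sketch of the pattern with a hypothetical variable name:

    import os

    # hypothetical switch, on by default; only "0" turns it off
    FLAG = (os.environ.get('NPY_EXAMPLE_FLAG', "1") != "0")

    # equivalent to the older, noisier form:
    try:
        FLAG = (os.environ['NPY_EXAMPLE_FLAG'] != "0")
    except KeyError:
        FLAG = True
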
@@ -24,11 +27,9 @@ except KeyError:
# configuration information between extensions is not easy.
# Using a pickled-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
-try:
- import cPickle as _pik
-except ImportError:
- import pickle as _pik
-import copy
+#
+# Use pickle in all cases, as cPickle is gone in python3 and the difference
+# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly(object):
def __init__(self):
@@ -39,25 +40,25 @@ class CallOnceOnly(object):
def check_types(self, *a, **kw):
if self._check_types is None:
out = check_types(*a, **kw)
- self._check_types = _pik.dumps(out)
+ self._check_types = pickle.dumps(out)
else:
- out = copy.deepcopy(_pik.loads(self._check_types))
+ out = copy.deepcopy(pickle.loads(self._check_types))
return out
def check_ieee_macros(self, *a, **kw):
if self._check_ieee_macros is None:
out = check_ieee_macros(*a, **kw)
- self._check_ieee_macros = _pik.dumps(out)
+ self._check_ieee_macros = pickle.dumps(out)
else:
- out = copy.deepcopy(_pik.loads(self._check_ieee_macros))
+ out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
return out
def check_complex(self, *a, **kw):
if self._check_complex is None:
out = check_complex(*a, **kw)
- self._check_complex = _pik.dumps(out)
+ self._check_complex = pickle.dumps(out)
else:
- out = copy.deepcopy(_pik.loads(self._check_complex))
+ out = copy.deepcopy(pickle.loads(self._check_complex))
return out
PYTHON_HAS_UNICODE_WIDE = True
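CallOnceOnly memoizes each expensive configuration check as a pickle,
handing back deep copies so callers cannot mutate the cached result. A
condensed sketch of the same idea:

    import pickle
    import copy

    class CallOnceOnly(object):
        def __init__(self):
            self._cache = None

        def check(self, expensive_check):
            if self._cache is None:
                out = expensive_check()
                self._cache = pickle.dumps(out)      # freeze the first result
            else:
                out = copy.deepcopy(pickle.loads(self._cache))
            return out

    c = CallOnceOnly()
    c.check(lambda: {'mathlibs': ['m']})   # runs the check once
    c.check(lambda: {'mathlibs': ['m']})   # replayed from the pickle
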
@@ -104,7 +105,7 @@ def win32_checks(deflist):
a = get_build_architecture()
# Distutils hack on AMD64 on windows
- print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % \
+ print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
(a, os.name, sys.platform))
if a == 'AMD64':
deflist.append('DISTUTILS_USE_SDK')
@@ -153,14 +154,27 @@ def check_math_capabilities(config, moredefs, mathlibs):
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own test are correct
- if sys.version_info[:2] >= (2, 5):
- for f in OPTIONAL_STDFUNCS_MAYBE:
- if config.check_decl(fname2def(f),
- headers=["Python.h", "math.h"]):
- OPTIONAL_STDFUNCS.remove(f)
+ for f in OPTIONAL_STDFUNCS_MAYBE:
+ if config.check_decl(fname2def(f),
+ headers=["Python.h", "math.h"]):
+ OPTIONAL_STDFUNCS.remove(f)
check_funcs(OPTIONAL_STDFUNCS)
+ for h in OPTIONAL_HEADERS:
+ if config.check_func("", decl=False, call=False, headers=[h]):
+ moredefs.append((fname2def(h).replace(".", "_"), 1))
+
+ for f, args in OPTIONAL_INTRINSICS:
+ if config.check_func(f, decl=False, call=True, call_args=args):
+ moredefs.append((fname2def(f), 1))
+
+ for dec, fn in OPTIONAL_GCC_ATTRIBUTES:
+ if config.check_funcs_once([fn],
+ decl=dict((('%s %s' % (dec, fn), True),)),
+ call=False):
+ moredefs.append((fname2def(fn), 1))
+
# C99 functions: float and long double versions
check_funcs(C99_FUNCS_SINGLE)
check_funcs(C99_FUNCS_EXTENDED)
@@ -221,19 +235,16 @@ def check_ieee_macros(config):
# functions we test. We remove every function tested by python's
# autoconf, hoping their own test are correct
_macros = ["isnan", "isinf", "signbit", "isfinite"]
- if sys.version_info[:2] >= (2, 6):
- for f in _macros:
- py_symbol = fname2def("decl_%s" % f)
- already_declared = config.check_decl(py_symbol,
- headers=["Python.h", "math.h"])
- if already_declared:
- if config.check_macro_true(py_symbol,
- headers=["Python.h", "math.h"]):
- pub.append('NPY_%s' % fname2def("decl_%s" % f))
- else:
- macros.append(f)
- else:
- macros = _macros[:]
+ for f in _macros:
+ py_symbol = fname2def("decl_%s" % f)
+ already_declared = config.check_decl(py_symbol,
+ headers=["Python.h", "math.h"])
+ if already_declared:
+ if config.check_macro_true(py_symbol,
+ headers=["Python.h", "math.h"]):
+ pub.append('NPY_%s' % fname2def("decl_%s" % f))
+ else:
+ macros.append(f)
# Normally, isnan and isinf are macro (C99), but some platforms only have
# func, or both func and macro version. Check for macro only, and define
# replacement ones if not found.
@@ -437,11 +448,15 @@ def configuration(parent_package='',top_path=None):
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))
+ if NPY_RELAXED_STRIDES_CHECKING:
+ moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
+
# Get long double representation
if sys.platform != 'darwin':
rep = check_long_double_representation(config_cmd)
if rep in ['INTEL_EXTENDED_12_BYTES_LE',
'INTEL_EXTENDED_16_BYTES_LE',
+ 'MOTOROLA_EXTENDED_12_BYTES_BE',
'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
'DOUBLE_DOUBLE_BE']:
@@ -486,7 +501,7 @@ def configuration(parent_package='',top_path=None):
else:
mathlibs = []
target_f = open(target)
- for line in target_f.readlines():
+ for line in target_f:
s = '#define MATHLIB'
if line.startswith(s):
value = line[len(s):].strip()
@@ -534,6 +549,9 @@ def configuration(parent_package='',top_path=None):
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
+ if NPY_RELAXED_STRIDES_CHECKING:
+ moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
+
    # Check whether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
@@ -599,6 +617,8 @@ def configuration(parent_package='',top_path=None):
config.add_include_dirs(join('src', 'umath'))
config.add_include_dirs(join('src', 'npysort'))
+ config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
+
config.numpy_include_dirs.extend(config.paths('include'))
deps = [join('src','npymath','_signbit.c'),
@@ -673,7 +693,8 @@ def configuration(parent_package='',top_path=None):
config.add_library('npysort',
sources = [join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
- join('src', 'npysort', 'heapsort.c.src')])
+ join('src', 'npysort', 'heapsort.c.src'),
+ join('src', 'npysort', 'selection.c.src')])
#######################################################################
@@ -746,7 +767,7 @@ def configuration(parent_package='',top_path=None):
join('include', 'numpy', 'npy_cpu.h'),
join('include', 'numpy', 'numpyconfig.h'),
join('include', 'numpy', 'ndarraytypes.h'),
- join('include', 'numpy', 'npy_deprecated_api.h'),
+ join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
join('include', 'numpy', '_numpyconfig.h.in'),
]
@@ -823,7 +844,9 @@ def configuration(parent_package='',top_path=None):
subpath = join('src', 'umath')
# NOTE: For manual template conversion of loops.h.src, read the note
# in that file.
- sources = [join(local_dir, subpath, 'loops.c.src')]
+ sources = [
+ join(local_dir, subpath, 'loops.c.src'),
+ join(local_dir, subpath, 'simd.inc.src')]
# numpy.distutils generate .c from .c.src in weird directories, we have
# to add them there as they depend on the build_dir
@@ -850,12 +873,14 @@ def configuration(parent_package='',top_path=None):
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
join('src', 'umath', 'funcs.inc.src'),
+ join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'ufunc_type_resolution.c')]
umath_deps = [
generate_umath_py,
+ join('src', 'umath', 'simd.inc.src'),
join(codegen_dir,'generate_ufunc_api.py')]
if not ENABLE_SEPARATE_COMPILATION:
@@ -863,6 +888,7 @@ def configuration(parent_package='',top_path=None):
umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
umath_src.append(generate_umath_templated_sources)
umath_src.append(join('src', 'umath', 'funcs.inc.src'))
+ umath_src.append(join('src', 'umath', 'simd.inc.src'))
config.add_extension('umath',
sources = umath_src +
@@ -919,12 +945,33 @@ def configuration(parent_package='',top_path=None):
sources = [join('src','umath', 'umath_tests.c.src')])
#######################################################################
+ # custom rational dtype module #
+ #######################################################################
+
+ config.add_extension('test_rational',
+ sources = [join('src','umath', 'test_rational.c.src')])
+
+ #######################################################################
+ # struct_ufunc_test module #
+ #######################################################################
+
+ config.add_extension('struct_ufunc_test',
+ sources = [join('src','umath', 'struct_ufunc_test.c.src')])
+
+ #######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('multiarray_tests',
sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
+ #######################################################################
+ # operand_flag_tests module #
+ #######################################################################
+
+ config.add_extension('operand_flag_tests',
+ sources = [join('src','umath', 'operand_flag_tests.c.src')])
+
config.add_data_dir('tests')
config.add_data_dir('tests/data')
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 83589695d..7f0ad8bfe 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -1,4 +1,6 @@
-# Code shared by distutils and scons builds
+from __future__ import division, absolute_import, print_function
+
+# Code common to build tools
import sys
from os.path import join
import warnings
@@ -29,7 +31,10 @@ C_ABI_VERSION = 0x01000009
# without breaking binary compatibility. In this case, only the C_API_VERSION
# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is
# broken, both C_API_VERSION and C_ABI_VERSION should be increased.
-C_API_VERSION = 0x00000008
+#
+# 0x00000008 - 1.7.x
+# 0x00000009 - 1.8.x
+C_API_VERSION = 0x00000009
class MismatchCAPIWarning(Warning):
pass
@@ -95,6 +100,30 @@ OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
"rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
"copysign", "nextafter"]
+
+OPTIONAL_HEADERS = [
+# sse headers only enabled automatically on amd64/x32 builds
+ "xmmintrin.h", # SSE
+ "emmintrin.h", # SSE2
+]
+
+# optional gcc compiler builtins and their call arguments
+# call arguments are required as the compiler will do strict signature checking
+OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
+ ("__builtin_isinf", '5.'),
+ ("__builtin_isfinite", '5.'),
+ ("__builtin_bswap32", '5u'),
+ ("__builtin_bswap64", '5u'),
+ ("__builtin_expect", '5, 0'),
+ ]
+
+# gcc function attributes
+# (attribute as understood by gcc, function name),
+# function name will be converted to HAVE_<upper-case-name> preprocessor macro
+OPTIONAL_GCC_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
+ 'attribute_optimize_unroll_loops'),
+ ]
+
# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h
OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh", "hypot",
"copysign"]
@@ -161,13 +190,14 @@ def pyod(filename):
Parameters
----------
- filename: str
+ filename : str
name of the file to get the dump from.
Returns
-------
- out: seq
+ out : seq
list of lines of od output
+
Note
----
We only implement enough to get the necessary information for long double
@@ -216,6 +246,8 @@ _INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
'031', '300', '000', '000']
_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
'031', '300', '000', '000', '000', '000', '000', '000']
+_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
+ '242', '240', '000', '000', '000', '000']
_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
'000', '000', '000', '000', '000', '000', '000', '000']
_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
@@ -249,6 +281,8 @@ def long_double_representation(lines):
if read[:12] == _BEFORE_SEQ[4:]:
if read[12:-8] == _INTEL_EXTENDED_12B:
return 'INTEL_EXTENDED_12_BYTES_LE'
+ if read[12:-8] == _MOTOROLA_EXTENDED_12B:
+ return 'MOTOROLA_EXTENDED_12_BYTES_BE'
elif read[:8] == _BEFORE_SEQ[8:]:
if read[8:-8] == _INTEL_EXTENDED_16B:
return 'INTEL_EXTENDED_16_BYTES_LE'
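long_double_representation() matches an od-style octal dump of a compiled
object against these known byte patterns; the new Motorola entry is the
12-byte 68k extended format, stored big-endian. A sketch of the matching
step under those assumptions:

    _INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242',
                           '171', '353', '031', '300', '000', '000']
    _MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
                              '242', '240', '000', '000', '000', '000']

    def classify_12_byte(read):
        # `read` is the octal dump found between the marker sequences
        if read == _INTEL_EXTENDED_12B:
            return 'INTEL_EXTENDED_12_BYTES_LE'
        if read == _MOTOROLA_EXTENDED_12B:
            return 'MOTOROLA_EXTENDED_12_BYTES_BE'
        return None
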
diff --git a/numpy/core/setupscons.py b/numpy/core/setupscons.py
deleted file mode 100644
index 4d329fded..000000000
--- a/numpy/core/setupscons.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import os
-import sys
-import glob
-from os.path import join, basename
-
-from numpy.distutils import log
-
-from numscons import get_scons_build_dir
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration,dot_join
- from numpy.distutils.command.scons import get_scons_pkg_build_dir
- from numpy.distutils.system_info import get_info, default_lib_dirs
-
- config = Configuration('core',parent_package,top_path)
- local_dir = config.local_path
-
- header_dir = 'include/numpy' # this is relative to config.path_in_package
-
- config.add_subpackage('code_generators')
-
- # List of files to register to numpy.distutils
- dot_blas_src = [join('blasdot', '_dotblas.c'),
- join('blasdot', 'cblas.h')]
- api_definition = [join('code_generators', 'numpy_api_order.txt'),
- join('code_generators', 'ufunc_api_order.txt')]
- core_src = [join('src', basename(i)) for i in glob.glob(join(local_dir,
- 'src',
- '*.c'))]
- core_src += [join('src', basename(i)) for i in glob.glob(join(local_dir,
- 'src',
- '*.src'))]
-
- source_files = dot_blas_src + api_definition + core_src + \
- [join(header_dir, 'numpyconfig.h.in')]
-
- # Add generated files to distutils...
- def add_config_header():
- scons_build_dir = get_scons_build_dir()
- # XXX: I really have to think about how to communicate path info
- # between scons and distutils, and set the options at one single
- # location.
- target = join(get_scons_pkg_build_dir(config.name), 'config.h')
- incl_dir = os.path.dirname(target)
- if incl_dir not in config.numpy_include_dirs:
- config.numpy_include_dirs.append(incl_dir)
-
- def add_numpyconfig_header():
- scons_build_dir = get_scons_build_dir()
- # XXX: I really have to think about how to communicate path info
- # between scons and distutils, and set the options at one single
- # location.
- target = join(get_scons_pkg_build_dir(config.name),
- 'include/numpy/numpyconfig.h')
- incl_dir = os.path.dirname(target)
- if incl_dir not in config.numpy_include_dirs:
- config.numpy_include_dirs.append(incl_dir)
- config.add_data_files((header_dir, target))
-
- def add_array_api():
- scons_build_dir = get_scons_build_dir()
- # XXX: I really have to think about how to communicate path info
- # between scons and distutils, and set the options at one single
- # location.
- h_file = join(get_scons_pkg_build_dir(config.name),
- 'include/numpy/__multiarray_api.h')
- t_file = join(get_scons_pkg_build_dir(config.name),
- 'include/numpy/multiarray_api.txt')
- config.add_data_files((header_dir, h_file),
- (header_dir, t_file))
-
- def add_ufunc_api():
- scons_build_dir = get_scons_build_dir()
- # XXX: I really have to think about how to communicate path info
- # between scons and distutils, and set the options at one single
- # location.
- h_file = join(get_scons_pkg_build_dir(config.name),
- 'include/numpy/__ufunc_api.h')
- t_file = join(get_scons_pkg_build_dir(config.name),
- 'include/numpy/ufunc_api.txt')
- config.add_data_files((header_dir, h_file),
- (header_dir, t_file))
-
- def add_generated_files(*args, **kw):
- add_config_header()
- add_numpyconfig_header()
- add_array_api()
- add_ufunc_api()
-
- config.add_sconscript('SConstruct',
- post_hook = add_generated_files,
- source_files = source_files)
- config.add_scons_installed_library('npymath', 'lib')
-
- config.add_data_files('include/numpy/*.h')
- config.add_include_dirs('src')
-
- config.numpy_include_dirs.extend(config.paths('include'))
-
- # Don't install fenv unless we need them.
- if sys.platform == 'cygwin':
- config.add_data_dir('include/numpy/fenv')
-
- config.add_data_dir('tests')
- config.make_svn_version_py()
-
- return config
-
-if __name__=='__main__':
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 261379077..ee0b1342e 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -1,7 +1,9 @@
+from __future__ import division, absolute_import, print_function
+
__all__ = ['atleast_1d','atleast_2d','atleast_3d','vstack','hstack']
-import numeric as _nx
-from numeric import array, asanyarray, newaxis
+from . import numeric as _nx
+from .numeric import array, asanyarray, newaxis
def atleast_1d(*arys):
"""
@@ -223,7 +225,7 @@ def vstack(tup):
[4]])
"""
- return _nx.concatenate(map(atleast_2d,tup),0)
+ return _nx.concatenate([atleast_2d(_m) for _m in tup], 0)
def hstack(tup):
"""
@@ -267,7 +269,7 @@ def hstack(tup):
[3, 4]])
"""
- arrs = map(atleast_1d,tup)
+ arrs = [atleast_1d(_m) for _m in tup]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
diff --git a/numpy/core/src/multiarray/_datetime.h b/numpy/core/src/multiarray/_datetime.h
index 7ec3cd907..8cfd5d851 100644
--- a/numpy/core/src/multiarray/_datetime.h
+++ b/numpy/core/src/multiarray/_datetime.h
@@ -10,7 +10,7 @@ NPY_NO_EXPORT int _days_per_month_table[2][12];
#endif
NPY_NO_EXPORT void
-numpy_pydatetime_import();
+numpy_pydatetime_import(void);
/*
* Returns 1 if the given year is a leap year, 0 otherwise.
diff --git a/numpy/core/src/multiarray/array_assign.c b/numpy/core/src/multiarray/array_assign.c
index 4f350388b..fa764d758 100644
--- a/numpy/core/src/multiarray/array_assign.c
+++ b/numpy/core/src/multiarray/array_assign.c
@@ -21,6 +21,8 @@
#include "shape.h"
#include "array_assign.h"
+#include "common.h"
+#include "lowlevel_strided_loops.h"
/* See array_assign.h for parameter documentation */
NPY_NO_EXPORT int
@@ -91,7 +93,7 @@ raw_array_is_aligned(int ndim, char *data, npy_intp *strides, int alignment)
align_check |= strides[idim];
}
- return ((align_check & (alignment - 1)) == 0);
+ return npy_is_aligned((void *)align_check, alignment);
}
else {
return 1;
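npy_is_aligned tests the low bits of an address against a power-of-two
alignment; OR-ing the data pointer with every stride first lets a single
test cover all of them, since any misaligned term leaves a low bit set.
The same predicate in Python, for illustration:

    def is_aligned(value, alignment):
        # alignment must be a power of two; the mask picks out the remainder
        return (value & (alignment - 1)) == 0

    align_check = 0x1000 | 16 | 8     # data pointer OR-ed with two strides

    is_aligned(align_check, 8)        # True:  every term is 8-byte aligned
    is_aligned(align_check, 16)       # False: the stride of 8 leaks a low bit
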
@@ -102,36 +104,14 @@ raw_array_is_aligned(int ndim, char *data, npy_intp *strides, int alignment)
/* Gets a half-open range [start, end) which contains the array data */
NPY_NO_EXPORT void
get_array_memory_extents(PyArrayObject *arr,
- npy_uintp *out_start, npy_uintp *out_end)
+ npy_uintp *out_start, npy_uintp *out_end)
{
- npy_uintp start, end;
- npy_intp idim, ndim = PyArray_NDIM(arr);
- npy_intp *dimensions = PyArray_DIMS(arr),
- *strides = PyArray_STRIDES(arr);
-
- /* Calculate with a closed range [start, end] */
- start = end = (npy_uintp)PyArray_DATA(arr);
- for (idim = 0; idim < ndim; ++idim) {
- npy_intp stride = strides[idim], dim = dimensions[idim];
- /* If the array size is zero, return an empty range */
- if (dim == 0) {
- *out_start = *out_end = (npy_uintp)PyArray_DATA(arr);
- return;
- }
- /* Expand either upwards or downwards depending on stride */
- else {
- if (stride > 0) {
- end += stride*(dim-1);
- }
- else if (stride < 0) {
- start += stride*(dim-1);
- }
- }
- }
-
- /* Return a half-open range */
- *out_start = start;
- *out_end = end + PyArray_DESCR(arr)->elsize;
+ npy_intp low, upper;
+ offset_bounds_from_strides(PyArray_ITEMSIZE(arr), PyArray_NDIM(arr),
+ PyArray_DIMS(arr), PyArray_STRIDES(arr),
+ &low, &upper);
+ *out_start = (npy_uintp)PyArray_DATA(arr) + (npy_uintp)low;
+ *out_end = (npy_uintp)PyArray_DATA(arr) + (npy_uintp)upper;
}
/* Returns 1 if the arrays have overlapping data, 0 otherwise */
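The open-coded loop is replaced by offset_bounds_from_strides(), which
returns the half-open byte range an array touches relative to its data
pointer; a zero-sized dimension yields an empty range. A Python sketch of
those semantics:

    def offset_bounds_from_strides(itemsize, shape, strides):
        low = upper = 0
        for dim, stride in zip(shape, strides):
            if dim == 0:
                return 0, 0                    # empty array, empty range
            if stride > 0:
                upper += stride * (dim - 1)    # grows upward
            else:
                low += stride * (dim - 1)      # negative strides grow downward
        return low, upper + itemsize           # half-open [low, upper)

    offset_bounds_from_strides(8, (2, 3), (-24, 8))   # (-24, 24)
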
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index 2c1154264..df7facad6 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -48,7 +48,7 @@ raw_array_assign_scalar(int ndim, npy_intp *shape,
/* Check alignment */
aligned = raw_array_is_aligned(ndim, dst_data, dst_strides,
dst_dtype->alignment);
- if (((npy_intp)src_data & (src_dtype->alignment - 1)) != 0) {
+ if (!npy_is_aligned(src_data, src_dtype->alignment)) {
aligned = 0;
}
@@ -119,7 +119,7 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp *shape,
/* Check alignment */
aligned = raw_array_is_aligned(ndim, dst_data, dst_strides,
dst_dtype->alignment);
- if (((npy_intp)src_data & (src_dtype->alignment - 1)) != 0) {
+ if (!npy_is_aligned(src_data, src_dtype->alignment)) {
aligned = 0;
}
@@ -220,7 +220,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
* we also skip this if 'dst' has an object dtype.
*/
if ((!PyArray_EquivTypes(PyArray_DESCR(dst), src_dtype) ||
- ((npy_intp)src_data & (src_dtype->alignment - 1)) != 0) &&
+ !npy_is_aligned(src_data, src_dtype->alignment)) &&
PyArray_SIZE(dst) > 1 &&
!PyDataType_REFCHK(PyArray_DESCR(dst))) {
char *tmp_src_data;
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index f0e9e36a5..6db6060c9 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -49,6 +49,7 @@ maintainer email: oliphant.travis@ieee.org
#include "getset.h"
#include "sequence.h"
#include "buffer.h"
+#include "array_assign.h"
/*NUMPY_API
Compute the size of an array (in number of items)
@@ -103,7 +104,7 @@ PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
/*
* Unlike PyArray_SetBaseObject, we do not compress the chain of base
* references.
- */
+ */
((PyArrayObject_fields *)arr)->base = (PyObject *)base;
PyArray_ENABLEFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY);
PyArray_CLEARFLAGS(base, NPY_ARRAY_WRITEABLE);
@@ -143,11 +144,12 @@ PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj)
}
/*
- * Don't allow chains of views, always set the base
- * to the owner of the data. That is, either the first object
- * which isn't an array, or the first object which owns
- * its own data.
+ * Don't allow infinite chains of views, always set the base
+ * to the first owner of the data.
+ * That is, either the first object which isn't an array,
+ * or the first object which owns its own data.
*/
+
while (PyArray_Check(obj) && (PyObject *)arr != obj) {
PyArrayObject *obj_arr = (PyArrayObject *)obj;
PyObject *tmp;
@@ -155,17 +157,25 @@ PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj)
/* Propagate WARN_ON_WRITE through views. */
if (PyArray_FLAGS(obj_arr) & NPY_ARRAY_WARN_ON_WRITE) {
PyArray_ENABLEFLAGS(arr, NPY_ARRAY_WARN_ON_WRITE);
- }
+ }
/* If this array owns its own data, stop collapsing */
if (PyArray_CHKFLAGS(obj_arr, NPY_ARRAY_OWNDATA)) {
break;
- }
- /* If there's no base, stop collapsing */
+ }
+
tmp = PyArray_BASE(obj_arr);
+ /* If there's no base, stop collapsing */
if (tmp == NULL) {
break;
}
+    /* Stop collapsing the chain when the new base would not be of the
+     * same type (i.e. a different subclass).
+     */
+ if (Py_TYPE(tmp) != Py_TYPE(arr)) {
+ break;
+ }
+
Py_INCREF(tmp);
Py_DECREF(obj);
@@ -695,7 +705,7 @@ array_might_be_written(PyArrayObject *obj)
const char *msg =
"Numpy has detected that you (may be) writing to an array returned\n"
"by numpy.diagonal or by selecting multiple fields in a record\n"
- "array. This code will likely break in the next numpy release --\n"
+ "array. This code will likely break in a future numpy release --\n"
"see numpy.diagonal or arrays.indexing reference docs for details.\n"
"The quick fix is to make an explicit copy (e.g., do\n"
"arr.diagonal().copy() or arr[['f0','f1']].copy()).";
@@ -1163,12 +1173,12 @@ _void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op)
if NPY_TITLE_KEY(key, value) {
continue;
}
- a = PyArray_EnsureAnyArray(array_subscript(self, key));
+ a = array_subscript_asarray(self, key);
if (a == NULL) {
Py_XDECREF(res);
return NULL;
}
- b = array_subscript(other, key);
+ b = array_subscript_asarray(other, key);
if (b == NULL) {
Py_XDECREF(res);
Py_DECREF(a);
@@ -1255,7 +1265,6 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
{
PyArrayObject *array_other;
PyObject *result = NULL;
- PyArray_Descr *dtype = NULL;
switch (cmp_op) {
case Py_LT:
@@ -1271,30 +1280,32 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
Py_INCREF(Py_False);
return Py_False;
}
- /* Make sure 'other' is an array */
- if (PyArray_TYPE(self) == NPY_OBJECT) {
- dtype = PyArray_DTYPE(self);
- Py_INCREF(dtype);
- }
- array_other = (PyArrayObject *)PyArray_FromAny(other, dtype, 0, 0, 0,
- NULL);
+ result = PyArray_GenericBinaryFunction(self,
+ (PyObject *)other,
+ n_ops.equal);
+ if (result && result != Py_NotImplemented)
+ break;
+
/*
- * If not successful, indicate that the items cannot be compared
- * this way.
+ * The ufunc does not support void/structured types, so these
+ * need to be handled specifically. Only a few cases are supported.
*/
- if (array_other == NULL) {
- PyErr_Clear();
- Py_INCREF(Py_NotImplemented);
- return Py_NotImplemented;
- }
- result = PyArray_GenericBinaryFunction(self,
- (PyObject *)array_other,
- n_ops.equal);
- if ((result == Py_NotImplemented) &&
- (PyArray_TYPE(self) == NPY_VOID)) {
+ if (PyArray_TYPE(self) == NPY_VOID) {
int _res;
+ array_other = (PyArrayObject *)PyArray_FromAny(other, NULL, 0, 0, 0,
+ NULL);
+ /*
+ * If not successful, indicate that the items cannot be compared
+ * this way.
+ */
+ if (array_other == NULL) {
+ PyErr_Clear();
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+
_res = PyObject_RichCompareBool
((PyObject *)PyArray_DESCR(self),
(PyObject *)PyArray_DESCR(array_other),
@@ -1316,7 +1327,6 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
* two array objects can not be compared together;
* indicate that
*/
- Py_DECREF(array_other);
if (result == NULL) {
PyErr_Clear();
Py_INCREF(Py_NotImplemented);
@@ -1328,29 +1338,31 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
Py_INCREF(Py_True);
return Py_True;
}
- /* Make sure 'other' is an array */
- if (PyArray_TYPE(self) == NPY_OBJECT) {
- dtype = PyArray_DTYPE(self);
- Py_INCREF(dtype);
- }
- array_other = (PyArrayObject *)PyArray_FromAny(other, dtype, 0, 0, 0,
- NULL);
+ result = PyArray_GenericBinaryFunction(self, (PyObject *)other,
+ n_ops.not_equal);
+ if (result && result != Py_NotImplemented)
+ break;
+
/*
- * If not successful, indicate that the items cannot be compared
- * this way.
+ * The ufunc does not support void/structured types, so these
+ * need to be handled specifically. Only a few cases are supported.
*/
- if (array_other == NULL) {
- PyErr_Clear();
- Py_INCREF(Py_NotImplemented);
- return Py_NotImplemented;
- }
- result = PyArray_GenericBinaryFunction(self, (PyObject *)array_other,
- n_ops.not_equal);
- if ((result == Py_NotImplemented) &&
- (PyArray_TYPE(self) == NPY_VOID)) {
+ if (PyArray_TYPE(self) == NPY_VOID) {
int _res;
+ array_other = (PyArrayObject *)PyArray_FromAny(other, NULL, 0, 0, 0,
+ NULL);
+ /*
+ * If not successful, indicate that the items cannot be compared
+ * this way.
+ */
+ if (array_other == NULL) {
+ PyErr_Clear();
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+
_res = PyObject_RichCompareBool(
(PyObject *)PyArray_DESCR(self),
(PyObject *)PyArray_DESCR(array_other),
@@ -1368,7 +1380,6 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
return result;
}
- Py_DECREF(array_other);
if (result == NULL) {
PyErr_Clear();
Py_INCREF(Py_NotImplemented);
@@ -1454,21 +1465,22 @@ NPY_NO_EXPORT npy_bool
PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp offset,
npy_intp *dims, npy_intp *newstrides)
{
- int i;
- npy_intp byte_begin;
- npy_intp begin;
- npy_intp end;
+ npy_intp begin, end;
+ npy_intp lower_offset;
+ npy_intp upper_offset;
if (numbytes == 0) {
numbytes = PyArray_MultiplyList(dims, nd) * elsize;
}
+
begin = -offset;
- end = numbytes - offset - elsize;
- for (i = 0; i < nd; i++) {
- byte_begin = newstrides[i]*(dims[i] - 1);
- if ((byte_begin < begin) || (byte_begin > end)) {
- return NPY_FALSE;
- }
+ end = numbytes - offset;
+
+ offset_bounds_from_strides(elsize, nd, dims, newstrides,
+ &lower_offset, &upper_offset);
+
+ if ((upper_offset > end) || (lower_offset < begin)) {
+ return NPY_FALSE;
}
return NPY_TRUE;
}
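
In standalone form, the check above amounts to the following sketch (illustrative names, not the NumPy API; the zero-sized-dimension special case of offset_bounds_from_strides is omitted):

    typedef long intp_t;   /* stand-in for npy_intp in this sketch */

    /* Return 1 if every element offset stays inside the buffer of
     * 'numbytes' bytes, given the byte offset of element 0. */
    static int
    strides_fit_buffer(int elsize, int nd, intp_t numbytes, intp_t offset,
                       const intp_t *dims, const intp_t *strides)
    {
        intp_t lower = 0, upper = 0, max_axis_offset;
        int i;

        for (i = 0; i < nd; i++) {
            max_axis_offset = strides[i] * (dims[i] - 1);
            if (max_axis_offset > 0) {
                upper += max_axis_offset;
            }
            else {
                lower += max_axis_offset;
            }
        }
        upper += elsize;   /* half-open upper bound, as above */
        return (lower >= -offset) && (upper <= numbytes - offset);
    }
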
@@ -1643,7 +1655,7 @@ array_alloc(PyTypeObject *type, Py_ssize_t NPY_UNUSED(nitems))
{
PyObject *obj;
/* nitems will always be 0 */
- obj = (PyObject *)PyArray_malloc(NPY_SIZEOF_PYARRAYOBJECT);
+ obj = (PyObject *)PyArray_malloc(type->tp_basicsize);
PyObject_Init(obj, type);
return obj;
}
@@ -1682,8 +1694,6 @@ NPY_NO_EXPORT PyTypeObject PyArray_Type = {
(Py_TPFLAGS_DEFAULT
#if !defined(NPY_PY3K)
| Py_TPFLAGS_CHECKTYPES
-#endif
-#if (PY_VERSION_HEX >= 0x02060000) && (PY_VERSION_HEX < 0x03000000)
| Py_TPFLAGS_HAVE_NEWBUFFER
#endif
| Py_TPFLAGS_BASETYPE), /* tp_flags */
@@ -1714,7 +1724,5 @@ NPY_NO_EXPORT PyTypeObject PyArray_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index ad45cdb9e..52b787975 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -946,9 +946,9 @@ HALF_to_@TYPE@(npy_half *ip, @type@ *op, npy_intp n,
}
/**end repeat**/
-#if SIZEOF_SHORT == 2
+#if NPY_SIZEOF_SHORT == 2
#define HALF_to_HALF SHORT_to_SHORT
-#elif SIZEOF_INT == 2
+#elif NPY_SIZEOF_INT == 2
#define HALF_to_HALF INT_to_INT
#endif
@@ -1612,27 +1612,27 @@ static void
char *a, *b, c;
a = (char *)dst;
-#if SIZEOF_@fsize@ == 2
+#if NPY_SIZEOF_@fsize@ == 2
b = a + 1;
c = *a; *a++ = *b; *b = c;
-#elif SIZEOF_@fsize@ == 4
+#elif NPY_SIZEOF_@fsize@ == 4
b = a + 3;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b = c;
-#elif SIZEOF_@fsize@ == 8
+#elif NPY_SIZEOF_@fsize@ == 8
b = a + 7;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b = c;
-#elif SIZEOF_@fsize@ == 10
+#elif NPY_SIZEOF_@fsize@ == 10
b = a + 9;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b = c;
-#elif SIZEOF_@fsize@ == 12
+#elif NPY_SIZEOF_@fsize@ == 12
b = a + 11;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
@@ -1640,7 +1640,7 @@ static void
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b = c;
-#elif SIZEOF_@fsize@ == 16
+#elif NPY_SIZEOF_@fsize@ == 16
b = a + 15;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
@@ -1744,7 +1744,7 @@ static void
if (swap) {
char *a, *b, c;
a = (char *)dst;
-#if SIZEOF_@fsize@ == 4
+#if NPY_SIZEOF_@fsize@ == 4
b = a + 3;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b = c;
@@ -1752,7 +1752,7 @@ static void
b = a + 3;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b = c;
-#elif SIZEOF_@fsize@ == 8
+#elif NPY_SIZEOF_@fsize@ == 8
b = a + 7;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
@@ -1764,7 +1764,7 @@ static void
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b = c;
-#elif SIZEOF_@fsize@ == 10
+#elif NPY_SIZEOF_@fsize@ == 10
b = a + 9;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
@@ -1778,7 +1778,7 @@ static void
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b = c;
-#elif SIZEOF_@fsize@ == 12
+#elif NPY_SIZEOF_@fsize@ == 12
b = a + 11;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
@@ -1794,7 +1794,7 @@ static void
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b = c;
-#elif SIZEOF_@fsize@ == 16
+#elif NPY_SIZEOF_@fsize@ == 16
b = a + 15;
c = *a; *a++ = *b; *b-- = c;
c = *a; *a++ = *b; *b-- = c;
@@ -1825,9 +1825,8 @@ static void
*a++ = *b;
*b-- = c;
}
- a += nn / 2;
+ a += nn;
b = a + (NPY_SIZEOF_@fsize@ - 1);
- nn = NPY_SIZEOF_@fsize@ / 2;
for (i = 0; i < nn; i++) {
c = *a;
*a++ = *b;
@@ -2322,7 +2321,7 @@ VOID_nonzero (char *ip, PyArrayObject *ap)
* TODO: temporarily modifying the array like this
* is bad coding style, should be changed.
*/
- ((PyArrayObject_fields *)ap)->descr = descr;
+ ((PyArrayObject_fields *)ap)->descr = new;
((PyArrayObject_fields *)ap)->flags = savedflags;
if ((new->alignment > 1) && !__ALIGNED(ip + offset,
new->alignment)) {
@@ -3818,9 +3817,9 @@ NPY_NO_EXPORT PyArray_Descr @from@_Descr = {
/* type_num */
NPY_@from@,
/* elsize */
- @num@*sizeof(@fromtype@),
+ @num@ * sizeof(@fromtype@),
/* alignment */
- _ALIGN(@fromtype@),
+ @num@ * _ALIGN(@fromtype@),
/* subarray */
NULL,
/* fields */
@@ -3972,7 +3971,7 @@ initialize_builtin_datetime_metadata(void)
data2->meta.num = 1;
_builtin_descrs[NPY_DATETIME]->c_metadata = (NpyAuxData *)data1;
- _builtin_descrs[NPY_TIMEDELTA]->c_metadata = (NpyAuxData *)data1;
+ _builtin_descrs[NPY_TIMEDELTA]->c_metadata = (NpyAuxData *)data2;
return 0;
}
@@ -4089,8 +4088,6 @@ set_typeinfo(PyObject *dict)
infodict = PyDict_New();
if (infodict == NULL) return -1;
-#define BITSOF_INTP CHAR_BIT*SIZEOF_PY_INTPTR_T
-#define BITSOF_BYTE CHAR_BIT
/**begin repeat
*
@@ -4153,6 +4150,7 @@ set_typeinfo(PyObject *dict)
* CFLOAT, CDOUBLE, CLONGDOUBLE#
* #Name = Half, Float, Double, LongDouble,
* CFloat, CDouble, CLongDouble#
+ * #num = 1, 1, 1, 1, 2, 2, 2#
*/
PyDict_SetItemString(infodict, "@name@",
@@ -4163,7 +4161,7 @@ set_typeinfo(PyObject *dict)
#endif
NPY_@name@,
NPY_BITSOF_@name@,
- _ALIGN(@type@),
+ @num@ * _ALIGN(@type@),
(PyObject *) &Py@Name@ArrType_Type));
Py_DECREF(s);
@@ -4220,7 +4218,7 @@ set_typeinfo(PyObject *dict)
s = Py_BuildValue("ciiiNNO", NPY_DATETIMELTR,
#endif
NPY_DATETIME,
- sizeof(npy_datetime) * CHAR_BIT,
+ NPY_BITSOF_DATETIME,
_ALIGN(npy_datetime),
MyPyLong_FromInt64(NPY_MAX_DATETIME),
MyPyLong_FromInt64(NPY_MIN_DATETIME),
@@ -4233,7 +4231,7 @@ set_typeinfo(PyObject *dict)
s = Py_BuildValue("ciiiNNO",NPY_TIMEDELTALTR,
#endif
NPY_TIMEDELTA,
- sizeof(npy_timedelta) * CHAR_BIT,
+ NPY_BITSOF_TIMEDELTA,
_ALIGN(npy_timedelta),
MyPyLong_FromInt64(NPY_MAX_TIMEDELTA),
MyPyLong_FromInt64(NPY_MIN_TIMEDELTA),
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 530edbb1a..326b73b42 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -93,8 +93,6 @@ array_getcharbuf(PyArrayObject *self, Py_ssize_t segment, constchar **ptrptr)
* Py_buffers.
*************************************************************************/
-#if PY_VERSION_HEX >= 0x02060000
-
/*
* Format string translator
*
@@ -333,8 +331,10 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
}
if (is_native_only_type) {
- /* It's not possible to express native-only data types
- in non-native npy_byte orders */
+ /*
+ * It's not possible to express native-only data types
+ * in non-native npy_byte orders
+ */
PyErr_Format(PyExc_ValueError,
"cannot expose native-only dtype '%c' in "
"non-native byte order '%c' via buffer interface",
@@ -722,42 +722,24 @@ _array_dealloc_buffer_info(PyArrayObject *self)
}
}
-#else
-
-NPY_NO_EXPORT void
-_array_dealloc_buffer_info(PyArrayObject *self)
-{
-}
-
-#endif
/*************************************************************************/
NPY_NO_EXPORT PyBufferProcs array_as_buffer = {
#if !defined(NPY_PY3K)
-#if PY_VERSION_HEX >= 0x02050000
(readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/
(writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/
(segcountproc)array_getsegcount, /*bf_getsegcount*/
(charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/
-#else
- (getreadbufferproc)array_getreadbuf, /*bf_getreadbuffer*/
- (getwritebufferproc)array_getwritebuf, /*bf_getwritebuffer*/
- (getsegcountproc)array_getsegcount, /*bf_getsegcount*/
- (getcharbufferproc)array_getcharbuf, /*bf_getcharbuffer*/
-#endif
#endif
-#if PY_VERSION_HEX >= 0x02060000
(getbufferproc)array_getbuffer,
(releasebufferproc)0,
-#endif
};
/*************************************************************************
* Convert PEP 3118 format string to PyArray_Descr
*/
-#if PY_VERSION_HEX >= 0x02060000
NPY_NO_EXPORT PyArray_Descr*
_descriptor_from_pep3118_format(char *s)
@@ -817,15 +799,3 @@ _descriptor_from_pep3118_format(char *s)
}
return (PyArray_Descr*)descr;
}
-
-#else
-
-NPY_NO_EXPORT PyArray_Descr*
-_descriptor_from_pep3118_format(char *s)
-{
- PyErr_SetString(PyExc_RuntimeError,
- "PEP 3118 is not supported on Python versions < 2.6");
- return NULL;
-}
-
-#endif
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index c083a00f0..c75ac012a 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -14,6 +14,7 @@
#include "number.h"
#include "calculation.h"
+#include "array_assign.h"
static double
power_of_ten(int n)
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 5836e24ad..4c374cc75 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -7,6 +7,7 @@
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "common.h"
#include "usertypes.h"
@@ -27,6 +28,64 @@
* be allowed under the NPY_SAME_KIND_CASTING rules, and if not we issue a
* warning (that people's code will be broken in a future release.)
*/
+
+/*
+ * PyArray_GetAttrString_SuppressException:
+ *
+ * Stripped-down version of PyObject_GetAttrString. It avoids
+ * attribute lookups for None, tuple, and list objects, and does not
+ * create a PyErr since the caller ignores it.
+ *
+ * This can be much faster than PyObject_GetAttrString when the
+ * caller does not use exceptions.
+ *
+ * 'obj' is the object to search for the attribute.
+ *
+ * 'name' is the attribute to search for.
+ *
+ * Returns the attribute value on success, NULL on failure.
+ */
+PyObject *
+PyArray_GetAttrString_SuppressException(PyObject *obj, char *name)
+{
+ PyTypeObject *tp = Py_TYPE(obj);
+ PyObject *res = (PyObject *)NULL;
+
+ /* We do not need to check for special attributes on trivial types */
+ if (obj == Py_None ||
+ PyList_CheckExact(obj) ||
+ PyTuple_CheckExact(obj)) {
+ return NULL;
+ }
+
+ /* Attribute referenced by (char *)name */
+ if (tp->tp_getattr != NULL) {
+ res = (*tp->tp_getattr)(obj, name);
+ if (res == NULL) {
+ PyErr_Clear();
+ }
+ }
+ /* Attribute referenced by (PyObject *)name */
+ else if (tp->tp_getattro != NULL) {
+#if defined(NPY_PY3K)
+ PyObject *w = PyUnicode_InternFromString(name);
+#else
+ PyObject *w = PyString_InternFromString(name);
+#endif
+ if (w == NULL) {
+ return (PyObject *)NULL;
+ }
+ res = (*tp->tp_getattro)(obj, w);
+ Py_DECREF(w);
+ if (res == NULL) {
+ PyErr_Clear();
+ }
+ }
+ return res;
+}
+
+
+
NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING = NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND;
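
A hypothetical caller sketch, showing the intended usage pattern (probe_optional_attr is illustrative, not part of the patch):

    #include <Python.h>

    /* declared in common.h below */
    PyObject *PyArray_GetAttrString_SuppressException(PyObject *obj, char *name);

    static void
    probe_optional_attr(PyObject *obj)
    {
        PyObject *ip = PyArray_GetAttrString_SuppressException(
                                            obj, "__array_interface__");
        if (ip != NULL) {
            /* the attribute exists: use it, then drop the reference */
            Py_DECREF(ip);
        }
        /* no PyErr_Clear() needed: the helper never leaves an error set */
    }
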
@@ -147,9 +206,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
int i, size;
PyArray_Descr *dtype = NULL;
PyObject *ip;
-#if PY_VERSION_HEX >= 0x02060000
Py_buffer buffer_view;
-#endif
/* Check if it's an ndarray */
if (PyArray_Check(obj)) {
@@ -158,8 +215,17 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
goto promote_types;
}
+ /* See if it's a python None */
+ if (obj == Py_None) {
+ dtype = PyArray_DescrFromType(NPY_OBJECT);
+ if (dtype == NULL) {
+ goto fail;
+ }
+ Py_INCREF(dtype);
+ goto promote_types;
+ }
/* Check if it's a NumPy scalar */
- if (PyArray_IsScalar(obj, Generic)) {
+ else if (PyArray_IsScalar(obj, Generic)) {
if (!string_type) {
dtype = PyArray_DescrFromScalar(obj);
if (dtype == NULL) {
@@ -174,7 +240,15 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if ((temp = PyObject_Str(obj)) == NULL) {
return -1;
}
+#if defined(NPY_PY3K)
+ #if PY_VERSION_HEX >= 0x03030000
+ itemsize = PyUnicode_GetLength(temp);
+ #else
+ itemsize = PyUnicode_GET_SIZE(temp);
+ #endif
+#else
itemsize = PyString_GET_SIZE(temp);
+#endif
}
else if (string_type == NPY_UNICODE) {
#if defined(NPY_PY3K)
@@ -218,7 +292,15 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if ((temp = PyObject_Str(obj)) == NULL) {
return -1;
}
+#if defined(NPY_PY3K)
+ #if PY_VERSION_HEX >= 0x03030000
+ itemsize = PyUnicode_GetLength(temp);
+ #else
+ itemsize = PyUnicode_GET_SIZE(temp);
+ #endif
+#else
itemsize = PyString_GET_SIZE(temp);
+#endif
}
else if (string_type == NPY_UNICODE) {
#if defined(NPY_PY3K)
@@ -293,35 +375,36 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
goto promote_types;
}
-#if PY_VERSION_HEX >= 0x02060000
/* PEP 3118 buffer interface */
- memset(&buffer_view, 0, sizeof(Py_buffer));
- if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_FORMAT|PyBUF_STRIDES) == 0 ||
- PyObject_GetBuffer(obj, &buffer_view, PyBUF_FORMAT) == 0) {
-
- PyErr_Clear();
- dtype = _descriptor_from_pep3118_format(buffer_view.format);
- PyBuffer_Release(&buffer_view);
- if (dtype) {
+ if (PyObject_CheckBuffer(obj) == 1) {
+ memset(&buffer_view, 0, sizeof(Py_buffer));
+ if (PyObject_GetBuffer(obj, &buffer_view,
+ PyBUF_FORMAT|PyBUF_STRIDES) == 0 ||
+ PyObject_GetBuffer(obj, &buffer_view, PyBUF_FORMAT) == 0) {
+
+ PyErr_Clear();
+ dtype = _descriptor_from_pep3118_format(buffer_view.format);
+ PyBuffer_Release(&buffer_view);
+ if (dtype) {
+ goto promote_types;
+ }
+ }
+ else if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_STRIDES) == 0 ||
+ PyObject_GetBuffer(obj, &buffer_view, PyBUF_SIMPLE) == 0) {
+
+ PyErr_Clear();
+ dtype = PyArray_DescrNewFromType(NPY_VOID);
+ dtype->elsize = buffer_view.itemsize;
+ PyBuffer_Release(&buffer_view);
goto promote_types;
}
+ else {
+ PyErr_Clear();
+ }
}
- else if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_STRIDES) == 0 ||
- PyObject_GetBuffer(obj, &buffer_view, PyBUF_SIMPLE) == 0) {
-
- PyErr_Clear();
- dtype = PyArray_DescrNewFromType(NPY_VOID);
- dtype->elsize = buffer_view.itemsize;
- PyBuffer_Release(&buffer_view);
- goto promote_types;
- }
- else {
- PyErr_Clear();
- }
-#endif
/* The array interface */
- ip = PyObject_GetAttrString(obj, "__array_interface__");
+ ip = PyArray_GetAttrString_SuppressException(obj, "__array_interface__");
if (ip != NULL) {
if (PyDict_Check(ip)) {
PyObject *typestr;
@@ -352,12 +435,9 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
}
Py_DECREF(ip);
}
- else {
- PyErr_Clear();
- }
/* The array struct interface */
- ip = PyObject_GetAttrString(obj, "__array_struct__");
+ ip = PyArray_GetAttrString_SuppressException(obj, "__array_struct__");
if (ip != NULL) {
PyArrayInterface *inter;
char buf[40];
@@ -377,9 +457,6 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
}
Py_DECREF(ip);
}
- else {
- PyErr_Clear();
- }
/* The old buffer interface */
#if !defined(NPY_PY3K)
@@ -395,7 +472,9 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
#endif
/* The __array__ attribute */
- if (PyObject_HasAttrString(obj, "__array__")) {
+ ip = PyArray_GetAttrString_SuppressException(obj, "__array__");
+ if (ip != NULL) {
+ Py_DECREF(ip);
ip = PyObject_CallMethod(obj, "__array__", NULL);
if(ip && PyArray_Check(ip)) {
dtype = PyArray_DESCR((PyArrayObject *)ip);
@@ -469,9 +548,11 @@ promote_types:
/* Set 'out_dtype' if it's NULL */
if (*out_dtype == NULL) {
if (!string_type && dtype->type_num == NPY_STRING) {
+ Py_DECREF(dtype);
return RETRY_WITH_STRING;
}
if (!string_type && dtype->type_num == NPY_UNICODE) {
+ Py_DECREF(dtype);
return RETRY_WITH_UNICODE;
}
*out_dtype = dtype;
@@ -484,17 +565,19 @@ promote_types:
if (res_dtype == NULL) {
return -1;
}
- Py_DECREF(*out_dtype);
if (!string_type &&
res_dtype->type_num == NPY_UNICODE &&
(*out_dtype)->type_num != NPY_UNICODE) {
+ Py_DECREF(res_dtype);
return RETRY_WITH_UNICODE;
}
if (!string_type &&
res_dtype->type_num == NPY_STRING &&
(*out_dtype)->type_num != NPY_STRING) {
+ Py_DECREF(res_dtype);
return RETRY_WITH_STRING;
}
+ Py_DECREF(*out_dtype);
*out_dtype = res_dtype;
return 0;
}
@@ -567,7 +650,7 @@ index2ptr(PyArrayObject *mp, npy_intp i)
if (i == 0) {
return PyArray_DATA(mp);
}
- return PyArray_DATA(mp)+i*PyArray_STRIDES(mp)[0];
+ return PyArray_BYTES(mp)+i*PyArray_STRIDES(mp)[0];
}
NPY_NO_EXPORT int
@@ -592,8 +675,8 @@ _zerofill(PyArrayObject *ret)
NPY_NO_EXPORT int
_IsAligned(PyArrayObject *ap)
{
- int i, alignment, aligned = 1;
- npy_intp ptr;
+ unsigned int i, aligned = 1;
+ const unsigned int alignment = PyArray_DESCR(ap)->alignment;
/* The special casing for STRING and VOID types was removed
* in accordance with http://projects.scipy.org/numpy/ticket/1227
@@ -602,14 +685,25 @@ _IsAligned(PyArrayObject *ap)
* PyArray_DescrConverter(), but not necessarily when using
* PyArray_DescrAlignConverter(). */
- alignment = PyArray_DESCR(ap)->alignment;
if (alignment == 1) {
return 1;
}
- ptr = (npy_intp) PyArray_DATA(ap);
- aligned = (ptr % alignment) == 0;
+ aligned = npy_is_aligned(PyArray_DATA(ap), alignment);
+
for (i = 0; i < PyArray_NDIM(ap); i++) {
- aligned &= ((PyArray_STRIDES(ap)[i] % alignment) == 0);
+#if NPY_RELAXED_STRIDES_CHECKING
+ if (PyArray_DIM(ap, i) > 1) {
+ /* if shape[i] == 1, the stride is never used */
+ aligned &= npy_is_aligned((void*)PyArray_STRIDES(ap)[i],
+ alignment);
+ }
+ else if (PyArray_DIM(ap, i) == 0) {
+ /* an array with zero elements is always aligned */
+ return 1;
+ }
+#else /* not NPY_RELAXED_STRIDES_CHECKING */
+ aligned &= npy_is_aligned((void*)PyArray_STRIDES(ap)[i], alignment);
+#endif /* not NPY_RELAXED_STRIDES_CHECKING */
}
return aligned != 0;
}
@@ -658,3 +752,36 @@ _IsWriteable(PyArrayObject *ap)
}
return NPY_TRUE;
}
+
+
+/* Gets a half-open range [start, end) of offsets from the data pointer */
+NPY_NO_EXPORT void
+offset_bounds_from_strides(const int itemsize, const int nd,
+ const npy_intp *dims, const npy_intp *strides,
+ npy_intp *lower_offset, npy_intp *upper_offset) {
+ npy_intp max_axis_offset;
+ npy_intp lower = 0;
+ npy_intp upper = 0;
+ int i;
+
+ for (i = 0; i < nd; i++) {
+ if (dims[i] == 0) {
+ /* If the array size is zero, return an empty range */
+ *lower_offset = 0;
+ *upper_offset = 0;
+ return;
+ }
+ /* Expand either upwards or downwards depending on stride */
+ max_axis_offset = strides[i] * (dims[i] - 1);
+ if (max_axis_offset > 0) {
+ upper += max_axis_offset;
+ }
+ else {
+ lower += max_axis_offset;
+ }
+ }
+ /* Return a half-open range */
+ upper += itemsize;
+ *lower_offset = lower;
+ *upper_offset = upper;
+}
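
A worked example: for a reversed 1-d view of 3 items of 4 bytes (stride -4), the computed half-open byte range relative to the data pointer is [-8, 4). A standalone check:

    #include <stdio.h>

    int main(void)
    {
        long dims[1] = {3}, strides[1] = {-4};      /* reversed view */
        long lower = 0, upper = 0, max_axis_offset;
        int itemsize = 4, i;

        for (i = 0; i < 1; i++) {
            max_axis_offset = strides[i] * (dims[i] - 1);   /* -8 */
            if (max_axis_offset > 0) {
                upper += max_axis_offset;
            }
            else {
                lower += max_axis_offset;
            }
        }
        upper += itemsize;
        printf("[%ld, %ld)\n", lower, upper);       /* prints [-8, 4) */
        return 0;
    }
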
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index b68d4286b..82b66123c 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -1,5 +1,6 @@
#ifndef _NPY_PRIVATE_COMMON_H_
#define _NPY_PRIVATE_COMMON_H_
+#include <numpy/npy_common.h>
#define error_converting(x) (((x) == -1) && PyErr_Occurred())
@@ -25,6 +26,9 @@ NPY_NO_EXPORT int
PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
PyArray_Descr **out_dtype, int string_status);
+NPY_NO_EXPORT PyObject *
+PyArray_GetAttrString_SuppressException(PyObject *v, char *name);
+
/*
* Returns NULL without setting an exception if no scalar is matched, a
* new dtype reference otherwise.
@@ -57,6 +61,30 @@ _IsAligned(PyArrayObject *ap);
NPY_NO_EXPORT npy_bool
_IsWriteable(PyArrayObject *ap);
+NPY_NO_EXPORT void
+offset_bounds_from_strides(const int itemsize, const int nd,
+ const npy_intp *dims, const npy_intp *strides,
+ npy_intp *lower_offset, npy_intp *upper_offset);
+
+/*
+ * Return true if the pointer is aligned to 'alignment'.
+ */
+static NPY_INLINE int
+npy_is_aligned(const void * p, const npy_uintp alignment)
+{
+ /*
+     * Alignment is usually a power of two, for which this test is
+     * faster than a direct modulo.
+ */
+ if (NPY_LIKELY((alignment & (alignment - 1)) == 0)) {
+ return ((npy_uintp)(p) & ((alignment) - 1)) == 0;
+ }
+ else {
+ return ((npy_uintp)(p) % alignment) == 0;
+ }
+}
+
+
#include "ucsnarrow.h"
#endif
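
The mask test in npy_is_aligned works because for a power-of-two alignment the modulo reduces to a bitwise AND. A self-contained demonstration (outside NumPy):

    #include <stdio.h>
    #include <stdint.h>

    static int
    is_aligned(const void *p, uintptr_t alignment)
    {
        if ((alignment & (alignment - 1)) == 0) {       /* power of two */
            return ((uintptr_t)p & (alignment - 1)) == 0;
        }
        return ((uintptr_t)p % alignment) == 0;         /* rare fallback */
    }

    int main(void)
    {
        double x;
        /* &x is aligned for double on common ABIs, so this prints 1 */
        printf("%d\n", is_aligned(&x, sizeof(double)));
        return 0;
    }
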
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 914bc274d..a17156e53 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -84,7 +84,7 @@ PyArray_OutputConverter(PyObject *object, PyArrayObject **address)
NPY_NO_EXPORT int
PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
{
- int len;
+ Py_ssize_t len;
int nd;
seq->ptr = NULL;
@@ -94,14 +94,18 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
}
len = PySequence_Size(obj);
if (len == -1) {
- /* Check to see if it is a number */
+ /* Check to see if it is an integer number */
if (PyNumber_Check(obj)) {
+ /*
+             * After the deprecation period, PyNumber_Check could be
+             * replaced by PyIndex_Check.
+ */
len = 1;
}
}
if (len < 0) {
PyErr_SetString(PyExc_TypeError,
- "expected sequence object with len >= 0");
+ "expected sequence object with len >= 0 or a single integer");
return NPY_FAIL;
}
if (len > NPY_MAXDIMS) {
@@ -117,7 +121,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
}
}
seq->len = len;
- nd = PyArray_IntpFromSequence(obj, (npy_intp *)seq->ptr, len);
+ nd = PyArray_IntpFromIndexSequence(obj, (npy_intp *)seq->ptr, len);
if (nd == -1 || nd != len) {
PyDimMem_FREE(seq->ptr);
seq->ptr = NULL;
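
PyArray_IntpConverter is typically consumed through PyArg_ParseTuple's "O&" format. A sketch of a hypothetical consumer (takes_shape is illustrative; assumes a NumPy extension module with import_array() called at init):

    #include <Python.h>
    #include <numpy/arrayobject.h>

    static PyObject *
    takes_shape(PyObject *self, PyObject *args)
    {
        PyArray_Dims shape = {NULL, 0};

        /* accepts either a single integer or a sequence of integers */
        if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, &shape)) {
            return NULL;
        }
        /* ... use shape.ptr[0] .. shape.ptr[shape.len - 1] ... */
        PyDimMem_FREE(shape.ptr);
        Py_RETURN_NONE;
    }
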
@@ -187,7 +191,7 @@ PyArray_AxisConverter(PyObject *obj, int *axis)
*axis = NPY_MAXDIMS;
}
else {
- *axis = (int) PyInt_AsLong(obj);
+ *axis = PyArray_PyIntAsInt(obj);
if (PyErr_Occurred()) {
return NPY_FAIL;
}
@@ -223,9 +227,9 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags)
}
for (i = 0; i < naxes; ++i) {
PyObject *tmp = PyTuple_GET_ITEM(axis_in, i);
- long axis = PyInt_AsLong(tmp);
- long axis_orig = axis;
- if (axis == -1 && PyErr_Occurred()) {
+ int axis = PyArray_PyIntAsInt(tmp);
+ int axis_orig = axis;
+ if (error_converting(axis)) {
return NPY_FAIL;
}
if (axis < 0) {
@@ -233,7 +237,7 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags)
}
if (axis < 0 || axis >= ndim) {
PyErr_Format(PyExc_ValueError,
- "'axis' entry %ld is out of bounds [-%d, %d)",
+ "'axis' entry %d is out of bounds [-%d, %d)",
axis_orig, ndim, ndim);
return NPY_FAIL;
}
@@ -249,14 +253,14 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags)
}
/* Try to interpret axis as an integer */
else {
- long axis, axis_orig;
+ int axis, axis_orig;
memset(out_axis_flags, 0, ndim);
- axis = PyInt_AsLong(axis_in);
+ axis = PyArray_PyIntAsInt(axis_in);
axis_orig = axis;
- /* TODO: PyNumber_Index would be good to use here */
- if (axis == -1 && PyErr_Occurred()) {
+
+ if (error_converting(axis)) {
return NPY_FAIL;
}
if (axis < 0) {
@@ -272,7 +276,7 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags)
if (axis < 0 || axis >= ndim) {
PyErr_Format(PyExc_ValueError,
- "'axis' entry %ld is out of bounds [-%d, %d)",
+ "'axis' entry %d is out of bounds [-%d, %d)",
axis_orig, ndim, ndim);
return NPY_FAIL;
}
@@ -402,6 +406,45 @@ PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind)
}
/*NUMPY_API
+ * Convert object to select kind
+ */
+NPY_NO_EXPORT int
+PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind)
+{
+ char *str;
+ PyObject *tmp = NULL;
+
+ if (PyUnicode_Check(obj)) {
+ obj = tmp = PyUnicode_AsASCIIString(obj);
+ }
+
+ *selectkind = NPY_INTROSELECT;
+ str = PyBytes_AsString(obj);
+ if (!str) {
+ Py_XDECREF(tmp);
+ return NPY_FAIL;
+ }
+ if (strlen(str) < 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "Select kind string must be at least length 1");
+ Py_XDECREF(tmp);
+ return NPY_FAIL;
+ }
+ if (strcmp(str, "introselect") == 0) {
+ *selectkind = NPY_INTROSELECT;
+ }
+ else {
+ PyErr_Format(PyExc_ValueError,
+ "%s is an unrecognized kind of select",
+ str);
+ Py_XDECREF(tmp);
+ return NPY_FAIL;
+ }
+ Py_XDECREF(tmp);
+ return NPY_SUCCEED;
+}
+
+/*NUMPY_API
* Convert object to searchsorted side
*/
NPY_NO_EXPORT int
@@ -529,8 +572,8 @@ PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val)
return ret;
}
else {
- int number = PyInt_AsLong(object);
- if (number == -1 && PyErr_Occurred()) {
+ int number = PyArray_PyIntAsInt(object);
+ if (error_converting(number)) {
goto fail;
}
if (number <= (int) NPY_RAISE
@@ -664,85 +707,11 @@ PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting)
NPY_NO_EXPORT int
PyArray_PyIntAsInt(PyObject *o)
{
- long long_value = -1;
- PyObject *obj;
- static char *msg = "an integer is required";
- PyArrayObject *arr;
- PyArray_Descr *descr;
- int ret;
+ npy_intp long_value;
+ /* This assumes that NPY_SIZEOF_INTP >= NPY_SIZEOF_INT */
+ long_value = PyArray_PyIntAsIntp(o);
-
- if (!o) {
- PyErr_SetString(PyExc_TypeError, msg);
- return -1;
- }
- if (PyInt_Check(o)) {
- long_value = (long) PyInt_AS_LONG(o);
- goto finish;
- } else if (PyLong_Check(o)) {
- long_value = (long) PyLong_AsLong(o);
- goto finish;
- }
-
- descr = &INT_Descr;
- arr = NULL;
- if (PyArray_Check(o)) {
- if (PyArray_SIZE((PyArrayObject *)o)!=1 ||
- !PyArray_ISINTEGER((PyArrayObject *)o)) {
- PyErr_SetString(PyExc_TypeError, msg);
- return -1;
- }
- Py_INCREF(descr);
- arr = (PyArrayObject *)PyArray_CastToType((PyArrayObject *)o,
- descr, 0);
- }
- if (PyArray_IsScalar(o, Integer)) {
- Py_INCREF(descr);
- arr = (PyArrayObject *)PyArray_FromScalar(o, descr);
- }
- if (arr != NULL) {
- ret = *((int *)PyArray_DATA(arr));
- Py_DECREF(arr);
- return ret;
- }
-#if (PY_VERSION_HEX >= 0x02050000)
- if (PyIndex_Check(o)) {
- PyObject* value = PyNumber_Index(o);
- long_value = (npy_longlong) PyInt_AsSsize_t(value);
- goto finish;
- }
-#endif
- if (Py_TYPE(o)->tp_as_number != NULL &&
- Py_TYPE(o)->tp_as_number->nb_int != NULL) {
- obj = Py_TYPE(o)->tp_as_number->nb_int(o);
- if (obj == NULL) {
- return -1;
- }
- long_value = (long) PyLong_AsLong(obj);
- Py_DECREF(obj);
- }
-#if !defined(NPY_PY3K)
- else if (Py_TYPE(o)->tp_as_number != NULL &&
- Py_TYPE(o)->tp_as_number->nb_long != NULL) {
- obj = Py_TYPE(o)->tp_as_number->nb_long(o);
- if (obj == NULL) {
- return -1;
- }
- long_value = (long) PyLong_AsLong(obj);
- Py_DECREF(obj);
- }
-#endif
- else {
- PyErr_SetString(PyExc_NotImplementedError,"");
- }
-
- finish:
- if error_converting(long_value) {
- PyErr_SetString(PyExc_TypeError, msg);
- return -1;
- }
-
-#if (SIZEOF_LONG > SIZEOF_INT)
+#if (NPY_SIZEOF_INTP > NPY_SIZEOF_INT)
if ((long_value < INT_MIN) || (long_value > INT_MAX)) {
PyErr_SetString(PyExc_ValueError, "integer won't fit into a C int");
return -1;
@@ -755,145 +724,188 @@ PyArray_PyIntAsInt(PyObject *o)
NPY_NO_EXPORT npy_intp
PyArray_PyIntAsIntp(PyObject *o)
{
+#if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
+ npy_long long_value = -1;
+#else
npy_longlong long_value = -1;
- PyObject *obj;
+#endif
+ PyObject *obj, *err;
static char *msg = "an integer is required";
- PyArrayObject *arr;
- PyArray_Descr *descr;
- npy_intp ret;
if (!o) {
PyErr_SetString(PyExc_TypeError, msg);
return -1;
}
- if (PyInt_Check(o)) {
- long_value = (npy_longlong) PyInt_AS_LONG(o);
- goto finish;
- } else if (PyLong_Check(o)) {
- long_value = (npy_longlong) PyLong_AsLongLong(o);
- goto finish;
+
+    /* Be a bit stricter and not allow bools; np.bool_ is handled later */
+ if (PyBool_Check(o)) {
+ if (DEPRECATE("using a boolean instead of an integer"
+ " will result in an error in the future") < 0) {
+ return -1;
+ }
}
-#if NPY_SIZEOF_INTP == SIZEOF_LONG
- descr = &LONG_Descr;
-#elif NPY_SIZEOF_INTP == SIZEOF_INT
- descr = &INT_Descr;
+ /*
+ * Since it is the usual case, first check if o is an integer. This is
+ * an exact check, since otherwise __index__ is used.
+ */
+#if !defined(NPY_PY3K)
+ if PyInt_CheckExact(o) {
+ #if (NPY_SIZEOF_LONG <= NPY_SIZEOF_INTP)
+ /* No overflow is possible, so we can just return */
+ return PyInt_AS_LONG(o);
+ #else
+ long_value = PyInt_AS_LONG(o);
+ goto overflow_check;
+ #endif
+ }
+ else
+#endif
+ if PyLong_CheckExact(o) {
+#if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
+ long_value = PyLong_AsLongLong(o);
#else
- descr = &LONGLONG_Descr;
+ long_value = PyLong_AsLong(o);
#endif
- arr = NULL;
+ return (npy_intp)long_value;
+ }
- if (PyArray_Check(o)) {
- if (PyArray_SIZE((PyArrayObject *)o)!=1 ||
- !PyArray_ISINTEGER((PyArrayObject *)o)) {
- PyErr_SetString(PyExc_TypeError, msg);
+ /* Disallow numpy.bool_. Boolean arrays do not currently support index. */
+ if (PyArray_IsScalar(o, Bool)) {
+ if (DEPRECATE("using a boolean instead of an integer"
+ " will result in an error in the future") < 0) {
return -1;
}
- Py_INCREF(descr);
- arr = (PyArrayObject *)PyArray_CastToType((PyArrayObject *)o,
- descr, 0);
}
- else if (PyArray_IsScalar(o, Integer)) {
- Py_INCREF(descr);
- arr = (PyArrayObject *)PyArray_FromScalar(o, descr);
+
+ /*
+ * The most general case. PyNumber_Index(o) covers everything
+ * including arrays. In principle it may be possible to replace
+     * the whole function by PyNumber_AsSsize_t after deprecation.
+ */
+ obj = PyNumber_Index(o);
+ if (obj) {
+#if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
+ long_value = PyLong_AsLongLong(obj);
+#else
+ long_value = PyLong_AsLong(obj);
+#endif
+ Py_DECREF(obj);
+ goto finish;
}
- if (arr != NULL) {
- ret = *((npy_intp *)PyArray_DATA(arr));
- Py_DECREF(arr);
- return ret;
+ else {
+ /*
+         * PyNumber_Index failed: clear the error and fall back to the
+         * legacy paths below, which set a TypeError if they also fail,
+         * just as PyNumber_Index(o) would have.
+ */
+ PyErr_Clear();
}
-#if (PY_VERSION_HEX >= 0x02050000)
- if (PyIndex_Check(o)) {
- PyObject* value = PyNumber_Index(o);
- if (value == NULL) {
+ /*
+     * For backward compatibility, check the C-API number protocol.
+     * Everything up to the finish label should be removed after
+     * deprecation.
+ */
+ if (Py_TYPE(o)->tp_as_number != NULL &&
+ Py_TYPE(o)->tp_as_number->nb_int != NULL) {
+ obj = Py_TYPE(o)->tp_as_number->nb_int(o);
+ if (obj == NULL) {
return -1;
}
- long_value = (npy_longlong) PyInt_AsSsize_t(value);
- goto finish;
+ #if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
+ long_value = PyLong_AsLongLong(obj);
+ #else
+ long_value = PyLong_AsLong(obj);
+ #endif
+ Py_DECREF(obj);
}
-#endif
#if !defined(NPY_PY3K)
- if (Py_TYPE(o)->tp_as_number != NULL && \
- Py_TYPE(o)->tp_as_number->nb_long != NULL) {
+ else if (Py_TYPE(o)->tp_as_number != NULL &&
+ Py_TYPE(o)->tp_as_number->nb_long != NULL) {
obj = Py_TYPE(o)->tp_as_number->nb_long(o);
- if (obj != NULL) {
- long_value = (npy_longlong) PyLong_AsLongLong(obj);
- Py_DECREF(obj);
+ if (obj == NULL) {
+ return -1;
}
+ #if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
+ long_value = PyLong_AsLongLong(obj);
+ #else
+ long_value = PyLong_AsLong(obj);
+ #endif
+ Py_DECREF(obj);
}
- else
#endif
- if (Py_TYPE(o)->tp_as_number != NULL && \
- Py_TYPE(o)->tp_as_number->nb_int != NULL) {
- obj = Py_TYPE(o)->tp_as_number->nb_int(o);
- if (obj != NULL) {
- long_value = (npy_longlong) PyLong_AsLongLong(obj);
- Py_DECREF(obj);
- }
- }
else {
- PyErr_SetString(PyExc_NotImplementedError,"");
+ PyErr_SetString(PyExc_TypeError, msg);
+ return -1;
+ }
+ /* Give a deprecation warning, unless there was already an error */
+ if (!error_converting(long_value)) {
+ if (DEPRECATE("using a non-integer number instead of an integer"
+ " will result in an error in the future") < 0) {
+ return -1;
+ }
}
finish:
- if error_converting(long_value) {
+ if (error_converting(long_value)) {
+ err = PyErr_Occurred();
+ /* Only replace TypeError's here, which are the normal errors. */
+ if (PyErr_GivenExceptionMatches(err, PyExc_TypeError)) {
PyErr_SetString(PyExc_TypeError, msg);
- return -1;
}
+ return -1;
+ }
-#if (NPY_SIZEOF_LONGLONG > NPY_SIZEOF_INTP)
+ overflow_check:
+#if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
+ #if (NPY_SIZEOF_LONGLONG > NPY_SIZEOF_INTP)
if ((long_value < NPY_MIN_INTP) || (long_value > NPY_MAX_INTP)) {
- PyErr_SetString(PyExc_ValueError,
- "integer won't fit into a C intp");
+ PyErr_SetString(PyExc_OverflowError,
+ "Python int too large to convert to C numpy.intp");
return -1;
}
+ #endif
+#else
+ #if (NPY_SIZEOF_LONG > NPY_SIZEOF_INTP)
+ if ((long_value < NPY_MIN_INTP) || (long_value > NPY_MAX_INTP)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "Python int too large to convert to C numpy.intp");
+ return -1;
+ }
+ #endif
#endif
- return (npy_intp) long_value;
+ return long_value;
}
-/*NUMPY_API
- * PyArray_IntpFromSequence
+
+/*
+ * PyArray_IntpFromIndexSequence
* Returns the number of dimensions or -1 if an error occurred.
- * vals must be large enough to hold maxvals
+ * vals must be large enough to hold maxvals.
+ * As opposed to PyArray_IntpFromSequence, it uses and returns
+ * npy_intp for the number of values.
*/
-NPY_NO_EXPORT int
-PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals)
+NPY_NO_EXPORT npy_intp
+PyArray_IntpFromIndexSequence(PyObject *seq, npy_intp *vals, npy_intp maxvals)
{
- int nd, i;
+ Py_ssize_t nd;
+ npy_intp i;
PyObject *op, *err;
/*
      * Check to see if the sequence is a single integer first,
      * or can be made into one.
*/
- if ((nd=PySequence_Length(seq)) == -1) {
- if (PyErr_Occurred()) PyErr_Clear();
-#if SIZEOF_LONG >= NPY_SIZEOF_INTP && !defined(NPY_PY3K)
- if (!(op = PyNumber_Int(seq))) {
- return -1;
- }
-#else
- if (!(op = PyNumber_Long(seq))) {
- return -1;
+ nd = PySequence_Length(seq);
+ if (nd == -1) {
+ if (PyErr_Occurred()) {
+ PyErr_Clear();
}
-#endif
- nd = 1;
-#if SIZEOF_LONG >= NPY_SIZEOF_INTP
- vals[0] = (npy_intp ) PyInt_AsLong(op);
-#else
- vals[0] = (npy_intp ) PyLong_AsLongLong(op);
-#endif
- Py_DECREF(op);
- /*
- * Check wether there was an error - if the error was an overflow, raise
- * a ValueError instead to be more helpful
- */
+ vals[0] = PyArray_PyIntAsIntp(seq);
if(vals[0] == -1) {
err = PyErr_Occurred();
- if (err &&
- PyErr_GivenExceptionMatches(err, PyExc_OverflowError)) {
+ if (err &&
+ PyErr_GivenExceptionMatches(err, PyExc_OverflowError)) {
PyErr_SetString(PyExc_ValueError,
"Maximum allowed dimension exceeded");
}
@@ -901,6 +913,7 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals)
return -1;
}
}
+ nd = 1;
}
else {
for (i = 0; i < PyArray_MIN(nd,maxvals); i++) {
@@ -908,21 +921,13 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals)
if (op == NULL) {
return -1;
}
-#if SIZEOF_LONG >= NPY_SIZEOF_INTP
- vals[i]=(npy_intp )PyInt_AsLong(op);
-#else
- vals[i]=(npy_intp )PyLong_AsLongLong(op);
-#endif
- Py_DECREF(op);
- /*
- * Check wether there was an error - if the error was an overflow,
- * raise a ValueError instead to be more helpful
- */
- if(vals[0] == -1) {
+
+ vals[i] = PyArray_PyIntAsIntp(op);
+ if(vals[i] == -1) {
err = PyErr_Occurred();
- if (err &&
- PyErr_GivenExceptionMatches(err, PyExc_OverflowError)) {
+ if (err &&
+ PyErr_GivenExceptionMatches(err, PyExc_OverflowError)) {
PyErr_SetString(PyExc_ValueError,
"Maximum allowed dimension exceeded");
}
@@ -935,6 +940,18 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals)
return nd;
}
+/*NUMPY_API
+ * PyArray_IntpFromSequence
+ * Returns the number of integers converted or -1 if an error occurred.
+ * vals must be large enough to hold maxvals
+ */
+NPY_NO_EXPORT int
+PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals)
+{
+ return PyArray_IntpFromIndexSequence(seq, vals, (npy_intp)maxvals);
+}
+
+
/**
* WARNING: This flag is a bad idea, but was the only way to both
* 1) Support unpickling legacy pickles with object types.
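
In isolation, the strategy above reduces to routing everything through PyNumber_Index and checking the result for overflow. A minimal sketch assuming Python 3 semantics (as_index is illustrative, not the NumPy function):

    #include <Python.h>

    static long
    as_index(PyObject *o)
    {
        /* exact ints pass through; everything else goes via __index__ */
        PyObject *idx = PyNumber_Index(o);
        long value;

        if (idx == NULL) {
            return -1;              /* error already set by PyNumber_Index */
        }
        value = PyLong_AsLong(idx); /* sets OverflowError if out of range */
        Py_DECREF(idx);
        return value;
    }
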
@@ -954,6 +971,9 @@ NPY_NO_EXPORT int
PyArray_TypestrConvert(int itemsize, int gentype)
{
int newtype = NPY_NOTYPE;
+ PyArray_Descr *temp;
+ const char *msg = "Specified size is invalid for this data type.\n"
+ "Size will be ignored in NumPy 1.7 but may throw an exception in future versions.";
switch (gentype) {
case NPY_GENBOOLLTR:
@@ -1106,6 +1126,32 @@ PyArray_TypestrConvert(int itemsize, int gentype)
}
break;
}
+
+ /*
+     * Raise a deprecation warning if the new type hasn't been set
+     * yet and the size char is invalid. This should eventually be
+     * changed to an error in future NumPy versions.
+ */
+ if (newtype == NPY_NOTYPE) {
+ temp = PyArray_DescrFromType(gentype);
+ if (temp != NULL) {
+ if (temp->elsize != itemsize) {
+ if (DEPRECATE(msg) < 0) {
+ Py_DECREF(temp);
+ return -1;
+ }
+
+ newtype = gentype;
+ }
+ else {
+ newtype = gentype;
+ }
+
+ Py_DECREF(temp);
+ }
+ }
+
return newtype;
}
@@ -1124,7 +1170,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp *vals)
goto fail;
}
for (i = 0; i < len; i++) {
-#if NPY_SIZEOF_INTP <= SIZEOF_LONG
+#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
PyObject *o = PyInt_FromLong((long) vals[i]);
#else
PyObject *o = PyLong_FromLongLong((npy_longlong) vals[i]);
diff --git a/numpy/core/src/multiarray/conversion_utils.h b/numpy/core/src/multiarray/conversion_utils.h
index baa110e90..cc16cd9c7 100644
--- a/numpy/core/src/multiarray/conversion_utils.h
+++ b/numpy/core/src/multiarray/conversion_utils.h
@@ -25,6 +25,9 @@ PyArray_PyIntAsInt(PyObject *o);
NPY_NO_EXPORT npy_intp
PyArray_PyIntAsIntp(PyObject *o);
+NPY_NO_EXPORT npy_intp
+PyArray_IntpFromIndexSequence(PyObject *seq, npy_intp *vals, npy_intp maxvals);
+
NPY_NO_EXPORT int
PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals);
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index e3dd78b9f..62b9034c2 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -413,7 +413,14 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
else {
PyArrayObject *src_arr;
- src_arr = (PyArrayObject *)PyArray_FromAny(obj, NULL, 0, 0, 0, NULL);
+    /*
+     * The dtype of the destination is used when converting from the
+     * Python object, so that, for example, a tuple gets recognized
+     * as a struct scalar of the required type.
+     */
+ Py_INCREF(PyArray_DTYPE(arr));
+ src_arr = (PyArrayObject *)PyArray_FromAny(obj,
+ PyArray_DTYPE(arr), 0, 0, 0, NULL);
if (src_arr == NULL) {
return -1;
}
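
The visible effect of passing the destination dtype is that a tuple now fills a structured array as a single struct scalar. A sketch (fill_struct_example is hypothetical; assumes an initialized interpreter, import_array(), and an 'arr' with a struct dtype such as [('x','i4'),('y','f8')]):

    #include <Python.h>
    #include <numpy/arrayobject.h>

    static int
    fill_struct_example(PyArrayObject *arr)
    {
        PyObject *val = Py_BuildValue("(id)", 1, 2.0);
        int ret;

        if (val == NULL) {
            return -1;
        }
        ret = PyArray_FillWithScalar(arr, val);   /* tuple -> struct scalar */
        Py_DECREF(val);
        return ret;
    }
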
diff --git a/numpy/core/src/multiarray/convert.h b/numpy/core/src/multiarray/convert.h
index de24e27cf..96df19711 100644
--- a/numpy/core/src/multiarray/convert.h
+++ b/numpy/core/src/multiarray/convert.h
@@ -1,4 +1,8 @@
#ifndef _NPY_ARRAYOBJECT_CONVERT_H_
#define _NPY_ARRAYOBJECT_CONVERT_H_
+NPY_NO_EXPORT int
+PyArray_AssignZero(PyArrayObject *dst,
+ PyArrayObject *wheremask);
+
#endif
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 586b85b1e..19ba5d327 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -97,15 +97,9 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num)
cls = PyObject_GetAttrString(obj, "ComplexWarning");
Py_DECREF(obj);
}
-#if PY_VERSION_HEX >= 0x02050000
ret = PyErr_WarnEx(cls,
- "Casting complex values to real discards "
- "the imaginary part", 1);
-#else
- ret = PyErr_Warn(cls,
- "Casting complex values to real discards "
- "the imaginary part");
-#endif
+ "Casting complex values to real discards "
+ "the imaginary part", 1);
Py_XDECREF(cls);
if (ret < 0) {
return NULL;
@@ -115,7 +109,8 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num)
return castfunc;
}
- PyErr_SetString(PyExc_ValueError, "No cast function available.");
+ PyErr_SetString(PyExc_ValueError,
+ "No cast function available.");
return NULL;
}
@@ -141,6 +136,12 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
{
PyArray_DatetimeMetaData *meta;
int flex_type_num;
+ PyArrayObject *arr = NULL;
+ PyArray_Descr *dtype = NULL;
+ int ndim = 0;
+ npy_intp dims[NPY_MAXDIMS];
+ PyObject *list = NULL;
+ int result;
if (*flex_dtype == NULL) {
if (!PyErr_Occurred()) {
@@ -220,6 +221,61 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
break;
case NPY_OBJECT:
size = 64;
+ if ((flex_type_num == NPY_STRING ||
+ flex_type_num == NPY_UNICODE) &&
+ data_obj != NULL) {
+ if (PyArray_CheckScalar(data_obj)) {
+ PyObject *scalar = PyArray_ToList(data_obj);
+ if (scalar != NULL) {
+ PyObject *s = PyObject_Str(scalar);
+ if (s == NULL) {
+ Py_DECREF(scalar);
+ Py_DECREF(*flex_dtype);
+ *flex_dtype = NULL;
+ return;
+ }
+ else {
+ size = PyObject_Length(s);
+ Py_DECREF(s);
+ }
+ Py_DECREF(scalar);
+ }
+ }
+ else if (PyArray_Check(data_obj)) {
+ /*
+                 * Convert the data array to a list of objects, since
+                 * GetArrayParamsFromObject won't iterate over an
+                 * array.
+ */
+ list = PyArray_ToList(data_obj);
+ result = PyArray_GetArrayParamsFromObject(
+ list,
+ *flex_dtype,
+ 0, &dtype,
+ &ndim, dims, &arr, NULL);
+ if (result == 0 && dtype != NULL) {
+ if (flex_type_num == NPY_UNICODE) {
+ size = dtype->elsize / 4;
+ }
+ else {
+ size = dtype->elsize;
+ }
+ }
+ Py_DECREF(list);
+ }
+ else if (PyArray_IsPythonScalar(data_obj)) {
+ PyObject *s = PyObject_Str(data_obj);
+ if (s == NULL) {
+ Py_DECREF(*flex_dtype);
+ *flex_dtype = NULL;
+ return;
+ }
+ else {
+ size = PyObject_Length(s);
+ Py_DECREF(s);
+ }
+ }
+ }
break;
case NPY_STRING:
case NPY_VOID:
@@ -682,7 +738,11 @@ can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data,
/* An aligned memory buffer large enough to hold any type */
npy_longlong value[4];
- if (casting == NPY_UNSAFE_CASTING) {
+ /*
+ * If the two dtypes are actually references to the same object
+ * or if casting type is forced unsafe then always OK.
+ */
+ if (scal_type == to || casting == NPY_UNSAFE_CASTING ) {
return 1;
}
@@ -1817,7 +1877,7 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn)
if (PyArray_Check(op)) {
for (i = 0; i < n; i++) {
- mps[i] = (PyArrayObject *) array_big_item((PyArrayObject *)op, i);
+ mps[i] = (PyArrayObject *) array_item_asarray((PyArrayObject *)op, i);
}
if (!PyArray_ISCARRAY((PyArrayObject *)op)) {
for (i = 0; i < n; i++) {
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index ef6c40b46..eccb6d0a2 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -311,25 +311,42 @@ _strided_byte_swap(void *p, npy_intp stride, npy_intp n, int size)
case 1: /* no byteswap necessary */
break;
case 4:
- for (a = (char*)p; n > 0; n--, a += stride - 1) {
- b = a + 3;
- c = *a; *a++ = *b; *b-- = c;
- c = *a; *a = *b; *b = c;
+ if (npy_is_aligned(p, sizeof(npy_uint32))) {
+ for (a = (char*)p; n > 0; n--, a += stride) {
+ npy_uint32 * a_ = (npy_uint32 *)a;
+ *a_ = npy_bswap4(*a_);
+ }
+ }
+ else {
+ for (a = (char*)p; n > 0; n--, a += stride) {
+ npy_bswap4_unaligned(a);
+ }
}
break;
case 8:
- for (a = (char*)p; n > 0; n--, a += stride - 3) {
- b = a + 7;
- c = *a; *a++ = *b; *b-- = c;
- c = *a; *a++ = *b; *b-- = c;
- c = *a; *a++ = *b; *b-- = c;
- c = *a; *a = *b; *b = c;
+ if (npy_is_aligned(p, sizeof(npy_uint64))) {
+ for (a = (char*)p; n > 0; n--, a += stride) {
+ npy_uint64 * a_ = (npy_uint64 *)a;
+ *a_ = npy_bswap8(*a_);
+ }
+ }
+ else {
+ for (a = (char*)p; n > 0; n--, a += stride) {
+ npy_bswap8_unaligned(a);
+ }
}
break;
case 2:
- for (a = (char*)p; n > 0; n--, a += stride) {
- b = a + 1;
- c = *a; *a = *b; *b = c;
+ if (npy_is_aligned(p, sizeof(npy_uint16))) {
+ for (a = (char*)p; n > 0; n--, a += stride) {
+ npy_uint16 * a_ = (npy_uint16 *)a;
+ *a_ = npy_bswap2(*a_);
+ }
+ }
+ else {
+ for (a = (char*)p; n > 0; n--, a += stride) {
+ npy_bswap2_unaligned(a);
+ }
}
break;
default:
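
For reference, the npy_bswap helpers used above are assumed here to behave like the following self-contained equivalents, showing both the aligned word-at-a-time path and the unaligned byte path:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t bswap4(uint32_t x)
    {
        return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
               ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
    }

    static void bswap4_unaligned(char *p)
    {
        char t;
        t = p[0]; p[0] = p[3]; p[3] = t;
        t = p[1]; p[1] = p[2]; p[2] = t;
    }

    int main(void)
    {
        uint32_t v = 0x11223344u;
        v = bswap4(v);
        printf("%08x\n", v);            /* 44332211 */
        bswap4_unaligned((char *)&v);   /* swap back in place */
        printf("%08x\n", v);            /* 11223344 */
        return 0;
    }
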
@@ -356,15 +373,14 @@ NPY_NO_EXPORT void
copy_and_swap(void *dst, void *src, int itemsize, npy_intp numitems,
npy_intp srcstrides, int swap)
{
- npy_intp i;
- char *s1 = (char *)src;
- char *d1 = (char *)dst;
-
-
if ((numitems == 1) || (itemsize == srcstrides)) {
- memcpy(d1, s1, itemsize*numitems);
+ memcpy(dst, src, itemsize*numitems);
}
else {
+ npy_intp i;
+ char *s1 = (char *)src;
+ char *d1 = (char *)dst;
+
for (i = 0; i < numitems; i++) {
memcpy(d1, s1, itemsize);
d1 += itemsize;
@@ -373,7 +389,7 @@ copy_and_swap(void *dst, void *src, int itemsize, npy_intp numitems,
}
if (swap) {
- byte_swap_vector(d1, numitems, itemsize);
+ byte_swap_vector(dst, numitems, itemsize);
}
}
@@ -443,7 +459,7 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
}
else {
res = PyArray_DESCR(a)->f->setitem(o,
- (PyArray_DATA(a) + offset), a);
+ (PyArray_BYTES(a) + offset), a);
}
if (res < 0) {
Py_DECREF(o);
@@ -465,7 +481,7 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
}
else {
res = PyArray_DESCR(a)->f->setitem(o,
- (PyArray_DATA(a) + offset), a);
+ (PyArray_BYTES(a) + offset), a);
}
Py_DECREF(o);
if (res < 0) {
@@ -505,7 +521,7 @@ PyArray_AssignFromSequence(PyArrayObject *self, PyObject *v)
*/
static int
-discover_itemsize(PyObject *s, int nd, int *itemsize)
+discover_itemsize(PyObject *s, int nd, int *itemsize, int size_as_string)
{
int n, r, i;
@@ -516,14 +532,26 @@ discover_itemsize(PyObject *s, int nd, int *itemsize)
if ((nd == 0) || PyString_Check(s) ||
#if defined(NPY_PY3K)
- PyMemoryView_Check(s) ||
+ PyMemoryView_Check(s) ||
#else
- PyBuffer_Check(s) ||
+ PyBuffer_Check(s) ||
#endif
- PyUnicode_Check(s)) {
+ PyUnicode_Check(s)) {
/* If an object has no length, leave it be */
- n = PyObject_Length(s);
+ if (size_as_string && s != NULL && !PyString_Check(s)) {
+ PyObject *s_string = PyObject_Str(s);
+ if (s_string) {
+ n = PyObject_Length(s_string);
+ Py_DECREF(s_string);
+ }
+ else {
+ n = -1;
+ }
+ }
+ else {
+ n = PyObject_Length(s);
+ }
if (n == -1) {
PyErr_Clear();
}
@@ -541,7 +569,7 @@ discover_itemsize(PyObject *s, int nd, int *itemsize)
return -1;
}
- r = discover_itemsize(e,nd-1,itemsize);
+ r = discover_itemsize(e, nd - 1, itemsize, size_as_string);
Py_DECREF(e);
if (r == -1) {
return -1;
@@ -562,9 +590,7 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
{
PyObject *e;
int r, n, i;
-#if PY_VERSION_HEX >= 0x02060000
Py_buffer buffer_view;
-#endif
if (*maxndim == 0) {
return 0;
@@ -627,34 +653,35 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
}
/* obj is a PEP 3118 buffer */
-#if PY_VERSION_HEX >= 0x02060000
/* PEP 3118 buffer interface */
- memset(&buffer_view, 0, sizeof(Py_buffer));
- if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_STRIDES) == 0 ||
- PyObject_GetBuffer(obj, &buffer_view, PyBUF_ND) == 0) {
- int nd = buffer_view.ndim;
- if (nd < *maxndim) {
- *maxndim = nd;
+ if (PyObject_CheckBuffer(obj) == 1) {
+ memset(&buffer_view, 0, sizeof(Py_buffer));
+ if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_STRIDES) == 0 ||
+ PyObject_GetBuffer(obj, &buffer_view, PyBUF_ND) == 0) {
+ int nd = buffer_view.ndim;
+ if (nd < *maxndim) {
+ *maxndim = nd;
+ }
+ for (i=0; i<*maxndim; i++) {
+ d[i] = buffer_view.shape[i];
+ }
+ PyBuffer_Release(&buffer_view);
+ return 0;
}
- for (i=0; i<*maxndim; i++) {
- d[i] = buffer_view.shape[i];
+ else if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_SIMPLE) == 0) {
+ d[0] = buffer_view.len;
+ *maxndim = 1;
+ PyBuffer_Release(&buffer_view);
+ return 0;
+ }
+ else {
+ PyErr_Clear();
}
- PyBuffer_Release(&buffer_view);
- return 0;
- }
- else if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_SIMPLE) == 0) {
- d[0] = buffer_view.len;
- *maxndim = 1;
- PyBuffer_Release(&buffer_view);
- return 0;
- }
- else {
- PyErr_Clear();
}
-#endif
/* obj has the __array_struct__ interface */
- if ((e = PyObject_GetAttrString(obj, "__array_struct__")) != NULL) {
+ e = PyArray_GetAttrString_SuppressException(obj, "__array_struct__");
+ if (e != NULL) {
int nd = -1;
if (NpyCapsule_Check(e)) {
PyArrayInterface *inter;
@@ -676,12 +703,10 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
return 0;
}
}
- else {
- PyErr_Clear();
- }
/* obj has the __array_interface__ interface */
- if ((e = PyObject_GetAttrString(obj, "__array_interface__")) != NULL) {
+ e = PyArray_GetAttrString_SuppressException(obj, "__array_interface__");
+ if (e != NULL) {
int nd = -1;
if (PyDict_Check(e)) {
PyObject *new;
@@ -692,11 +717,7 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
*maxndim = nd;
}
for (i=0; i<*maxndim; i++) {
-#if (PY_VERSION_HEX >= 0x02050000)
d[i] = PyInt_AsSsize_t(PyTuple_GET_ITEM(new, i));
-#else
- d[i] = PyInt_AsLong(PyTuple_GET_ITEM(new, i));
-#endif
if (d[i] < 0) {
PyErr_SetString(PyExc_RuntimeError,
"Invalid shape in __array_interface__");
@@ -711,9 +732,6 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
return 0;
}
}
- else {
- PyErr_Clear();
- }
n = PySequence_Size(obj);
@@ -1171,7 +1189,6 @@ PyArray_New(PyTypeObject *subtype, int nd, npy_intp *dims, int type_num,
NPY_NO_EXPORT int
_array_from_buffer_3118(PyObject *obj, PyObject **out)
{
-#if PY_VERSION_HEX >= 0x02060000
/* PEP 3118 */
PyObject *memoryview;
Py_buffer *view;
@@ -1260,9 +1277,6 @@ fail:
Py_DECREF(memoryview);
return -1;
-#else
- return -1;
-#endif
}
/*NUMPY_API
@@ -1514,7 +1528,12 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
if ((*out_dtype)->elsize == 0 &&
PyTypeNum_ISEXTENDED((*out_dtype)->type_num)) {
int itemsize = 0;
- if (discover_itemsize(op, *out_ndim, &itemsize) < 0) {
+ int size_as_string = 0;
+ if ((*out_dtype)->type_num == NPY_STRING ||
+ (*out_dtype)->type_num == NPY_UNICODE) {
+ size_as_string = 1;
+ }
+ if (discover_itemsize(op, *out_ndim, &itemsize, size_as_string) < 0) {
Py_DECREF(*out_dtype);
if (PyErr_Occurred() &&
PyErr_GivenExceptionMatches(PyErr_Occurred(),
@@ -1784,6 +1803,14 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
oldtype = PyArray_DESCR(arr);
if (newtype == NULL) {
+ /*
+         * The object is already an array and newtype is NULL; if no
+         * flags are requested, return it directly instead of checking
+         * for casting.
+ */
+ if (flags == 0) {
+ Py_INCREF(arr);
+ return (PyObject *)arr;
+ }
newtype = oldtype;
Py_INCREF(oldtype);
}
@@ -1917,9 +1944,8 @@ PyArray_FromStructInterface(PyObject *input)
PyArrayObject *ret;
char endian = NPY_NATBYTE;
- attr = PyObject_GetAttrString(input, "__array_struct__");
+ attr = PyArray_GetAttrString_SuppressException(input, "__array_struct__");
if (attr == NULL) {
- PyErr_Clear();
return Py_NotImplemented;
}
if (!NpyCapsule_Check(attr)) {
@@ -1992,9 +2018,9 @@ PyArray_FromInterface(PyObject *origin)
/* Get the memory from __array_data__ and __array_offset__ */
/* Get the strides */
- iface = PyObject_GetAttrString(origin, "__array_interface__");
+ iface = PyArray_GetAttrString_SuppressException(origin,
+ "__array_interface__");
if (iface == NULL) {
- PyErr_Clear();
return Py_NotImplemented;
}
if (!PyDict_Check(iface)) {
@@ -2075,11 +2101,6 @@ PyArray_FromInterface(PyObject *origin)
/* Case for data access through pointer */
if (attr && PyTuple_Check(attr)) {
PyObject *dataptr;
- if (n == 0) {
- PyErr_SetString(PyExc_ValueError,
- "__array_interface__ shape must be at least size 1");
- goto fail;
- }
if (PyTuple_GET_SIZE(attr) != 2) {
PyErr_SetString(PyExc_TypeError,
"__array_interface__ data must be a 2-tuple with "
@@ -2214,9 +2235,8 @@ PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context)
PyObject *new;
PyObject *array_meth;
- array_meth = PyObject_GetAttrString(op, "__array__");
+ array_meth = PyArray_GetAttrString_SuppressException(op, "__array__");
if (array_meth == NULL) {
- PyErr_Clear();
return Py_NotImplemented;
}
if (context == NULL) {
@@ -3459,9 +3479,9 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
goto done;
}
elcount = (count < 0) ? 0 : count;
- if ((elsize=dtype->elsize) == 0) {
- PyErr_SetString(PyExc_ValueError, "Must specify length "\
- "when using variable-size data-type.");
+ if ((elsize = dtype->elsize) == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "Must specify length when using variable-size data-type.");
goto done;
}
@@ -3470,8 +3490,8 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
* reference counts before throwing away any memory.
*/
if (PyDataType_REFCHK(dtype)) {
- PyErr_SetString(PyExc_ValueError, "cannot create "\
- "object arrays from iterator");
+ PyErr_SetString(PyExc_ValueError,
+ "cannot create object arrays from iterator");
goto done;
}
@@ -3498,7 +3518,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
}
if (new_data == NULL) {
PyErr_SetString(PyExc_MemoryError,
- "cannot allocate array memory");
+ "cannot allocate array memory");
Py_DECREF(value);
goto done;
}
@@ -3506,16 +3526,21 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
}
PyArray_DIMS(ret)[0] = i + 1;
- if (((item = index2ptr(ret, i)) == NULL)
- || (PyArray_DESCR(ret)->f->setitem(value, item, ret) == -1)) {
+ if (((item = index2ptr(ret, i)) == NULL) ||
+ (PyArray_DESCR(ret)->f->setitem(value, item, ret) == -1)) {
Py_DECREF(value);
goto done;
}
Py_DECREF(value);
}
+
+ if (PyErr_Occurred()) {
+ goto done;
+ }
if (i < count) {
- PyErr_SetString(PyExc_ValueError, "iterator too short");
+ PyErr_SetString(PyExc_ValueError,
+ "iterator too short");
goto done;
}
@@ -3524,11 +3549,13 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
* (assuming realloc is reasonably good about reusing space...)
*/
if (i == 0) {
+ /* The size cannot be zero for PyDataMem_RENEW. */
i = 1;
}
new_data = PyDataMem_RENEW(PyArray_DATA(ret), i * elsize);
if (new_data == NULL) {
- PyErr_SetString(PyExc_MemoryError, "cannot allocate array memory");
+ PyErr_SetString(PyExc_MemoryError,
+ "cannot allocate array memory");
goto done;
}
((PyArrayObject_fields *)ret)->data = new_data;
@@ -3565,6 +3592,7 @@ _array_fill_strides(npy_intp *strides, npy_intp *dims, int nd, size_t itemsize,
int inflag, int *objflags)
{
int i;
+#if NPY_RELAXED_STRIDES_CHECKING
npy_bool not_cf_contig = 0;
npy_bool nod = 0; /* A dim != 1 was found */
@@ -3578,6 +3606,7 @@ _array_fill_strides(npy_intp *strides, npy_intp *dims, int nd, size_t itemsize,
nod = 1;
}
}
+#endif /* NPY_RELAXED_STRIDES_CHECKING */
/* Only make Fortran strides if not contiguous as well */
if ((inflag & (NPY_ARRAY_F_CONTIGUOUS|NPY_ARRAY_C_CONTIGUOUS)) ==
@@ -3587,11 +3616,21 @@ _array_fill_strides(npy_intp *strides, npy_intp *dims, int nd, size_t itemsize,
if (dims[i]) {
itemsize *= dims[i];
}
+#if NPY_RELAXED_STRIDES_CHECKING
else {
not_cf_contig = 0;
}
+ if (dims[i] == 1) {
+ /* For testing purpose only */
+ strides[i] = NPY_MAX_INTP;
+ }
+#endif /* NPY_RELAXED_STRIDES_CHECKING */
}
+#if NPY_RELAXED_STRIDES_CHECKING
if (not_cf_contig) {
+#else /* not NPY_RELAXED_STRIDES_CHECKING */
+ if ((nd > 1) && ((strides[0] != strides[nd-1]) || (dims[nd-1] > 1))) {
+#endif /* not NPY_RELAXED_STRIDES_CHECKING */
*objflags = ((*objflags)|NPY_ARRAY_F_CONTIGUOUS) &
~NPY_ARRAY_C_CONTIGUOUS;
}
@@ -3605,11 +3644,21 @@ _array_fill_strides(npy_intp *strides, npy_intp *dims, int nd, size_t itemsize,
if (dims[i]) {
itemsize *= dims[i];
}
+#if NPY_RELAXED_STRIDES_CHECKING
else {
not_cf_contig = 0;
}
+ if (dims[i] == 1) {
+ /* For testing purpose only */
+ strides[i] = NPY_MAX_INTP;
+ }
+#endif /* NPY_RELAXED_STRIDES_CHECKING */
}
+#if NPY_RELAXED_STRIDES_CHECKING
if (not_cf_contig) {
+#else /* not NPY_RELAXED_STRIDES_CHECKING */
+ if ((nd > 1) && ((strides[0] != strides[nd-1]) || (dims[0] > 1))) {
+#endif /* not NPY_RELAXED_STRIDES_CHECKING */
*objflags = ((*objflags)|NPY_ARRAY_C_CONTIGUOUS) &
~NPY_ARRAY_F_CONTIGUOUS;
}
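
Setting aside the relaxed-strides special cases, the C-order branch above computes the usual row-major strides. A standalone sketch:

    #include <stdio.h>

    int main(void)
    {
        long dims[3] = {2, 3, 4}, strides[3];
        long itemsize = 8;   /* e.g. float64 */
        int i;

        /* work from the last axis inward, as in the C-contiguous case */
        for (i = 2; i >= 0; i--) {
            strides[i] = itemsize;
            itemsize *= dims[i];
        }
        for (i = 0; i < 3; i++) {
            printf("%ld ", strides[i]);   /* prints: 96 32 8 */
        }
        printf("\n");
        return 0;
    }
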
diff --git a/numpy/core/src/multiarray/datetime_busdaycal.c b/numpy/core/src/multiarray/datetime_busdaycal.c
index 82eea9f20..67ab5284c 100644
--- a/numpy/core/src/multiarray/datetime_busdaycal.c
+++ b/numpy/core/src/multiarray/datetime_busdaycal.c
@@ -547,7 +547,5 @@ NPY_NO_EXPORT PyTypeObject NpyBusDayCalendar_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c
index 32009bb79..e74da0384 100644
--- a/numpy/core/src/multiarray/datetime_strings.c
+++ b/numpy/core/src/multiarray/datetime_strings.c
@@ -24,7 +24,14 @@
#include "_datetime.h"
#include "datetime_strings.h"
-/* Platform-specific time_t typedef */
+/*
+ * Platform-specific time_t typedef. Some platforms use 32 bit, some use
+ * 64 bit, and we just use the default, with the exception of mingw, where
+ * we must use 64 bit because MSVCRT version 9 does not have the (32 bit)
+ * localtime() symbol, so we need to use the 64 bit version [1].
+ *
+ * [1] http://thread.gmane.org/gmane.comp.gnu.mingw.user/27011
+ */
#if defined(NPY_MINGW_USE_CUSTOM_MSVCR)
typedef __time64_t NPY_TIME_T;
#else
@@ -34,8 +41,35 @@
/*
* Wraps `localtime` functionality for multiple platforms. This
* converts a time value to a time structure in the local timezone.
+ * If sizeof(NPY_TIME_T) == 4, then years must be between 1970 and 2038. If
+ * sizeof(NPY_TIME_T) == 8, then years must be later than 1970. If the years
+ * are not in this range, then get_localtime() will fail on some platforms.
*
* Returns 0 on success, -1 on failure.
+ *
+ * Notes:
+ * 1) If NPY_TIME_T is 32 bit (i.e. sizeof(NPY_TIME_T) == 4), then the
+ * maximum year it can represent is 2038 (see [1] for more details). Trying
+ * to use a higher date like 2041 in the 32 bit "ts" variable below will
+ * typically result in "ts" being a negative number (corresponding roughly
+ * to a year ~ 1905). If NPY_TIME_T is 64 bit, then there is no such
+ * problem in practice.
+ * 2) If the "ts" argument to localtime() is negative, it represents
+ * years < 1970 both for 32 and 64 bits (for 32 bits the earliest year it can
+ * represent is 1901, while 64 bits can represent much earlier years).
+ * 3) On Linux, localtime() works for negative "ts". On Windows and in Wine,
+ * localtime() as well as the localtime_s() and _localtime64_s() functions
+ * will fail for any negative "ts" and return a nonzero error code
+ * (localtime_s, _localtime64_s) or NULL (localtime). This behavior is the
+ * same for both 32 and 64 bits.
+ *
+ * From this it follows that get_localtime() is only guaranteed to work
+ * correctly on all platforms for years between 1970 and 2038 for 32 bit
+ * NPY_TIME_T and years after 1970 for 64 bit NPY_TIME_T. For
+ * multiplatform code, get_localtime() should never be used outside of this
+ * range.
+ *
+ * [1] http://en.wikipedia.org/wiki/Year_2038_problem
*/
static int
get_localtime(NPY_TIME_T *ts, struct tm *tms)
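A standalone demonstration of note 1 above (hypothetical, not part of the patch): with a 32 bit time type, the seconds count for a date around 2041 no longer fits and, on the usual two's-complement platforms, wraps negative:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Roughly the seconds from 1970-01-01 to 2041-01-01. */
        int64_t seconds64 = 2240524800LL;
        /* Truncating to 32 bits wraps negative (roughly year 1905). */
        int32_t seconds32 = (int32_t)seconds64;
        printf("64 bit: %lld  32 bit: %d\n",
               (long long)seconds64, seconds32);
        return 0;
    }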
@@ -154,7 +188,9 @@ fail:
/*
* Converts a datetimestruct in UTC to a datetimestruct in local time,
- * also returning the timezone offset applied.
+ * also returning the timezone offset applied. This function works for any year
+ * > 1970 on all platforms and both 32 and 64 bits. If the year < 1970, then it
+ * will fail on some platforms.
*
* Returns 0 on success, -1 on failure.
*/
@@ -169,17 +205,23 @@ convert_datetimestruct_utc_to_local(npy_datetimestruct *out_dts_local,
/* Make a copy of the input 'dts' to modify */
*out_dts_local = *dts_utc;
- /* HACK: Use a year < 2038 for later years for small time_t */
+ /*
+ * For 32 bit NPY_TIME_T, the get_localtime() function does not work for
+ * years later than 2038 (see the comments above get_localtime()). So if the
+ * year >= 2038, we instead call get_localtime() for the year 2036 or 2037
+ * (depending on whether the year is a leap year), which must work, and at
+ * the end we add the 'year_correction' back.
+ */
if (sizeof(NPY_TIME_T) == 4 && out_dts_local->year >= 2038) {
if (is_leapyear(out_dts_local->year)) {
/* 2036 is a leap year */
year_correction = out_dts_local->year - 2036;
- out_dts_local->year -= year_correction;
+ out_dts_local->year -= year_correction; /* = 2036 */
}
else {
/* 2037 is not a leap year */
year_correction = out_dts_local->year - 2037;
- out_dts_local->year -= year_correction;
+ out_dts_local->year -= year_correction; /* = 2037 */
}
}
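A worked example of the correction (illustrative, standalone): 2044 is a leap year, so it is mapped onto 2036, which is also a leap year, keeping month lengths and Feb 29 aligned; the difference is added back afterwards:

    #include <assert.h>

    static int is_leap(int y)
    {
        return (y % 4 == 0 && y % 100 != 0) || (y % 400 == 0);
    }

    int main(void)
    {
        int year = 2044;                        /* beyond the 32 bit range */
        int base = is_leap(year) ? 2036 : 2037; /* matching leap status */
        int correction = year - base;           /* 2044 - 2036 = 8 */
        /* ... get_localtime()-style code runs with 'base' ... */
        assert(base + correction == year);      /* restores 2044 */
        return 0;
    }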
@@ -189,12 +231,13 @@ convert_datetimestruct_utc_to_local(npy_datetimestruct *out_dts_local,
* we drop the seconds value from the npy_datetimestruct, everything
* is ok for this operation.
*/
- rawtime = (time_t)get_datetimestruct_days(out_dts_local) * 24 * 60 * 60;
+ rawtime = (NPY_TIME_T)get_datetimestruct_days(out_dts_local) * 24 * 60 * 60;
rawtime += dts_utc->hour * 60 * 60;
rawtime += dts_utc->min * 60;
/* localtime converts a 'time_t' into a local 'struct tm' */
if (get_localtime(&rawtime, &tm_) < 0) {
+ /* This should only fail if year < 1970 on some platforms. */
return -1;
}
@@ -207,13 +250,13 @@ convert_datetimestruct_utc_to_local(npy_datetimestruct *out_dts_local,
/* Extract the timezone offset that was applied */
rawtime /= 60;
- localrawtime = (time_t)get_datetimestruct_days(out_dts_local) * 24 * 60;
+ localrawtime = (NPY_TIME_T)get_datetimestruct_days(out_dts_local) * 24 * 60;
localrawtime += out_dts_local->hour * 60;
localrawtime += out_dts_local->min;
*out_timezone_offset = localrawtime - rawtime;
- /* Reapply the year 2038 year correction HACK */
+ /* Reapply the year 2038 year correction */
out_dts_local->year += year_correction;
return 0;
@@ -233,17 +276,23 @@ convert_datetimestruct_local_to_utc(npy_datetimestruct *out_dts_utc,
/* Make a copy of the input 'dts' to modify */
*out_dts_utc = *dts_local;
- /* HACK: Use a year < 2038 for later years for small time_t */
+ /*
+ * For 32 bit NPY_TIME_T, the get_mktime()/get_gmtime() functions do not
+ * work for years later than 2038. So if the year >= 2038, we instead call
+ * get_mktime()/get_gmtime() for the year 2036 or 2037 (depending on
+ * whether the year is a leap year), which must work, and at the end we
+ * add the 'year_correction' back.
+ */
if (sizeof(NPY_TIME_T) == 4 && out_dts_utc->year >= 2038) {
if (is_leapyear(out_dts_utc->year)) {
/* 2036 is a leap year */
year_correction = out_dts_utc->year - 2036;
- out_dts_utc->year -= year_correction;
+ out_dts_utc->year -= year_correction; /* = 2036 */
}
else {
/* 2037 is not a leap year */
year_correction = out_dts_utc->year - 2037;
- out_dts_utc->year -= year_correction;
+ out_dts_utc->year -= year_correction; /* = 2037 */
}
}
@@ -286,7 +335,7 @@ convert_datetimestruct_local_to_utc(npy_datetimestruct *out_dts_utc,
out_dts_utc->year = tm_.tm_year + 1900;
}
- /* Reapply the year 2038 year correction HACK */
+ /* Reapply the year 2038 year correction */
out_dts_utc->year += year_correction;
return 0;
@@ -1053,7 +1102,8 @@ make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen,
/*
* Only do local time within a reasonable year range. The years
* earlier than 1970 are not made local, because the Windows API
- * raises an error when they are attempted. For consistency, this
+ * raises an error when they are attempted (see the comments above the
+ * get_localtime() function). For consistency, this
* restriction is applied to all platforms.
*
* Note that this only affects how the datetime becomes a string.
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 6bb1cded5..10d4023d5 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -3497,7 +3497,5 @@ NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 01cb93562..3923709ed 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -669,15 +669,9 @@ get_nbo_cast_numeric_transfer_function(int aligned,
cls = PyObject_GetAttrString(obj, "ComplexWarning");
Py_DECREF(obj);
}
-#if PY_VERSION_HEX >= 0x02050000
ret = PyErr_WarnEx(cls,
- "Casting complex values to real discards "
- "the imaginary part", 1);
-#else
- ret = PyErr_Warn(cls,
- "Casting complex values to real discards "
- "the imaginary part");
-#endif
+ "Casting complex values to real discards "
+ "the imaginary part", 1);
Py_XDECREF(cls);
if (ret < 0) {
return NPY_FAIL;
@@ -1475,14 +1469,6 @@ get_cast_transfer_function(int aligned,
npy_intp src_itemsize = src_dtype->elsize,
dst_itemsize = dst_dtype->elsize;
- if (src_dtype->type_num == dst_dtype->type_num &&
- src_dtype->type_num != NPY_DATETIME &&
- src_dtype->type_num != NPY_TIMEDELTA) {
- PyErr_SetString(PyExc_ValueError,
- "low level cast function is for unequal type numbers");
- return NPY_FAIL;
- }
-
if (get_nbo_cast_transfer_function(aligned,
src_stride, dst_stride,
src_dtype, dst_dtype,
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index 2c59533f8..56b1ce746 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -17,12 +17,13 @@
#include <numpy/arrayobject.h>
#include <numpy/halffloat.h>
#include <npy_pycompat.h>
+#include <npy_config.h>
#include <ctype.h>
#include "convert.h"
-#ifdef __SSE__
+#ifdef HAVE_XMMINTRIN_H
#define EINSUM_USE_SSE1 1
#else
#define EINSUM_USE_SSE1 0
@@ -31,7 +32,7 @@
/*
* TODO: Only some SSE2 for float64 is implemented.
*/
-#ifdef __SSE2__
+#ifdef HAVE_EMMINTRIN_H
#define EINSUM_USE_SSE2 1
#else
#define EINSUM_USE_SSE2 0
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index ef04bdb20..6e06103a8 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -90,8 +90,33 @@ PyArray_UpdateFlags(PyArrayObject *ret, int flagmask)
* Check whether the given array is stored contiguously
 * in memory, and update the passed-in 'ap' flags appropriately.
*
- * A dimension == 1 stride is ignored for contiguous flags and a 0-sized array
- * is always both C- and F-Contiguous. 0-strided arrays are not contiguous.
+ * The traditional rule is that for an array to be flagged as C contiguous,
+ * the following must hold:
+ *
+ * strides[-1] == itemsize
+ * strides[i] == shape[i+1] * strides[i + 1]
+ *
+ * And for an array to be flagged as F contiguous, the obvious reversal:
+ *
+ * strides[0] == itemsize
+ * strides[i] == shape[i - 1] * strides[i - 1]
+ *
+ * According to these rules, a 0- or 1-dimensional array is either both
+ * C- and F-contiguous, or neither; and an array with 2+ dimensions
+ * can be C- or F-contiguous, or neither, but not both. There are,
+ * however, exceptions for arrays with zero or one item: in the first
+ * case the check is relaxed up to and including the first dimension
+ * with shape[i] == 0, and in the second case `strides == itemsize`
+ * can be true for all dimensions, so both flags are set.
+ *
+ * When NPY_RELAXED_STRIDES_CHECKING is set, we use a more accurate
+ * definition of C- and F-contiguity, in which all 0-sized arrays are
+ * contiguous (regardless of dimensionality), and if shape[i] == 1
+ * then we ignore strides[i] (since it has no effect on memory layout).
+ * With these new rules, it is possible for e.g. a 10x1 array to be both
+ * C- and F-contiguous -- but they break downstream code which assumes
+ * that for contiguous arrays strides[-1] (resp. strides[0]) always
+ * contains the itemsize.
*/
static void
_UpdateContiguousFlags(PyArrayObject *ap)
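The two definitions can be read side by side in a freestanding sketch (illustrative only; the real function also sets the flags on the array object and handles the Fortran direction):

    #include <numpy/npy_common.h>

    static npy_bool
    is_c_contig_sketch(const npy_intp *dims, const npy_intp *strides,
                       int nd, npy_intp itemsize, int relaxed)
    {
        npy_intp sd = itemsize;
        int i;
        for (i = nd - 1; i >= 0; i--) {
            if (relaxed) {
                if (dims[i] == 0) {
                    return 1;               /* 0-sized: contiguous */
                }
                if (dims[i] != 1) {         /* length-1 axes are ignored */
                    if (strides[i] != sd) {
                        return 0;
                    }
                    sd *= dims[i];
                }
            }
            else {
                if (strides[i] != sd) {     /* traditional: check every axis */
                    return 0;
                }
                if (dims[i] == 0) {
                    break;
                }
                sd *= dims[i];
            }
        }
        return 1;
    }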
@@ -101,9 +126,10 @@ _UpdateContiguousFlags(PyArrayObject *ap)
int i;
npy_bool is_c_contig = 1;
- sd = PyArray_DESCR(ap)->elsize;
+ sd = PyArray_ITEMSIZE(ap);
for (i = PyArray_NDIM(ap) - 1; i >= 0; --i) {
dim = PyArray_DIMS(ap)[i];
+#if NPY_RELAXED_STRIDES_CHECKING
/* contiguous by definition */
if (dim == 0) {
PyArray_ENABLEFLAGS(ap, NPY_ARRAY_C_CONTIGUOUS);
@@ -116,6 +142,17 @@ _UpdateContiguousFlags(PyArrayObject *ap)
}
sd *= dim;
}
+#else /* not NPY_RELAXED_STRIDES_CHECKING */
+ if (PyArray_STRIDES(ap)[i] != sd) {
+ is_c_contig = 0;
+ break;
+ }
+ /* contiguous, if it got this far */
+ if (dim == 0) {
+ break;
+ }
+ sd *= dim;
+#endif /* not NPY_RELAXED_STRIDES_CHECKING */
}
if (is_c_contig) {
PyArray_ENABLEFLAGS(ap, NPY_ARRAY_C_CONTIGUOUS);
@@ -125,9 +162,10 @@ _UpdateContiguousFlags(PyArrayObject *ap)
}
/* check if fortran contiguous */
- sd = PyArray_DESCR(ap)->elsize;
+ sd = PyArray_ITEMSIZE(ap);
for (i = 0; i < PyArray_NDIM(ap); ++i) {
dim = PyArray_DIMS(ap)[i];
+#if NPY_RELAXED_STRIDES_CHECKING
if (dim != 1) {
if (PyArray_STRIDES(ap)[i] != sd) {
PyArray_CLEARFLAGS(ap, NPY_ARRAY_F_CONTIGUOUS);
@@ -135,6 +173,16 @@ _UpdateContiguousFlags(PyArrayObject *ap)
}
sd *= dim;
}
+#else /* not NPY_RELAXED_STRIDES_CHECKING */
+ if (PyArray_STRIDES(ap)[i] != sd) {
+ PyArray_CLEARFLAGS(ap, NPY_ARRAY_F_CONTIGUOUS);
+ return;
+ }
+ if (dim == 0) {
+ break;
+ }
+ sd *= dim;
+#endif /* not NPY_RELAXED_STRIDES_CHECKING */
}
PyArray_ENABLEFLAGS(ap, NPY_ARRAY_F_CONTIGUOUS);
return;
@@ -595,11 +643,7 @@ arrayflags_richcompare(PyObject *self, PyObject *other, int cmp_op)
}
static PyMappingMethods arrayflags_as_mapping = {
-#if PY_VERSION_HEX >= 0x02050000
(lenfunc)NULL, /*mp_length*/
-#else
- (inquiry)NULL, /*mp_length*/
-#endif
(binaryfunc)arrayflags_getitem, /*mp_subscript*/
(objobjargproc)arrayflags_setitem, /*mp_ass_subscript*/
};
@@ -677,7 +721,5 @@ NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 32212ebc4..6ad4cc13a 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -102,6 +102,8 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
PyArrayObject *new;
npy_intp numbytes = 0;
npy_intp offset = 0;
+ npy_intp lower_offset = 0;
+ npy_intp upper_offset = 0;
Py_ssize_t buf_len;
char *buf;
@@ -131,25 +133,30 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
if (PyArray_BASE(new) && PyObject_AsReadBuffer(PyArray_BASE(new),
(const void **)&buf,
&buf_len) >= 0) {
- offset = PyArray_DATA(self) - buf;
+ offset = PyArray_BYTES(self) - buf;
numbytes = buf_len + offset;
}
else {
PyErr_Clear();
- numbytes = PyArray_MultiplyList(PyArray_DIMS(new),
- PyArray_NDIM(new))*PyArray_DESCR(new)->elsize;
- offset = PyArray_DATA(self) - PyArray_DATA(new);
+ offset_bounds_from_strides(PyArray_ITEMSIZE(new), PyArray_NDIM(new),
+ PyArray_DIMS(new), PyArray_STRIDES(new),
+ &lower_offset, &upper_offset);
+
+ offset = PyArray_BYTES(self) - (PyArray_BYTES(new) + lower_offset);
+ numbytes = upper_offset - lower_offset;
}
- if (!PyArray_CheckStrides(PyArray_DESCR(self)->elsize, PyArray_NDIM(self), numbytes,
- offset,
+ /* numbytes == 0 is special here, but the 0-size array case always works */
+ if (!PyArray_CheckStrides(PyArray_ITEMSIZE(self), PyArray_NDIM(self),
+ numbytes, offset,
PyArray_DIMS(self), newstrides.ptr)) {
PyErr_SetString(PyExc_ValueError, "strides is not "\
"compatible with available memory");
goto fail;
}
memcpy(PyArray_STRIDES(self), newstrides.ptr, sizeof(npy_intp)*newstrides.len);
- PyArray_UpdateFlags(self, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS);
+ PyArray_UpdateFlags(self, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS |
+ NPY_ARRAY_ALIGNED);
PyDimMem_FREE(newstrides.ptr);
return 0;
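offset_bounds_from_strides() is a numpy-internal helper; a plausible sketch of the computation it is assumed to perform here (the lowest and highest byte offsets reachable through the strides) is:

    #include <numpy/npy_common.h>

    static void
    offset_bounds_sketch(npy_intp itemsize, int nd, const npy_intp *dims,
                         const npy_intp *strides,
                         npy_intp *lower, npy_intp *upper)
    {
        int i;
        npy_intp max_axis_offset;
        *lower = 0;
        *upper = 0;
        for (i = 0; i < nd; i++) {
            if (dims[i] == 0) {
                *lower = *upper = 0;        /* empty array touches nothing */
                return;
            }
            max_axis_offset = strides[i] * (dims[i] - 1);
            if (max_axis_offset > 0) {
                *upper += max_axis_offset;  /* positive stride: grows up */
            }
            else {
                *lower += max_axis_offset;  /* negative stride: grows down */
            }
        }
        *upper += itemsize;                 /* include the last element */
    }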
@@ -383,7 +390,7 @@ static PyObject *
array_size_get(PyArrayObject *self)
{
npy_intp size=PyArray_SIZE(self);
-#if NPY_SIZEOF_INTP <= SIZEOF_LONG
+#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
return PyInt_FromLong((long) size);
#else
if (size > NPY_MAX_LONG || size < NPY_MIN_LONG) {
@@ -399,7 +406,7 @@ static PyObject *
array_nbytes_get(PyArrayObject *self)
{
npy_intp nbytes = PyArray_NBYTES(self);
-#if NPY_SIZEOF_INTP <= SIZEOF_LONG
+#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
return PyInt_FromLong((long) nbytes);
#else
if (nbytes > NPY_MAX_LONG || nbytes < NPY_MIN_LONG) {
@@ -665,7 +672,7 @@ _get_part(PyArrayObject *self, int imag)
PyArray_NDIM(self),
PyArray_DIMS(self),
PyArray_STRIDES(self),
- PyArray_DATA(self) + offset,
+ PyArray_BYTES(self) + offset,
PyArray_FLAGS(self), (PyObject *)self);
if (ret == NULL) {
return NULL;
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index 3b66ad66a..b2be4f464 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -20,6 +20,7 @@
#include "item_selection.h"
#include "npy_sort.h"
+#include "npy_partition.h"
/*NUMPY_API
* Take
@@ -31,10 +32,11 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
PyArray_Descr *dtype;
PyArray_FastTakeFunc *func;
PyArrayObject *obj = NULL, *self, *indices;
- npy_intp nd, i, j, n, m, max_item, tmp, chunk, nelem;
+ npy_intp nd, i, j, n, m, k, max_item, tmp, chunk, itemsize, nelem;
npy_intp shape[NPY_MAXDIMS];
- char *src, *dest;
+ char *src, *dest, *tmp_src;
int err;
+ npy_bool needs_refcounting;
indices = NULL;
self = (PyArrayObject *)PyArray_CheckAxis(self0, &axis,
@@ -44,13 +46,11 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
}
indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0,
NPY_INTP,
- 1, 0);
+ 0, 0);
if (indices == NULL) {
goto fail;
}
-
-
n = m = chunk = 1;
nd = PyArray_NDIM(self) + PyArray_NDIM(indices) - 1;
for (i = 0; i < nd; i++) {
@@ -112,9 +112,18 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
max_item = PyArray_DIMS(self)[axis];
nelem = chunk;
- chunk = chunk * PyArray_DESCR(obj)->elsize;
+ itemsize = PyArray_ITEMSIZE(obj);
+ chunk = chunk * itemsize;
src = PyArray_DATA(self);
dest = PyArray_DATA(obj);
+ needs_refcounting = PyDataType_REFCHK(PyArray_DESCR(self));
+
+ if ((max_item == 0) && (PyArray_SIZE(obj) != 0)) {
+ /* Index error, since that is the usual error for raise mode */
+ PyErr_SetString(PyExc_IndexError,
+ "cannot do a non-empty take from an empty axes.");
+ goto fail;
+ }
func = PyArray_DESCR(self)->f->fasttake;
if (func == NULL) {
@@ -126,8 +135,20 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
if (check_and_adjust_index(&tmp, max_item, axis) < 0) {
goto fail;
}
- memmove(dest, src + tmp*chunk, chunk);
- dest += chunk;
+ tmp_src = src + tmp * chunk;
+ if (needs_refcounting) {
+ for (k=0; k < nelem; k++) {
+ PyArray_Item_INCREF(tmp_src, PyArray_DESCR(self));
+ PyArray_Item_XDECREF(dest, PyArray_DESCR(self));
+ memmove(dest, tmp_src, itemsize);
+ dest += itemsize;
+ tmp_src += itemsize;
+ }
+ }
+ else {
+ memmove(dest, tmp_src, chunk);
+ dest += chunk;
+ }
}
src += chunk*max_item;
}
@@ -146,8 +167,20 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
tmp -= max_item;
}
}
- memmove(dest, src + tmp*chunk, chunk);
- dest += chunk;
+ tmp_src = src + tmp * chunk;
+ if (needs_refcounting) {
+ for (k=0; k < nelem; k++) {
+ PyArray_Item_INCREF(tmp_src, PyArray_DESCR(self));
+ PyArray_Item_XDECREF(dest, PyArray_DESCR(self));
+ memmove(dest, tmp_src, itemsize);
+ dest += itemsize;
+ tmp_src += itemsize;
+ }
+ }
+ else {
+ memmove(dest, tmp_src, chunk);
+ dest += chunk;
+ }
}
src += chunk*max_item;
}
@@ -162,8 +195,20 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
else if (tmp >= max_item) {
tmp = max_item - 1;
}
- memmove(dest, src+tmp*chunk, chunk);
- dest += chunk;
+ tmp_src = src + tmp * chunk;
+ if (needs_refcounting) {
+ for (k=0; k < nelem; k++) {
+ PyArray_Item_INCREF(tmp_src, PyArray_DESCR(self));
+ PyArray_Item_XDECREF(dest, PyArray_DESCR(self));
+ memmove(dest, tmp_src, itemsize);
+ dest += itemsize;
+ tmp_src += itemsize;
+ }
+ }
+ else {
+ memmove(dest, tmp_src, chunk);
+ dest += chunk;
+ }
}
src += chunk*max_item;
}
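The inner loop repeated in the three clip modes above can be read as one helper (a sketch; the patch inlines it each time). It keeps reference counts correct for object dtypes by incref'ing the source item before decref'ing whatever the destination held:

    #include <Python.h>
    #include <string.h>
    #include <numpy/arrayobject.h>

    static void
    take_copy_sketch(char *dst, char *src, npy_intp nelem, npy_intp itemsize,
                     PyArray_Descr *descr, npy_bool needs_refcounting)
    {
        npy_intp k;
        if (needs_refcounting) {
            for (k = 0; k < nelem; k++) {
                PyArray_Item_INCREF(src, descr);   /* new reference for dst */
                PyArray_Item_XDECREF(dst, descr);  /* drop old dst item */
                memmove(dst, src, itemsize);
                dst += itemsize;
                src += itemsize;
            }
        }
        else {
            memmove(dst, src, nelem * itemsize);   /* plain data: one move */
        }
    }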
@@ -178,7 +223,6 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
}
}
- PyArray_INCREF(obj);
Py_XDECREF(indices);
Py_XDECREF(self);
if (out != NULL && out != obj) {
@@ -252,7 +296,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0,
switch(clipmode) {
case NPY_RAISE:
for (i = 0; i < ni; i++) {
- src = PyArray_DATA(values) + chunk*(i % nv);
+ src = PyArray_BYTES(values) + chunk*(i % nv);
tmp = ((npy_intp *)(PyArray_DATA(indices)))[i];
if (check_and_adjust_index(&tmp, max_item, 0) < 0) {
goto fail;
@@ -264,7 +308,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0,
break;
case NPY_WRAP:
for (i = 0; i < ni; i++) {
- src = PyArray_DATA(values) + chunk * (i % nv);
+ src = PyArray_BYTES(values) + chunk * (i % nv);
tmp = ((npy_intp *)(PyArray_DATA(indices)))[i];
if (tmp < 0) {
while (tmp < 0) {
@@ -283,7 +327,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0,
break;
case NPY_CLIP:
for (i = 0; i < ni; i++) {
- src = PyArray_DATA(values) + chunk * (i % nv);
+ src = PyArray_BYTES(values) + chunk * (i % nv);
tmp = ((npy_intp *)(PyArray_DATA(indices)))[i];
if (tmp < 0) {
tmp = 0;
@@ -302,7 +346,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0,
switch(clipmode) {
case NPY_RAISE:
for (i = 0; i < ni; i++) {
- src = PyArray_DATA(values) + chunk * (i % nv);
+ src = PyArray_BYTES(values) + chunk * (i % nv);
tmp = ((npy_intp *)(PyArray_DATA(indices)))[i];
if (check_and_adjust_index(&tmp, max_item, 0) < 0) {
goto fail;
@@ -312,7 +356,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0,
break;
case NPY_WRAP:
for (i = 0; i < ni; i++) {
- src = PyArray_DATA(values) + chunk * (i % nv);
+ src = PyArray_BYTES(values) + chunk * (i % nv);
tmp = ((npy_intp *)(PyArray_DATA(indices)))[i];
if (tmp < 0) {
while (tmp < 0) {
@@ -329,7 +373,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0,
break;
case NPY_CLIP:
for (i = 0; i < ni; i++) {
- src = PyArray_DATA(values) + chunk * (i % nv);
+ src = PyArray_BYTES(values) + chunk * (i % nv);
tmp = ((npy_intp *)(PyArray_DATA(indices)))[i];
if (tmp < 0) {
tmp = 0;
@@ -428,7 +472,7 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
for (i = 0; i < ni; i++) {
tmp = ((npy_bool *)(PyArray_DATA(mask)))[i];
if (tmp) {
- src = PyArray_DATA(values) + chunk * (i % nv);
+ src = PyArray_BYTES(values) + chunk * (i % nv);
PyArray_Item_INCREF(src, PyArray_DESCR(self));
PyArray_Item_XDECREF(dest+i*chunk, PyArray_DESCR(self));
memmove(dest + i * chunk, src, chunk);
@@ -441,7 +485,7 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
for (i = 0; i < ni; i++) {
tmp = ((npy_bool *)(PyArray_DATA(mask)))[i];
if (tmp) {
- src = PyArray_DATA(values) + chunk*(i % nv);
+ src = PyArray_BYTES(values) + chunk*(i % nv);
memmove(dest + i*chunk, src, chunk);
}
}
@@ -725,14 +769,19 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out,
* over all but the desired sorting axis.
*/
static int
-_new_sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
+_new_sortlike(PyArrayObject *op, int axis,
+ NPY_SORTKIND swhich,
+ PyArray_PartitionFunc * part,
+ NPY_SELECTKIND pwhich,
+ npy_intp * kth, npy_intp nkth)
{
PyArrayIterObject *it;
int needcopy = 0, swap;
npy_intp N, size;
int elsize;
npy_intp astride;
- PyArray_SortFunc *sort;
+ PyArray_SortFunc *sort = NULL;
+
NPY_BEGIN_THREADS_DEF;
it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)op, &axis);
@@ -742,7 +791,10 @@ _new_sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
}
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(op));
- sort = PyArray_DESCR(op)->f->sort[which];
+ if (part == NULL) {
+ sort = PyArray_DESCR(op)->f->sort[swhich];
+ }
+
size = it->size;
N = PyArray_DIMS(op)[axis];
elsize = PyArray_DESCR(op)->elsize;
@@ -752,6 +804,9 @@ _new_sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
(astride != (npy_intp) elsize) || swap;
if (needcopy) {
char *buffer = PyDataMem_NEW(N*elsize);
+ if (buffer == NULL) {
+ goto fail;
+ }
while (size--) {
_unaligned_strided_byte_copy(buffer, (npy_intp) elsize, it->dataptr,
@@ -759,9 +814,22 @@ _new_sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
if (swap) {
_strided_byte_swap(buffer, (npy_intp) elsize, N, elsize);
}
- if (sort(buffer, N, op) < 0) {
- PyDataMem_FREE(buffer);
- goto fail;
+ if (part == NULL) {
+ if (sort(buffer, N, op) < 0) {
+ PyDataMem_FREE(buffer);
+ goto fail;
+ }
+ }
+ else {
+ npy_intp pivots[NPY_MAX_PIVOT_STACK];
+ npy_intp npiv = 0;
+ npy_intp i;
+ for (i = 0; i < nkth; i++) {
+ if (part(buffer, N, kth[i], pivots, &npiv, op) < 0) {
+ PyDataMem_FREE(buffer);
+ goto fail;
+ }
+ }
}
if (swap) {
_strided_byte_swap(buffer, (npy_intp) elsize, N, elsize);
@@ -774,8 +842,20 @@ _new_sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
}
else {
while (size--) {
- if (sort(it->dataptr, N, op) < 0) {
- goto fail;
+ if (part == NULL) {
+ if (sort(it->dataptr, N, op) < 0) {
+ goto fail;
+ }
+ }
+ else {
+ npy_intp pivots[NPY_MAX_PIVOT_STACK];
+ npy_intp npiv = 0;
+ npy_intp i;
+ for (i = 0; i < nkth; i++) {
+ if (part(it->dataptr, N, kth[i], pivots, &npiv, op) < 0) {
+ goto fail;
+ }
+ }
}
PyArray_ITER_NEXT(it);
}
@@ -785,23 +865,30 @@ _new_sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
return 0;
fail:
+ /* Out of memory during sorting or buffer creation */
NPY_END_THREADS;
+ PyErr_NoMemory();
Py_DECREF(it);
- return 0;
+ return -1;
}
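The postcondition of partitioning at several kth values is that each a[kth[i]] holds the value a fully sorted array would hold there; a standalone demo using a full sort as a stand-in for introselect (illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    static int cmp_int(const void *a, const void *b)
    {
        int x = *(const int *)a, y = *(const int *)b;
        return (x > y) - (x < y);
    }

    int main(void)
    {
        int a[10] = {9, 1, 8, 2, 7, 3, 6, 4, 5, 0};
        int kth[2] = {2, 7};
        /* A full sort trivially satisfies the partition postcondition. */
        qsort(a, 10, sizeof(int), cmp_int);
        printf("a[%d] = %d, a[%d] = %d\n",
               kth[0], a[kth[0]], kth[1], a[kth[1]]);  /* a[2] = 2, a[7] = 7 */
        return 0;
    }

The pivot stack passed between part() calls records boundaries already established, which is why the kth values are processed in ascending order.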
static PyObject*
-_new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which)
+_new_argsortlike(PyArrayObject *op, int axis,
+ NPY_SORTKIND swhich,
+ PyArray_ArgPartitionFunc * argpart,
+ NPY_SELECTKIND pwhich,
+ npy_intp * kth, npy_intp nkth)
{
PyArrayIterObject *it = NULL;
PyArrayIterObject *rit = NULL;
PyArrayObject *ret;
- int needcopy = 0, i;
- npy_intp N, size;
- int elsize, swap;
+ npy_intp N, size, i;
npy_intp astride, rstride, *iptr;
- PyArray_ArgSortFunc *argsort;
+ int elsize;
+ int needcopy = 0, swap;
+ PyArray_ArgSortFunc *argsort = NULL;
+
NPY_BEGIN_THREADS_DEF;
ret = (PyArrayObject *)PyArray_New(Py_TYPE(op),
@@ -820,7 +907,12 @@ _new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which)
swap = !PyArray_ISNOTSWAPPED(op);
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(op));
- argsort = PyArray_DESCR(op)->f->argsort[which];
+ if (argpart == NULL) {
+ argsort = PyArray_DESCR(op)->f->argsort[swhich];
+ }
+ else {
+ argpart = get_argpartition_func(PyArray_TYPE(op), pwhich);
+ }
size = it->size;
N = PyArray_DIMS(op)[axis];
elsize = PyArray_DESCR(op)->elsize;
@@ -834,7 +926,14 @@ _new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which)
char *valbuffer, *indbuffer;
valbuffer = PyDataMem_NEW(N*elsize);
+ if (valbuffer == NULL) {
+ goto fail;
+ }
indbuffer = PyDataMem_NEW(N*sizeof(npy_intp));
+ if (indbuffer == NULL) {
+ PyDataMem_FREE(valbuffer);
+ goto fail;
+ }
while (size--) {
_unaligned_strided_byte_copy(valbuffer, (npy_intp) elsize, it->dataptr,
astride, N, elsize);
@@ -845,10 +944,25 @@ _new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which)
for (i = 0; i < N; i++) {
*iptr++ = i;
}
- if (argsort(valbuffer, (npy_intp *)indbuffer, N, op) < 0) {
- PyDataMem_FREE(valbuffer);
- PyDataMem_FREE(indbuffer);
- goto fail;
+ if (argpart == NULL) {
+ if (argsort(valbuffer, (npy_intp *)indbuffer, N, op) < 0) {
+ PyDataMem_FREE(valbuffer);
+ PyDataMem_FREE(indbuffer);
+ goto fail;
+ }
+ }
+ else {
+ npy_intp pivots[NPY_MAX_PIVOT_STACK];
+ npy_intp npiv = 0;
+ npy_intp i;
+ for (i = 0; i < nkth; i++) {
+ if (argpart(valbuffer, (npy_intp *)indbuffer,
+ N, kth[i], pivots, &npiv, op) < 0) {
+ PyDataMem_FREE(valbuffer);
+ PyDataMem_FREE(indbuffer);
+ goto fail;
+ }
+ }
}
_unaligned_strided_byte_copy(rit->dataptr, rstride, indbuffer,
sizeof(npy_intp), N, sizeof(npy_intp));
@@ -864,8 +978,22 @@ _new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which)
for (i = 0; i < N; i++) {
*iptr++ = i;
}
- if (argsort(it->dataptr, (npy_intp *)rit->dataptr, N, op) < 0) {
- goto fail;
+ if (argpart == NULL) {
+ if (argsort(it->dataptr, (npy_intp *)rit->dataptr,
+ N, op) < 0) {
+ goto fail;
+ }
+ }
+ else {
+ npy_intp pivots[NPY_MAX_PIVOT_STACK];
+ npy_intp npiv = 0;
+ npy_intp i;
+ for (i = 0; i < nkth; i++) {
+ if (argpart(it->dataptr, (npy_intp *)rit->dataptr,
+ N, kth[i], pivots, &npiv, op) < 0) {
+ goto fail;
+ }
+ }
}
PyArray_ITER_NEXT(it);
PyArray_ITER_NEXT(rit);
@@ -880,13 +1008,16 @@ _new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which)
fail:
NPY_END_THREADS;
+ if (!PyErr_Occurred()) {
+ /* Out of memory during sorting or buffer creation */
+ PyErr_NoMemory();
+ }
Py_DECREF(ret);
Py_XDECREF(it);
Py_XDECREF(rit);
return NULL;
}
-
/* Be sure to save this global_compare when necessary */
static PyArrayObject *global_obj;
@@ -954,7 +1085,8 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
{
PyArrayObject *ap = NULL, *store_arr = NULL;
char *ip;
- int i, n, m, elsize, orign;
+ npy_intp i, n, m;
+ int elsize, orign;
int res = 0;
int axis_orig = axis;
int (*sort)(void *, size_t, size_t, npy_comparator);
@@ -976,7 +1108,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
/* Determine if we should use type-specific algorithm or not */
if (PyArray_DESCR(op)->f->sort[which] != NULL) {
- return _new_sort(op, axis, which);
+ return _new_sortlike(op, axis, which, NULL, 0, NULL, 0);
}
if (PyArray_DESCR(op)->f->compare == NULL) {
@@ -985,6 +1117,8 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
return -1;
}
+ SWAPAXES2(op);
+
switch (which) {
case NPY_QUICKSORT :
sort = npy_quicksort;
@@ -1001,9 +1135,174 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
goto fail;
}
+ ap = (PyArrayObject *)PyArray_FromAny((PyObject *)op,
+ NULL, 1, 0,
+ NPY_ARRAY_DEFAULT | NPY_ARRAY_UPDATEIFCOPY, NULL);
+ if (ap == NULL) {
+ goto fail;
+ }
+ elsize = PyArray_DESCR(ap)->elsize;
+ m = PyArray_DIMS(ap)[PyArray_NDIM(ap)-1];
+ if (m == 0) {
+ goto finish;
+ }
+ n = PyArray_SIZE(ap)/m;
+
+ /* Store global -- allows re-entry -- restore before leaving */
+ store_arr = global_obj;
+ global_obj = ap;
+ for (ip = PyArray_DATA(ap), i = 0; i < n; i++, ip += elsize*m) {
+ res = sort(ip, m, elsize, sortCompare);
+ if (res < 0) {
+ break;
+ }
+ }
+ global_obj = store_arr;
+
+ if (PyErr_Occurred()) {
+ goto fail;
+ }
+ else if (res == -NPY_ENOMEM) {
+ PyErr_NoMemory();
+ goto fail;
+ }
+ else if (res == -NPY_ECOMP) {
+ PyErr_SetString(PyExc_TypeError,
+ "sort comparison failed");
+ goto fail;
+ }
+
+
+ finish:
+ Py_DECREF(ap); /* Should update op if needed */
+ SWAPBACK2(op);
+ return 0;
+
+ fail:
+ Py_XDECREF(ap);
+ SWAPBACK2(op);
+ return -1;
+}
+
+
+/*
+ * make kth array positive, ravel and sort it
+ */
+static PyArrayObject *
+partition_prep_kth_array(PyArrayObject * ktharray,
+ PyArrayObject * op,
+ int axis)
+{
+ const npy_intp * shape = PyArray_SHAPE(op);
+ PyArrayObject * kthrvl;
+ npy_intp * kth;
+ npy_intp nkth, i;
+
+ if (!PyArray_CanCastSafely(PyArray_TYPE(ktharray), NPY_INTP)) {
+ if (DEPRECATE("Calling partition with a non integer index"
+ " will result in an error in the future") < 0) {
+ return NULL;
+ }
+ }
+
+ if (PyArray_NDIM(ktharray) > 1) {
+ PyErr_Format(PyExc_ValueError, "kth array must have dimension <= 1");
+ return NULL;
+ }
+ kthrvl = PyArray_Cast(ktharray, NPY_INTP);
+
+ if (kthrvl == NULL)
+ return NULL;
+
+ kth = PyArray_DATA(kthrvl);
+ nkth = PyArray_SIZE(kthrvl);
+
+ for (i = 0; i < nkth; i++) {
+ if (kth[i] < 0) {
+ kth[i] += shape[axis];
+ }
+ if (PyArray_SIZE(op) != 0 && ((kth[i] < 0) || (kth[i] >= shape[axis]))) {
+ PyErr_Format(PyExc_ValueError, "kth(=%zd) out of bounds (%zd)",
+ kth[i], shape[axis]);
+ Py_XDECREF(kthrvl);
+ return NULL;
+ }
+ }
+
+ /*
+ * sort the array of kths so the partitions will
+ * not trample on each other
+ */
+ PyArray_Sort(kthrvl, -1, NPY_QUICKSORT);
+
+ return kthrvl;
+}
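A worked example of the normalization above (hypothetical values): for an axis of length 5, kth = {-1, 2} becomes {4, 2} after adding shape[axis] to the negative entry, and {2, 4} after the final sort, so later partitions cannot disturb earlier ones:

    #include <stdio.h>

    int main(void)
    {
        long shape_axis = 5;
        long kth[2] = {-1, 2};
        int i;
        for (i = 0; i < 2; i++) {
            if (kth[i] < 0) {
                kth[i] += shape_axis;       /* -1 -> 4 */
            }
        }
        printf("normalized: {%ld, %ld}\n", kth[0], kth[1]);  /* {4, 2} */
        /* PyArray_Sort() then orders them to {2, 4}. */
        return 0;
    }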
+
+
+
+/*NUMPY_API
+ * Partition an array in-place
+ */
+NPY_NO_EXPORT int
+PyArray_Partition(PyArrayObject *op, PyArrayObject * ktharray, int axis, NPY_SELECTKIND which)
+{
+ PyArrayObject *ap = NULL, *store_arr = NULL;
+ char *ip;
+ npy_intp i, n, m;
+ int elsize, orign;
+ int res = 0;
+ int axis_orig = axis;
+ int (*sort)(void *, size_t, size_t, npy_comparator);
+ PyArray_PartitionFunc * part = get_partition_func(PyArray_TYPE(op), which);
+
+ n = PyArray_NDIM(op);
+ if ((n == 0)) {
+ return 0;
+ }
+ if (axis < 0) {
+ axis += n;
+ }
+ if ((axis < 0) || (axis >= n)) {
+ PyErr_Format(PyExc_ValueError, "axis(=%d) out of bounds", axis_orig);
+ return -1;
+ }
+ if (PyArray_FailUnlessWriteable(op, "sort array") < 0) {
+ return -1;
+ }
+
+ if (part) {
+ PyArrayObject * kthrvl = partition_prep_kth_array(ktharray, op, axis);
+ if (kthrvl == NULL)
+ return -1;
+
+ res = _new_sortlike(op, axis, 0,
+ part, which,
+ PyArray_DATA(kthrvl),
+ PyArray_SIZE(kthrvl));
+ Py_DECREF(kthrvl);
+ return res;
+ }
+
+
+ if (PyArray_DESCR(op)->f->compare == NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "type does not have compare function");
+ return -1;
+ }
SWAPAXES2(op);
+ /* select not implemented, use quicksort, slower but equivalent */
+ switch (which) {
+ case NPY_INTROSELECT :
+ sort = npy_quicksort;
+ break;
+ default:
+ PyErr_SetString(PyExc_TypeError,
+ "requested sort kind is not supported");
+ goto fail;
+ }
+
ap = (PyArrayObject *)PyArray_FromAny((PyObject *)op,
NULL, 1, 0,
NPY_ARRAY_DEFAULT | NPY_ARRAY_UPDATEIFCOPY, NULL);
@@ -1020,6 +1319,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
/* Store global -- allows re-entry -- restore before leaving*/
store_arr = global_obj;
global_obj = ap;
+ /* we don't need to care about kth here as we are using a full sort */
for (ip = PyArray_DATA(ap), i = 0; i < n; i++, ip += elsize*m) {
res = sort(ip, m, elsize, sortCompare);
if (res < 0) {
@@ -1101,7 +1401,8 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which)
}
/* Determine if we should use new algorithm or not */
if (PyArray_DESCR(op2)->f->argsort[which] != NULL) {
- ret = (PyArrayObject *)_new_argsort(op2, axis, which);
+ ret = (PyArrayObject *)_new_argsortlike(op2, axis, which,
+ NULL, 0, NULL, 0);
Py_DECREF(op2);
return (PyObject *)ret;
}
@@ -1196,6 +1497,143 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which)
/*NUMPY_API
+ * ArgPartition an array
+ */
+NPY_NO_EXPORT PyObject *
+PyArray_ArgPartition(PyArrayObject *op, PyArrayObject * ktharray, int axis, NPY_SELECTKIND which)
+{
+ PyArrayObject *ap = NULL, *ret = NULL, *store, *op2;
+ npy_intp *ip;
+ npy_intp i, j, n, m, orign;
+ int argsort_elsize;
+ char *store_ptr;
+ int res = 0;
+ int (*sort)(void *, size_t, size_t, npy_comparator);
+ PyArray_ArgPartitionFunc * argpart =
+ get_argpartition_func(PyArray_TYPE(op), which);
+
+ n = PyArray_NDIM(op);
+ if ((n == 0) || (PyArray_SIZE(op) == 1)) {
+ ret = (PyArrayObject *)PyArray_New(Py_TYPE(op), PyArray_NDIM(op),
+ PyArray_DIMS(op),
+ NPY_INTP,
+ NULL, NULL, 0, 0,
+ (PyObject *)op);
+ if (ret == NULL) {
+ return NULL;
+ }
+ *((npy_intp *)PyArray_DATA(ret)) = 0;
+ return (PyObject *)ret;
+ }
+
+ /* Creates new reference op2 */
+ if ((op2=(PyArrayObject *)PyArray_CheckAxis(op, &axis, 0)) == NULL) {
+ return NULL;
+ }
+
+ /* Determine if we should use new algorithm or not */
+ if (argpart) {
+ PyArrayObject * kthrvl = partition_prep_kth_array(ktharray, op2, axis);
+ if (kthrvl == NULL) {
+ Py_DECREF(op2);
+ return NULL;
+ }
+
+ ret = (PyArrayObject *)_new_argsortlike(op2, axis, 0,
+ argpart, which,
+ PyArray_DATA(kthrvl),
+ PyArray_SIZE(kthrvl));
+ Py_DECREF(kthrvl);
+ Py_DECREF(op2);
+ return (PyObject *)ret;
+ }
+
+ if (PyArray_DESCR(op2)->f->compare == NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "type does not have compare function");
+ Py_DECREF(op2);
+ op = NULL;
+ goto fail;
+ }
+
+ /* select not implemented, use quicksort, slower but equivalent */
+ switch (which) {
+ case NPY_INTROSELECT :
+ sort = npy_quicksort;
+ break;
+ default:
+ PyErr_SetString(PyExc_TypeError,
+ "requested sort kind is not supported");
+ Py_DECREF(op2);
+ op = NULL;
+ goto fail;
+ }
+
+ /* ap will contain the reference to op2 */
+ SWAPAXES(ap, op2);
+ op = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)ap,
+ NPY_NOTYPE,
+ 1, 0);
+ Py_DECREF(ap);
+ if (op == NULL) {
+ return NULL;
+ }
+ ret = (PyArrayObject *)PyArray_New(Py_TYPE(op), PyArray_NDIM(op),
+ PyArray_DIMS(op), NPY_INTP,
+ NULL, NULL, 0, 0, (PyObject *)op);
+ if (ret == NULL) {
+ goto fail;
+ }
+ ip = (npy_intp *)PyArray_DATA(ret);
+ argsort_elsize = PyArray_DESCR(op)->elsize;
+ m = PyArray_DIMS(op)[PyArray_NDIM(op)-1];
+ if (m == 0) {
+ goto finish;
+ }
+ n = PyArray_SIZE(op)/m;
+ store_ptr = global_data;
+ global_data = PyArray_DATA(op);
+ store = global_obj;
+ global_obj = op;
+ /* we don't need to care about kth here as we are using a full sort */
+ for (i = 0; i < n; i++, ip += m, global_data += m*argsort_elsize) {
+ for (j = 0; j < m; j++) {
+ ip[j] = j;
+ }
+ res = sort((char *)ip, m, sizeof(npy_intp), argsort_static_compare);
+ if (res < 0) {
+ break;
+ }
+ }
+ global_data = store_ptr;
+ global_obj = store;
+
+ if (PyErr_Occurred()) {
+ goto fail;
+ }
+ else if (res == -NPY_ENOMEM) {
+ PyErr_NoMemory();
+ goto fail;
+ }
+ else if (res == -NPY_ECOMP) {
+ PyErr_SetString(PyExc_TypeError,
+ "sort comparison failed");
+ goto fail;
+ }
+
+ finish:
+ Py_DECREF(op);
+ SWAPBACK(op, ret);
+ return (PyObject *)op;
+
+ fail:
+ Py_XDECREF(op);
+ Py_XDECREF(ret);
+ return NULL;
+}
+
+
+/*NUMPY_API
*LexSort an array providing indices that will sort a collection of arrays
*lexicographically. The first key is sorted on first, followed by the second key
*-- requires that arg"merge"sort is available for each sort_key
@@ -1210,13 +1648,12 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
PyArrayIterObject **its;
PyArrayObject *ret = NULL;
PyArrayIterObject *rit = NULL;
- int n;
+ npy_intp n, N, size, i, j;
+ npy_intp astride, rstride, *iptr;
int nd;
- int needcopy = 0, i,j;
- npy_intp N, size;
+ int needcopy = 0;
int elsize;
int maxelsize;
- npy_intp astride, rstride, *iptr;
int object = 0;
PyArray_ArgSortFunc *argsort;
NPY_BEGIN_THREADS_DEF;
@@ -1227,11 +1664,11 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
"need sequence of keys with len > 0 in lexsort");
return NULL;
}
- mps = (PyArrayObject **) PyArray_malloc(n*NPY_SIZEOF_PYARRAYOBJECT);
+ mps = (PyArrayObject **) PyArray_malloc(n * sizeof(PyArrayObject *));
if (mps == NULL) {
return PyErr_NoMemory();
}
- its = (PyArrayIterObject **) PyArray_malloc(n*sizeof(PyArrayIterObject));
+ its = (PyArrayIterObject **) PyArray_malloc(n * sizeof(PyArrayIterObject *));
if (its == NULL) {
PyArray_free(mps);
return PyErr_NoMemory();
@@ -1260,7 +1697,7 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
}
if (!PyArray_DESCR(mps[i])->f->argsort[NPY_MERGESORT]) {
PyErr_Format(PyExc_TypeError,
- "merge sort not available for item %d", i);
+ "merge sort not available for item %zd", i);
goto fail;
}
if (!object
@@ -1333,7 +1770,14 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
int *swaps;
valbuffer = PyDataMem_NEW(N*maxelsize);
+ if (valbuffer == NULL) {
+ goto fail;
+ }
indbuffer = PyDataMem_NEW(N*sizeof(npy_intp));
+ if (indbuffer == NULL) {
+ PyDataMem_FREE(valbuffer);
+ goto fail;
+ }
swaps = malloc(n*sizeof(int));
for (j = 0; j < n; j++) {
swaps[j] = PyArray_ISBYTESWAPPED(mps[j]);
@@ -1402,6 +1846,10 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
fail:
NPY_END_THREADS;
+ if (!PyErr_Occurred()) {
+ /* Out of memory during sorting or buffer creation */
+ PyErr_NoMemory();
+ }
Py_XDECREF(rit);
Py_XDECREF(ret);
for (i = 0; i < n; i++) {
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 2e464a3db..4fbdd5e54 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -91,7 +91,8 @@ NPY_NO_EXPORT int
parse_index(PyArrayObject *self, PyObject *op,
npy_intp *out_dimensions,
npy_intp *out_strides,
- npy_intp *out_offset)
+ npy_intp *out_offset,
+ int check_index)
{
int i, j, n;
int nd_old, nd_new, n_add, n_ellipsis;
@@ -130,7 +131,8 @@ parse_index(PyArrayObject *self, PyObject *op,
start = parse_index_entry(op1, &step_size, &n_steps,
nd_old < PyArray_NDIM(self) ?
PyArray_DIMS(self)[nd_old] : 0,
- nd_old, nd_old < PyArray_NDIM(self));
+ nd_old, check_index ?
+ nd_old < PyArray_NDIM(self) : 0);
Py_DECREF(op1);
if (start == -1) {
break;
@@ -196,26 +198,8 @@ parse_index(PyArrayObject *self, PyObject *op,
static int
slice_coerce_index(PyObject *o, npy_intp *v)
{
- /*
- * PyNumber_Index was introduced in Python 2.5 because of NumPy.
- * http://www.python.org/dev/peps/pep-0357/
- * Let's use it for indexing!
- *
- * Unfortunately, SciPy and possibly other code seems to rely
- * on the lenient coercion. :(
- */
-#if 0 /*PY_VERSION_HEX >= 0x02050000*/
- PyObject *ind = PyNumber_Index(o);
- if (ind != NULL) {
- *v = PyArray_PyIntAsIntp(ind);
- Py_DECREF(ind);
- }
- else {
- *v = -1;
- }
-#else
*v = PyArray_PyIntAsIntp(o);
-#endif
+
if ((*v) == -1 && PyErr_Occurred()) {
PyErr_Clear();
return 0;
@@ -223,8 +207,14 @@ slice_coerce_index(PyObject *o, npy_intp *v)
return 1;
}
-/* This is basically PySlice_GetIndicesEx, but with our coercion
- * of indices to integers (plus, that function is new in Python 2.3) */
+
+/*
+ * This is basically PySlice_GetIndicesEx, but with our coercion
+ * of indices to integers (plus, that function is new in Python 2.3)
+ *
+ * N.B. The coercion to integers is deprecated; once the DeprecationWarning
+ * is changed to an error, it would seem that this is obsolete.
+ */
NPY_NO_EXPORT int
slice_GetIndices(PySliceObject *r, npy_intp length,
npy_intp *start, npy_intp *stop, npy_intp *step,
@@ -1043,7 +1033,8 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val)
skip:
Py_INCREF(type);
- arrval = (PyArrayObject *)PyArray_FromAny(val, type, 0, 0, 0, NULL);
+ arrval = (PyArrayObject *)PyArray_FromAny(val, type, 0, 0,
+ NPY_ARRAY_FORCECAST, NULL);
if (arrval == NULL) {
return -1;
}
@@ -1145,11 +1136,7 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val)
static PyMappingMethods iter_as_mapping = {
-#if PY_VERSION_HEX >= 0x02050000
(lenfunc)iter_length, /*mp_length*/
-#else
- (inquiry)iter_length, /*mp_length*/
-#endif
(binaryfunc)iter_subscript, /*mp_subscript*/
(objobjargproc)iter_ass_subscript, /*mp_ass_subscript*/
};
@@ -1358,9 +1345,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayIter_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
/** END of Array Iterator **/
@@ -1677,7 +1662,7 @@ arraymultiter_dealloc(PyArrayMultiIterObject *multi)
static PyObject *
arraymultiter_size_get(PyArrayMultiIterObject *self)
{
-#if NPY_SIZEOF_INTP <= SIZEOF_LONG
+#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
return PyInt_FromLong((long) self->size);
#else
if (self->size < NPY_MAX_LONG) {
@@ -1692,7 +1677,7 @@ arraymultiter_size_get(PyArrayMultiIterObject *self)
static PyObject *
arraymultiter_index_get(PyArrayMultiIterObject *self)
{
-#if NPY_SIZEOF_INTP <= SIZEOF_LONG
+#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
return PyInt_FromLong((long) self->index);
#else
if (self->size < NPY_MAX_LONG) {
@@ -1835,9 +1820,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
/*========================= Neighborhood iterator ======================*/
@@ -2167,7 +2150,5 @@ NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
diff --git a/numpy/core/src/multiarray/iterators.h b/numpy/core/src/multiarray/iterators.h
index 8276a3ceb..dad345935 100644
--- a/numpy/core/src/multiarray/iterators.h
+++ b/numpy/core/src/multiarray/iterators.h
@@ -9,7 +9,8 @@ NPY_NO_EXPORT int
parse_index(PyArrayObject *self, PyObject *op,
npy_intp *out_dimensions,
npy_intp *out_strides,
- npy_intp *out_offset);
+ npy_intp *out_offset,
+ int check_index);
NPY_NO_EXPORT PyObject
*iter_subscript(PyArrayIterObject *, PyObject *);
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index ef29b855e..875656394 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -21,9 +21,12 @@
#include "lowlevel_strided_loops.h"
/*
- * x86 platform may work with unaligned access, except when the
- * compiler uses aligned SSE instructions, which gcc does in some
- * cases. This is disabled for the time being.
+ * The x86 platform works with unaligned access, but the C standard allows
+ * the compiler to assume all data is aligned to its size. This means it can
+ * vectorize instructions peeling only by the size of the type; if the data
+ * is not aligned to this size, one ends up with data not correctly aligned
+ * for SSE instructions (16 byte).
+ * So this flag can only be enabled if autovectorization is disabled.
*/
#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64))
# define NPY_USE_UNALIGNED_ACCESS 0
@@ -36,27 +39,16 @@
#define _NPY_NOP4(x) (x)
#define _NPY_NOP8(x) (x)
-#define _NPY_SWAP2(x) (((((npy_uint16)x)&0xffu) << 8) | \
- (((npy_uint16)x) >> 8))
+#define _NPY_SWAP2(x) npy_bswap2(x)
-#define _NPY_SWAP4(x) (((((npy_uint32)x)&0xffu) << 24) | \
- ((((npy_uint32)x)&0xff00u) << 8) | \
- ((((npy_uint32)x)&0xff0000u) >> 8) | \
- (((npy_uint32)x) >> 24))
+#define _NPY_SWAP4(x) npy_bswap4(x)
#define _NPY_SWAP_PAIR4(x) (((((npy_uint32)x)&0xffu) << 8) | \
((((npy_uint32)x)&0xff00u) >> 8) | \
((((npy_uint32)x)&0xff0000u) << 8) | \
((((npy_uint32)x)&0xff000000u) >> 8))
-#define _NPY_SWAP8(x) (((((npy_uint64)x)&0xffULL) << 56) | \
- ((((npy_uint64)x)&0xff00ULL) << 40) | \
- ((((npy_uint64)x)&0xff0000ULL) << 24) | \
- ((((npy_uint64)x)&0xff000000ULL) << 8) | \
- ((((npy_uint64)x)&0xff00000000ULL) >> 8) | \
- ((((npy_uint64)x)&0xff0000000000ULL) >> 24) | \
- ((((npy_uint64)x)&0xff000000000000ULL) >> 40) | \
- (((npy_uint64)x) >> 56))
+#define _NPY_SWAP8(x) npy_bswap8(x)
#define _NPY_SWAP_PAIR8(x) (((((npy_uint64)x)&0xffULL) << 24) | \
((((npy_uint64)x)&0xff00ULL) << 8) | \
@@ -67,21 +59,11 @@
((((npy_uint64)x)&0xff000000000000ULL) >> 8) | \
((((npy_uint64)x)&0xff00000000000000ULL) >> 24))
-#define _NPY_SWAP_INPLACE2(x) { \
- char a = (x)[0]; (x)[0] = (x)[1]; (x)[1] = a; \
- }
+#define _NPY_SWAP_INPLACE2(x) npy_bswap2_unaligned(x)
-#define _NPY_SWAP_INPLACE4(x) { \
- char a = (x)[0]; (x)[0] = (x)[3]; (x)[3] = a; \
- a = (x)[1]; (x)[1] = (x)[2]; (x)[2] = a; \
- }
+#define _NPY_SWAP_INPLACE4(x) npy_bswap4_unaligned(x)
-#define _NPY_SWAP_INPLACE8(x) { \
- char a = (x)[0]; (x)[0] = (x)[7]; (x)[7] = a; \
- a = (x)[1]; (x)[1] = (x)[6]; (x)[6] = a; \
- a = (x)[2]; (x)[2] = (x)[5]; (x)[5] = a; \
- a = (x)[3]; (x)[3] = (x)[4]; (x)[4] = a; \
- }
+#define _NPY_SWAP_INPLACE8(x) npy_bswap8_unaligned(x)
#define _NPY_SWAP_INPLACE16(x) { \
char a = (x)[0]; (x)[0] = (x)[15]; (x)[15] = a; \
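npy_bswap2/4/8 live in numpy's private headers; a plausible equivalent of the 4-byte variant (illustrative only; real builds may map it onto a compiler builtin such as __builtin_bswap32) is:

    #include <stdint.h>

    /* Reverse the byte order of a 32-bit value. */
    static uint32_t bswap4_sketch(uint32_t x)
    {
        return ((x & 0x000000ffu) << 24) |
               ((x & 0x0000ff00u) <<  8) |
               ((x & 0x00ff0000u) >>  8) |
               ( x                >> 24);
    }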
@@ -122,7 +104,15 @@
#if @is_swap@ || @src_contig@ == 0 || @dst_contig@ == 0
+/*
+ * Unrolling gains about 20-50% if the copy can be done in one mov instruction;
+ * if not, it can decrease performance.
+ * Tested to improve performance on Intel Xeon 5x/7x, Core2Duo, AMD Phenom X4.
+ */
static void
+#if @is_aligned@ && @is_swap@ == 0 && @elsize@ <= NPY_SIZEOF_INTP
+ NPY_GCC_UNROLL_LOOPS
+#endif
@prefix@_@oper@_size@elsize@(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
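What NPY_GCC_UNROLL_LOOPS buys here, sketched by hand for the aligned 8-byte case (illustrative, not the generated code): several moves per iteration amortize the loop overhead when each copy is a single mov:

    #include <numpy/npy_common.h>

    /* dst and src are assumed suitably aligned for npy_uint64 access */
    static void
    copy8_unrolled_sketch(char *dst, npy_intp dst_stride,
                          char *src, npy_intp src_stride, npy_intp N)
    {
        while (N >= 4) {                       /* 4x unrolled body */
            *(npy_uint64 *)dst = *(npy_uint64 *)src;
            dst += dst_stride; src += src_stride;
            *(npy_uint64 *)dst = *(npy_uint64 *)src;
            dst += dst_stride; src += src_stride;
            *(npy_uint64 *)dst = *(npy_uint64 *)src;
            dst += dst_stride; src += src_stride;
            *(npy_uint64 *)dst = *(npy_uint64 *)src;
            dst += dst_stride; src += src_stride;
            N -= 4;
        }
        while (N-- > 0) {                      /* remainder */
            *(npy_uint64 *)dst = *(npy_uint64 *)src;
            dst += dst_stride; src += src_stride;
        }
    }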
@@ -179,7 +169,11 @@ static void
#endif
-/* specialized copy and swap for source stride 0 */
+/*
+ * Specialized copy and swap for source stride 0. Interestingly, unrolling
+ * here, as above, is only marginally profitable for small types and
+ * detrimental for >= 8 byte moves on x86.
+ */
#if (@src_contig@ == 0) && @is_aligned@
static void
@prefix@_@oper@_size@elsize@_srcstride0(char *dst,
@@ -555,7 +549,7 @@ NPY_NO_EXPORT PyArray_StridedUnaryOp *
/**end repeat1**/
}
- return &_strided_to_strided;
+ return &_swap@tag@_strided_to_strided;
}
else {
switch (itemsize) {
@@ -603,7 +597,7 @@ NPY_NO_EXPORT PyArray_StridedUnaryOp *
}
}
- return &_strided_to_strided;
+ return &_swap@tag@_strided_to_strided;
}
/* general dst */
else {
@@ -620,7 +614,7 @@ NPY_NO_EXPORT PyArray_StridedUnaryOp *
/**end repeat1**/
}
- return &_strided_to_strided;
+ return &_swap@tag@_strided_to_strided;
}
/* general src */
else {
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 31446ca41..25f3dad9b 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -25,7 +25,7 @@
#define SOBJ_LISTTUP 4
static PyObject *
-array_subscript_simple(PyArrayObject *self, PyObject *op);
+array_subscript_simple(PyArrayObject *self, PyObject *op, int check_index);
/******************************************************************************
*** IMPLEMENT MAPPING PROTOCOL ***
@@ -42,8 +42,29 @@ array_length(PyArrayObject *self)
}
}
+/* Get array item as scalar type */
NPY_NO_EXPORT PyObject *
-array_big_item(PyArrayObject *self, npy_intp i)
+array_item_asscalar(PyArrayObject *self, npy_intp i)
+{
+ char *item;
+ npy_intp dim0;
+
+ /* Bounds check and get the data pointer */
+ dim0 = PyArray_DIM(self, 0);
+ if (i < 0) {
+ i += dim0;
+ }
+ if (i < 0 || i >= dim0) {
+ PyErr_SetString(PyExc_IndexError, "index out of bounds");
+ return NULL;
+ }
+ item = PyArray_BYTES(self) + i * PyArray_STRIDE(self, 0);
+ return PyArray_Scalar(item, PyArray_DESCR(self), (PyObject *)self);
+}
+
+/* Get array item as ndarray type */
+NPY_NO_EXPORT PyObject *
+array_item_asarray(PyArrayObject *self, npy_intp i)
{
char *item;
PyArrayObject *ret;
@@ -60,7 +81,7 @@ array_big_item(PyArrayObject *self, npy_intp i)
if (check_and_adjust_index(&i, dim0, 0) < 0) {
return NULL;
}
- item = PyArray_DATA(self) + i * PyArray_STRIDE(self, 0);
+ item = PyArray_BYTES(self) + i * PyArray_STRIDE(self, 0);
/* Create the view array */
Py_INCREF(PyArray_DESCR(self));
@@ -86,40 +107,23 @@ array_big_item(PyArrayObject *self, npy_intp i)
return (PyObject *)ret;
}
-NPY_NO_EXPORT int
-_array_ass_item(PyArrayObject *self, Py_ssize_t i, PyObject *v)
-{
- return array_ass_big_item(self, (npy_intp) i, v);
-}
-
-/* contains optimization for 1-d arrays */
+/* Get array item at given index */
NPY_NO_EXPORT PyObject *
-array_item_nice(PyArrayObject *self, Py_ssize_t _i)
+array_item(PyArrayObject *self, Py_ssize_t _i)
{
/* Workaround Python 2.4: Py_ssize_t not the same as npyint_p */
npy_intp i = _i;
if (PyArray_NDIM(self) == 1) {
- char *item;
- npy_intp dim0;
-
- /* Bounds check and get the data pointer */
- dim0 = PyArray_DIM(self, 0);
- if (check_and_adjust_index(&i, dim0, 0) < 0) {
- return NULL;
- }
- item = PyArray_DATA(self) + i * PyArray_STRIDE(self, 0);
-
- return PyArray_Scalar(item, PyArray_DESCR(self), (PyObject *)self);
+ return array_item_asscalar(self, (npy_intp) i);
}
else {
- return PyArray_Return(
- (PyArrayObject *) array_big_item(self, (npy_intp) i));
+ return array_item_asarray(self, (npy_intp) i);
}
}
NPY_NO_EXPORT int
-array_ass_big_item(PyArrayObject *self, npy_intp i, PyObject *v)
+array_ass_item_object(PyArrayObject *self, npy_intp i, PyObject *v)
{
PyArrayObject *tmp;
char *item;
@@ -138,14 +142,14 @@ array_ass_big_item(PyArrayObject *self, npy_intp i, PyObject *v)
if (PyArray_NDIM(self) == 0) {
PyErr_SetString(PyExc_IndexError,
- "0-d arrays can't be indexed.");
+ "0-d arrays can't be indexed");
return -1;
}
/* For multi-dimensional arrays, use CopyObject */
if (PyArray_NDIM(self) > 1) {
- tmp = (PyArrayObject *)array_big_item(self, i);
+ tmp = (PyArrayObject *)array_item_asarray(self, i);
if(tmp == NULL) {
return -1;
}
@@ -159,9 +163,9 @@ array_ass_big_item(PyArrayObject *self, npy_intp i, PyObject *v)
if (check_and_adjust_index(&i, dim0, 0) < 0) {
return -1;
}
- item = PyArray_DATA(self) + i * PyArray_STRIDE(self, 0);
+ item = PyArray_BYTES(self) + i * PyArray_STRIDE(self, 0);
- return PyArray_DESCR(self)->f->setitem(v, item, self);
+ return PyArray_SETITEM(self, item, v);
}
/* -------------------------------------------------------------- */
@@ -222,7 +226,7 @@ PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject **ret, int getm
* (n2,...,n1+n2-1,0,...,n2-1,n1+n2,...n3-1)
*/
n1 = mit->iters[0]->nd_m1 + 1;
- n2 = mit->iteraxes[0];
+ n2 = mit->consec; /* axes to insert at */
n3 = mit->nd;
/* use n1 as the boundary if getting but n2 if setting */
@@ -299,13 +303,17 @@ PyArray_GetMap(PyArrayMapIterObject *mit)
/* check for consecutive axes */
if ((mit->subspace != NULL) && (mit->consec)) {
- if (mit->iteraxes[0] > 0) { /* then we need to swap */
- PyArray_MapIterSwapAxes(mit, &ret, 1);
- }
+ PyArray_MapIterSwapAxes(mit, &ret, 1);
}
return (PyObject *)ret;
}
+NPY_NO_EXPORT int
+array_ass_item(PyArrayObject *self, Py_ssize_t i, PyObject *v)
+{
+ return array_ass_item_object(self, (npy_intp) i, v);
+}
+
static int
PyArray_SetMap(PyArrayMapIterObject *mit, PyObject *op)
{
@@ -328,11 +336,9 @@ PyArray_SetMap(PyArrayMapIterObject *mit, PyObject *op)
return -1;
}
if ((mit->subspace != NULL) && (mit->consec)) {
- if (mit->iteraxes[0] > 0) { /* then we need to swap */
- PyArray_MapIterSwapAxes(mit, &arr, 0);
- if (arr == NULL) {
- return -1;
- }
+ PyArray_MapIterSwapAxes(mit, &arr, 0);
+ if (arr == NULL) {
+ return -1;
}
}
@@ -546,7 +552,7 @@ fancy_indexing_check(PyObject *args)
*/
NPY_NO_EXPORT PyObject *
-array_subscript_simple(PyArrayObject *self, PyObject *op)
+array_subscript_simple(PyArrayObject *self, PyObject *op, int check_index)
{
npy_intp dimensions[NPY_MAXDIMS], strides[NPY_MAXDIMS];
npy_intp offset;
@@ -554,36 +560,28 @@ array_subscript_simple(PyArrayObject *self, PyObject *op)
PyArrayObject *ret;
npy_intp value;
- /*
- * PyNumber_Index was introduced in Python 2.5 because of NumPy.
- * http://www.python.org/dev/peps/pep-0357/
- * Let's use it for indexing!
- *
- * Unfortunately, SciPy and possibly other code seems to rely
- * on the lenient coercion. :(
- */
-#if 0 /*PY_VERSION_HEX >= 0x02050000*/
- PyObject *ind = PyNumber_Index(op);
- if (ind != NULL) {
- value = PyArray_PyIntAsIntp(ind);
- Py_DECREF(ind);
- }
- else {
- value = -1;
- }
-#else
- value = PyArray_PyIntAsIntp(op);
-#endif
- if (value == -1 && PyErr_Occurred()) {
- PyErr_Clear();
- }
- else {
- return array_big_item(self, value);
+ if (!(PyArray_Check(op) && (PyArray_SIZE((PyArrayObject*)op) > 1))) {
+ value = PyArray_PyIntAsIntp(op);
+
+ if (value == -1 && PyErr_Occurred()) {
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) {
+ /* Operand is not an integer type */
+ PyErr_Clear();
+ }
+ else {
+ PyErr_SetString(PyExc_IndexError,
+ "cannot convert index to integer");
+ return NULL;
+ }
+ }
+ else {
+ return array_item_asarray(self, value);
+ }
}
/* Standard (view-based) Indexing */
nd = parse_index(self, op, dimensions,
- strides, &offset);
+ strides, &offset, check_index);
if (nd == -1) {
return NULL;
}
@@ -593,7 +591,7 @@ array_subscript_simple(PyArrayObject *self, PyObject *op)
ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self),
PyArray_DESCR(self),
nd, dimensions,
- strides, PyArray_DATA(self) + offset,
+ strides, PyArray_BYTES(self) + offset,
PyArray_FLAGS(self),
(PyObject *)self);
if (ret == NULL) {
@@ -808,8 +806,7 @@ array_ass_boolean_subscript(PyArrayObject *self,
}
/* Tweak the strides for 0-dim and broadcasting cases */
- if (PyArray_NDIM(v) > 0 && PyArray_DIMS(v)[0] > 1) {
- v_stride = PyArray_STRIDES(v)[0];
+ if (PyArray_NDIM(v) > 0 && PyArray_DIMS(v)[0] != 1) {
if (size != PyArray_DIMS(v)[0]) {
PyErr_Format(PyExc_ValueError,
"NumPy boolean array indexing assignment "
@@ -818,6 +815,7 @@ array_ass_boolean_subscript(PyArrayObject *self,
(int)PyArray_DIMS(v)[0], (int)size);
return -1;
}
+ v_stride = PyArray_STRIDES(v)[0];
}
else {
v_stride = 0;
@@ -914,17 +912,162 @@ array_ass_boolean_subscript(PyArrayObject *self,
return 0;
}
-NPY_NO_EXPORT PyObject *
-array_subscript(PyArrayObject *self, PyObject *op)
+
+/* Check if ind is a tuple and if it has as many elements as arr has axes. */
+static NPY_INLINE int
+_is_full_index(PyObject *ind, PyArrayObject *arr)
{
- int nd, fancy;
- PyArrayObject *other;
- PyArrayMapIterObject *mit;
+ return PyTuple_Check(ind) && (PyTuple_GET_SIZE(ind) == PyArray_NDIM(arr));
+}
+
+/*
+ * Returns 0 if tuple-object seq is not a tuple of integers.
+ * If the return value is positive, vals will be filled with the elements
+ * from the tuple.
+ */
+static int
+_tuple_of_integers(PyObject *seq, npy_intp *vals, int maxvals)
+{
+ int i;
PyObject *obj;
+ npy_intp temp;
- if (PyString_Check(op) || PyUnicode_Check(op)) {
+ for(i=0; i<maxvals; i++) {
+ obj = PyTuple_GET_ITEM(seq, i);
+ if ((PyArray_Check(obj) && PyArray_NDIM((PyArrayObject *)obj) > 0)
+ || PyList_Check(obj)) {
+ return 0;
+ }
+ temp = PyArray_PyIntAsIntp(obj);
+ if (error_converting(temp)) {
+ PyErr_Clear();
+ return 0;
+ }
+ vals[i] = temp;
+ }
+ return 1;
+}
+
+
+/* Return NPY_TRUE if an ellipsis is found, else NPY_FALSE */
+static npy_bool
+_check_ellipses(PyObject *op)
+{
+ if ((op == Py_Ellipsis) || PyString_Check(op) || PyUnicode_Check(op)) {
+ return NPY_TRUE;
+ }
+ else if (PyBool_Check(op) || PyArray_IsScalar(op, Bool) ||
+ (PyArray_Check(op) &&
+ (PyArray_DIMS((PyArrayObject *)op)==0) &&
+ PyArray_ISBOOL((PyArrayObject *)op))) {
+ return NPY_TRUE;
+ }
+ else if (PySequence_Check(op)) {
+ Py_ssize_t n, i;
PyObject *temp;
+ n = PySequence_Size(op);
+ i = 0;
+ while (i < n) {
+ temp = PySequence_GetItem(op, i);
+ if (temp == Py_Ellipsis) {
+ Py_DECREF(temp);
+ return NPY_TRUE;
+ }
+ Py_DECREF(temp);
+ i++;
+ }
+ }
+ return NPY_FALSE;
+}
+
+NPY_NO_EXPORT PyObject *
+array_subscript_fancy(PyArrayObject *self, PyObject *op, int fancy)
+{
+ int oned;
+ PyObject *other;
+ PyArrayMapIterObject *mit;
+
+ oned = ((PyArray_NDIM(self) == 1) &&
+ !(PyTuple_Check(op) && PyTuple_GET_SIZE(op) > 1));
+
+ /* wrap arguments into a mapiter object */
+ mit = (PyArrayMapIterObject *) PyArray_MapIterNew(op, oned, fancy);
+ if (mit == NULL) {
+ return NULL;
+ }
+ if (oned) {
+ PyArrayIterObject *it;
+ PyObject *rval;
+ it = (PyArrayIterObject *) PyArray_IterNew((PyObject *)self);
+ if (it == NULL) {
+ Py_DECREF(mit);
+ return NULL;
+ }
+ rval = iter_subscript(it, mit->indexobj);
+ Py_DECREF(it);
+ Py_DECREF(mit);
+ return rval;
+ }
+ if (PyArray_MapIterBind(mit, self) != 0) {
+ Py_DECREF(mit);
+ return NULL;
+ }
+ other = (PyObject *)PyArray_GetMap(mit);
+ Py_DECREF(mit);
+ return other;
+}
+
+/* make sure subscript always returns an array object */
+NPY_NO_EXPORT PyObject *
+array_subscript_asarray(PyArrayObject *self, PyObject *op)
+{
+ return PyArray_EnsureAnyArray(array_subscript(self, op));
+}
+
+NPY_NO_EXPORT PyObject *
+array_subscript_fromobject(PyArrayObject *self, PyObject *op)
+{
+ int fancy;
+ npy_intp vals[NPY_MAXDIMS];
+
+ /* Integer index */
+ if (PyArray_IsIntegerScalar(op) || (PyIndex_Check(op) &&
+ !PySequence_Check(op))) {
+ npy_intp value = PyArray_PyIntAsIntp(op);
+ if (value == -1 && PyErr_Occurred()) {
+ /* fail on error */
+ PyErr_SetString(PyExc_IndexError,
+ "cannot convert index to integer");
+ return NULL;
+ }
+ else {
+ return array_item(self, (Py_ssize_t) value);
+ }
+ }
+ /* optimization for a tuple of integers */
+ if (PyArray_NDIM(self) > 1 && _is_full_index(op, self)) {
+ int ret = _tuple_of_integers(op, vals, PyArray_NDIM(self));
+
+ if (ret > 0) {
+ int idim, ndim = PyArray_NDIM(self);
+ npy_intp *shape = PyArray_DIMS(self);
+ npy_intp *strides = PyArray_STRIDES(self);
+ char *item = PyArray_BYTES(self);
+ for (idim = 0; idim < ndim; idim++) {
+ npy_intp v = vals[idim];
+ if (check_and_adjust_index(&v, shape[idim], idim) < 0) {
+ return NULL;
+ }
+ item += v * strides[idim];
+ }
+ return PyArray_Scalar(item, PyArray_DESCR(self), (PyObject *)self);
+ }
+ }
+
+ /* Check for single field access */
+ if (PyString_Check(op) || PyUnicode_Check(op)) {
+ PyObject *temp, *obj;
+
if (PyDataType_HASFIELDS(PyArray_DESCR(self))) {
obj = PyDict_GetItem(PyArray_DESCR(self)->fields, op);
if (obj != NULL) {
@@ -944,7 +1087,7 @@ array_subscript(PyArrayObject *self, PyObject *op)
temp = PyUnicode_AsUnicodeEscapeString(op);
}
PyErr_Format(PyExc_ValueError,
- "field named %s not found.",
+ "field named %s not found",
PyBytes_AsString(temp));
if (temp != op) {
Py_DECREF(temp);
@@ -957,6 +1100,7 @@ array_subscript(PyArrayObject *self, PyObject *op)
PySequence_Check(op) &&
!PyTuple_Check(op)) {
int seqlen, i;
+ PyObject *obj;
seqlen = PySequence_Size(op);
for (i = 0; i < seqlen; i++) {
obj = PySequence_GetItem(op, i);
@@ -967,7 +1111,7 @@ array_subscript(PyArrayObject *self, PyObject *op)
Py_DECREF(obj);
}
/*
- * extract multiple fields if all elements in sequence
+ * Extract multiple fields if all elements in sequence
* are either string or unicode (i.e. no break occurred).
*/
fancy = ((seqlen > 0) && (i == seqlen));
@@ -988,15 +1132,19 @@ array_subscript(PyArrayObject *self, PyObject *op)
}
}
+ /* Check for Ellipsis index */
if (op == Py_Ellipsis) {
Py_INCREF(self);
return (PyObject *)self;
}
if (PyArray_NDIM(self) == 0) {
+ int nd;
+ /* Check for None index */
if (op == Py_None) {
return add_new_axes_0d(self, 1);
}
+ /* Check for (empty) tuple index */
if (PyTuple_Check(op)) {
if (0 == PyTuple_GET_SIZE(op)) {
Py_INCREF(self);
@@ -1026,54 +1174,65 @@ array_subscript(PyArrayObject *self, PyObject *op)
NULL);
}
}
- PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed.");
+ PyErr_SetString(PyExc_IndexError,
+ "0-dimensional arrays can't be indexed");
return NULL;
}
+ fancy = fancy_indexing_check(op);
+ if (fancy != SOBJ_NOTFANCY) {
+ return array_subscript_fancy(self, op, fancy);
+ }
+ else {
+ return array_subscript_simple(self, op, 1);
+ }
+}
+
+NPY_NO_EXPORT PyObject *
+array_subscript(PyArrayObject *self, PyObject *op)
+{
+ int fancy;
+ PyObject *ret = NULL;
+ if (!PyArray_Check(op)) {
+ ret = array_subscript_fromobject(self, op);
+ }
+
/* Boolean indexing special case */
- /* The SIZE check might be overly cautious */
- if (PyArray_Check(op) && (PyArray_TYPE((PyArrayObject *)op) == NPY_BOOL)
+ /* The SIZE check is to ensure old behaviour for non-matching arrays. */
+ else if (PyArray_ISBOOL((PyArrayObject *)op)
&& (PyArray_NDIM(self) == PyArray_NDIM((PyArrayObject *)op))
&& (PyArray_SIZE((PyArrayObject *)op) == PyArray_SIZE(self))) {
return (PyObject *)array_boolean_subscript(self,
(PyArrayObject *)op, NPY_CORDER);
}
-
- fancy = fancy_indexing_check(op);
- if (fancy != SOBJ_NOTFANCY) {
- int oned;
-
- oned = ((PyArray_NDIM(self) == 1) &&
- !(PyTuple_Check(op) && PyTuple_GET_SIZE(op) > 1));
-
- /* wrap arguments into a mapiter object */
- mit = (PyArrayMapIterObject *) PyArray_MapIterNew(op, oned, fancy);
- if (mit == NULL) {
- return NULL;
+ /* Error case when indexing 0-dim array with non-boolean. */
+ else if (PyArray_NDIM(self) == 0) {
+ PyErr_SetString(PyExc_IndexError,
+ "0-dimensional arrays can't be indexed");
+ return NULL;
+ }
+
+ else {
+ fancy = fancy_indexing_check(op);
+ if (fancy != SOBJ_NOTFANCY) {
+ ret = array_subscript_fancy(self, op, fancy);
}
- if (oned) {
- PyArrayIterObject *it;
- PyObject *rval;
- it = (PyArrayIterObject *) PyArray_IterNew((PyObject *)self);
- if (it == NULL) {
- Py_DECREF(mit);
- return NULL;
- }
- rval = iter_subscript(it, mit->indexobj);
- Py_DECREF(it);
- Py_DECREF(mit);
- return rval;
+ else {
+ ret = array_subscript_simple(self, op, 1);
}
- PyArray_MapIterBind(mit, self);
- other = (PyArrayObject *)PyArray_GetMap(mit);
- Py_DECREF(mit);
- return (PyObject *)other;
}
- return array_subscript_simple(self, op);
+ if (ret == NULL) {
+ return NULL;
+ }
+
+ if (PyArray_Check(ret) && PyArray_NDIM((PyArrayObject *)ret) == 0
+ && !_check_ellipses(op)) {
+ return PyArray_Return((PyArrayObject *)ret);
+ }
+ return ret;
}
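
Illustration (not part of the patch): a condensed, hypothetical view of the dispatch order that the reworked array_subscript implements; non-array indices go to the fromobject path, matching boolean arrays take the fast path, everything else is classified as fancy or simple:

enum index_kind { KIND_FROMOBJECT, KIND_BOOL_FAST, KIND_FANCY, KIND_SIMPLE };

/* Sketch of the classification above; the real code additionally
 * converts 0-d results back to scalars unless an ellipsis is present. */
static enum index_kind
classify_index(int op_is_array, int op_is_bool, int shapes_match, int fancy)
{
    if (!op_is_array) {
        return KIND_FROMOBJECT;
    }
    if (op_is_bool && shapes_match) {
        return KIND_BOOL_FAST;
    }
    return fancy ? KIND_FANCY : KIND_SIMPLE;
}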
-
/*
* Another assignment hacked by using CopyObject.
* This only works if subscript returns a standard view.
@@ -1090,15 +1249,22 @@ array_ass_sub_simple(PyArrayObject *self, PyObject *ind, PyObject *op)
npy_intp value;
value = PyArray_PyIntAsIntp(ind);
- if (!error_converting(value)) {
- return array_ass_big_item(self, value, op);
+ if (value == -1 && PyErr_Occurred()) {
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) {
+ /* Operand is not an integer type */
+ PyErr_Clear();
+ }
+ else {
+ return -1;
+ }
+ }
+ else {
+ return array_ass_item_object(self, value, op);
}
- PyErr_Clear();
/* Rest of standard (view-based) indexing */
-
if (PyArray_CheckExact(self)) {
- tmp = (PyArrayObject *)array_subscript_simple(self, ind);
+ tmp = (PyArrayObject *)array_subscript_simple(self, ind, 1);
if (tmp == NULL) {
return -1;
}
@@ -1117,7 +1283,7 @@ array_ass_sub_simple(PyArrayObject *self, PyObject *ind, PyObject *op)
}
if (!PyArray_Check(tmp0)) {
PyErr_SetString(PyExc_RuntimeError,
- "Getitem not returning array.");
+ "Getitem not returning array");
Py_DECREF(tmp0);
return -1;
}
@@ -1129,38 +1295,44 @@ array_ass_sub_simple(PyArrayObject *self, PyObject *ind, PyObject *op)
return ret;
}
-
-/* return -1 if tuple-object seq is not a tuple of integers.
- otherwise fill vals with converted integers
-*/
static int
-_tuple_of_integers(PyObject *seq, npy_intp *vals, int maxvals)
+array_ass_sub_fancy(PyArrayObject *self, PyObject *ind, PyObject *op, int fancy)
{
- int i;
- PyObject *obj;
- npy_intp temp;
+ int oned, ret;
+ PyArrayMapIterObject *mit;
+ oned = ((PyArray_NDIM(self) == 1) &&
+ !(PyTuple_Check(ind) && PyTuple_GET_SIZE(ind) > 1));
+ mit = (PyArrayMapIterObject *) PyArray_MapIterNew(ind, oned, fancy);
+ if (mit == NULL) {
+ return -1;
+ }
+ if (oned) {
+ PyArrayIterObject *it;
+ int rval;
- for(i=0; i<maxvals; i++) {
- obj = PyTuple_GET_ITEM(seq, i);
- if ((PyArray_Check(obj) && PyArray_NDIM((PyArrayObject *)obj) > 0)
- || PyList_Check(obj)) {
- return -1;
- }
- temp = PyArray_PyIntAsIntp(obj);
- if (error_converting(temp)) {
+ it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self);
+ if (it == NULL) {
+ Py_DECREF(mit);
return -1;
}
- vals[i] = temp;
+ rval = iter_ass_subscript(it, mit->indexobj, op);
+ Py_DECREF(it);
+ Py_DECREF(mit);
+ return rval;
}
- return 0;
+ if (PyArray_MapIterBind(mit, self) != 0) {
+ Py_DECREF(mit);
+ return -1;
+ }
+ ret = PyArray_SetMap(mit, op);
+ Py_DECREF(mit);
+ return ret;
}
static int
array_ass_sub(PyArrayObject *self, PyObject *ind, PyObject *op)
{
- int ret, oned, fancy;
- PyArrayMapIterObject *mit;
+ int fancy;
npy_intp vals[NPY_MAXDIMS];
if (op == NULL) {
@@ -1172,19 +1344,22 @@ array_ass_sub(PyArrayObject *self, PyObject *ind, PyObject *op)
return -1;
}
- if (PyInt_Check(ind) || PyArray_IsScalar(ind, Integer) ||
- PyLong_Check(ind) || (PyIndex_Check(ind) &&
- !PySequence_Check(ind))) {
- npy_intp value;
- value = PyArray_PyIntAsIntp(ind);
- if (PyErr_Occurred()) {
- PyErr_Clear();
+ /* Integer index */
+ if (PyArray_IsIntegerScalar(ind) || (PyIndex_Check(ind) &&
+ !PySequence_Check(ind))) {
+ npy_intp value = PyArray_PyIntAsIntp(ind);
+ if (value == -1 && PyErr_Occurred()) {
+ /* fail on error */
+ PyErr_SetString(PyExc_IndexError,
+ "cannot convert index to integer");
+ return -1;
}
else {
- return array_ass_big_item(self, value, op);
+ return array_ass_item_object(self, value, op);
}
}
+ /* Single field access */
if (PyString_Check(ind) || PyUnicode_Check(ind)) {
if (PyDataType_HASFIELDS(PyArray_DESCR(self))) {
PyObject *obj;
@@ -1201,19 +1376,19 @@ array_ass_sub(PyArrayObject *self, PyObject *ind, PyObject *op)
}
}
}
-
#if defined(NPY_PY3K)
PyErr_Format(PyExc_ValueError,
- "field named %S not found.",
+ "field named %S not found",
ind);
#else
PyErr_Format(PyExc_ValueError,
- "field named %s not found.",
+ "field named %s not found",
PyString_AsString(ind));
#endif
return -1;
}
+ /* Ellipsis index */
if (ind == Py_Ellipsis) {
/*
* Doing "a[...] += 1" triggers assigning an array to itself,
@@ -1236,45 +1411,49 @@ array_ass_sub(PyArrayObject *self, PyObject *ind, PyObject *op)
* 3) Using newaxis (None)
* 4) Boolean mask indexing
*/
+
+ /* Check for None or empty tuple */
if (ind == Py_None || (PyTuple_Check(ind) &&
(0 == PyTuple_GET_SIZE(ind) ||
count_new_axes_0d(ind) > 0))) {
- return PyArray_DESCR(self)->f->setitem(op, PyArray_DATA(self), self);
+ return PyArray_SETITEM(self, PyArray_DATA(self), op);
}
+ /* Check for boolean index */
if (PyBool_Check(ind) || PyArray_IsScalar(ind, Bool) ||
(PyArray_Check(ind) &&
- (PyArray_DIMS((PyArrayObject *)ind)==0) &&
+ (PyArray_NDIM((PyArrayObject *)ind)==0) &&
PyArray_ISBOOL((PyArrayObject *)ind))) {
if (PyObject_IsTrue(ind)) {
return PyArray_CopyObject(self, op);
}
- else { /* don't do anything */
+ else { /* False: don't do anything */
return 0;
}
}
- PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed.");
+ PyErr_SetString(PyExc_IndexError,
+ "0-dimensional arrays can't be indexed");
return -1;
}
- /* Integer-tuple */
- if (PyTuple_Check(ind) &&
- (PyTuple_GET_SIZE(ind) == PyArray_NDIM(self)) &&
- (_tuple_of_integers(ind, vals, PyArray_NDIM(self)) >= 0)) {
- int idim, ndim = PyArray_NDIM(self);
- npy_intp *shape = PyArray_DIMS(self);
- npy_intp *strides = PyArray_STRIDES(self);
- char *item = PyArray_DATA(self);
+ /* Integer-tuple index */
+ if (_is_full_index(ind, self)) {
+ int ret = _tuple_of_integers(ind, vals, PyArray_NDIM(self));
- for (idim = 0; idim < ndim; idim++) {
- npy_intp v = vals[idim];
- if (check_and_adjust_index(&v, shape[idim], idim) < 0) {
- return -1;
+ if (ret > 0) {
+ int idim, ndim = PyArray_NDIM(self);
+ npy_intp *shape = PyArray_DIMS(self);
+ npy_intp *strides = PyArray_STRIDES(self);
+ char *item = PyArray_BYTES(self);
+ for (idim = 0; idim < ndim; idim++) {
+ npy_intp v = vals[idim];
+ if (check_and_adjust_index(&v, shape[idim], idim) < 0) {
+ return -1;
+ }
+ item += v * strides[idim];
}
- item += v * strides[idim];
+ return PyArray_SETITEM(self, item, op);
}
- return PyArray_DESCR(self)->f->setitem(op, item, self);
}
- PyErr_Clear();
/* Boolean indexing special case */
if (PyArray_Check(ind) &&
@@ -1285,9 +1464,17 @@ array_ass_sub(PyArrayObject *self, PyObject *ind, PyObject *op)
PyArrayObject *op_arr;
PyArray_Descr *dtype = NULL;
- op_arr = (PyArrayObject *)PyArray_FromAny(op, dtype, 0, 0, 0, NULL);
- if (op_arr == NULL) {
- return -1;
+ if (!PyArray_Check(op)) {
+ dtype = PyArray_DTYPE(self);
+ Py_INCREF(dtype);
+ op_arr = (PyArrayObject *)PyArray_FromAny(op, dtype, 0, 0, 0, NULL);
+ if (op_arr == NULL) {
+ return -1;
+ }
+ }
+ else {
+ op_arr = (PyArrayObject *)op;
+ Py_INCREF(op_arr);
}
if (PyArray_NDIM(op_arr) < 2) {
@@ -1307,147 +1494,16 @@ array_ass_sub(PyArrayObject *self, PyObject *ind, PyObject *op)
fancy = fancy_indexing_check(ind);
if (fancy != SOBJ_NOTFANCY) {
-
- oned = ((PyArray_NDIM(self) == 1) &&
- !(PyTuple_Check(ind) && PyTuple_GET_SIZE(ind) > 1));
- mit = (PyArrayMapIterObject *) PyArray_MapIterNew(ind, oned, fancy);
- if (mit == NULL) {
- return -1;
- }
- if (oned) {
- PyArrayIterObject *it;
- int rval;
-
- it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self);
- if (it == NULL) {
- Py_DECREF(mit);
- return -1;
- }
- rval = iter_ass_subscript(it, mit->indexobj, op);
- Py_DECREF(it);
- Py_DECREF(mit);
- return rval;
- }
- PyArray_MapIterBind(mit, self);
- ret = PyArray_SetMap(mit, op);
- Py_DECREF(mit);
- return ret;
- }
-
- return array_ass_sub_simple(self, ind, op);
-}
-
-
-/*
- * There are places that require that array_subscript return a PyArrayObject
- * and not possibly a scalar. Thus, this is the function exposed to
- * Python so that 0-dim arrays are passed as scalars
- */
-
-
-static PyObject *
-array_subscript_nice(PyArrayObject *self, PyObject *op)
-{
-
- PyArrayObject *mp;
- npy_intp vals[NPY_MAXDIMS];
-
- if (PyInt_Check(op) || PyArray_IsScalar(op, Integer) ||
- PyLong_Check(op) || (PyIndex_Check(op) &&
- !PySequence_Check(op))) {
- npy_intp value;
- value = PyArray_PyIntAsIntp(op);
- if (PyErr_Occurred()) {
- PyErr_Clear();
- }
- else {
- return array_item_nice(self, (Py_ssize_t) value);
- }
- }
- /* optimization for a tuple of integers */
- if (PyArray_NDIM(self) > 1 &&
- PyTuple_Check(op) &&
- (PyTuple_GET_SIZE(op) == PyArray_NDIM(self)) &&
- (_tuple_of_integers(op, vals, PyArray_NDIM(self)) >= 0)) {
- int idim, ndim = PyArray_NDIM(self);
- npy_intp *shape = PyArray_DIMS(self);
- npy_intp *strides = PyArray_STRIDES(self);
- char *item = PyArray_DATA(self);
-
- for (idim = 0; idim < ndim; idim++) {
- npy_intp v = vals[idim];
- if (check_and_adjust_index(&v, shape[idim], idim) < 0) {
- return NULL;
- }
- item += v * strides[idim];
- }
- return PyArray_Scalar(item, PyArray_DESCR(self), (PyObject *)self);
+ return array_ass_sub_fancy(self, ind, op, fancy);
}
- PyErr_Clear();
-
- mp = (PyArrayObject *)array_subscript(self, op);
- /*
- * mp could be a scalar if op is not an Int, Scalar, Long or other Index
- * object and still convertable to an integer (so that the code goes to
- * array_subscript_simple). So, this cast is a bit dangerous..
- */
-
- if (mp == NULL) {
- return NULL;
- }
-
- if (PyErr_Occurred()) {
- Py_XDECREF(mp);
- return NULL;
- }
-
- /*
- * The following adds some additional logic to avoid calling
- * PyArray_Return if there is an ellipsis.
- */
-
- if (PyArray_Check(mp) && PyArray_NDIM(mp) == 0) {
- npy_bool noellipses = NPY_TRUE;
- if ((op == Py_Ellipsis) || PyString_Check(op) || PyUnicode_Check(op)) {
- noellipses = NPY_FALSE;
- }
- else if (PyBool_Check(op) || PyArray_IsScalar(op, Bool) ||
- (PyArray_Check(op) &&
- (PyArray_DIMS((PyArrayObject *)op)==0) &&
- PyArray_ISBOOL((PyArrayObject *)op))) {
- noellipses = NPY_FALSE;
- }
- else if (PySequence_Check(op)) {
- Py_ssize_t n, i;
- PyObject *temp;
-
- n = PySequence_Size(op);
- i = 0;
- while (i < n && noellipses) {
- temp = PySequence_GetItem(op, i);
- if (temp == Py_Ellipsis) {
- noellipses = NPY_FALSE;
- }
- Py_DECREF(temp);
- i++;
- }
- }
- if (noellipses) {
- return PyArray_Return(mp);
- }
+ else {
+ return array_ass_sub_simple(self, ind, op);
}
-
- return (PyObject *)mp;
}
-
NPY_NO_EXPORT PyMappingMethods array_as_mapping = {
-#if PY_VERSION_HEX >= 0x02050000
(lenfunc)array_length, /*mp_length*/
-#else
- (inquiry)array_length, /*mp_length*/
-#endif
- (binaryfunc)array_subscript_nice, /*mp_subscript*/
+ (binaryfunc)array_subscript, /*mp_subscript*/
(objobjargproc)array_ass_sub, /*mp_ass_subscript*/
};
@@ -1484,6 +1540,14 @@ _nonzero_indices(PyObject *myBool, PyArrayIterObject **iters)
return -1;
}
nd = PyArray_NDIM(ba);
+
+ if (nd == 0) {
+ PyErr_SetString(PyExc_IndexError,
+ "only scalars can be indexed by 0-dimensional "
+ "boolean arrays");
+ goto fail;
+ }
+
for (j = 0; j < nd; j++) {
iters[j] = NULL;
}
@@ -1522,6 +1586,7 @@ _nonzero_indices(PyObject *myBool, PyArrayIterObject **iters)
}
/*
* Loop through the Boolean array and copy coordinates
* for non-zero entries
*/
@@ -1556,7 +1621,7 @@ _nonzero_indices(PyObject *myBool, PyArrayIterObject **iters)
}
/* convert an indexing object to an INTP indexing array iterator
- if possible -- otherwise, it is a Slice or Ellipsis object
+ if possible -- otherwise, it is a Slice, Ellipsis or None object
and has to be interpreted on bind to a particular
array so leave it NULL for now.
*/
@@ -1566,7 +1631,7 @@ _convert_obj(PyObject *obj, PyArrayIterObject **iter)
PyArray_Descr *indtype;
PyObject *arr;
- if (PySlice_Check(obj) || (obj == Py_Ellipsis)) {
+ if (PySlice_Check(obj) || (obj == Py_Ellipsis) || (obj == Py_None)) {
return 0;
}
else if (PyArray_Check(obj) && PyArray_ISBOOL((PyArrayObject *)obj)) {
@@ -1697,12 +1762,12 @@ PyArray_MapIterNext(PyArrayMapIterObject *mit)
* Let's do it at bind time and also convert all <0 values to >0 here
* as well.
*/
-NPY_NO_EXPORT void
+NPY_NO_EXPORT int
PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr)
{
int subnd;
PyObject *sub, *obj = NULL;
- int i, j, n, curraxis, ellipexp, noellip;
+ int i, j, n, curraxis, ellipexp, noellip, newaxes;
PyArrayIterObject *it;
npy_intp dimsize;
npy_intp *indptr;
@@ -1711,31 +1776,25 @@ PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr)
if (subnd < 0) {
PyErr_SetString(PyExc_IndexError,
"too many indices for array");
- return;
+ goto fail;
}
mit->ait = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr);
if (mit->ait == NULL) {
- return;
- }
- /* no subspace iteration needed. Finish up and Return */
- if (subnd == 0) {
- n = PyArray_NDIM(arr);
- for (i = 0; i < n; i++) {
- mit->iteraxes[i] = i;
- }
- goto finish;
+ goto fail;
}
/*
* all indexing arrays have been converted to 0
* therefore we can extract the subspace with a simple
- * getitem call which will use view semantics
+ * getitem call which will use view semantics, but
+ * without index checking, since all original normal
+ * indices are checked later as fancy ones.
*
* But, be sure to do it with a true array.
*/
if (PyArray_CheckExact(arr)) {
- sub = array_subscript_simple(arr, mit->indexobj);
+ sub = array_subscript_simple(arr, mit->indexobj, 0);
}
else {
Py_INCREF(arr);
@@ -1743,32 +1802,54 @@ PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr)
if (obj == NULL) {
goto fail;
}
- sub = array_subscript_simple((PyArrayObject *)obj, mit->indexobj);
+ sub = array_subscript_simple((PyArrayObject *)obj, mit->indexobj, 0);
Py_DECREF(obj);
}
if (sub == NULL) {
goto fail;
}
+
+ subnd = PyArray_NDIM((PyArrayObject *)sub);
+ /* no subspace iteration needed. Finish up and Return */
+ if (subnd == 0) {
+ n = PyArray_NDIM(arr);
+ for (i = 0; i < n; i++) {
+ mit->iteraxes[i] = i;
+ }
+ Py_DECREF(sub);
+ goto finish;
+ }
+
mit->subspace = (PyArrayIterObject *)PyArray_IterNew(sub);
Py_DECREF(sub);
if (mit->subspace == NULL) {
goto fail;
}
+
+ if (mit->nd + subnd > NPY_MAXDIMS) {
+ PyErr_Format(PyExc_ValueError,
+ "number of dimensions must be within [0, %d], "
+ "indexed array has %d",
+ NPY_MAXDIMS, mit->nd + subnd);
+ goto fail;
+ }
+
/* Expand dimensions of result */
- n = PyArray_NDIM(mit->subspace->ao);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < subnd; i++) {
mit->dimensions[mit->nd+i] = PyArray_DIMS(mit->subspace->ao)[i];
}
- mit->nd += n;
+ mit->nd += subnd;
/*
- * Now, we still need to interpret the ellipsis and slice objects
- * to determine which axes the indexing arrays are referring to
+ * Now, we still need to interpret the ellipsis, slice and None
+ * objects to determine which axes the indexing arrays are
+ * referring to
*/
n = PyTuple_GET_SIZE(mit->indexobj);
/* The number of dimensions an ellipsis takes up */
- ellipexp = PyArray_NDIM(arr) - n + 1;
+ newaxes = subnd - (PyArray_NDIM(arr) - mit->numiter);
+ ellipexp = PyArray_NDIM(arr) + newaxes - n + 1;
/*
* Now fill in iteraxes -- remember indexing arrays have been
* converted to 0's in mit->indexobj
@@ -1777,6 +1858,8 @@ PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr)
j = 0;
/* Only expand the first ellipsis */
noellip = 1;
+ /* count newaxes before iter axes */
+ newaxes = 0;
memset(mit->bscoord, 0, sizeof(npy_intp)*PyArray_NDIM(arr));
for (i = 0; i < n; i++) {
/*
@@ -1791,6 +1874,11 @@ PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr)
curraxis += ellipexp;
noellip = 0;
}
+ else if (obj == Py_None) {
+ if (j == 0) {
+ newaxes += 1;
+ }
+ }
else {
npy_intp start = 0;
npy_intp stop, step;
@@ -1815,6 +1903,9 @@ PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr)
curraxis += 1;
}
}
+ if (mit->consec) {
+ mit->consec = mit->iteraxes[0] + newaxes;
+ }
finish:
/* Here check the indexes (now that we have iteraxes) */
@@ -1845,14 +1936,14 @@ PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr)
}
PyArray_ITER_RESET(it);
}
- return;
+ return 0;
fail:
Py_XDECREF(mit->subspace);
Py_XDECREF(mit->ait);
mit->subspace = NULL;
mit->ait = NULL;
- return;
+ return -1;
}
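
Since PyArray_MapIterBind now reports failure through its return value instead of silently leaving a half-initialized iterator, callers follow this pattern (sketch mirroring the call sites updated in this patch):

if (PyArray_MapIterBind(mit, arr) != 0) {
    Py_DECREF(mit);    /* the exception is already set by MapIterBind */
    return NULL;
}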
@@ -1876,20 +1967,14 @@ PyArray_MapIterNew(PyObject *indexobj, int oned, int fancy)
}
mit = (PyArrayMapIterObject *)PyArray_malloc(sizeof(PyArrayMapIterObject));
+ if (mit == NULL) {
+ return NULL;
+ }
+ /* set all attributes of mapiter to zero */
+ memset(mit, 0, sizeof(PyArrayMapIterObject));
PyObject_Init((PyObject *)mit, &PyArrayMapIter_Type);
- if (mit == NULL) {
- return NULL;
- }
- for (i = 0; i < NPY_MAXDIMS; i++) {
- mit->iters[i] = NULL;
- }
- mit->index = 0;
- mit->ait = NULL;
- mit->subspace = NULL;
- mit->numiter = 0;
+ /* initialize mapiter attributes */
mit->consec = 1;
- Py_INCREF(indexobj);
- mit->indexobj = indexobj;
if (fancy == SOBJ_LISTTUP) {
PyObject *newobj;
@@ -1897,10 +1982,13 @@ PyArray_MapIterNew(PyObject *indexobj, int oned, int fancy)
if (newobj == NULL) {
goto fail;
}
- Py_DECREF(indexobj);
indexobj = newobj;
mit->indexobj = indexobj;
}
+ else {
+ Py_INCREF(indexobj);
+ mit->indexobj = indexobj;
+ }
if (oned) {
@@ -1913,8 +2001,8 @@ PyArray_MapIterNew(PyObject *indexobj, int oned, int fancy)
*/
/* convert all inputs to iterators */
- if (PyArray_Check(indexobj) &&
- (PyArray_TYPE((PyArrayObject *)indexobj) == NPY_BOOL)) {
+ if (PyArray_Check(indexobj) && PyArray_ISBOOL((PyArrayObject *)indexobj)
+ && !PyArray_IsZeroDim(indexobj)) {
mit->numiter = _nonzero_indices(indexobj, mit->iters);
if (mit->numiter < 0) {
goto fail;
@@ -2033,18 +2121,24 @@ PyArray_MapIterArray(PyArrayObject * a, PyObject * index)
{
PyArrayMapIterObject * mit;
int fancy = fancy_indexing_check(index);
+
+ /*
+ * MapIterNew supports a special mode that allows more efficient 1-d
+ * iteration, but clients that want to use it need a different API just
+ * for the 1-d case. For the public interface this is confusing, so the
+ * 1-d optimized mode is unconditionally disabled here and the generic
+ * implementation is used in all cases.
+ */
int oned = 0;
- if (fancy != SOBJ_NOTFANCY) {
- oned = ((PyArray_NDIM(a) == 1) &&
- !(PyTuple_Check(index) && PyTuple_GET_SIZE(index) > 1));
- }
mit = (PyArrayMapIterObject *) PyArray_MapIterNew(index, oned, fancy);
if (mit == NULL) {
return NULL;
}
- PyArray_MapIterBind(mit, a);
+ if (PyArray_MapIterBind(mit, a) != 0) {
+ Py_DECREF(mit);
+ return NULL;
+ }
PyArray_MapIterReset(mit);
return mit;
}
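
For context, a hedged sketch of how PyArray_MapIterArray is typically consumed; the size, index and dataptr fields follow the public PyArrayMapIterObject layout, and index_obj is a hypothetical name:

PyArrayMapIterObject *mit = PyArray_MapIterArray(arr, index_obj);
if (mit == NULL) {
    return NULL;
}
while (mit->index < mit->size) {
    /* mit->dataptr points at the currently selected element of arr */
    PyArray_MapIterNext(mit);
}
Py_DECREF(mit);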
@@ -2137,9 +2231,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
/** END of Subscript Iterator **/
diff --git a/numpy/core/src/multiarray/mapping.h b/numpy/core/src/multiarray/mapping.h
index d4da61b0c..ff11fbca3 100644
--- a/numpy/core/src/multiarray/mapping.h
+++ b/numpy/core/src/multiarray/mapping.h
@@ -7,14 +7,20 @@ extern NPY_NO_EXPORT PyMappingMethods array_as_mapping;
NPY_NO_EXPORT PyMappingMethods array_as_mapping;
#endif
-NPY_NO_EXPORT PyObject *
-array_big_item(PyArrayObject *self, npy_intp i);
-
NPY_NO_EXPORT Py_ssize_t
array_length(PyArrayObject *self);
NPY_NO_EXPORT PyObject *
-array_item_nice(PyArrayObject *self, Py_ssize_t i);
+array_item_asarray(PyArrayObject *self, npy_intp i);
+
+NPY_NO_EXPORT PyObject *
+array_item_asscalar(PyArrayObject *self, npy_intp i);
+
+NPY_NO_EXPORT PyObject *
+array_item(PyArrayObject *self, Py_ssize_t i);
+
+NPY_NO_EXPORT PyObject *
+array_subscript_asarray(PyArrayObject *self, PyObject *op);
NPY_NO_EXPORT PyObject *
array_subscript(PyArrayObject *self, PyObject *op);
@@ -22,16 +28,10 @@ array_subscript(PyArrayObject *self, PyObject *op);
NPY_NO_EXPORT int
array_ass_big_item(PyArrayObject *self, npy_intp i, PyObject *v);
-#if PY_VERSION_HEX < 0x02050000
- #if SIZEOF_INT == NPY_SIZEOF_INTP
- #define array_ass_item array_ass_big_item
- #endif
+/* FIXME: SIZEOF_SIZE_T is not defined anywhere; should this be sizeof(Py_ssize_t)? */
+#if SIZEOF_SIZE_T == NPY_SIZEOF_INTP
+#define array_ass_item array_ass_big_item
#else
- #if SIZEOF_SIZE_T == NPY_SIZEOF_INTP
- #define array_ass_item array_ass_big_item
- #endif
-#endif
-#ifndef array_ass_item
NPY_NO_EXPORT int
_array_ass_item(PyArrayObject *self, Py_ssize_t i, PyObject *v);
#define array_ass_item _array_ass_item
@@ -53,7 +53,7 @@ PyArray_MapIterReset(PyArrayMapIterObject *mit);
NPY_NO_EXPORT void
PyArray_MapIterNext(PyArrayMapIterObject *mit);
-NPY_NO_EXPORT void
+NPY_NO_EXPORT int
PyArray_MapIterBind(PyArrayMapIterObject *, PyArrayObject *);
NPY_NO_EXPORT PyObject*
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 79e9fe0a6..e4858eef7 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -372,7 +372,7 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset)
typed,
PyArray_NDIM(self), PyArray_DIMS(self),
PyArray_STRIDES(self),
- PyArray_DATA(self) + offset,
+ PyArray_BYTES(self) + offset,
PyArray_FLAGS(self)&(~NPY_ARRAY_F_CONTIGUOUS),
(PyObject *)self);
if (ret == NULL) {
@@ -427,7 +427,7 @@ PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype,
}
ret = PyArray_NewFromDescr(Py_TYPE(self),
dtype, PyArray_NDIM(self), PyArray_DIMS(self),
- PyArray_STRIDES(self), PyArray_DATA(self) + offset,
+ PyArray_STRIDES(self), PyArray_BYTES(self) + offset,
PyArray_FLAGS(self), (PyObject *)self);
if (ret == NULL) {
return -1;
@@ -633,6 +633,7 @@ array_toscalar(PyArrayObject *self, PyObject *args)
else {
PyErr_SetString(PyExc_ValueError,
"can only convert an array of size 1 to a Python scalar");
+ return NULL;
}
}
/* Special case of C-order flat indexing... :| */
@@ -1026,6 +1027,16 @@ array_copy(PyArrayObject *self, PyObject *args, PyObject *kwds)
return PyArray_NewCopy(self, order);
}
+/* Separate from array_copy to make __copy__ preserve Fortran contiguity. */
+static PyObject *
+array_copy_keeporder(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ if (!PyArg_ParseTuple(args, "")) {
+ return NULL;
+ }
+ return PyArray_NewCopy(self, NPY_KEEPORDER);
+}
+
#include <stdio.h>
static PyObject *
array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds)
@@ -1169,6 +1180,75 @@ array_sort(PyArrayObject *self, PyObject *args, PyObject *kwds)
}
static PyObject *
+array_partition(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis=-1;
+ int val;
+ NPY_SELECTKIND sortkind = NPY_INTROSELECT;
+ PyObject *order = NULL;
+ PyArray_Descr *saved = NULL;
+ PyArray_Descr *newd;
+ static char *kwlist[] = {"kth", "axis", "kind", "order", NULL};
+ PyArrayObject * ktharray;
+ PyObject * kthobj;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iO&O", kwlist,
+ &kthobj,
+ &axis,
+ PyArray_SelectkindConverter, &sortkind,
+ &order)) {
+ return NULL;
+ }
+
+ if (order == Py_None) {
+ order = NULL;
+ }
+ if (order != NULL) {
+ PyObject *new_name;
+ PyObject *_numpy_internal;
+ saved = PyArray_DESCR(self);
+ if (!PyDataType_HASFIELDS(saved)) {
+ PyErr_SetString(PyExc_ValueError, "Cannot specify " \
+ "order when the array has no fields.");
+ return NULL;
+ }
+ _numpy_internal = PyImport_ImportModule("numpy.core._internal");
+ if (_numpy_internal == NULL) {
+ return NULL;
+ }
+ new_name = PyObject_CallMethod(_numpy_internal, "_newnames",
+ "OO", saved, order);
+ Py_DECREF(_numpy_internal);
+ if (new_name == NULL) {
+ return NULL;
+ }
+ newd = PyArray_DescrNew(saved);
+ Py_DECREF(newd->names);
+ newd->names = new_name;
+ ((PyArrayObject_fields *)self)->descr = newd;
+ }
+
+ ktharray = (PyArrayObject *)PyArray_FromAny(kthobj, NULL, 0, 1,
+ NPY_ARRAY_DEFAULT, NULL);
+ if (ktharray == NULL) {
+ return NULL;
+ }
+
+ val = PyArray_Partition(self, ktharray, axis, sortkind);
+ Py_DECREF(ktharray);
+
+ if (order != NULL) {
+ Py_XDECREF(PyArray_DESCR(self));
+ ((PyArrayObject_fields *)self)->descr = saved;
+ }
+ if (val < 0) {
+ return NULL;
+ }
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
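+
array_partition exposes a selection, not a full sort: afterwards the kth element sits in its final sorted position with no larger value before it. A self-contained quickselect sketch of that contract (illustration only; NumPy's introselect kernels are more elaborate):

/* Classic quickselect: on return, v[kth] holds the value it would
 * have after a full sort of v[lo..hi]. */
static void
quickselect(double *v, long lo, long hi, long kth)
{
    while (lo < hi) {
        double pivot = v[kth];
        long i = lo, j = hi;
        do {
            while (v[i] < pivot) { i++; }
            while (v[j] > pivot) { j--; }
            if (i <= j) {
                double t = v[i]; v[i] = v[j]; v[j] = t;
                i++;
                j--;
            }
        } while (i <= j);
        if (j < kth) { lo = i; }   /* kth lies in the right part */
        if (kth < i) { hi = j; }   /* kth lies in the left part */
    }
}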
+static PyObject *
array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
int axis = -1;
@@ -1218,6 +1298,67 @@ array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds)
return PyArray_Return((PyArrayObject *)res);
}
+
+static PyObject *
+array_argpartition(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ int axis = -1;
+ NPY_SELECTKIND sortkind = NPY_INTROSELECT;
+ PyObject *order = NULL, *res;
+ PyArray_Descr *newd, *saved=NULL;
+ static char *kwlist[] = {"kth", "axis", "kind", "order", NULL};
+ PyObject * kthobj;
+ PyArrayObject * ktharray;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O", kwlist,
+ &kthobj,
+ PyArray_AxisConverter, &axis,
+ PyArray_SelectkindConverter, &sortkind,
+ &order)) {
+ return NULL;
+ }
+ if (order == Py_None) {
+ order = NULL;
+ }
+ if (order != NULL) {
+ PyObject *new_name;
+ PyObject *_numpy_internal;
+ saved = PyArray_DESCR(self);
+ if (!PyDataType_HASFIELDS(saved)) {
+ PyErr_SetString(PyExc_ValueError, "Cannot specify "
+ "order when the array has no fields.");
+ return NULL;
+ }
+ _numpy_internal = PyImport_ImportModule("numpy.core._internal");
+ if (_numpy_internal == NULL) {
+ return NULL;
+ }
+ new_name = PyObject_CallMethod(_numpy_internal, "_newnames",
+ "OO", saved, order);
+ Py_DECREF(_numpy_internal);
+ if (new_name == NULL) {
+ return NULL;
+ }
+ newd = PyArray_DescrNew(saved);
+ Py_DECREF(newd->names);
+ newd->names = new_name;
+ ((PyArrayObject_fields *)self)->descr = newd;
+ }
+
+ ktharray = (PyArrayObject *)PyArray_FromAny(kthobj, NULL, 0, 1,
+ NPY_ARRAY_DEFAULT, NULL);
+ if (ktharray == NULL) {
+ return NULL;
+ }
+
+ res = PyArray_ArgPartition(self, ktharray, axis, sortkind);
+ Py_DECREF(ktharray);
+
+ if (order != NULL) {
+ Py_XDECREF(PyArray_DESCR(self));
+ ((PyArrayObject_fields *)self)->descr = saved;
+ }
+ return PyArray_Return((PyArrayObject *)res);
+}
+
static PyObject *
array_searchsorted(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
@@ -1290,7 +1431,7 @@ array_deepcopy(PyArrayObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "O", &visit)) {
return NULL;
}
- ret = (PyArrayObject *)PyArray_Copy(self);
+ ret = (PyArrayObject *)PyArray_NewCopy(self, NPY_KEEPORDER);
if (PyDataType_REFCHK(PyArray_DESCR(self))) {
copy = PyImport_ImportModule("copy");
if (copy == NULL) {
@@ -1847,10 +1988,11 @@ array_cumprod(PyArrayObject *self, PyObject *args, PyObject *kwds)
static PyObject *
array_dot(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
- PyObject *b;
+ PyObject *fname, *ret, *b, *out = NULL;
static PyObject *numpycore = NULL;
+ static char *kwords[] = {"b", "out", NULL};
- if (!PyArg_ParseTuple(args, "O", &b)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwords, &b, &out)) {
return NULL;
}
@@ -1862,8 +2004,15 @@ array_dot(PyArrayObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
}
-
- return PyObject_CallMethod(numpycore, "dot", "OO", self, b);
+ fname = PyUString_FromString("dot");
+ if (fname == NULL) {
+ return NULL;
+ }
+ if (out == NULL) {
+ ret = PyObject_CallMethodObjArgs(numpycore, fname, self, b, NULL);
+ }
+ else {
+ ret = PyObject_CallMethodObjArgs(numpycore, fname, self, b, out, NULL);
+ }
+ Py_DECREF(fname);
+ return ret;
}
@@ -2151,8 +2300,8 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
/* for the copy module */
{"__copy__",
- (PyCFunction)array_copy,
- METH_VARARGS | METH_KEYWORDS, NULL},
+ (PyCFunction)array_copy_keeporder,
+ METH_VARARGS, NULL},
{"__deepcopy__",
(PyCFunction)array_deepcopy,
METH_VARARGS, NULL},
@@ -2184,6 +2333,9 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
{"argmin",
(PyCFunction)array_argmin,
METH_VARARGS | METH_KEYWORDS, NULL},
+ {"argpartition",
+ (PyCFunction)array_argpartition,
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"argsort",
(PyCFunction)array_argsort,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -2222,7 +2374,7 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"dot",
(PyCFunction)array_dot,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"fill",
(PyCFunction)array_fill,
METH_VARARGS, NULL},
@@ -2253,6 +2405,9 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
{"nonzero",
(PyCFunction)array_nonzero,
METH_VARARGS, NULL},
+ {"partition",
+ (PyCFunction)array_partition,
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"prod",
(PyCFunction)array_prod,
METH_VARARGS | METH_KEYWORDS, NULL},
diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src
index 954097e28..0aea85f66 100644
--- a/numpy/core/src/multiarray/multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/multiarray_tests.c.src
@@ -82,7 +82,7 @@ static int copy_object(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *ni
}
for (j = 0; j < niterx->size; ++j) {
- copyswap(PyArray_DATA(aout) + j * itemsize, niterx->dataptr, 0, NULL);
+ copyswap(PyArray_BYTES(aout) + j * itemsize, niterx->dataptr, 0, NULL);
PyArrayNeighborhoodIter_Next(niterx);
}
@@ -471,11 +471,9 @@ map_increment(PyArrayMapIterObject *mit, PyObject *op, inplace_map_binop add_inp
}
if ((mit->subspace != NULL) && (mit->consec)) {
- if (mit->iteraxes[0] > 0) {
- PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);
- if (arr == NULL) {
- return -1;
- }
+ PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);
+ if (arr == NULL) {
+ return -1;
}
}
@@ -559,6 +557,30 @@ fail:
return NULL;
}
+
+#if !defined(NPY_PY3K)
+static PyObject *
+int_subclass(PyObject *dummy, PyObject *args)
+{
+ PyObject *result = NULL;
+ PyObject *scalar_object = NULL;
+
+ if (!PyArg_UnpackTuple(args, "test_int_subclass", 1, 1, &scalar_object)) {
+ return NULL;
+ }
+
+ if (PyInt_Check(scalar_object)) {
+ result = Py_True;
+ }
+ else {
+ result = Py_False;
+ }
+
+ Py_INCREF(result);
+ return result;
+}
+#endif
+
static PyMethodDef Multiarray_TestsMethods[] = {
{"test_neighborhood_iterator",
test_neighborhood_iterator,
@@ -575,6 +597,11 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"test_inplace_increment",
inplace_increment,
METH_VARARGS, NULL},
+#if !defined(NPY_PY3K)
+ {"test_int_subclass",
+ int_subclass,
+ METH_VARARGS, NULL},
+#endif
{NULL, NULL, 0, NULL} /* Sentinel */
};
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 1f455f528..f8e434e67 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -51,6 +51,8 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
#include "item_selection.h"
#include "shape.h"
#include "ctors.h"
+#include "array_assign.h"
+#include "common.h"
/* Only here for API compatibility */
NPY_NO_EXPORT PyTypeObject PyBigArray_Type;
@@ -67,15 +69,13 @@ PyArray_GetPriority(PyObject *obj, double default_)
if (PyArray_CheckExact(obj))
return priority;
- ret = PyObject_GetAttrString(obj, "__array_priority__");
- if (ret != NULL) {
- priority = PyFloat_AsDouble(ret);
- }
- if (PyErr_Occurred()) {
- PyErr_Clear();
- priority = default_;
+ ret = PyArray_GetAttrString_SuppressException(obj, "__array_priority__");
+ if (ret == NULL) {
+ return default_;
}
- Py_XDECREF(ret);
+
+ priority = PyFloat_AsDouble(ret);
+ Py_DECREF(ret);
return priority;
}
@@ -208,7 +208,7 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd,
goto fail;
}
for (i = 0; i < n; i++) {
- ptr2[i] = PyArray_DATA(ap) + i*PyArray_STRIDES(ap)[0];
+ ptr2[i] = PyArray_BYTES(ap) + i*PyArray_STRIDES(ap)[0];
}
*((char ***)ptr) = ptr2;
break;
@@ -222,7 +222,7 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd,
for (i = 0; i < n; i++) {
ptr3[i] = ptr3[n + (m-1)*i];
for (j = 0; j < m; j++) {
- ptr3[i][j] = PyArray_DATA(ap) + i*PyArray_STRIDES(ap)[0] + j*PyArray_STRIDES(ap)[1];
+ ptr3[i][j] = PyArray_BYTES(ap) + i*PyArray_STRIDES(ap)[0] + j*PyArray_STRIDES(ap)[1];
}
}
*((char ****)ptr) = ptr3;
@@ -338,7 +338,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis)
axis += ndim;
}
- if (ndim == 1 & axis != 0) {
+ if (ndim == 1 && axis != 0) {
char msg[] = "axis != 0 for ndim == 1; this will raise an error in "
"future versions of numpy";
if (DEPRECATE(msg) < 0) {
@@ -862,6 +862,11 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2)
if (ret == NULL) {
goto fail;
}
+ /* Ensure that multiarray.inner(<Nx0>,<Mx0>) -> zeros((N,M)) */
+ if (PyArray_SIZE(ap1) == 0 && PyArray_SIZE(ap2) == 0) {
+ memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret));
+ }
+
dot = (PyArray_DESCR(ret)->f->dotfunc);
if (dot == NULL) {
PyErr_SetString(PyExc_ValueError,
@@ -876,16 +881,13 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2)
axis = PyArray_NDIM(ap2) - 1;
it2 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap2, &axis);
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ap2));
- while (1) {
+ while (it1->index < it1->size) {
while (it2->index < it2->size) {
dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret);
op += os;
PyArray_ITER_NEXT(it2);
}
PyArray_ITER_NEXT(it1);
- if (it1->index >= it1->size) {
- break;
- }
PyArray_ITER_RESET(it2);
}
NPY_END_THREADS_DESCR(PyArray_DESCR(ap2));
@@ -994,10 +996,6 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out)
if (PyArray_SIZE(ap1) == 0 && PyArray_SIZE(ap2) == 0) {
memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret));
}
- else {
- /* Ensure that multiarray.dot([],[]) -> 0 */
- memset(PyArray_DATA(ret), 0, PyArray_ITEMSIZE(ret));
- }
dot = PyArray_DESCR(ret)->f->dotfunc;
if (dot == NULL) {
@@ -1013,16 +1011,13 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out)
it2 = (PyArrayIterObject *)
PyArray_IterAllButAxis((PyObject *)ap2, &matchDim);
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ap2));
- while (1) {
+ while (it1->index < it1->size) {
while (it2->index < it2->size) {
dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret);
op += os;
PyArray_ITER_NEXT(it2);
}
PyArray_ITER_NEXT(it1);
- if (it1->index >= it1->size) {
- break;
- }
PyArray_ITER_RESET(it2);
}
NPY_END_THREADS_DESCR(PyArray_DESCR(ap2));
@@ -1177,7 +1172,7 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum,
op = PyArray_DATA(ret);
os = PyArray_DESCR(ret)->elsize;
ip1 = PyArray_DATA(ap1);
- ip2 = PyArray_DATA(ap2) + n_left*is2;
+ ip2 = PyArray_BYTES(ap2) + n_left*is2;
n = n - n_left;
for (i = 0; i < n_left; i++) {
dot(ip1, is1, ip2, is2, op, n, ret);
@@ -1515,20 +1510,31 @@ PyArray_EquivTypenums(int typenum1, int typenum2)
}
/*** END C-API FUNCTIONS **/
-
+/*
+ * NPY_RELAXED_STRIDES_CHECKING: if the strides logic is changed to
+ * relaxed checking, the order-specific stride setting below is no
+ * longer necessary.
+ */
static PyObject *
-_prepend_ones(PyArrayObject *arr, int nd, int ndmin)
+_prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order)
{
npy_intp newdims[NPY_MAXDIMS];
npy_intp newstrides[NPY_MAXDIMS];
+ npy_intp newstride;
int i, k, num;
PyArrayObject *ret;
PyArray_Descr *dtype;
+ if (order == NPY_FORTRANORDER || PyArray_ISFORTRAN(arr) ||
+ PyArray_NDIM(arr) == 0) {
+ newstride = PyArray_DESCR(arr)->elsize;
+ }
+ else {
+ newstride = PyArray_STRIDES(arr)[0] * PyArray_DIMS(arr)[0];
+ }
+
num = ndmin - nd;
for (i = 0; i < num; i++) {
newdims[i] = 1;
- newstrides[i] = PyArray_DESCR(arr)->elsize;
+ newstrides[i] = newstride;
}
for (i = num; i < ndmin; i++) {
k = i - num;
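
Illustration (not part of the patch): the stride rule above in standalone form; length-1 axes can take any stride, so one is chosen that keeps the result contiguous in the requested order. Hypothetical plain-C types:

#include <stddef.h>

/* Prepend `ndmin - nd` length-1 axes, picking a stride for them that
 * preserves C- or Fortran-contiguity of the result. */
static void
prepend_ones(ptrdiff_t *newdims, ptrdiff_t *newstrides,
             const ptrdiff_t *dims, const ptrdiff_t *strides,
             int nd, int ndmin, ptrdiff_t itemsize, int fortran)
{
    int i, num = ndmin - nd;
    ptrdiff_t one_stride = (fortran || nd == 0)
                               ? itemsize
                               : strides[0] * dims[0];
    for (i = 0; i < num; i++) {
        newdims[i] = 1;
        newstrides[i] = one_stride;
    }
    for (i = num; i < ndmin; i++) {
        newdims[i] = dims[i - num];
        newstrides[i] = strides[i - num];
    }
}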
@@ -1669,7 +1675,7 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
* create a new array from the same data with ones in the shape
* steals a reference to ret
*/
- return _prepend_ones(ret, nd, ndmin);
+ return _prepend_ones(ret, nd, ndmin, order);
clean_type:
Py_XDECREF(type);
@@ -1927,15 +1933,8 @@ array_count_nonzero(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
}
#if defined(NPY_PY3K)
return PyLong_FromSsize_t(count);
-#elif PY_VERSION_HEX >= 0x02050000
- return PyInt_FromSsize_t(count);
#else
- if ((npy_intp)((long)count) == count) {
- return PyInt_FromLong(count);
- }
- else {
- return PyLong_FromVoidPtr((void*)count);
- }
+ return PyInt_FromSsize_t(count);
#endif
}
@@ -2880,7 +2879,7 @@ array_result_type(PyObject *NPY_UNUSED(dummy), PyObject *args)
"too many arguments");
goto finish;
}
- if (!PyArray_DescrConverter2(obj, &dtypes[ndtypes])) {
+ if (!PyArray_DescrConverter(obj, &dtypes[ndtypes])) {
goto finish;
}
++ndtypes;
@@ -3537,6 +3536,32 @@ PyDataMem_RENEW(void *ptr, size_t size)
return (char *)result;
}
+static PyObject *
+array_may_share_memory(PyObject *NPY_UNUSED(ignored), PyObject *args)
+{
+ PyArrayObject * self = NULL;
+ PyArrayObject * other = NULL;
+ int overlap;
+
+ if (!PyArg_ParseTuple(args, "O&O&", PyArray_Converter, &self,
+ PyArray_Converter, &other)) {
+ return NULL;
+ }
+
+ overlap = arrays_overlap(self, other);
+ Py_XDECREF(self);
+ Py_XDECREF(other);
+
+ if (overlap) {
+ Py_RETURN_TRUE;
+ }
+ else {
+ Py_RETURN_FALSE;
+ }
+}
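
A hedged sketch of the kind of conservative bounds test arrays_overlap presumably performs (this simplified byte-range version is an assumption, not the actual implementation):

#include <stddef.h>

/* Conservative: answers "maybe" whenever the [start, start+nbytes)
 * ranges intersect, even if the strides mean no element is shared. */
static int
byte_ranges_overlap(const char *a, size_t a_nbytes,
                    const char *b, size_t b_nbytes)
{
    return (a < b + b_nbytes) && (b < a + a_nbytes);
}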
+
+
static struct PyMethodDef array_module_methods[] = {
{"_get_ndarray_c_version",
(PyCFunction)array__get_ndarray_c_version,
@@ -3637,6 +3662,9 @@ static struct PyMethodDef array_module_methods[] = {
{"result_type",
(PyCFunction)array_result_type,
METH_VARARGS, NULL},
+ {"may_share_memory",
+ (PyCFunction)array_may_share_memory,
+ METH_VARARGS, NULL},
/* Datetime-related functions */
{"datetime_data",
(PyCFunction)array_datetime_data,
@@ -3755,6 +3783,19 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
} \
Py##child##ArrType_Type.tp_hash = Py##parent1##_Type.tp_hash;
+/*
+ * In Py3K, int is no longer a fixed-width integer type, so don't
+ * inherit numpy.int_ from it.
+ */
+#if defined(NPY_PY3K)
+#define INHERIT_INT(child, parent2) \
+ SINGLE_INHERIT(child, parent2);
+#else
+#define INHERIT_INT(child, parent2) \
+ Py##child##ArrType_Type.tp_flags |= Py_TPFLAGS_INT_SUBCLASS; \
+ DUAL_INHERIT(child, Int, parent2);
+#endif
+
#if defined(NPY_PY3K)
#define DUAL_INHERIT_COMPARE(child, parent1, parent2)
#else
@@ -3783,18 +3824,17 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
SINGLE_INHERIT(Bool, Generic);
SINGLE_INHERIT(Byte, SignedInteger);
SINGLE_INHERIT(Short, SignedInteger);
-#if SIZEOF_INT == SIZEOF_LONG && !defined(NPY_PY3K)
- DUAL_INHERIT(Int, Int, SignedInteger);
+
+#if NPY_SIZEOF_INT == NPY_SIZEOF_LONG
+ INHERIT_INT(Int, SignedInteger);
#else
SINGLE_INHERIT(Int, SignedInteger);
#endif
-#if !defined(NPY_PY3K)
- DUAL_INHERIT(Long, Int, SignedInteger);
-#else
- SINGLE_INHERIT(Long, SignedInteger);
-#endif
-#if NPY_SIZEOF_LONGLONG == SIZEOF_LONG && !defined(NPY_PY3K)
- DUAL_INHERIT(LongLong, Int, SignedInteger);
+
+ INHERIT_INT(Long, SignedInteger);
+
+#if NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG
+ INHERIT_INT(LongLong, SignedInteger);
#else
SINGLE_INHERIT(LongLong, SignedInteger);
#endif
@@ -3836,6 +3876,9 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
#undef SINGLE_INHERIT
#undef DUAL_INHERIT
+#undef INHERIT_INT
+#undef DUAL_INHERIT2
+#undef DUAL_INHERIT_COMPARE
/*
* Clean up string and unicode array types so they act more like
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index 09e572f10..40043648d 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -134,12 +134,10 @@ NpyIter_RemoveAxis(NpyIter *iter, int axis)
axisdata = NIT_INDEX_AXISDATA(axisdata_del, 1);
memmove(axisdata_del, axisdata, (ndim-1-xdim)*sizeof_axisdata);
- /* If there is more than one dimension, shrink the iterator */
- if (ndim > 1) {
- NIT_NDIM(iter) = ndim-1;
- }
- /* Otherwise convert it to a singleton dimension */
- else {
+ /* Shrink the iterator */
+ NIT_NDIM(iter) = ndim - 1;
+ /* If it is now 0-d fill the singleton dimension */
+ if (ndim == 1) {
npy_intp *strides = NAD_STRIDES(axisdata_del);
NAD_SHAPE(axisdata_del) = 1;
for (iop = 0; iop < nop; ++iop) {
@@ -642,6 +640,9 @@ NpyIter_GetIterIndex(NpyIter *iter)
npy_intp sizeof_axisdata;
iterindex = 0;
+ if (ndim == 0) {
+ return 0;
+ }
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
axisdata = NIT_INDEX_AXISDATA(NIT_AXISDATA(iter), ndim-1);
@@ -1750,6 +1751,8 @@ npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex)
NIT_ITERINDEX(iter) = iterindex;
+ ndim = ndim ? ndim : 1;
+
if (iterindex == 0) {
dataptr = NIT_RESETDATAPTR(iter);
@@ -2062,8 +2065,9 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
/* If last time around, the reduce loop structure was full, we reuse it */
if (reuse_reduce_loops) {
- npy_intp full_transfersize;
+ npy_intp full_transfersize, prev_reduce_outersize;
+ prev_reduce_outersize = NBF_REDUCE_OUTERSIZE(bufferdata);
reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata);
reduce_outerptrs = NBF_REDUCE_OUTERPTRS(bufferdata);
reduce_outerdim = NBF_REDUCE_OUTERDIM(bufferdata);
@@ -2086,6 +2090,13 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
else {
transfersize = full_transfersize;
}
+ if (prev_reduce_outersize < NBF_REDUCE_OUTERSIZE(bufferdata)) {
+ /*
+ * If less data was copied the previous time around, it may not
+ * be safe to reuse the buffers even if the pointers match.
+ */
+ reuse_reduce_loops = 0;
+ }
NBF_BUFITEREND(bufferdata) = iterindex + reduce_innersize;
NPY_IT_DBG_PRINT3("Reused reduce transfersize: %d innersize: %d "
@@ -2184,6 +2195,11 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
break;
/* Just a copy */
case 0:
+ /* Do not reuse buffer if it did not exist */
+ if (!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER) &&
+ (prev_dataptrs != NULL)) {
+ prev_dataptrs[iop] = NULL;
+ }
/*
* No copyswap or cast was requested, so all we're
* doing is copying the data to fill the buffer and
@@ -2227,6 +2243,11 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
break;
/* Just a copy, but with a reduction */
case NPY_OP_ITFLAG_REDUCE:
+ /* Do not reuse buffer if it did not exist */
+ if (!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER) &&
+ (prev_dataptrs != NULL)) {
+ prev_dataptrs[iop] = NULL;
+ }
if (ad_strides[iop] == 0) {
strides[iop] = 0;
/* It's all in one stride in the inner loop dimension */
@@ -2615,6 +2636,7 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
*/
if (count <= reducespace) {
*reduce_innersize = count;
+ NIT_ITFLAGS(iter) |= NPY_ITFLAG_REUSE_REDUCE_LOOPS;
return count;
}
else if (nonzerocoord) {
@@ -2622,6 +2644,8 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
count = reducespace;
}
*reduce_innersize = count;
+ /* NOTE: This is similar to the (coord != 0) case below. */
+ NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS;
return count;
}
else {
@@ -2661,8 +2685,20 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
return count;
}
- /* In this case, we can reuse the reduce loops */
- NIT_ITFLAGS(iter) |= NPY_ITFLAG_REUSE_REDUCE_LOOPS;
+ coord = NAD_INDEX(axisdata);
+ if (coord != 0) {
+ /*
+ * In this case, it is only safe to reuse the buffer if the amount
+ * of data copied is not more than the current axes hold, as is
+ * the case when reuse_reduce_loops was already active. In
+ * principle it should be OK when the idim loop returns immediately.
+ */
+ NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS;
+ }
+ else {
+ /* In this case, we can reuse the reduce loops */
+ NIT_ITFLAGS(iter) |= NPY_ITFLAG_REUSE_REDUCE_LOOPS;
+ }
*reduce_innersize = reducespace;
count /= reducespace;
@@ -2687,7 +2723,6 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
"the outer loop? %d\n", iop, (int)stride0op[iop]);
}
shape = NAD_SHAPE(axisdata);
- coord = NAD_INDEX(axisdata);
reducespace += (shape-coord-1) * factor;
factor *= shape;
NIT_ADVANCE_AXISDATA(axisdata, 1);
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index cfbaea321..8a1e3c9a1 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -54,8 +54,7 @@ static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
npy_uint32 *op_flags, int **op_axes,
- npy_intp *itershape,
- int output_scalars);
+ npy_intp *itershape);
static void
npyiter_replace_axisdata(NpyIter *iter, int iop,
PyArrayObject *op,
@@ -75,7 +74,7 @@ static PyArray_Descr *
npyiter_get_common_dtype(int nop, PyArrayObject **op,
npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
- int only_inputs, int output_scalars);
+ int only_inputs);
static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
@@ -86,7 +85,7 @@ npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
- int **op_axes, int output_scalars);
+ int **op_axes);
static void
npyiter_get_priority_subtype(int nop, PyArrayObject **op,
npyiter_opitflags *op_itflags,
@@ -122,8 +121,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
npy_int8 *perm;
NpyIter_BufferData *bufferdata = NULL;
- int any_allocate = 0, any_missing_dtypes = 0,
- output_scalars = 0, need_subtype = 0;
+ int any_allocate = 0, any_missing_dtypes = 0, need_subtype = 0;
/* The subtype for automatically allocated outputs */
double subtype_priority = NPY_PRIORITY;
@@ -158,6 +156,22 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
return NULL;
}
+ /*
+ * Before 1.8, if `oa_ndim == 0`, this meant `op_axes != NULL` was an error.
+ * With 1.8, `oa_ndim == -1` takes this role, while op_axes in that case
+ * enforces a 0-d iterator. Using `oa_ndim == 0` with `op_axes == NULL`
+ * is thus deprecated as of version 1.8.
+ */
+ if ((oa_ndim == 0) && (op_axes == NULL)) {
+ char* mesg = "using `oa_ndim == 0` when `op_axes` is NULL is "
+ "deprecated. Use `oa_ndim == -1` or the MultiNew "
+ "iterator for NumPy <1.8 compatibility";
+ if (DEPRECATE(mesg) < 0) {
+ return NULL;
+ }
+ oa_ndim = -1;
+ }
+
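+
Callers that previously passed oa_ndim == 0 with op_axes == NULL should now pass -1, exactly as the updated NpyIter_MultiNew and NpyIter_New below do. Sketch of an adjusted call:

/* -1 signals that op_axes and itershape are unused. */
NpyIter *iter = NpyIter_AdvancedNew(nop, op_in, flags, order, casting,
                                    op_flags, op_request_dtypes,
                                    -1, NULL, NULL, 0);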
/* Error check 'oa_ndim' and 'op_axes', which must be used together */
if (!npyiter_check_op_axes(nop, oa_ndim, op_axes, itershape)) {
return NULL;
@@ -175,12 +189,6 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
/* Calculate how many dimensions the iterator should have */
ndim = npyiter_calculate_ndim(nop, op_in, oa_ndim);
- /* If 'ndim' is zero, any outputs should be scalars */
- if (ndim == 0) {
- output_scalars = 1;
- ndim = 1;
- }
-
NPY_IT_TIME_POINT(c_calculate_ndim);
/* Allocate memory for the iterator */
@@ -231,8 +239,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
/* Fill in the AXISDATA arrays and set the ITERSIZE field */
if (!npyiter_fill_axisdata(iter, flags, op_itflags, op_dataptr,
- op_flags, op_axes, itershape,
- output_scalars)) {
+ op_flags, op_axes, itershape)) {
NpyIter_Deallocate(iter);
return NULL;
}
@@ -338,8 +345,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
dtype = npyiter_get_common_dtype(nop, op,
op_itflags, op_dtype,
op_request_dtypes,
- only_inputs,
- output_scalars);
+ only_inputs);
if (dtype == NULL) {
NpyIter_Deallocate(iter);
return NULL;
@@ -389,7 +395,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
* done now using a memory layout matching the iterator.
*/
if (!npyiter_allocate_arrays(iter, flags, op_dtype, subtype, op_flags,
- op_itflags, op_axes, output_scalars)) {
+ op_itflags, op_axes)) {
NpyIter_Deallocate(iter);
return NULL;
}
@@ -504,7 +510,7 @@ NpyIter_MultiNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
{
return NpyIter_AdvancedNew(nop, op_in, flags, order, casting,
op_flags, op_request_dtypes,
- 0, NULL, NULL, 0);
+ -1, NULL, NULL, 0);
}
/*NUMPY_API
@@ -521,7 +527,7 @@ NpyIter_New(PyArrayObject *op, npy_uint32 flags,
return NpyIter_AdvancedNew(1, &op, flags, order, casting,
&op_flags, &dtype,
- 0, NULL, NULL, 0);
+ -1, NULL, NULL, 0);
}
/*NUMPY_API
@@ -758,53 +764,60 @@ npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes,
char axes_dupcheck[NPY_MAXDIMS];
int iop, idim;
- if (oa_ndim == 0 && (op_axes != NULL || itershape != NULL)) {
- PyErr_Format(PyExc_ValueError,
- "If 'op_axes' or 'itershape' is not NULL in the"
- "iterator constructor, 'oa_ndim' must be greater than zero");
- return 0;
- }
- else if (oa_ndim > 0) {
- if (oa_ndim > NPY_MAXDIMS) {
+ if (oa_ndim < 0) {
+ /*
+ * If `oa_ndim < 0`, `op_axes` and `itershape` are signalled to
+ * be unused and should be NULL. (Before NumPy 1.8 this was
+ * signalled by `oa_ndim == 0`.)
+ */
+ if (op_axes != NULL || itershape != NULL) {
PyErr_Format(PyExc_ValueError,
+ "If 'op_axes' or 'itershape' is not NULL in the iterator "
+ "constructor, 'oa_ndim' must be zero or greater");
+ return 0;
+ }
+ return 1;
+ }
+ if (oa_ndim > NPY_MAXDIMS) {
+ PyErr_Format(PyExc_ValueError,
"Cannot construct an iterator with more than %d dimensions "
"(%d were requested for op_axes)",
(int)NPY_MAXDIMS, oa_ndim);
- return 0;
- }
- else if (op_axes == NULL) {
- PyErr_Format(PyExc_ValueError,
- "If 'oa_ndim' is greater than zero in the iterator "
- "constructor, then op_axes cannot be NULL");
- return 0;
- }
+ return 0;
+ }
+ if (op_axes == NULL) {
+ PyErr_Format(PyExc_ValueError,
+ "If 'oa_ndim' is zero or greater in the iterator "
+ "constructor, then op_axes cannot be NULL");
+ return 0;
+ }
- /* Check that there are no duplicates in op_axes */
- for (iop = 0; iop < nop; ++iop) {
- int *axes = op_axes[iop];
- if (axes != NULL) {
- memset(axes_dupcheck, 0, NPY_MAXDIMS);
- for (idim = 0; idim < oa_ndim; ++idim) {
- npy_intp i = axes[idim];
- if (i >= 0) {
- if (i >= NPY_MAXDIMS) {
- PyErr_Format(PyExc_ValueError,
- "The 'op_axes' provided to the iterator "
- "constructor for operand %d "
- "contained invalid "
- "values %d", (int)iop, (int)i);
- return 0;
- } else if(axes_dupcheck[i] == 1) {
- PyErr_Format(PyExc_ValueError,
- "The 'op_axes' provided to the iterator "
- "constructor for operand %d "
- "contained duplicate "
- "value %d", (int)iop, (int)i);
- return 0;
- }
- else {
- axes_dupcheck[i] = 1;
- }
+ /* Check that there are no duplicates in op_axes */
+ for (iop = 0; iop < nop; ++iop) {
+ int *axes = op_axes[iop];
+ if (axes != NULL) {
+ memset(axes_dupcheck, 0, NPY_MAXDIMS);
+ for (idim = 0; idim < oa_ndim; ++idim) {
+ npy_intp i = axes[idim];
+ if (i >= 0) {
+ if (i >= NPY_MAXDIMS) {
+ PyErr_Format(PyExc_ValueError,
+ "The 'op_axes' provided to the iterator "
+ "constructor for operand %d "
+ "contained invalid "
+ "values %d", (int)iop, (int)i);
+ return 0;
+ }
+ else if (axes_dupcheck[i] == 1) {
+ PyErr_Format(PyExc_ValueError,
+ "The 'op_axes' provided to the iterator "
+ "constructor for operand %d "
+ "contained duplicate "
+ "value %d", (int)iop, (int)i);
+ return 0;
+ }
+ else {
+ axes_dupcheck[i] = 1;
}
}
}
@@ -819,7 +832,7 @@ npyiter_calculate_ndim(int nop, PyArrayObject **op_in,
int oa_ndim)
{
/* If 'op_axes' is being used, force 'ndim' */
- if (oa_ndim > 0 ) {
+ if (oa_ndim >= 0 ) {
return oa_ndim;
}
/* Otherwise it's the maximum 'ndim' from the operands */
@@ -1014,7 +1027,6 @@ npyiter_prepare_one_operand(PyArrayObject **op,
if (PyArray_Check(*op)) {
- npy_uint32 tmp;
if ((*op_itflags) & NPY_OP_ITFLAG_WRITE
&& PyArray_FailUnlessWriteable(*op, "operand array with iterator "
@@ -1439,8 +1451,7 @@ static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
npy_uint32 *op_flags, int **op_axes,
- npy_intp *itershape,
- int output_scalars)
+ npy_intp *itershape)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
@@ -1540,6 +1551,13 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf
axisdata = NIT_AXISDATA(iter);
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
+ if (ndim == 0) {
+ /* Need to fill the first axisdata, even if the iterator is 0-d */
+ NAD_SHAPE(axisdata) = 1;
+ NAD_INDEX(axisdata) = 0;
+ memcpy(NAD_PTRS(axisdata), op_dataptr, NPY_SIZEOF_INTP*nop);
+ }
+
/* Now process the operands, filling in the axisdata */
for (idim = 0; idim < ndim; ++idim) {
npy_intp bshape = broadcast_shape[ndim-idim-1];
@@ -1560,7 +1578,7 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf
ondim = PyArray_NDIM(op_cur);
if (bshape == 1) {
strides[iop] = 0;
- if (idim >= ondim && !output_scalars &&
+ if (idim >= ondim &&
(op_flags[iop] & NPY_ITER_NO_BROADCAST)) {
goto operand_different_than_broadcast;
}
@@ -1681,8 +1699,8 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf
}
/* Now fill in the ITERSIZE member */
- NIT_ITERSIZE(iter) = broadcast_shape[0];
- for (idim = 1; idim < ndim; ++idim) {
+ NIT_ITERSIZE(iter) = 1;
+ for (idim = 0; idim < ndim; ++idim) {
NIT_ITERSIZE(iter) *= broadcast_shape[idim];
}
/* The range defaults to everything */
@@ -2003,7 +2021,10 @@ npyiter_replace_axisdata(NpyIter *iter, int iop,
NIT_RESETDATAPTR(iter)[iop] = op_dataptr;
NIT_BASEOFFSETS(iter)[iop] = baseoffset;
axisdata = axisdata0;
- for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) {
+ /* Fill at least one axisdata, for the 0-d case */
+ NAD_PTRS(axisdata)[iop] = op_dataptr;
+ NIT_ADVANCE_AXISDATA(axisdata, 1);
+ for (idim = 1; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) {
NAD_PTRS(axisdata)[iop] = op_dataptr;
}
}
@@ -2029,7 +2050,7 @@ npyiter_compute_index_strides(NpyIter *iter, npy_uint32 flags)
/*
* If there is only one element being iterated, we just have
* to touch the first AXISDATA because nothing will ever be
- * incremented.
+ * incremented. This also initializes the data for the 0-d case.
*/
if (NIT_ITERSIZE(iter) == 1) {
if (itflags & NPY_ITFLAG_HASINDEX) {
@@ -2399,7 +2420,7 @@ static PyArray_Descr *
npyiter_get_common_dtype(int nop, PyArrayObject **op,
npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
- int only_inputs, int output_scalars)
+ int only_inputs)
{
int iop;
npy_intp narrs = 0, ndtypes = 0;
@@ -2698,7 +2719,7 @@ npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
- int **op_axes, int output_scalars)
+ int **op_axes)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
@@ -2729,7 +2750,7 @@ npyiter_allocate_arrays(NpyIter *iter,
if (op[iop] == NULL) {
PyArrayObject *out;
PyTypeObject *op_subtype;
- int ondim = output_scalars ? 0 : ndim;
+ int ondim = ndim;
/* Check whether the subtype was disabled */
op_subtype = (op_flags[iop] & NPY_ITER_NO_SUBTYPE) ?
@@ -2902,7 +2923,7 @@ npyiter_allocate_arrays(NpyIter *iter,
if ((itflags & NPY_ITFLAG_BUFFER) &&
!(op_itflags[iop] & NPY_OP_ITFLAG_CAST)) {
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
- if (ndim == 1) {
+ if (ndim <= 1) {
op_itflags[iop] |= NPY_OP_ITFLAG_BUFNEVER;
NBF_STRIDES(bufferdata)[iop] = NAD_STRIDES(axisdata)[iop];
}
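
Taken together with the axisdata changes above, the iterator now handles 0-d operands end to end. A minimal usage sketch (not from the patch; assumes op0d is an existing 0-d PyArrayObject *):

    NpyIter *it = NpyIter_New(op0d, NPY_ITER_READONLY,
                              NPY_KEEPORDER, NPY_NO_CASTING, NULL);
    if (it != NULL) {
        NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
        char **dataptr = NpyIter_GetDataPtrArray(it);
        if (iternext != NULL) {
            do {
                /* dataptr[0] points at the single element; ITERSIZE is 1 */
            } while (iternext(it));
        }
        NpyIter_Deallocate(it);
    }
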
diff --git a/numpy/core/src/multiarray/nditer_impl.h b/numpy/core/src/multiarray/nditer_impl.h
index 1251baa6e..ae24f46e6 100644
--- a/numpy/core/src/multiarray/nditer_impl.h
+++ b/numpy/core/src/multiarray/nditer_impl.h
@@ -294,7 +294,7 @@ struct NpyIter_AD {
#define NIT_SIZEOF_ITERATOR(itflags, ndim, nop) ( \
sizeof(struct NpyIter_InternalOnly) + \
NIT_AXISDATA_OFFSET(itflags, ndim, nop) + \
- NIT_AXISDATA_SIZEOF(itflags, ndim, nop)*(ndim))
+ NIT_AXISDATA_SIZEOF(itflags, ndim, nop)*(ndim ? ndim : 1))
/* Internal helper functions shared between implementation files */
NPY_NO_EXPORT void
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index fd88cdbc7..f78979270 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -95,7 +95,6 @@ NpyIter_GlobalFlagsConverter(PyObject *flags_in, npy_uint32 *flags)
npy_uint32 flag;
if (flags_in == NULL || flags_in == Py_None) {
- *flags = 0;
return 1;
}
@@ -526,7 +525,7 @@ npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
return 0;
}
- *oa_ndim = 0;
+ *oa_ndim = -1;
/* Copy the tuples into op_axes */
for (iop = 0; iop < nop; ++iop) {
@@ -545,13 +544,8 @@ npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
Py_DECREF(a);
return 0;
}
- if (*oa_ndim == 0) {
+ if (*oa_ndim == -1) {
*oa_ndim = PySequence_Size(a);
- if (*oa_ndim == 0) {
- PyErr_SetString(PyExc_ValueError,
- "op_axes must have at least one dimension");
- return 0;
- }
if (*oa_ndim > NPY_MAXDIMS) {
PyErr_SetString(PyExc_ValueError,
"Too many dimensions in op_axes");
@@ -575,7 +569,7 @@ npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
op_axes[iop][idim] = -1;
}
else {
- op_axes[iop][idim] = PyInt_AsLong(v);
+ op_axes[iop][idim] = PyArray_PyIntAsInt(v);
if (op_axes[iop][idim]==-1 &&
PyErr_Occurred()) {
Py_DECREF(a);
@@ -589,7 +583,7 @@ npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
}
}
- if (*oa_ndim == 0) {
+ if (*oa_ndim == -1) {
PyErr_SetString(PyExc_ValueError,
"If op_axes is provided, at least one list of axes "
"must be contained within it");
@@ -726,7 +720,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
NPY_CASTING casting = NPY_SAFE_CASTING;
npy_uint32 op_flags[NPY_MAXARGS];
PyArray_Descr *op_request_dtypes[NPY_MAXARGS];
- int oa_ndim = 0;
+ int oa_ndim = -1;
int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS];
int *op_axes[NPY_MAXARGS];
PyArray_Dims itershape = {NULL, 0};
@@ -784,9 +778,9 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
}
if (itershape.len > 0) {
- if (oa_ndim == 0) {
+ if (oa_ndim == -1) {
oa_ndim = itershape.len;
- memset(op_axes, 0, sizeof(op_axes[0])*oa_ndim);
+ memset(op_axes, 0, sizeof(op_axes[0]) * nop);
}
else if (oa_ndim != itershape.len) {
PyErr_SetString(PyExc_ValueError,
@@ -800,10 +794,9 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
itershape.ptr = NULL;
}
-
self->iter = NpyIter_AdvancedNew(nop, op, flags, order, casting, op_flags,
op_request_dtypes,
- oa_ndim, oa_ndim > 0 ? op_axes : NULL,
+ oa_ndim, oa_ndim >= 0 ? op_axes : NULL,
itershape.ptr,
buffersize);
@@ -860,7 +853,7 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self),
int iop, nop = 0, inest, nnest = 0;
PyArrayObject *op[NPY_MAXARGS];
- npy_uint32 flags = 0, flags_inner = 0;
+ npy_uint32 flags = 0, flags_inner;
NPY_ORDER order = NPY_KEEPORDER;
NPY_CASTING casting = NPY_SAFE_CASTING;
npy_uint32 op_flags[NPY_MAXARGS], op_flags_inner[NPY_MAXARGS];
@@ -1754,11 +1747,7 @@ static PyObject *npyiter_iterrange_get(NewNpyArrayIterObject *self)
static int npyiter_iterrange_set(NewNpyArrayIterObject *self, PyObject *value)
{
-#if PY_VERSION_HEX >= 0x02050000
npy_intp istart = 0, iend = 0;
-#else
- long istart = 0, iend = 0;
-#endif
if (value == NULL) {
PyErr_SetString(PyExc_AttributeError,
@@ -1771,11 +1760,7 @@ static int npyiter_iterrange_set(NewNpyArrayIterObject *self, PyObject *value)
return -1;
}
-#if PY_VERSION_HEX >= 0x02050000
if (!PyArg_ParseTuple(value, "nn", &istart, &iend)) {
-#else
- if (!PyArg_ParseTuple(value, "ll", &istart, &iend)) {
-#endif
return -1;
}
@@ -2320,16 +2305,30 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
}
static PyMethodDef npyiter_methods[] = {
- {"reset", (PyCFunction)npyiter_reset, METH_NOARGS, NULL},
- {"copy", (PyCFunction)npyiter_copy, METH_NOARGS, NULL},
- {"__copy__", (PyCFunction)npyiter_copy, METH_NOARGS, NULL},
- {"iternext", (PyCFunction)npyiter_iternext, METH_NOARGS, NULL},
- {"remove_axis", (PyCFunction)npyiter_remove_axis, METH_VARARGS, NULL},
- {"remove_multi_index", (PyCFunction)npyiter_remove_multi_index,
- METH_NOARGS, NULL},
- {"enable_external_loop", (PyCFunction)npyiter_enable_external_loop,
- METH_NOARGS, NULL},
- {"debug_print", (PyCFunction)npyiter_debug_print, METH_NOARGS, NULL},
+ {"reset",
+ (PyCFunction)npyiter_reset,
+ METH_NOARGS, NULL},
+ {"copy",
+ (PyCFunction)npyiter_copy,
+ METH_NOARGS, NULL},
+ {"__copy__",
+ (PyCFunction)npyiter_copy,
+ METH_NOARGS, NULL},
+ {"iternext",
+ (PyCFunction)npyiter_iternext,
+ METH_NOARGS, NULL},
+ {"remove_axis",
+ (PyCFunction)npyiter_remove_axis,
+ METH_VARARGS, NULL},
+ {"remove_multi_index",
+ (PyCFunction)npyiter_remove_multi_index,
+ METH_NOARGS, NULL},
+ {"enable_external_loop",
+ (PyCFunction)npyiter_enable_external_loop,
+ METH_NOARGS, NULL},
+ {"debug_print",
+ (PyCFunction)npyiter_debug_print,
+ METH_NOARGS, NULL},
{NULL, NULL, 0, NULL},
};
@@ -2398,7 +2397,6 @@ static PyGetSetDef npyiter_getsets[] = {
};
NPY_NO_EXPORT PySequenceMethods npyiter_as_sequence = {
-#if PY_VERSION_HEX >= 0x02050000
(lenfunc)npyiter_seq_length, /*sq_length*/
(binaryfunc)NULL, /*sq_concat*/
(ssizeargfunc)NULL, /*sq_repeat*/
@@ -2409,26 +2407,10 @@ NPY_NO_EXPORT PySequenceMethods npyiter_as_sequence = {
(objobjproc)NULL, /*sq_contains */
(binaryfunc)NULL, /*sq_inplace_concat */
(ssizeargfunc)NULL, /*sq_inplace_repeat */
-#else
- (inquiry)npyiter_seq_length, /*sq_length*/
- (binaryfunc)NULL, /*sq_concat is handled by nb_add*/
- (intargfunc)NULL, /*sq_repeat is handled nb_multiply*/
- (intargfunc)npyiter_seq_item, /*sq_item*/
- (intintargfunc)npyiter_seq_slice, /*sq_slice*/
- (intobjargproc)npyiter_seq_ass_item, /*sq_ass_item*/
- (intintobjargproc)npyiter_seq_ass_slice,/*sq_ass_slice*/
- (objobjproc)NULL, /*sq_contains */
- (binaryfunc)NULL, /*sg_inplace_concat */
- (intargfunc)NULL /*sg_inplace_repeat */
-#endif
};
NPY_NO_EXPORT PyMappingMethods npyiter_as_mapping = {
-#if PY_VERSION_HEX >= 0x02050000
(lenfunc)npyiter_seq_length, /*mp_length*/
-#else
- (inquiry)npyiter_seq_length, /*mp_length*/
-#endif
(binaryfunc)npyiter_subscript, /*mp_subscript*/
(objobjargproc)npyiter_ass_subscript, /*mp_ass_subscript*/
};
@@ -2490,7 +2472,5 @@ NPY_NO_EXPORT PyTypeObject NpyIter_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index c37a232f4..db62bd5fd 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -209,6 +209,26 @@ PyArray_GenericBinaryFunction(PyArrayObject *m1, PyObject *m2, PyObject *op)
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
}
+
+ if (!PyArray_Check(m2)) {
+ /*
+ * Catch priority inversion and punt, but only if it's guaranteed
+ * that we were called through m1 and the other guy is not an array
+ * at all. Note that some arrays need to pass through here even
+ * with priorities inverted, for example: float(17) * np.matrix(...)
+ *
+ * See also:
+ * - https://github.com/numpy/numpy/issues/3502
+ * - https://github.com/numpy/numpy/issues/3503
+ */
+ double m1_prio = PyArray_GetPriority(m1, NPY_SCALAR_PRIORITY);
+ double m2_prio = PyArray_GetPriority(m2, NPY_SCALAR_PRIORITY);
+ if (m1_prio < m2_prio) {
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ }
+
return PyObject_CallFunction(op, "OO", m1, m2);
}
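
The punt above relies on the standard Python binary-op protocol: returning Py_NotImplemented makes the interpreter retry the operation with the right operand's reflected method. The same decision as a standalone sketch (hypothetical helper, not the committed code):

    static PyObject *
    binop_defer_to_higher_priority(PyArrayObject *m1, PyObject *m2, PyObject *op)
    {
        if (!PyArray_Check(m2) &&
                PyArray_GetPriority((PyObject *)m1, NPY_SCALAR_PRIORITY) <
                PyArray_GetPriority(m2, NPY_SCALAR_PRIORITY)) {
            /* let Python fall back to m2's reflected method */
            Py_INCREF(Py_NotImplemented);
            return Py_NotImplemented;
        }
        return PyObject_CallFunction(op, "OO", m1, m2);
    }
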
@@ -320,7 +340,6 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
}
}
}
-#if (PY_VERSION_HEX >= 0x02050000)
if (PyIndex_Check(o2)) {
PyObject* value = PyNumber_Index(o2);
Py_ssize_t val;
@@ -338,7 +357,6 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
*out_exponent = (double) val;
return NPY_INTPOS_SCALAR;
}
-#endif
return NPY_NOSCALAR;
}
@@ -831,7 +849,6 @@ _array_copy_nice(PyArrayObject *self)
return PyArray_Return((PyArrayObject *) PyArray_Copy(self));
}
-#if PY_VERSION_HEX >= 0x02050000
static PyObject *
array_index(PyArrayObject *v)
{
@@ -840,9 +857,14 @@ array_index(PyArrayObject *v)
"one element can be converted to an index");
return NULL;
}
+ if (PyArray_NDIM(v) != 0) {
+ if (DEPRECATE("converting an array with ndim > 0 to an index"
+ " will result in an error in the future") < 0) {
+ return NULL;
+ }
+ }
return PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v);
}
-#endif
NPY_NO_EXPORT PyNumberMethods array_as_number = {
@@ -902,9 +924,5 @@ NPY_NO_EXPORT PyNumberMethods array_as_number = {
(binaryfunc)array_true_divide, /*nb_true_divide*/
(binaryfunc)array_inplace_floor_divide, /*nb_inplace_floor_divide*/
(binaryfunc)array_inplace_true_divide, /*nb_inplace_true_divide*/
-
-#if PY_VERSION_HEX >= 0x02050000
(unaryfunc)array_index, /* nb_index */
-#endif
-
};
diff --git a/numpy/core/src/multiarray/numpymemoryview.c b/numpy/core/src/multiarray/numpymemoryview.c
index 991321a8c..27c02e592 100644
--- a/numpy/core/src/multiarray/numpymemoryview.c
+++ b/numpy/core/src/multiarray/numpymemoryview.c
@@ -21,7 +21,7 @@
#include "numpymemoryview.h"
-#if (PY_VERSION_HEX >= 0x02060000) && (PY_VERSION_HEX < 0x02070000)
+#if PY_VERSION_HEX < 0x02070000
/*
* Memory allocation
@@ -241,9 +241,7 @@ NPY_NO_EXPORT PyTypeObject PyMemorySimpleView_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
diff --git a/numpy/core/src/multiarray/numpymemoryview.h b/numpy/core/src/multiarray/numpymemoryview.h
index 3a2661754..93a007106 100644
--- a/numpy/core/src/multiarray/numpymemoryview.h
+++ b/numpy/core/src/multiarray/numpymemoryview.h
@@ -5,7 +5,7 @@
* Memoryview is introduced to 2.x series only in 2.7, so for supporting 2.6,
* we need to have a minimal implementation here.
*/
-#if (PY_VERSION_HEX >= 0x02060000) && (PY_VERSION_HEX < 0x02070000)
+#if PY_VERSION_HEX < 0x02070000
typedef struct {
PyObject_HEAD
diff --git a/numpy/core/src/multiarray/numpyos.c b/numpy/core/src/multiarray/numpyos.c
index cef170108..23ba57e5d 100644
--- a/numpy/core/src/multiarray/numpyos.c
+++ b/numpy/core/src/multiarray/numpyos.c
@@ -19,12 +19,7 @@
* exponent.
*/
-/* We force 3 digits on windows for python < 2.6 for compatibility reason */
-#if defined(MS_WIN32) && (PY_VERSION_HEX < 0x02060000)
-#define MIN_EXPONENT_DIGITS 3
-#else
#define MIN_EXPONENT_DIGITS 2
-#endif
/*
* Ensure that any exponent, if present, is at least MIN_EXPONENT_DIGITS
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index cdb929699..9dfd3c4c8 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -295,17 +295,20 @@ PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode)
return (PyObject *)r;
}
+ /* Need to INCREF typecode because PyArray_NewFromDescr steals a
+ * reference below and we still need to access typecode afterwards. */
+ Py_INCREF(typecode);
r = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
typecode,
0, NULL,
NULL, NULL, 0, NULL);
if (r==NULL) {
- Py_XDECREF(outcode);
+ Py_DECREF(typecode); Py_XDECREF(outcode);
return NULL;
}
if (PyDataType_FLAGCHK(typecode, NPY_USE_SETITEM)) {
if (typecode->f->setitem(scalar, PyArray_DATA(r), r) < 0) {
- Py_XDECREF(outcode); Py_DECREF(r);
+ Py_DECREF(typecode); Py_XDECREF(outcode); Py_DECREF(r);
return NULL;
}
goto finish;
@@ -332,19 +335,20 @@ PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode)
finish:
if (outcode == NULL) {
+ Py_DECREF(typecode);
return (PyObject *)r;
}
if (PyArray_EquivTypes(outcode, typecode)) {
if (!PyTypeNum_ISEXTENDED(typecode->type_num)
|| (outcode->elsize == typecode->elsize)) {
- Py_DECREF(outcode);
+ Py_DECREF(typecode); Py_DECREF(outcode);
return (PyObject *)r;
}
}
/* cast if necessary to desired output typecode */
ret = PyArray_CastToType((PyArrayObject *)r, outcode, 0);
- Py_DECREF(r);
+ Py_DECREF(typecode); Py_DECREF(r);
return ret;
}
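
All of the typecode edits above follow one refcount rule: PyArray_NewFromDescr steals a reference to the descriptor it is given, so a caller that still needs the descriptor afterwards must hold an extra reference and release it on every exit path. The pattern in isolation (a sketch with hypothetical names):

    Py_INCREF(descr);                 /* extra ref; the call below steals one */
    arr = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, descr,
                                                0, NULL, NULL, NULL, 0, NULL);
    if (arr == NULL) {
        Py_DECREF(descr);             /* error path: balance the extra ref */
        return NULL;
    }
    /* ... descr remains safe to use here ... */
    Py_DECREF(descr);                 /* drop our remaining reference when done */
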
@@ -502,7 +506,7 @@ PyArray_FieldNames(PyObject *fields)
if (_numpy_internal == NULL) {
return NULL;
}
- tup = PyObject_CallMethod(_numpy_internal, "_makenames_list", "O", fields);
+ tup = PyObject_CallMethod(_numpy_internal, "_makenames_list", "OO", fields, Py_False);
Py_DECREF(_numpy_internal);
if (tup == NULL) {
return NULL;
@@ -561,16 +565,15 @@ PyArray_DescrFromScalar(PyObject *sc)
#endif
}
else {
- descr->elsize = Py_SIZE((PyVoidScalarObject *)sc);
- descr->fields = PyObject_GetAttrString(sc, "fields");
- if (!descr->fields
- || !PyDict_Check(descr->fields)
- || (descr->fields == Py_None)) {
- Py_XDECREF(descr->fields);
- descr->fields = NULL;
- }
- if (descr->fields) {
- descr->names = PyArray_FieldNames(descr->fields);
+ PyArray_Descr *dtype;
+ dtype = (PyArray_Descr *)PyObject_GetAttrString(sc, "dtype");
+ if (dtype != NULL) {
+ descr->elsize = dtype->elsize;
+ descr->fields = dtype->fields;
+ Py_XINCREF(dtype->fields);
+ descr->names = dtype->names;
+ Py_XINCREF(dtype->names);
+ Py_DECREF(dtype);
}
PyErr_Clear();
}
@@ -799,6 +802,15 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
return PyErr_NoMemory();
}
vobj->obval = destptr;
+
+ /*
+     * No base available for copyswap and no swap required.
+ * Copy data directly into dest.
+ */
+ if (base == NULL) {
+ memcpy(destptr, data, itemsize);
+ return obj;
+ }
}
}
else {
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 8f39b57c4..f9b35deed 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -101,9 +101,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
/**end repeat**/
@@ -364,7 +362,6 @@ gentype_repr(PyObject *self)
return ret;
}
-#if PY_VERSION_HEX >= 0x02060000
/*
* The __format__ method for PEP 3101.
*/
@@ -435,7 +432,6 @@ gentype_format(PyObject *self, PyObject *args)
Py_DECREF(obj);
return ret;
}
-#endif
#ifdef FORCE_NO_LONG_DOUBLE_FORMATTING
#undef NPY_LONGDOUBLE_FMT
@@ -468,7 +464,7 @@ format_@name@(char *buf, size_t buflen, @type@ val, unsigned int prec)
/* If nothing but digits after sign, append ".0" */
cnt = strlen(buf);
- for (i = (val < 0) ? 1 : 0; i < cnt; ++i) {
+ for (i = (buf[0] == '-') ? 1 : 0; i < cnt; ++i) {
if (!isdigit(Py_CHARMASK(buf[i]))) {
break;
}
@@ -517,11 +513,9 @@ format_@name@(char *buf, size_t buflen, @type@ val, unsigned int prec)
fprintf(stderr, "Error while formatting\n");
return;
}
-#if PY_VERSION_HEX >= 0x02060000
if (!npy_isfinite(val.imag)) {
strncat(buf, "*", 1);
}
-#endif
strncat(buf, "j", 1);
}
else {
@@ -570,11 +564,9 @@ format_@name@(char *buf, size_t buflen, @type@ val, unsigned int prec)
else {
strcpy(im, "-inf");
}
- #if PY_VERSION_HEX >= 0x02060000
if (!npy_isfinite(val.imag)) {
strncat(im, "*", 1);
}
- #endif
}
PyOS_snprintf(buf, buflen, "(%s%sj)", re, im);
}
@@ -1059,9 +1051,7 @@ static PyNumberMethods gentype_as_number = {
(binaryfunc)gentype_true_divide, /*nb_true_divide*/
0, /*nb_inplace_floor_divide*/
0, /*nb_inplace_true_divide*/
-#if PY_VERSION_HEX >= 0x02050000
(unaryfunc)NULL, /*nb_index*/
-#endif
};
@@ -2014,13 +2004,11 @@ static PyMethodDef gentype_methods[] = {
(PyCFunction)gentype_round,
METH_VARARGS | METH_KEYWORDS, NULL},
#endif
-#if PY_VERSION_HEX >= 0x02060000
/* For the format function */
{"__format__",
gentype_format,
METH_VARARGS,
"NumPy array scalar formatter"},
-#endif
{"setflags",
(PyCFunction)gentype_setflags,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -2232,32 +2220,19 @@ fail:
}
static PyMappingMethods voidtype_as_mapping = {
-#if PY_VERSION_HEX >= 0x02050000
(lenfunc)voidtype_length, /*mp_length*/
-#else
- (inquiry)voidtype_length, /*mp_length*/
-#endif
(binaryfunc)voidtype_subscript, /*mp_subscript*/
(objobjargproc)voidtype_ass_subscript, /*mp_ass_subscript*/
};
static PySequenceMethods voidtype_as_sequence = {
-#if PY_VERSION_HEX >= 0x02050000
(lenfunc)voidtype_length, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
(ssizeargfunc)voidtype_item, /*sq_item*/
0, /*sq_slice*/
(ssizeobjargproc)voidtype_ass_item, /*sq_ass_item*/
-#else
- (inquiry)voidtype_length, /*sq_length*/
- 0, /*sq_concat*/
- 0, /*sq_repeat*/
- (intargfunc)voidtype_item, /*sq_item*/
- 0, /*sq_slice*/
- (intobjargproc)voidtype_ass_item, /*sq_ass_item*/
-#endif
0, /* sq_ass_slice */
0, /* sq_contains */
0, /* sq_inplace_concat */
@@ -2325,7 +2300,6 @@ gentype_getcharbuf(PyObject *self, Py_ssize_t segment, constchar **ptrptr)
}
#endif /* !defined(NPY_PY3K) */
-#if PY_VERSION_HEX >= 0x02060000
static int
gentype_getbuffer(PyObject *self, Py_buffer *view, int flags)
@@ -2341,7 +2315,6 @@ gentype_getbuffer(PyObject *self, Py_buffer *view, int flags)
/* releasebuffer is not needed */
-#endif
static PyBufferProcs gentype_as_buffer = {
#if !defined(NPY_PY3K)
@@ -2350,10 +2323,8 @@ static PyBufferProcs gentype_as_buffer = {
gentype_getsegcount, /* bf_getsegcount*/
gentype_getcharbuf, /* bf_getcharbuffer*/
#endif
-#if PY_VERSION_HEX >= 0x02060000
gentype_getbuffer, /* bf_getbuffer */
NULL, /* bf_releasebuffer */
-#endif
};
@@ -2422,9 +2393,7 @@ NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
static void
@@ -2741,7 +2710,6 @@ bool_arrtype_nonzero(PyObject *a)
return a == PyArrayScalar_True;
}
-#if PY_VERSION_HEX >= 0x02050000
/**begin repeat
* #name = byte, short, int, long, ubyte, ushort, longlong, uint,
* ulong, ulonglong#
@@ -2763,7 +2731,6 @@ bool_index(PyObject *a)
{
return PyInt_FromLong(PyArrayScalar_VAL(a, Bool));
}
-#endif
/* Arithmetic methods -- only so we can override &, |, ^. */
NPY_NO_EXPORT PyNumberMethods bool_arrtype_as_number = {
@@ -2825,9 +2792,7 @@ NPY_NO_EXPORT PyNumberMethods bool_arrtype_as_number = {
0, /* nb_inplace_floor_divide */
0, /* nb_inplace_true_divide */
/* Added in release 2.5 */
-#if PY_VERSION_HEX >= 0x02050000
0, /* nb_index */
-#endif
};
static PyObject *
@@ -2939,7 +2904,7 @@ int_arrtype_hash(PyObject *obj)
* #ext = && (x >= LONG_MIN),#
*/
#if NPY_SIZEOF_LONG != NPY_SIZEOF_LONGLONG
-/* we assume SIZEOF_LONGLONG=2*SIZEOF_LONG */
+/* we assume NPY_SIZEOF_LONGLONG=2*NPY_SIZEOF_LONG */
static long
@char@longlong_arrtype_hash(PyObject *obj)
{
@@ -3182,7 +3147,6 @@ object_arrtype_inplace_repeat(PyObjectScalarObject *self, Py_ssize_t count)
}
static PySequenceMethods object_arrtype_as_sequence = {
-#if PY_VERSION_HEX >= 0x02050000
(lenfunc)object_arrtype_length, /*sq_length*/
(binaryfunc)object_arrtype_concat, /*sq_concat*/
(ssizeargfunc)object_arrtype_repeat, /*sq_repeat*/
@@ -3193,30 +3157,12 @@ static PySequenceMethods object_arrtype_as_sequence = {
(objobjproc)object_arrtype_contains, /* sq_contains */
(binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */
(ssizeargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */
-#else
- (inquiry)object_arrtype_length, /*sq_length*/
- (binaryfunc)object_arrtype_concat, /*sq_concat*/
- (intargfunc)object_arrtype_repeat, /*sq_repeat*/
- 0, /*sq_item*/
- 0, /*sq_slice*/
- 0, /* sq_ass_item */
- 0, /* sq_ass_slice */
- (objobjproc)object_arrtype_contains, /* sq_contains */
- (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */
- (intargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */
-#endif
};
static PyMappingMethods object_arrtype_as_mapping = {
-#if PY_VERSION_HEX >= 0x02050000
(lenfunc)object_arrtype_length,
(binaryfunc)object_arrtype_subscript,
(objobjargproc)object_arrtype_ass_subscript,
-#else
- (inquiry)object_arrtype_length,
- (binaryfunc)object_arrtype_subscript,
- (objobjargproc)object_arrtype_ass_subscript,
-#endif
};
#if !defined(NPY_PY3K)
@@ -3285,7 +3231,6 @@ object_arrtype_getcharbuf(PyObjectScalarObject *self, Py_ssize_t segment,
}
#endif
-#if PY_VERSION_HEX >= 0x02060000
static int
object_arrtype_getbuffer(PyObjectScalarObject *self, Py_buffer *view, int flags)
{
@@ -3311,26 +3256,16 @@ object_arrtype_releasebuffer(PyObjectScalarObject *self, Py_buffer *view)
(*pb->bf_releasebuffer)(self->obval, view);
}
}
-#endif
static PyBufferProcs object_arrtype_as_buffer = {
#if !defined(NPY_PY3K)
-#if PY_VERSION_HEX >= 0x02050000
(readbufferproc)object_arrtype_getreadbuf,
(writebufferproc)object_arrtype_getwritebuf,
(segcountproc)object_arrtype_getsegcount,
(charbufferproc)object_arrtype_getcharbuf,
-#else
- (getreadbufferproc)object_arrtype_getreadbuf,
- (getwritebufferproc)object_arrtype_getwritebuf,
- (getsegcountproc)object_arrtype_getsegcount,
- (getcharbufferproc)object_arrtype_getcharbuf,
-#endif
#endif
-#if PY_VERSION_HEX >= 0x02060000
(getbufferproc)object_arrtype_getbuffer,
(releasebufferproc)object_arrtype_releasebuffer,
-#endif
};
static PyObject *
@@ -3395,9 +3330,7 @@ NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
static PyObject *
@@ -3513,9 +3446,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
/**end repeat**/
@@ -3604,9 +3535,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
#undef _THIS_SIZE
@@ -3706,9 +3635,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
#undef _THIS_SIZE1
#undef _THIS_SIZE2
@@ -4066,7 +3993,6 @@ initialize_numeric_types(void)
PyGenericArrType_Type.tp_richcompare = gentype_richcompare;
PyBoolArrType_Type.tp_as_number = &bool_arrtype_as_number;
-#if PY_VERSION_HEX >= 0x02050000
/*
* need to add dummy versions with filled-in nb_index
* in-order for PyType_Ready to fill in .__index__() method
@@ -4083,7 +4009,6 @@ initialize_numeric_types(void)
/**end repeat**/
PyBoolArrType_Type.tp_as_number->nb_index = (unaryfunc)bool_index;
-#endif
PyStringArrType_Type.tp_alloc = NULL;
PyStringArrType_Type.tp_free = NULL;
diff --git a/numpy/core/src/multiarray/sequence.c b/numpy/core/src/multiarray/sequence.c
index e5f74251e..310c01084 100644
--- a/numpy/core/src/multiarray/sequence.c
+++ b/numpy/core/src/multiarray/sequence.c
@@ -130,29 +130,16 @@ array_contains(PyArrayObject *self, PyObject *el)
}
NPY_NO_EXPORT PySequenceMethods array_as_sequence = {
-#if PY_VERSION_HEX >= 0x02050000
(lenfunc)array_length, /*sq_length*/
(binaryfunc)NULL, /*sq_concat is handled by nb_add*/
(ssizeargfunc)NULL,
- (ssizeargfunc)array_item_nice,
+ (ssizeargfunc)array_item,
(ssizessizeargfunc)array_slice,
(ssizeobjargproc)array_ass_item, /*sq_ass_item*/
(ssizessizeobjargproc)array_ass_slice, /*sq_ass_slice*/
(objobjproc) array_contains, /*sq_contains */
(binaryfunc) NULL, /*sq_inplace_concat */
(ssizeargfunc)NULL,
-#else
- (inquiry)array_length, /*sq_length*/
- (binaryfunc)NULL, /*sq_concat is handled by nb_add*/
- (intargfunc)NULL, /*sq_repeat is handled nb_multiply*/
- (intargfunc)array_item_nice, /*sq_item*/
- (intintargfunc)array_slice, /*sq_slice*/
- (intobjargproc)array_ass_item, /*sq_ass_item*/
- (intintobjargproc)array_ass_slice, /*sq_ass_slice*/
- (objobjproc) array_contains, /*sq_contains */
- (binaryfunc) NULL, /*sg_inplace_concat */
- (intargfunc) NULL /*sg_inplace_repeat */
-#endif
};
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 482166598..67ee4b04b 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -120,7 +120,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_ITEM_REFCOUNT)) {
PyObject *zero = PyInt_FromLong(0);
char *optr;
- optr = PyArray_DATA(self) + oldsize*elsize;
+ optr = PyArray_BYTES(self) + oldsize*elsize;
n = newsize - oldsize;
for (k = 0; k < n; k++) {
_putzero((char *)optr, zero, PyArray_DESCR(self));
@@ -129,7 +129,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
Py_DECREF(zero);
}
else{
- memset(PyArray_DATA(self)+oldsize*elsize, 0, (newsize-oldsize)*elsize);
+ memset(PyArray_BYTES(self)+oldsize*elsize, 0, (newsize-oldsize)*elsize);
}
}
@@ -214,9 +214,11 @@ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims,
* in order to get the right orientation and
* because we can't just re-use the buffer with the
* data in the order it is in.
+ * NPY_RELAXED_STRIDES_CHECKING: size check is unnecessary when set.
*/
- if ((order == NPY_CORDER && !PyArray_IS_C_CONTIGUOUS(self)) ||
- (order == NPY_FORTRANORDER && !PyArray_IS_F_CONTIGUOUS(self))) {
+ if ((PyArray_SIZE(self) > 1) &&
+ ((order == NPY_CORDER && !PyArray_IS_C_CONTIGUOUS(self)) ||
+ (order == NPY_FORTRANORDER && !PyArray_IS_F_CONTIGUOUS(self)))) {
int success = 0;
success = _attempt_nocopy_reshape(self, ndim, dimensions,
newstrides, order);
@@ -355,10 +357,14 @@ _attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims,
int oldnd;
npy_intp olddims[NPY_MAXDIMS];
npy_intp oldstrides[NPY_MAXDIMS];
+ npy_intp np, op, last_stride;
int oi, oj, ok, ni, nj, nk;
- int np, op;
oldnd = 0;
+ /*
+ * Remove axes with dimension 1 from the old array. They have no effect
+ * but would need special cases since their strides do not matter.
+ */
for (oi = 0; oi < PyArray_NDIM(self); oi++) {
if (PyArray_DIMS(self)[oi]!= 1) {
olddims[oldnd] = PyArray_DIMS(self)[oi];
@@ -390,27 +396,31 @@ _attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims,
/* different total sizes; no hope */
return 0;
}
- /* the current code does not handle 0-sized arrays, so give up */
+
if (np == 0) {
+ /* the current code does not handle 0-sized arrays, so give up */
return 0;
}
+ /* oi to oj and ni to nj give the axis ranges currently worked with */
oi = 0;
oj = 1;
ni = 0;
nj = 1;
- while(ni < newnd && oi < oldnd) {
+ while (ni < newnd && oi < oldnd) {
np = newdims[ni];
op = olddims[oi];
while (np != op) {
if (np < op) {
+ /* Misses trailing 1s, these are handled later */
np *= newdims[nj++];
} else {
op *= olddims[oj++];
}
}
+ /* Check whether the original axes can be combined */
for (ok = oi; ok < oj - 1; ok++) {
if (is_f_order) {
if (oldstrides[ok+1] != olddims[ok]*oldstrides[ok]) {
@@ -427,6 +437,7 @@ _attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims,
}
}
+ /* Calculate new strides for all axes currently worked with */
if (is_f_order) {
newstrides[ni] = oldstrides[oi];
for (nk = ni + 1; nk < nj; nk++) {
@@ -445,6 +456,22 @@ _attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims,
}
/*
+ * Set strides corresponding to trailing 1s of the new shape.
+ */
+ if (ni >= 1) {
+ last_stride = newstrides[ni - 1];
+ }
+ else {
+ last_stride = PyArray_ITEMSIZE(self);
+ }
+ if (is_f_order) {
+ last_stride *= newdims[ni - 1];
+ }
+ for (nk = ni; nk < newnd; nk++) {
+ newstrides[nk] = last_stride;
+ }
+
+ /*
fprintf(stderr, "success: _attempt_nocopy_reshape (");
for (oi=0; oi<oldnd; oi++)
fprintf(stderr, "(%d,%d), ", olddims[oi], oldstrides[oi]);
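
For reference, the merge condition the loop above tests, written out as a standalone sketch (hypothetical helper; this is the C-order counterpart of the F-order check shown in the hunk):

    /* Axes k and k+1 of a C-order array can be combined without copying
     * exactly when strides[k] == dims[k+1] * strides[k+1]. For example, a
     * contiguous (2,3,4) double array has strides (96,32,8), so axes 0 and 1
     * merge into a single axis of length 6 with stride 32. */
    static int
    can_merge_c_axes(const npy_intp *dims, const npy_intp *strides, int k)
    {
        return strides[k] == dims[k + 1] * strides[k + 1];
    }
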
@@ -1077,7 +1104,9 @@ build_shape_string(npy_intp n, npy_intp *vals)
* the array will point to invalid memory. The caller must
* validate this!
 * If an axis flagged for removal has a shape larger than one,
- * the arrays contiguous flags may require updating.
+ * the aligned flag (and in the future the contiguous flags)
+ * may need an explicit update
+ * (see also NPY_RELAXED_STRIDES_CHECKING).
*
* For example, this can be used to remove the reduction axes
* from a reduction result once its computation is complete.
@@ -1100,4 +1129,7 @@ PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags)
/* The final number of dimensions */
fa->nd = idim_out;
+
+ /* May not be necessary for NPY_RELAXED_STRIDES_CHECKING (see comment) */
+ PyArray_UpdateFlags(arr, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS);
}
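
A usage sketch for the internal helper this hunk hardens (not from the patch; assumes arr is a freshly computed (3,1,4) reduction result):

    npy_bool flags[NPY_MAXDIMS] = {0};
    flags[1] = NPY_TRUE;                  /* drop the size-1 reduction axis */
    PyArray_RemoveAxesInPlace(arr, flags);
    /* arr is now (3,4); the PyArray_UpdateFlags call added above keeps the
     * contiguity flags consistent afterwards */
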
diff --git a/numpy/core/src/multiarray/testcalcs.py b/numpy/core/src/multiarray/testcalcs.py
index 8cb440820..9182ae2c3 100644
--- a/numpy/core/src/multiarray/testcalcs.py
+++ b/numpy/core/src/multiarray/testcalcs.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from scipy import weave
class YMD(object):
diff --git a/numpy/core/src/multiarray/ucsnarrow.c b/numpy/core/src/multiarray/ucsnarrow.c
index b0afdc6d9..54a9d5671 100644
--- a/numpy/core/src/multiarray/ucsnarrow.c
+++ b/numpy/core/src/multiarray/ucsnarrow.c
@@ -13,6 +13,7 @@
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "ctors.h"
/*
* Functions only needed on narrow builds of Python for converting back and
diff --git a/numpy/core/src/npymath/_signbit.c b/numpy/core/src/npymath/_signbit.c
index a2ad38162..12af55387 100644
--- a/numpy/core/src/npymath/_signbit.c
+++ b/numpy/core/src/npymath/_signbit.c
@@ -12,7 +12,7 @@ _npy_signbit_d(double x)
u.d = x;
-#if SIZEOF_INT == 4
+#if NPY_SIZEOF_INT == 4
#ifdef WORDS_BIGENDIAN /* defined in pyconfig.h */
return u.i[0] < 0;
@@ -20,7 +20,7 @@ _npy_signbit_d(double x)
return u.i[1] < 0;
#endif
-#else /* SIZEOF_INT != 4 */
+#else /* NPY_SIZEOF_INT != 4 */
#ifdef WORDS_BIGENDIAN
return u.s[0] < 0;
@@ -28,5 +28,5 @@ _npy_signbit_d(double x)
return u.s[3] < 0;
#endif
-#endif /* SIZEOF_INT */
+#endif /* NPY_SIZEOF_INT */
}
diff --git a/numpy/core/src/npymath/halffloat.c b/numpy/core/src/npymath/halffloat.c
index 3f4aa9782..883830795 100644
--- a/numpy/core/src/npymath/halffloat.c
+++ b/numpy/core/src/npymath/halffloat.c
@@ -137,7 +137,7 @@ npy_half npy_half_nextafter(npy_half x, npy_half y)
ret = x+1;
}
}
-#ifdef NPY_HALF_GENERATE_OVERFLOW
+#if NPY_HALF_GENERATE_OVERFLOW
if (npy_half_isinf(ret)) {
npy_set_floatstatus_overflow();
}
diff --git a/numpy/core/src/npymath/npy_math_private.h b/numpy/core/src/npymath/npy_math_private.h
index 722d03f94..2bca6bf60 100644
--- a/numpy/core/src/npymath/npy_math_private.h
+++ b/numpy/core/src/npymath/npy_math_private.h
@@ -185,7 +185,6 @@ do { \
*/
typedef npy_uint32 IEEEl2bitsrep_part;
-/* my machine */
union IEEEl2bitsrep {
npy_longdouble e;
@@ -250,6 +249,44 @@ do { \
typedef npy_uint32 ldouble_man_t;
typedef npy_uint32 ldouble_exp_t;
typedef npy_uint32 ldouble_sign_t;
+#elif defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE)
+ /*
+ * Motorola extended 80 bits precision. Bit representation is
+ * | s |eeeeeeeeeeeeeee| junk |mmmmmmmm................mmmmmmm|
+ * | 1 bit | 15 bits | 16 bits| 64 bits |
+ * | a[0] | a[1] | a[2] |
+ *
+ * 16 low bits of a[0] are junk
+ */
+ typedef npy_uint32 IEEEl2bitsrep_part;
+
+
+ union IEEEl2bitsrep {
+ npy_longdouble e;
+ IEEEl2bitsrep_part a[3];
+ };
+
+ #define LDBL_MANL_INDEX 2
+ #define LDBL_MANL_MASK 0xFFFFFFFF
+ #define LDBL_MANL_SHIFT 0
+
+ #define LDBL_MANH_INDEX 1
+ #define LDBL_MANH_MASK 0xFFFFFFFF
+ #define LDBL_MANH_SHIFT 0
+
+ #define LDBL_EXP_INDEX 0
+ #define LDBL_EXP_MASK 0x7FFF0000
+ #define LDBL_EXP_SHIFT 16
+
+ #define LDBL_SIGN_INDEX 0
+ #define LDBL_SIGN_MASK 0x80000000
+ #define LDBL_SIGN_SHIFT 31
+
+ #define LDBL_NBIT 0x80000000
+
+ typedef npy_uint32 ldouble_man_t;
+ typedef npy_uint32 ldouble_exp_t;
+ typedef npy_uint32 ldouble_sign_t;
#elif defined(HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_BE) || \
defined(HAVE_LDOUBLE_IEEE_DOUBLE_BE)
/* 64 bits IEEE double precision aligned on 16 bytes: used by ppc arch on
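
A sketch of how the masks and shifts defined for the Motorola layout decompose a value (fragment, not from the patch; x is an npy_longdouble):

    union IEEEl2bitsrep u;
    u.e = x;
    /* sign bit, 15-bit exponent and high mantissa word, per the table above */
    ldouble_sign_t sign = (u.a[LDBL_SIGN_INDEX] & LDBL_SIGN_MASK) >> LDBL_SIGN_SHIFT;
    ldouble_exp_t  exp  = (u.a[LDBL_EXP_INDEX]  & LDBL_EXP_MASK)  >> LDBL_EXP_SHIFT;
    ldouble_man_t  manh = (u.a[LDBL_MANH_INDEX] & LDBL_MANH_MASK) >> LDBL_MANH_SHIFT;
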
diff --git a/numpy/core/src/npysort/mergesort.c.src b/numpy/core/src/npysort/mergesort.c.src
index 1003b95d1..55dcab4fb 100644
--- a/numpy/core/src/npysort/mergesort.c.src
+++ b/numpy/core/src/npysort/mergesort.c.src
@@ -112,7 +112,6 @@ mergesort_@suff@(@type@ *start, npy_intp num, void *NOT_USED)
pr = pl + num;
pw = (@type@ *) malloc((num/2) * sizeof(@type@));
if (pw == NULL) {
- PyErr_NoMemory();
return -NPY_ENOMEM;
}
mergesort0_@suff@(pl, pr, pw);
@@ -176,7 +175,6 @@ amergesort_@suff@(@type@ *v, npy_intp *tosort, npy_intp num, void *NOT_USED)
pr = pl + num;
pw = (npy_intp *) malloc((num/2) * sizeof(npy_intp));
if (pw == NULL) {
- PyErr_NoMemory();
return -NPY_ENOMEM;
}
amergesort0_@suff@(pl, pr, v, pw);
@@ -259,13 +257,11 @@ mergesort_@suff@(@type@ *start, npy_intp num, PyArrayObject *arr)
pr = pl + num*len;
pw = (@type@ *) malloc((num/2) * elsize);
if (pw == NULL) {
- PyErr_NoMemory();
err = -NPY_ENOMEM;
goto fail_0;
}
vp = (@type@ *) malloc(elsize);
if (vp == NULL) {
- PyErr_NoMemory();
err = -NPY_ENOMEM;
goto fail_1;
}
@@ -335,7 +331,6 @@ amergesort_@suff@(@type@ *v, npy_intp *tosort, npy_intp num, PyArrayObject *arr)
pr = pl + num;
pw = (npy_intp *) malloc((num/2) * sizeof(npy_intp));
if (pw == NULL) {
- PyErr_NoMemory();
return -NPY_ENOMEM;
}
amergesort0_@suff@(pl, pr, v, pw, len);
diff --git a/numpy/core/src/npysort/selection.c.src b/numpy/core/src/npysort/selection.c.src
new file mode 100644
index 000000000..a41aa87f1
--- /dev/null
+++ b/numpy/core/src/npysort/selection.c.src
@@ -0,0 +1,459 @@
+/* -*- c -*- */
+
+/*
+ *
+ * The code is loosely based on the quickselect from
+ * Nicolas Devillard - 1998 public domain
+ * http://ndevilla.free.fr/median/median/
+ *
+ * Quick select with median of 3 pivot is usually the fastest,
+ * but the worst case scenario can be quadratic complexity,
+ * e.g. np.roll(np.arange(x), x / 2)
+ * To avoid this, if it recurses too much, it falls back to the
+ * worst-case linear median-of-medians-of-groups-of-5 pivot strategy.
+ */
+
+
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "npy_sort.h"
+#include "npysort_common.h"
+#include "numpy/npy_math.h"
+#include "npy_partition.h"
+#include <stdlib.h>
+
+#define NOT_USED NPY_UNUSED(unused)
+
+
+/*
+ *****************************************************************************
+ ** NUMERIC SORTS **
+ *****************************************************************************
+ */
+
+
+static NPY_INLINE void store_pivot(npy_intp pivot, npy_intp kth,
+ npy_intp * pivots, npy_intp * npiv)
+{
+ if (pivots == NULL) {
+ return;
+ }
+
+ /*
+     * If pivot is the requested kth, store it, overwriting other pivots if
+     * required. This must be done so the iterative partition can work without
+     * manually shifting the lower data offset by kth each time.
+ */
+ if (pivot == kth && *npiv == NPY_MAX_PIVOT_STACK) {
+ pivots[*npiv - 1] = pivot;
+ }
+ /*
+     * we only need pivots larger than the current kth; lower pivots are not
+     * useful, as partitions on a smaller kth would reorder the stored pivots
+ */
+ else if (pivot >= kth && *npiv < NPY_MAX_PIVOT_STACK) {
+ pivots[*npiv] = pivot;
+ (*npiv) += 1;
+ }
+}
+
+/**begin repeat
+ *
+ * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE#
+ * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong, half, float, double, longdouble,
+ * cfloat, cdouble, clongdouble#
+ * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int,
+ * npy_uint, npy_long, npy_ulong, npy_longlong, npy_ulonglong,
+ * npy_ushort, npy_float, npy_double, npy_longdouble, npy_cfloat,
+ * npy_cdouble, npy_clongdouble#
+ */
+
+static npy_intp
+amedian_of_median5_@suff@(@type@ *v, npy_intp* tosort, const npy_intp num,
+ npy_intp * pivots,
+ npy_intp * npiv);
+
+static npy_intp
+median_of_median5_@suff@(@type@ *v, const npy_intp num,
+ npy_intp * pivots,
+ npy_intp * npiv);
+
+/**begin repeat1
+ * #name = , a#
+ * #idx = , tosort#
+ * #arg = 0, 1#
+ */
+#if @arg@
+/* helper macros to avoid duplication of direct/indirect selection */
+#define IDX(x) tosort[x]
+#define SORTEE(x) tosort[x]
+#define SWAP INTP_SWAP
+#define MEDIAN3_SWAP(v, tosort, low, mid, high) \
+ amedian3_swap_@suff@(v, tosort, low, mid, high)
+#define MEDIAN5(v, tosort, subleft) \
+ amedian5_@suff@(v, tosort + subleft)
+#define UNGUARDED_PARTITION(v, tosort, pivot, ll, hh) \
+ aunguarded_partition_@suff@(v, tosort, pivot, ll, hh)
+#define INTROSELECT(v, tosort, num, kth, pivots, npiv) \
+ aintroselect_@suff@(v, tosort, nmed, nmed / 2, pivots, npiv, NULL)
+#define DUMBSELECT(v, tosort, left, num, kth) \
+ adumb_select_@suff@(v, tosort + left, num, kth)
+#else
+#define IDX(x) (x)
+#define SORTEE(x) v[x]
+#define SWAP @TYPE@_SWAP
+#define MEDIAN3_SWAP(v, tosort, low, mid, high) \
+ median3_swap_@suff@(v, low, mid, high)
+#define MEDIAN5(v, tosort, subleft) \
+ median5_@suff@(v + subleft)
+#define UNGUARDED_PARTITION(v, tosort, pivot, ll, hh) \
+ unguarded_partition_@suff@(v, pivot, ll, hh)
+#define INTROSELECT(v, tosort, num, kth, pivots, npiv) \
+ introselect_@suff@(v, nmed, nmed / 2, pivots, npiv, NULL)
+#define DUMBSELECT(v, tosort, left, num, kth) \
+ dumb_select_@suff@(v + left, num, kth)
+#endif
+
+
+/*
+ * median of 3 pivot strategy
+ * gets min and median and moves median to low and min to low + 1
+ * for efficient partitioning, see unguarded_partition
+ */
+static NPY_INLINE void
+@name@median3_swap_@suff@(@type@ * v,
+#if @arg@
+ npy_intp * tosort,
+#endif
+ npy_intp low, npy_intp mid, npy_intp high)
+{
+ if (@TYPE@_LT(v[IDX(high)], v[IDX(mid)]))
+ SWAP(SORTEE(high), SORTEE(mid));
+ if (@TYPE@_LT(v[IDX(high)], v[IDX(low)]))
+ SWAP(SORTEE(high), SORTEE(low));
+ /* move pivot to low */
+ if (@TYPE@_LT(v[IDX(low)], v[IDX(mid)]))
+ SWAP(SORTEE(low), SORTEE(mid));
+ /* move 3-lowest element to low + 1 */
+ SWAP(SORTEE(mid), SORTEE(low + 1));
+}
+
+
+/* select index of median of five elements */
+static npy_intp @name@median5_@suff@(
+#if @arg@
+ const @type@ * v, npy_intp * tosort
+#else
+ @type@ * v
+#endif
+ )
+{
+ /* could be optimized as we only need the index (no swaps) */
+ if (@TYPE@_LT(v[IDX(1)], v[IDX(0)])) {
+ SWAP(SORTEE(1), SORTEE(0));
+ }
+ if (@TYPE@_LT(v[IDX(4)], v[IDX(3)])) {
+ SWAP(SORTEE(4), SORTEE(3));
+ }
+ if (@TYPE@_LT(v[IDX(3)], v[IDX(0)])) {
+ SWAP(SORTEE(3), SORTEE(0));
+ }
+ if (@TYPE@_LT(v[IDX(4)], v[IDX(1)])) {
+ SWAP(SORTEE(4), SORTEE(1));
+ }
+ if (@TYPE@_LT(v[IDX(2)], v[IDX(1)])) {
+ SWAP(SORTEE(2), SORTEE(1));
+ }
+ if (@TYPE@_LT(v[IDX(3)], v[IDX(2)])) {
+ if (@TYPE@_LT(v[IDX(3)], v[IDX(1)])) {
+ return 1;
+ }
+ else {
+ return 3;
+ }
+ }
+ else {
+ if (@TYPE@_LT(v[IDX(2)], v[IDX(1)])) {
+ return 1;
+ }
+ else {
+ return 2;
+ }
+ }
+}
+
+
+/*
+ * partition and return the index where the pivot belongs
+ * the data must have the following property to avoid bound checks:
+ * ll ... hh
+ * lower-than-pivot [x x x x] larger-than-pivot
+ */
+static NPY_INLINE void
+@name@unguarded_partition_@suff@(@type@ * v,
+#if @arg@
+ npy_intp * tosort,
+#endif
+ const @type@ pivot,
+ npy_intp * ll, npy_intp * hh)
+{
+ for (;;) {
+ do (*ll)++; while (@TYPE@_LT(v[IDX(*ll)], pivot));
+ do (*hh)--; while (@TYPE@_LT(pivot, v[IDX(*hh)]));
+
+ if (*hh < *ll)
+ break;
+
+ SWAP(SORTEE(*ll), SORTEE(*hh));
+ }
+}
+
+
+/*
+ * select the median of medians of blocks of 5
+ * if used as the partition pivot it splits the range into at least 30%/70%,
+ * allowing linear-time worst-case quickselect
+ */
+static npy_intp
+@name@median_of_median5_@suff@(@type@ *v,
+#if @arg@
+ npy_intp* tosort,
+#endif
+ const npy_intp num,
+ npy_intp * pivots,
+ npy_intp * npiv)
+{
+ npy_intp i, subleft;
+ npy_intp right = num - 1;
+ npy_intp nmed = (right + 1) / 5;
+ for (i = 0, subleft = 0; i < nmed; i++, subleft += 5) {
+ npy_intp m = MEDIAN5(v, tosort, subleft);
+ SWAP(SORTEE(subleft + m), SORTEE(i));
+ }
+
+ if (nmed > 2)
+ INTROSELECT(v, tosort, nmed, nmed / 2, pivots, npiv);
+ return nmed / 2;
+}
+
+
+/*
+ * N^2 selection, fast only for very small kth
+ * useful for multiple partitions at nearby kth values
+ * (e.g. even element median, interpolating percentile)
+ */
+static int
+@name@dumb_select_@suff@(@type@ *v,
+#if @arg@
+ npy_intp * tosort,
+#endif
+ npy_intp num, npy_intp kth)
+{
+ npy_intp i;
+ for (i = 0; i <= kth; i++) {
+ npy_intp minidx = i;
+ @type@ minval = v[IDX(i)];
+ npy_intp k;
+ for (k = i + 1; k < num; k++) {
+ if (@TYPE@_LT(v[IDX(k)], minval)) {
+ minidx = k;
+ minval = v[IDX(k)];
+ }
+ }
+ SWAP(SORTEE(i), SORTEE(minidx));
+ }
+
+ return 0;
+}
+
+
+/*
+ * iterative median-of-3 quickselect with cutoff to median-of-medians-of-5
+ * receives a stack of already computed pivots in v to minimize the
+ * partition size where kth is searched in
+ *
+ * area that needs partitioning in [...]
+ * kth 0: [8 7 6 5 4 3 2 1 0] -> med3 partitions elements [4, 2, 0]
+ * 0 1 2 3 4 8 7 5 6 -> pop requested kth -> stack [4, 2]
+ * kth 3: 0 1 2 [3] 4 8 7 5 6 -> stack [4]
+ * kth 5: 0 1 2 3 4 [8 7 5 6] -> stack [6]
+ * kth 8: 0 1 2 3 4 5 6 [8 7] -> stack []
+ *
+ */
+int
+@name@introselect_@suff@(@type@ *v,
+#if @arg@
+ npy_intp* tosort,
+#endif
+ npy_intp num, npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED)
+{
+ npy_intp low = 0;
+ npy_intp high = num - 1;
+ npy_intp depth_limit;
+
+ if (npiv == NULL)
+ pivots = NULL;
+
+ while (pivots != NULL && *npiv > 0) {
+ if (pivots[*npiv - 1] > kth) {
+            /* pivot larger than kth, set it as the upper bound */
+ high = pivots[*npiv - 1] - 1;
+ break;
+ }
+ else if (pivots[*npiv - 1] == kth) {
+ /* kth was already found in a previous iteration -> done */
+ return 0;
+ }
+
+ low = pivots[*npiv - 1] + 1;
+
+ /* pop from stack */
+ *npiv -= 1;
+ }
+
+ /*
+ * use a faster O(n*kth) algorithm for very small kth
+ * e.g. for interpolating percentile
+ */
+ if (kth - low < 3) {
+ DUMBSELECT(v, tosort, low, high - low + 1, kth - low);
+ store_pivot(kth, kth, pivots, npiv);
+ return 0;
+ }
+
+    /* dumb integer msb, float npy_log2 too slow for small partitions */
+ {
+ npy_uintp unum = num;
+ depth_limit = 0;
+ while (unum >>= 1) {
+ depth_limit++;
+ }
+ depth_limit *= 2;
+ }
+
+ /* guarantee three elements */
+ for (;low + 1 < high;) {
+ npy_intp ll = low + 1;
+ npy_intp hh = high;
+
+ /*
+         * if we aren't making sufficient progress with median of 3,
+         * fall back to the median-of-medians-of-5 pivot for a linear
+         * worst case; med3 for small sizes is required for the
+         * unguarded partition
+ */
+ if (depth_limit > 0 || hh - ll < 5) {
+ const npy_intp mid = low + (high - low) / 2;
+ /* median of 3 pivot strategy,
+ * swapping for efficient partition */
+ MEDIAN3_SWAP(v, tosort, low, mid, high);
+ }
+ else {
+ npy_intp mid;
+ /* FIXME: always use pivots to optimize this iterative partition */
+#if @arg@
+ mid = ll + amedian_of_median5_@suff@(v, tosort + ll, hh - ll, NULL, NULL);
+#else
+ mid = ll + median_of_median5_@suff@(v + ll, hh - ll, NULL, NULL);
+#endif
+ SWAP(SORTEE(mid), SORTEE(low));
+            /* adapt for the wider partition range than with the med3 pivot */
+ ll--;
+ hh++;
+ }
+
+ depth_limit--;
+
+ /*
+ * find place to put pivot (in low):
+ * previous swapping removes need for bound checks
+ * pivot 3-lowest [x x x] 3-highest
+ */
+ UNGUARDED_PARTITION(v, tosort, v[IDX(low)], &ll, &hh);
+
+ /* move pivot into position */
+ SWAP(SORTEE(low), SORTEE(hh));
+
+ store_pivot(hh, kth, pivots, npiv);
+
+ if (hh >= kth)
+ high = hh - 1;
+ if (hh <= kth)
+ low = ll;
+ }
+
+ /* two elements */
+ if (high == low + 1) {
+ if (@TYPE@_LT(v[IDX(high)], v[IDX(low)]))
+ SWAP(SORTEE(high), SORTEE(low))
+ store_pivot(low, kth, pivots, npiv);
+ }
+
+ return 0;
+}
+
+
+#undef IDX
+#undef SWAP
+#undef SORTEE
+#undef MEDIAN3_SWAP
+#undef MEDIAN5
+#undef UNGUARDED_PARTITION
+#undef INTROSELECT
+#undef DUMBSELECT
+/**end repeat1**/
+
+/**end repeat**/
+
+
+/*
+ *****************************************************************************
+ ** STRING SORTS **
+ *****************************************************************************
+ */
+
+
+/**begin repeat
+ *
+ * #TYPE = STRING, UNICODE#
+ * #suff = string, unicode#
+ * #type = npy_char, npy_ucs4#
+ */
+
+int
+introselect_@suff@(@type@ *start, npy_intp num, npy_intp kth, PyArrayObject *arr)
+{
+ return quicksort_@suff@(start, num, arr);
+}
+
+int
+aintroselect_@suff@(@type@ *v, npy_intp* tosort, npy_intp num, npy_intp kth, void *null)
+{
+ return aquicksort_@suff@(v, tosort, num, null);
+}
+
+/**end repeat**/
+
+
+/*
+ *****************************************************************************
+ ** GENERIC SORT **
+ *****************************************************************************
+ */
+
+
+/*
+ * This selection has almost the same signature as libc qsort and is
+ * intended to supply an error return for compatibility with the other
+ * generic sort kinds. For now it simply falls back to npy_quicksort.
+ */
+int
+npy_introselect(void *base, size_t num, size_t size, size_t kth, npy_comparator cmp)
+{
+ return npy_quicksort(base, num, size, cmp);
+}
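
A usage sketch for the direct selection entry points above (not from the patch): place the median of seven doubles, with the pivot stack enabled so a later call with a larger kth can reuse the work.

    double v[7] = {3.0, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0};
    npy_intp pivots[NPY_MAX_PIVOT_STACK];
    npy_intp npiv = 0;
    introselect_double(v, 7, 3, pivots, &npiv, NULL);
    /* v[3] now holds the median; v[0..2] <= v[3] <= v[4..6] */
    introselect_double(v, 7, 5, pivots, &npiv, NULL);  /* reuses stored pivots */
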
diff --git a/numpy/core/src/private/lowlevel_strided_loops.h b/numpy/core/src/private/lowlevel_strided_loops.h
index 94c6a2121..f2e5d02c3 100644
--- a/numpy/core/src/private/lowlevel_strided_loops.h
+++ b/numpy/core/src/private/lowlevel_strided_loops.h
@@ -1,5 +1,7 @@
#ifndef __LOWLEVEL_STRIDED_LOOPS_H
#define __LOWLEVEL_STRIDED_LOOPS_H
+#include "common.h"
+#include <npy_config.h>
/*
* NOTE: This API should remain private for the time being, to allow
@@ -256,6 +258,7 @@ PyArray_CastRawArrays(npy_intp count,
* 'stransfer' with the provided dst_stride/src_stride and
* dst_strides[0]/src_strides[0], so the caller can use those values to
* specialize the function.
+ * Note that even if ndim == 0, everything needs to be set as if ndim == 1.
*
* The return value is the number of elements it couldn't copy. A return value
* of 0 means all elements were copied, a larger value means the end of
@@ -395,6 +398,119 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape,
char **out_dataB, npy_intp *out_stridesB,
char **out_dataC, npy_intp *out_stridesC);
+/*
+ * Return number of elements that must be peeled from
+ * the start of 'addr' with 'nvals' elements of size 'esize'
+ * in order to reach 'alignment'.
+ * alignment must be a power of two.
+ * see npy_blocked_end for an example
+ */
+static NPY_INLINE npy_uintp
+npy_aligned_block_offset(const void * addr, const npy_uintp esize,
+ const npy_uintp alignment, const npy_uintp nvals)
+{
+ const npy_uintp offset = (npy_uintp)addr & (alignment - 1);
+ npy_uintp peel = offset ? (alignment - offset) / esize : 0;
+ peel = nvals < peel ? nvals : peel;
+ return peel;
+}
+
+/*
+ * Return upper loop bound for an array of 'nvals' elements
+ * of size 'esize' peeled by 'offset' elements and blocking to
+ * a vector size of 'vsz' in bytes
+ *
+ * example usage:
+ * npy_intp i;
+ * double v[101];
+ * npy_intp esize = sizeof(v[0]);
+ * npy_intp peel = npy_aligned_block_offset(v, esize, 16, n);
+ * // peel to alignment 16
+ * for (i = 0; i < peel; i++)
+ * <scalar-op>
+ * // simd vectorized operation
+ * for (; i < npy_blocked_end(peel, esize, 16, n); i += 16 / esize)
+ * <blocked-op>
+ * // handle scalar rest
+ * for(; i < n; i++)
+ * <scalar-op>
+ */
+static NPY_INLINE npy_uintp
+npy_blocked_end(const npy_uintp offset, const npy_uintp esize,
+ const npy_uintp vsz, const npy_uintp nvals)
+{
+ return nvals - offset - (nvals - offset) % (vsz / esize);
+}
+
+
+/* byte swapping functions */
+static NPY_INLINE npy_uint16
+npy_bswap2(npy_uint16 x)
+{
+ return ((x & 0xffu) << 8) | (x >> 8);
+}
+
+/*
+ * treat as int16 and byteswap unaligned memory;
+ * some CPUs don't support unaligned access
+ */
+static NPY_INLINE void
+npy_bswap2_unaligned(char * x)
+{
+ char a = x[0];
+ x[0] = x[1];
+ x[1] = a;
+}
+
+static NPY_INLINE npy_uint32
+npy_bswap4(npy_uint32 x)
+{
+#ifdef HAVE___BUILTIN_BSWAP32
+ return __builtin_bswap32(x);
+#else
+ return ((x & 0xffu) << 24) | ((x & 0xff00u) << 8) |
+ ((x & 0xff0000u) >> 8) | (x >> 24);
+#endif
+}
+
+static NPY_INLINE void
+npy_bswap4_unaligned(char * x)
+{
+ char a = x[0];
+ x[0] = x[3];
+ x[3] = a;
+ a = x[1];
+ x[1] = x[2];
+ x[2] = a;
+}
+
+static NPY_INLINE npy_uint64
+npy_bswap8(npy_uint64 x)
+{
+#ifdef HAVE___BUILTIN_BSWAP64
+ return __builtin_bswap64(x);
+#else
+ return ((x & 0xffULL) << 56) |
+ ((x & 0xff00ULL) << 40) |
+ ((x & 0xff0000ULL) << 24) |
+ ((x & 0xff000000ULL) << 8) |
+ ((x & 0xff00000000ULL) >> 8) |
+ ((x & 0xff0000000000ULL) >> 24) |
+ ((x & 0xff000000000000ULL) >> 40) |
+ ( x >> 56);
+#endif
+}
+
+static NPY_INLINE void
+npy_bswap8_unaligned(char * x)
+{
+ char a = x[0]; x[0] = x[7]; x[7] = a;
+ a = x[1]; x[1] = x[6]; x[6] = a;
+ a = x[2]; x[2] = x[5]; x[5] = a;
+ a = x[3]; x[3] = x[4]; x[4] = a;
+}
+
+
/* Start raw iteration */
#define NPY_RAW_ITER_START(idim, ndim, coord, shape) \
memset((coord), 0, (ndim) * sizeof(coord[0])); \
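
A self-contained sketch using the two peeling helpers above (hypothetical function; it mirrors the usage example given in the npy_blocked_end comment):

    static void
    scale_doubles(double *v, npy_intp n, double f)
    {
        const npy_uintp esize = sizeof(v[0]);
        const npy_uintp peel = npy_aligned_block_offset(v, esize, 16, n);
        const npy_uintp end = npy_blocked_end(peel, esize, 16, n);
        npy_uintp i;
        for (i = 0; i < peel; i++) {
            v[i] *= f;              /* scalar prologue up to 16-byte alignment */
        }
        for (; i < end; i += 16 / esize) {
            v[i] *= f;              /* stands in for a 2-double SIMD block */
            v[i + 1] *= f;
        }
        for (; i < (npy_uintp)n; i++) {
            v[i] *= f;              /* scalar epilogue */
        }
    }
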
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h
index 237dc94ab..453dbd065 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/private/npy_config.h
@@ -2,6 +2,7 @@
#define _NPY_NPY_CONFIG_H_
#include "config.h"
+#include "numpy/numpyconfig.h"
/* Disable broken MS math functions */
#if defined(_MSC_VER) || defined(__MINGW32_VERSION)
@@ -10,7 +11,7 @@
#endif
/* Safe to use ldexp and frexp for long double for MSVC builds */
-#if (SIZEOF_LONG_DOUBLE == SIZEOF_DOUBLE) || defined(_MSC_VER)
+#if (NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE) || defined(_MSC_VER)
#ifdef HAVE_LDEXP
#define HAVE_LDEXPL 1
#endif
@@ -24,21 +25,4 @@
#undef HAVE_ATAN2
#endif
-/*
- * On Mac OS X, because there is only one configuration stage for all the archs
- * in universal builds, any macro which depends on the arch needs to be
- * harcoded
- */
-#ifdef __APPLE__
- #undef SIZEOF_LONG
- #undef SIZEOF_PY_INTPTR_T
-
- #ifdef __LP64__
- #define SIZEOF_LONG 8
- #define SIZEOF_PY_INTPTR_T 8
- #else
- #define SIZEOF_LONG 4
- #define SIZEOF_PY_INTPTR_T 4
- #endif
-#endif
#endif
diff --git a/numpy/core/src/private/npy_fpmath.h b/numpy/core/src/private/npy_fpmath.h
index 92338e4c7..8a120cab7 100644
--- a/numpy/core/src/private/npy_fpmath.h
+++ b/numpy/core/src/private/npy_fpmath.h
@@ -40,6 +40,7 @@
defined(HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_BE) || \
defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) || \
defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) || \
+ defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE) || \
defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_BE))
#error No long double representation defined
#endif
diff --git a/numpy/core/src/private/npy_partition.h b/numpy/core/src/private/npy_partition.h
new file mode 100644
index 000000000..9e1c8494a
--- /dev/null
+++ b/numpy/core/src/private/npy_partition.h
@@ -0,0 +1,559 @@
+
+/*
+ *****************************************************************************
+ ** This file was autogenerated from a template DO NOT EDIT!!!! **
+ ** Changes should be made to the original source (.src) file **
+ *****************************************************************************
+ */
+
+#line 1
+/*
+ *****************************************************************************
+ ** IMPORTANT NOTE for npy_partition.h.src -> npy_partition.h **
+ *****************************************************************************
+ * The template file npy_partition.h.src is not automatically converted into
+ * npy_partition.h by the build system. If you edit this file, you must manually
+ * do the conversion using numpy/distutils/conv_template.py from the
+ * command line as follows:
+ *
+ * $ cd <NumPy source root directory>
+ * $ python numpy/distutils/conv_template.py numpy/core/src/private/npy_partition.h.src
+ * $
+ */
+
+
+#ifndef __NPY_PARTITION_H__
+#define __NPY_PARTITION_H__
+
+
+#include "npy_sort.h"
+
+/* Python include is for future object sorts */
+#include <Python.h>
+#include <numpy/npy_common.h>
+#include <numpy/ndarraytypes.h>
+
+#define NPY_MAX_PIVOT_STACK 50
+
+
+#line 43
+
+int introselect_bool(npy_bool *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_bool(npy_bool *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_byte(npy_byte *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_byte(npy_byte *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_ubyte(npy_ubyte *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_ubyte(npy_ubyte *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_short(npy_short *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_short(npy_short *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_ushort(npy_ushort *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_ushort(npy_ushort *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_int(npy_int *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_int(npy_int *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_uint(npy_uint *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_uint(npy_uint *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_long(npy_long *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_long(npy_long *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_ulong(npy_ulong *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_ulong(npy_ulong *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_longlong(npy_longlong *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_longlong(npy_longlong *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_ulonglong(npy_ulonglong *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_ulonglong(npy_ulonglong *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_half(npy_ushort *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_half(npy_ushort *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_float(npy_float *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_float(npy_float *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_double(npy_double *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_double(npy_double *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_longdouble(npy_longdouble *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_longdouble(npy_longdouble *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_cfloat(npy_cfloat *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_cfloat(npy_cfloat *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_cdouble(npy_cdouble *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_cdouble(npy_cdouble *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+#line 43
+
+int introselect_clongdouble(npy_clongdouble *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_clongdouble(npy_clongdouble *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+
+
+int introselect_string(npy_char *vec, npy_intp cnt, npy_intp kth, PyArrayObject *arr);
+int aintroselect_string(npy_char *vec, npy_intp *ind, npy_intp cnt, npy_intp kth, void *null);
+
+
+int introselect_unicode(npy_ucs4 *vec, npy_intp cnt, npy_intp kth, PyArrayObject *arr);
+int aintroselect_unicode(npy_ucs4 *vec, npy_intp *ind, npy_intp cnt, npy_intp kth, void *null);
+
+int npy_introselect(void *base, size_t num, size_t size, size_t kth, npy_comparator cmp);
+
+typedef struct {
+ enum NPY_TYPES typenum;
+ PyArray_PartitionFunc * part[NPY_NSELECTS];
+ PyArray_ArgPartitionFunc * argpart[NPY_NSELECTS];
+} part_map;
+
+static part_map _part_map[] = {
+#line 87
+ {
+ NPY_BOOL,
+ {
+ (PyArray_PartitionFunc *)&introselect_bool,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_bool,
+ }
+ },
+
+#line 87
+ {
+ NPY_BYTE,
+ {
+ (PyArray_PartitionFunc *)&introselect_byte,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_byte,
+ }
+ },
+
+#line 87
+ {
+ NPY_UBYTE,
+ {
+ (PyArray_PartitionFunc *)&introselect_ubyte,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_ubyte,
+ }
+ },
+
+#line 87
+ {
+ NPY_SHORT,
+ {
+ (PyArray_PartitionFunc *)&introselect_short,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_short,
+ }
+ },
+
+#line 87
+ {
+ NPY_USHORT,
+ {
+ (PyArray_PartitionFunc *)&introselect_ushort,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_ushort,
+ }
+ },
+
+#line 87
+ {
+ NPY_INT,
+ {
+ (PyArray_PartitionFunc *)&introselect_int,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_int,
+ }
+ },
+
+#line 87
+ {
+ NPY_UINT,
+ {
+ (PyArray_PartitionFunc *)&introselect_uint,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_uint,
+ }
+ },
+
+#line 87
+ {
+ NPY_LONG,
+ {
+ (PyArray_PartitionFunc *)&introselect_long,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_long,
+ }
+ },
+
+#line 87
+ {
+ NPY_ULONG,
+ {
+ (PyArray_PartitionFunc *)&introselect_ulong,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_ulong,
+ }
+ },
+
+#line 87
+ {
+ NPY_LONGLONG,
+ {
+ (PyArray_PartitionFunc *)&introselect_longlong,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_longlong,
+ }
+ },
+
+#line 87
+ {
+ NPY_ULONGLONG,
+ {
+ (PyArray_PartitionFunc *)&introselect_ulonglong,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_ulonglong,
+ }
+ },
+
+#line 87
+ {
+ NPY_HALF,
+ {
+ (PyArray_PartitionFunc *)&introselect_half,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_half,
+ }
+ },
+
+#line 87
+ {
+ NPY_FLOAT,
+ {
+ (PyArray_PartitionFunc *)&introselect_float,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_float,
+ }
+ },
+
+#line 87
+ {
+ NPY_DOUBLE,
+ {
+ (PyArray_PartitionFunc *)&introselect_double,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_double,
+ }
+ },
+
+#line 87
+ {
+ NPY_LONGDOUBLE,
+ {
+ (PyArray_PartitionFunc *)&introselect_longdouble,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_longdouble,
+ }
+ },
+
+#line 87
+ {
+ NPY_CFLOAT,
+ {
+ (PyArray_PartitionFunc *)&introselect_cfloat,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_cfloat,
+ }
+ },
+
+#line 87
+ {
+ NPY_CDOUBLE,
+ {
+ (PyArray_PartitionFunc *)&introselect_cdouble,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_cdouble,
+ }
+ },
+
+#line 87
+ {
+ NPY_CLONGDOUBLE,
+ {
+ (PyArray_PartitionFunc *)&introselect_clongdouble,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_clongdouble,
+ }
+ },
+
+};
+
+
+static NPY_INLINE PyArray_PartitionFunc *
+get_partition_func(int type, NPY_SELECTKIND which)
+{
+ npy_intp i;
+ if (which >= NPY_NSELECTS) {
+ return NULL;
+ }
+ for (i = 0; i < sizeof(_part_map)/sizeof(_part_map[0]); i++) {
+ if (type == _part_map[i].typenum) {
+ return _part_map[i].part[which];
+ }
+ }
+ return NULL;
+}
+
+
+static NPY_INLINE PyArray_ArgPartitionFunc *
+get_argpartition_func(int type, NPY_SELECTKIND which)
+{
+ npy_intp i;
+ if (which >= NPY_NSELECTS) {
+ return NULL;
+ }
+ for (i = 0; i < sizeof(_part_map)/sizeof(_part_map[0]); i++) {
+ if (type == _part_map[i].typenum) {
+ return _part_map[i].argpart[which];
+ }
+ }
+ return NULL;
+}
+
+#endif
+
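For reference, get_partition_func() and get_argpartition_func() resolve a dtype's partition routine by a linear scan of _part_map keyed on the type number. A minimal usage sketch (not part of the patch; NPY_INTROSELECT is assumed here to be the NPY_SELECTKIND member matching the introselect entries registered above):

/*
 * Hedged sketch: partition a contiguous double buffer around index kth.
 * On success, data[kth] holds the element that would be there if the
 * buffer were fully sorted.
 */
static int
partition_doubles(npy_double *data, npy_intp n, npy_intp kth)
{
    npy_intp pivots[NPY_MAX_PIVOT_STACK];
    npy_intp npiv = 0;
    PyArray_PartitionFunc *part =
        get_partition_func(NPY_DOUBLE, NPY_INTROSELECT);  /* assumed kind */

    if (part == NULL) {
        return -1;  /* unsupported dtype or selection kind */
    }
    return part((void *)data, n, kth, pivots, &npiv, NULL);
}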
diff --git a/numpy/core/src/private/npy_partition.h.src b/numpy/core/src/private/npy_partition.h.src
new file mode 100644
index 000000000..c03ecb0de
--- /dev/null
+++ b/numpy/core/src/private/npy_partition.h.src
@@ -0,0 +1,131 @@
+/*
+ *****************************************************************************
+ ** IMPORTANT NOTE for npy_partition.h.src -> npy_partition.h **
+ *****************************************************************************
+ * The template file npy_partition.h.src is not automatically converted into
+ * npy_partition.h by the build system. If you edit this file, you must manually
+ * do the conversion using numpy/distutils/conv_template.py from the
+ * command line as follows:
+ *
+ * $ cd <NumPy source root directory>
+ * $ python numpy/distutils/conv_template.py numpy/core/src/private/npy_partition.h.src
+ * $
+ */
+
+
+#ifndef __NPY_PARTITION_H__
+#define __NPY_PARTITION_H__
+
+
+#include "npy_sort.h"
+
+/* Python include is for future object sorts */
+#include <Python.h>
+#include <numpy/npy_common.h>
+#include <numpy/ndarraytypes.h>
+
+#define NPY_MAX_PIVOT_STACK 50
+
+
+/**begin repeat
+ *
+ * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE#
+ * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong, half, float, double, longdouble,
+ * cfloat, cdouble, clongdouble#
+ * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int,
+ * npy_uint, npy_long, npy_ulong, npy_longlong, npy_ulonglong,
+ * npy_ushort, npy_float, npy_double, npy_longdouble, npy_cfloat,
+ * npy_cdouble, npy_clongdouble#
+ */
+
+int introselect_@suff@(@type@ *v, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+int aintroselect_@suff@(@type@ *v, npy_intp* tosort, npy_intp num,
+ npy_intp kth,
+ npy_intp * pivots,
+ npy_intp * npiv,
+ void *NOT_USED);
+
+
+/**end repeat**/
+
+int introselect_string(npy_char *vec, npy_intp cnt, npy_intp kth, PyArrayObject *arr);
+int aintroselect_string(npy_char *vec, npy_intp *ind, npy_intp cnt, npy_intp kth, void *null);
+
+
+int introselect_unicode(npy_ucs4 *vec, npy_intp cnt, npy_intp kth, PyArrayObject *arr);
+int aintroselect_unicode(npy_ucs4 *vec, npy_intp *ind, npy_intp cnt, npy_intp kth, void *null);
+
+int npy_introselect(void *base, size_t num, size_t size, size_t kth, npy_comparator cmp);
+
+typedef struct {
+ enum NPY_TYPES typenum;
+ PyArray_PartitionFunc * part[NPY_NSELECTS];
+ PyArray_ArgPartitionFunc * argpart[NPY_NSELECTS];
+} part_map;
+
+static part_map _part_map[] = {
+/**begin repeat
+ *
+ * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE#
+ * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong, half, float, double, longdouble,
+ * cfloat, cdouble, clongdouble#
+ * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int,
+ * npy_uint, npy_long, npy_ulong, npy_longlong, npy_ulonglong,
+ * npy_ushort, npy_float, npy_double, npy_longdouble, npy_cfloat,
+ * npy_cdouble, npy_clongdouble#
+ */
+ {
+ NPY_@TYPE@,
+ {
+ (PyArray_PartitionFunc *)&introselect_@suff@,
+ },
+ {
+ (PyArray_ArgPartitionFunc *)&aintroselect_@suff@,
+ }
+ },
+/**end repeat**/
+};
+
+
+static NPY_INLINE PyArray_PartitionFunc *
+get_partition_func(int type, NPY_SELECTKIND which)
+{
+ npy_intp i;
+ if (which >= NPY_NSELECTS) {
+ return NULL;
+ }
+ for (i = 0; i < sizeof(_part_map)/sizeof(_part_map[0]); i++) {
+ if (type == _part_map[i].typenum) {
+ return _part_map[i].part[which];
+ }
+ }
+ return NULL;
+}
+
+
+static NPY_INLINE PyArray_ArgPartitionFunc *
+get_argpartition_func(int type, NPY_SELECTKIND which)
+{
+ npy_intp i;
+ if (which >= NPY_NSELECTS) {
+ return NULL;
+ }
+ for (i = 0; i < sizeof(_part_map)/sizeof(_part_map[0]); i++) {
+ if (type == _part_map[i].typenum) {
+ return _part_map[i].argpart[which];
+ }
+ }
+ return NULL;
+}
+
+#endif
diff --git a/numpy/core/src/private/npy_pycompat.h b/numpy/core/src/private/npy_pycompat.h
index 6b49c7476..aa0b5c122 100644
--- a/numpy/core/src/private/npy_pycompat.h
+++ b/numpy/core/src/private/npy_pycompat.h
@@ -3,22 +3,4 @@
#include "numpy/npy_3kcompat.h"
-/*
- * Accessing items of ob_base
- */
-
-#if (PY_VERSION_HEX < 0x02060000)
-#define Py_TYPE(o) (((PyObject*)(o))->ob_type)
-#define Py_REFCNT(o) (((PyObject*)(o))->ob_refcnt)
-#define Py_SIZE(o) (((PyVarObject*)(o))->ob_size)
-#endif
-
-/*
- * PyIndex_Check
- */
-#if (PY_VERSION_HEX < 0x02050000)
-#undef PyIndex_Check
-#define PyIndex_Check(o) 0
-#endif
-
#endif /* _NPY_COMPAT_H_ */
diff --git a/numpy/core/src/scalarmathmodule.c.src b/numpy/core/src/scalarmathmodule.c.src
index 3241f97b8..307c7ddb0 100644
--- a/numpy/core/src/scalarmathmodule.c.src
+++ b/numpy/core/src/scalarmathmodule.c.src
@@ -24,7 +24,7 @@
* types of the same rank to have the same width.
*/
-#if SIZEOF_LONGLONG == 64
+#if NPY_SIZEOF_LONGLONG == 64
static int
ulonglong_overflow(npy_ulonglong a, npy_ulonglong b)
@@ -72,7 +72,7 @@ slonglong_overflow(npy_longlong a0, npy_longlong b0)
(((x & mask) + (y & mask) + (w >> 32)) >> 31);
}
-#elif SIZEOF_LONGLONG == 128
+#elif NPY_SIZEOF_LONGLONG == 128
static int
ulonglong_overflow(npy_ulonglong a, npy_ulonglong b)
@@ -203,8 +203,8 @@ static void
}
/**end repeat**/
-#ifndef SIZEOF_BYTE
-#define SIZEOF_BYTE 1
+#ifndef NPY_SIZEOF_BYTE
+#define NPY_SIZEOF_BYTE 1
#endif
/**begin repeat
@@ -494,16 +494,25 @@ half_ctype_remainder(npy_half a, npy_half b, npy_half *out) {
/**end repeat**/
/**begin repeat
- * #name = half, float, double, longdouble#
- * #type = npy_half, npy_float, npy_double, npy_longdouble#
+ * #name = float, double, longdouble#
+ * #type = npy_float, npy_double, npy_longdouble#
*/
static npy_@name@ (*_basic_@name@_pow)(@type@ a, @type@ b);
static void
-@name@_ctype_power(@type@ a, @type@ b, @type@ *out) {
+@name@_ctype_power(@type@ a, @type@ b, @type@ *out)
+{
*out = _basic_@name@_pow(a, b);
}
/**end repeat**/
+static void
+half_ctype_power(npy_half a, npy_half b, npy_half *out)
+{
+ const npy_float af = npy_half_to_float(a);
+ const npy_float bf = npy_half_to_float(b);
+ const npy_float outf = _basic_float_pow(af,bf);
+ *out = npy_float_to_half(outf);
+}
/**begin repeat
* #name = byte, ubyte, short, ushort, int, uint,
@@ -654,19 +663,19 @@ static void
/**begin repeat
* #name = byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong,
- * half, float, double, longdouble,
+ * half, float, longdouble,
* cfloat, cdouble, clongdouble#
* #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_half, npy_float, npy_double, npy_longdouble,
+ * npy_half, npy_float, npy_longdouble,
* npy_cfloat, npy_cdouble, npy_clongdouble#
* #Name = Byte, UByte, Short, UShort, Int, UInt,
* Long, ULong, LongLong, ULongLong,
- * Half, Float, Double, LongDouble,
+ * Half, Float, LongDouble,
* CFloat, CDouble, CLongDouble#
* #TYPE = NPY_BYTE, NPY_UBYTE, NPY_SHORT, NPY_USHORT, NPY_INT, NPY_UINT,
* NPY_LONG, NPY_ULONG, NPY_LONGLONG, NPY_ULONGLONG,
- * NPY_HALF, NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,
+ * NPY_HALF, NPY_FLOAT, NPY_LONGDOUBLE,
* NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE#
*/
@@ -711,6 +720,63 @@ _@name@_convert_to_ctype(PyObject *a, @type@ *arg1)
/**end repeat**/
+/* Same as above but added exact checks against known python types for speed */
+
+/**begin repeat
+ * #name = double#
+ * #type = npy_double#
+ * #Name = Double#
+ * #TYPE = NPY_DOUBLE#
+ * #PYCHECKEXACT = PyFloat_CheckExact#
+ * #PYEXTRACTCTYPE = PyFloat_AS_DOUBLE#
+ */
+
+static int
+_@name@_convert_to_ctype(PyObject *a, @type@ *arg1)
+{
+ PyObject *temp;
+
+ if (@PYCHECKEXACT@(a)){
+ *arg1 = @PYEXTRACTCTYPE@(a);
+ return 0;
+ }
+
+ if (PyArray_IsScalar(a, @Name@)) {
+ *arg1 = PyArrayScalar_VAL(a, @Name@);
+ return 0;
+ }
+ else if (PyArray_IsScalar(a, Generic)) {
+ PyArray_Descr *descr1;
+
+ if (!PyArray_IsScalar(a, Number)) {
+ return -1;
+ }
+ descr1 = PyArray_DescrFromTypeObject((PyObject *)Py_TYPE(a));
+ if (PyArray_CanCastSafely(descr1->type_num, @TYPE@)) {
+ PyArray_CastScalarDirect(a, descr1, arg1, @TYPE@);
+ Py_DECREF(descr1);
+ return 0;
+ }
+ else {
+ Py_DECREF(descr1);
+ return -1;
+ }
+ }
+ else if (PyArray_GetPriority(a, NPY_PRIORITY) > NPY_PRIORITY) {
+ return -2;
+ }
+ else if ((temp = PyArray_ScalarFromObject(a)) != NULL) {
+ int retval = _@name@_convert_to_ctype(temp, arg1);
+
+ Py_DECREF(temp);
+ return retval;
+ }
+ return -2;
+}
+
+/**end repeat**/
+
+
/**begin repeat
* #name = byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong,
@@ -1130,7 +1196,6 @@ static PyObject *
int first;
@type@ out = @zero@;
-
switch(_@name@_convert2_to_ctypes(a, &arg1, b, &arg2)) {
case 0:
break;
@@ -1373,7 +1438,7 @@ NONZERO_NAME(@name@_)(PyObject *a)
static int
-emit_complexwarning()
+emit_complexwarning(void)
{
static PyObject *cls = NULL;
if (cls == NULL) {
@@ -1384,13 +1449,8 @@ emit_complexwarning()
assert(cls != NULL);
Py_DECREF(mod);
}
-#if PY_VERSION_HEX >= 0x02050000
return PyErr_WarnEx(cls,
"Casting complex values to real discards the imaginary part", 1);
-#else
- return PyErr_Warn(cls,
- "Casting complex values to real discards the imaginary part");
-#endif
}
/**begin repeat
@@ -1650,9 +1710,7 @@ static PyNumberMethods @name@_as_number = {
(binaryfunc)@name@_true_divide, /*nb_true_divide*/
0, /*nb_inplace_floor_divide*/
0, /*nb_inplace_true_divide*/
-#if PY_VERSION_HEX >= 0x02050000
(unaryfunc)NULL, /*nb_index*/
-#endif
};
/**end repeat**/
@@ -1671,9 +1729,7 @@ add_scalarmath(void)
* Half, Float, Double, LongDouble,
* CFloat, CDouble, CLongDouble#
**/
-#if PY_VERSION_HEX >= 0x02050000
@name@_as_number.nb_index = Py@NAME@ArrType_Type.tp_as_number->nb_index;
-#endif
Py@NAME@ArrType_Type.tp_as_number = &(@name@_as_number);
Py@NAME@ArrType_Type.tp_richcompare = @name@_richcompare;
/**end repeat**/
@@ -1724,7 +1780,6 @@ get_functions(void)
i += 3;
j++;
}
- _basic_half_pow = funcdata[j - 1];
_basic_float_pow = funcdata[j];
_basic_double_pow = funcdata[j + 1];
_basic_longdouble_pow = funcdata[j + 2];
@@ -1759,8 +1814,14 @@ get_functions(void)
}
funcdata = ((PyUFuncObject *)obj)->data;
signatures = ((PyUFuncObject *)obj)->types;
- i = 0;
- j = 0;
+ /*
+ * The sqrt ufunc is specialized with float and double loops in
+ * generate_umath.py; the first entries go into FLOAT_sqrt/DOUBLE_sqrt.
+ * They have the same signature as the scalar variants, so we need to
+ * skip over them.
+ */
+ i = 4;
+ j = 2;
while (signatures[i] != NPY_FLOAT) {
i += 2; j++;
}
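The new _double_convert_to_ctype above illustrates a common CPython fast-path pattern: test the exact built-in type first so the hot case (a plain Python float) bypasses the generic scalar machinery. A standalone sketch of the same idea (not part of the patch):

/*
 * Hedged sketch: convert a Python object to a C double, taking the
 * fast path when the object is exactly a float (no subclass dispatch,
 * and PyFloat_AS_DOUBLE needs no error checking).
 */
static int
convert_to_double_fast(PyObject *obj, double *out)
{
    if (PyFloat_CheckExact(obj)) {
        *out = PyFloat_AS_DOUBLE(obj);
        return 0;
    }
    /* generic, slower conversion; may invoke __float__ */
    *out = PyFloat_AsDouble(obj);
    if (*out == -1.0 && PyErr_Occurred()) {
        return -1;
    }
    return 0;
}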
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 040486a32..d1fc58ffa 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -5,7 +5,6 @@
#include "Python.h"
-#include "npy_config.h"
#ifdef ENABLE_SEPARATE_COMPILATION
#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
#define NO_IMPORT_ARRAY
@@ -15,11 +14,23 @@
#include "numpy/ufuncobject.h"
#include "numpy/npy_math.h"
#include "numpy/halffloat.h"
+#include "lowlevel_strided_loops.h"
#include "npy_pycompat.h"
#include "ufunc_object.h"
+#include <string.h> /* for memchr */
+
+
+/*
+ * Include vectorized functions and dispatchers.
+ * This file is safe to include even for generic builds:
+ * platform-specific instructions are either masked out via the
+ * preprocessor or detected at runtime.
+ */
+#include "simd.inc"
+
/*
*****************************************************************************
@@ -27,6 +38,7 @@
*****************************************************************************
*/
+
#define IS_BINARY_REDUCE ((args[0] == args[2])\
&& (steps[0] == steps[2])\
&& (steps[0] == 0))
@@ -545,26 +557,70 @@ BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
* #kind = logical_and, logical_or#
* #OP = &&, ||#
* #SC = ==, !=#
+ * #and = 1, 0#
**/
NPY_NO_EXPORT void
BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
if(IS_BINARY_REDUCE) {
- BINARY_REDUCE_LOOP(npy_bool) {
- const npy_bool in2 = *(npy_bool *)ip2;
- io1 = io1 @OP@ in2;
- if (io1 @SC@ 0) {
- break;
+#ifdef HAVE_EMMINTRIN_H
+ /*
+ * stick with our variant for more reliable performance; the only known
+ * platform that outperforms it (by ~20%) is an i7 with glibc 2.17
+ */
+ if (run_reduce_simd_@kind@_BOOL(args, dimensions, steps)) {
+ return;
+ }
+#else
+ /* for now only use libc on 32-bit/non-x86 */
+ if (steps[1] == 1) {
+ npy_bool * op = (npy_bool *)args[0];
+#if @and@
+ /* np.all(), search for a zero (false) */
+ if (*op) {
+ *op = memchr(args[1], 0, dimensions[0]) == NULL;
+ }
+#else
+ /*
+ * np.any(): search for a non-zero (true) value by comparing blocks
+ * against zero; memcmp is faster than memchr on SSE4 machines with
+ * glibc >= 2.12, and memchr could only match one byte value anyway
+ */
+ static const npy_bool zero[4096]; /* zero by C standard */
+ npy_uintp i, n = dimensions[0];
+
+ for (i = 0; !*op && i < n - (n % sizeof(zero)); i += sizeof(zero)) {
+ *op = memcmp(&args[1][i], zero, sizeof(zero)) != 0;
+ }
+ if (!*op && n - i > 0) {
+ *op = memcmp(&args[1][i], zero, n - i) != 0;
}
+#endif
+ return;
+ }
+#endif
+ else {
+ BINARY_REDUCE_LOOP(npy_bool) {
+ const npy_bool in2 = *(npy_bool *)ip2;
+ io1 = io1 @OP@ in2;
+ if (io1 @SC@ 0) {
+ break;
+ }
+ }
+ *((npy_bool *)iop1) = io1;
}
- *((npy_bool *)iop1) = io1;
}
else {
- BINARY_LOOP {
- const npy_bool in1 = *(npy_bool *)ip1;
- const npy_bool in2 = *(npy_bool *)ip2;
- *((npy_bool *)op1) = in1 @OP@ in2;
+ if (run_binary_simd_@kind@_BOOL(args, dimensions, steps)) {
+ return;
+ }
+ else {
+ BINARY_LOOP {
+ const npy_bool in1 = *(npy_bool *)ip1;
+ const npy_bool in2 = *(npy_bool *)ip2;
+ *((npy_bool *)op1) = in1 @OP@ in2;
+ }
}
}
}
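The np.any() branch above relies on a simple observation: any non-zero byte counts as true, so whole chunks can be compared against a statically zeroed buffer with memcmp, which vectorizes better than a byte-at-a-time loop. A self-contained sketch of the technique (not part of the patch):

/*
 * Hedged sketch: return 1 if any byte in buf[0..n) is non-zero.
 * Compares 4 KiB chunks against a static zero block, then handles
 * the remainder; `zero` is zero-initialized per the C standard.
 */
static int
any_nonzero(const char *buf, size_t n)
{
    static const char zero[4096];
    size_t i = 0;

    for (; i + sizeof(zero) <= n; i += sizeof(zero)) {
        if (memcmp(buf + i, zero, sizeof(zero)) != 0) {
            return 1;
        }
    }
    return (n > i) && memcmp(buf + i, zero, n - i) != 0;
}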
@@ -603,9 +659,14 @@ BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
NPY_NO_EXPORT void
BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- UNARY_LOOP {
- npy_bool in1 = *(npy_bool *)ip1;
- *((npy_bool *)op1) = in1 @OP@ 0;
+ if (run_unary_simd_@kind@_BOOL(args, dimensions, steps)) {
+ return;
+ }
+ else {
+ UNARY_LOOP {
+ npy_bool in1 = *(npy_bool *)ip1;
+ *((npy_bool *)op1) = in1 @OP@ 0;
+ }
}
}
/**end repeat**/
@@ -1266,6 +1327,26 @@ TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
*****************************************************************************
*/
+/**begin repeat
+ * Float types
+ * #type = npy_float, npy_double#
+ * #TYPE = FLOAT, DOUBLE#
+ * #scalarf = npy_sqrtf, npy_sqrt#
+ */
+
+NPY_NO_EXPORT void
+@TYPE@_sqrt(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ if (!run_unary_simd_sqrt_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = @scalarf@(in1);
+ }
+ }
+}
+
+/**end repeat**/
+
/**begin repeat
* Float types
@@ -1284,13 +1365,13 @@ TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- if(IS_BINARY_REDUCE) {
+ if (IS_BINARY_REDUCE) {
BINARY_REDUCE_LOOP(@type@) {
io1 @OP@= *(@type@ *)ip2;
}
*((@type@ *)iop1) = io1;
}
- else {
+ else if (!run_binary_simd_@kind@_@TYPE@(args, dimensions, steps)) {
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
@@ -1308,10 +1389,12 @@ NPY_NO_EXPORT void
NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- BINARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- const @type@ in2 = *(@type@ *)ip2;
- *((npy_bool *)op1) = in1 @OP@ in2;
+ if (!run_binary_simd_@kind@_@TYPE@(args, dimensions, steps)) {
+ BINARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ const @type@ in2 = *(@type@ *)ip2;
+ *((npy_bool *)op1) = in1 @OP@ in2;
+ }
}
}
/**end repeat1**/
@@ -1387,11 +1470,13 @@ NPY_NO_EXPORT void
{
/* */
if (IS_BINARY_REDUCE) {
- BINARY_REDUCE_LOOP(@type@) {
- const @type@ in2 = *(@type@ *)ip2;
- io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2;
+ if (!run_unary_reduce_simd_@kind@_@TYPE@(args, dimensions, steps)) {
+ BINARY_REDUCE_LOOP(@type@) {
+ const @type@ in2 = *(@type@ *)ip2;
+ io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2;
+ }
+ *((@type@ *)iop1) = io1;
}
- *((@type@ *)iop1) = io1;
}
else {
BINARY_LOOP {
@@ -1457,18 +1542,27 @@ NPY_NO_EXPORT void
NPY_NO_EXPORT void
@TYPE@_square(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((@type@ *)op1) = in1*in1;
+ char * margs[] = {args[0], args[0], args[1]};
+ npy_intp msteps[] = {steps[0], steps[0], steps[1]};
+ if (!run_binary_simd_multiply_@TYPE@(margs, dimensions, msteps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *((@type@ *)op1) = in1*in1;
+ }
}
}
NPY_NO_EXPORT void
@TYPE@_reciprocal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((@type@ *)op1) = 1/in1;
+ @type@ one = 1.@c@;
+ char * margs[] = {(char*)&one, args[0], args[1]};
+ npy_intp msteps[] = {0, steps[0], steps[1]};
+ if (!run_binary_simd_divide_@TYPE@(margs, dimensions, msteps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *((@type@ *)op1) = 1/in1;
+ }
}
}
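The square and reciprocal loops above reuse the binary SIMD kernels through argument aliasing: square passes the input buffer twice, and reciprocal passes a stride-0 constant 1 as the first operand. The trick in isolation (a hedged sketch, not part of the patch):

/*
 * Hedged sketch: drive a binary inner loop (out[i] = a[i] op b[i],
 * described by args/steps triples) as a unary one by aliasing the
 * input pointer, so that out[i] = in[i] op in[i].
 */
static void
unary_via_binary(char **args, npy_intp *dimensions, npy_intp *steps,
                 void (*binary_loop)(char **, npy_intp *, npy_intp *))
{
    char *margs[] = {args[0], args[0], args[1]};
    npy_intp msteps[] = {steps[0], steps[0], steps[1]};

    binary_loop(margs, dimensions, msteps);
}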
@@ -1492,11 +1586,13 @@ NPY_NO_EXPORT void
NPY_NO_EXPORT void
@TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- const @type@ tmp = in1 > 0 ? in1 : -in1;
- /* add 0 to clear -0.0 */
- *((@type@ *)op1) = tmp + 0;
+ if (!run_unary_simd_absolute_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ const @type@ tmp = in1 > 0 ? in1 : -in1;
+ /* add 0 to clear -0.0 */
+ *((@type@ *)op1) = tmp + 0;
+ }
}
}
@@ -1600,7 +1696,7 @@ NPY_NO_EXPORT void
NPY_NO_EXPORT void
HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- if(IS_BINARY_REDUCE) {
+ if (IS_BINARY_REDUCE) {
char *iop1 = args[0];
float io1 = npy_half_to_float(*(npy_half *)iop1);
BINARY_REDUCE_LOOP_INNER {
diff --git a/numpy/core/src/umath/loops.h b/numpy/core/src/umath/loops.h
index c2123459b..7f49f1956 100644
--- a/numpy/core/src/umath/loops.h
+++ b/numpy/core/src/umath/loops.h
@@ -1541,56 +1541,64 @@ ULONGLONG_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NP
*****************************************************************************
*/
+#line 187
+NPY_NO_EXPORT void
+FLOAT_sqrt(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+#line 187
+NPY_NO_EXPORT void
+DOUBLE_sqrt(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
-#line 191
+#line 197
-#line 198
+#line 204
NPY_NO_EXPORT void
HALF_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
HALF_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
HALF_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
HALF_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
HALF_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
HALF_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
HALF_less(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
HALF_less_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
HALF_greater(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
HALF_greater_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
HALF_logical_and(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
HALF_logical_or(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -1601,49 +1609,49 @@ HALF_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_U
NPY_NO_EXPORT void
HALF_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
HALF_isnan(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
HALF_isinf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
HALF_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
HALF_signbit(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
HALF_copysign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
HALF_nextafter(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
HALF_spacing(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 229
+#line 235
NPY_NO_EXPORT void
HALF_maximum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 229
+#line 235
NPY_NO_EXPORT void
HALF_minimum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 237
+#line 243
NPY_NO_EXPORT void
HALF_fmax(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 237
+#line 243
NPY_NO_EXPORT void
HALF_fmin(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -1696,55 +1704,55 @@ HALF_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN
#define HALF_true_divide HALF_divide
-#line 191
+#line 197
-#line 198
+#line 204
NPY_NO_EXPORT void
FLOAT_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
FLOAT_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
FLOAT_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
FLOAT_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
FLOAT_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
FLOAT_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
FLOAT_less(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
FLOAT_less_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
FLOAT_greater(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
FLOAT_greater_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
FLOAT_logical_and(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
FLOAT_logical_or(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -1755,49 +1763,49 @@ FLOAT_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_
NPY_NO_EXPORT void
FLOAT_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
FLOAT_isnan(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
FLOAT_isinf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
FLOAT_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
FLOAT_signbit(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
FLOAT_copysign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
FLOAT_nextafter(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
FLOAT_spacing(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 229
+#line 235
NPY_NO_EXPORT void
FLOAT_maximum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 229
+#line 235
NPY_NO_EXPORT void
FLOAT_minimum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 237
+#line 243
NPY_NO_EXPORT void
FLOAT_fmax(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 237
+#line 243
NPY_NO_EXPORT void
FLOAT_fmin(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -1850,55 +1858,55 @@ FLOAT_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_U
#define FLOAT_true_divide FLOAT_divide
-#line 191
+#line 197
-#line 198
+#line 204
NPY_NO_EXPORT void
DOUBLE_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
DOUBLE_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
DOUBLE_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
DOUBLE_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
DOUBLE_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
DOUBLE_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
DOUBLE_less(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
DOUBLE_less_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
DOUBLE_greater(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
DOUBLE_greater_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
DOUBLE_logical_and(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
DOUBLE_logical_or(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -1909,49 +1917,49 @@ DOUBLE_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY
NPY_NO_EXPORT void
DOUBLE_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
DOUBLE_isnan(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
DOUBLE_isinf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
DOUBLE_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
DOUBLE_signbit(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
DOUBLE_copysign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
DOUBLE_nextafter(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
DOUBLE_spacing(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 229
+#line 235
NPY_NO_EXPORT void
DOUBLE_maximum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 229
+#line 235
NPY_NO_EXPORT void
DOUBLE_minimum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 237
+#line 243
NPY_NO_EXPORT void
DOUBLE_fmax(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 237
+#line 243
NPY_NO_EXPORT void
DOUBLE_fmin(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2004,55 +2012,55 @@ DOUBLE_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_
#define DOUBLE_true_divide DOUBLE_divide
-#line 191
+#line 197
-#line 198
+#line 204
NPY_NO_EXPORT void
LONGDOUBLE_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
LONGDOUBLE_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
LONGDOUBLE_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 198
+#line 204
NPY_NO_EXPORT void
LONGDOUBLE_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
LONGDOUBLE_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
LONGDOUBLE_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
LONGDOUBLE_less(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
LONGDOUBLE_less_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
LONGDOUBLE_greater(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
LONGDOUBLE_greater_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
LONGDOUBLE_logical_and(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 207
+#line 213
NPY_NO_EXPORT void
LONGDOUBLE_logical_or(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2063,49 +2071,49 @@ LONGDOUBLE_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void
NPY_NO_EXPORT void
LONGDOUBLE_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
LONGDOUBLE_isnan(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
LONGDOUBLE_isinf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
LONGDOUBLE_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
LONGDOUBLE_signbit(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
LONGDOUBLE_copysign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
LONGDOUBLE_nextafter(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 221
+#line 227
NPY_NO_EXPORT void
LONGDOUBLE_spacing(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 229
+#line 235
NPY_NO_EXPORT void
LONGDOUBLE_maximum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 229
+#line 235
NPY_NO_EXPORT void
LONGDOUBLE_minimum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 237
+#line 243
NPY_NO_EXPORT void
LONGDOUBLE_fmax(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 237
+#line 243
NPY_NO_EXPORT void
LONGDOUBLE_fmin(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2173,14 +2181,14 @@ LONGDOUBLE_ldexp_long(char **args, npy_intp *dimensions, npy_intp *steps, void *
#define CEQ(xr,xi,yr,yi) (xr == yr && xi == yi);
#define CNE(xr,xi,yr,yi) (xr != yr || xi != yi);
-#line 310
-
#line 316
+
+#line 322
NPY_NO_EXPORT void
CFLOAT_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 316
+#line 322
NPY_NO_EXPORT void
CFLOAT_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2195,36 +2203,36 @@ CFLOAT_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUS
NPY_NO_EXPORT void
CFLOAT_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CFLOAT_greater(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CFLOAT_greater_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CFLOAT_less(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CFLOAT_less_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CFLOAT_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CFLOAT_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 343
+#line 349
NPY_NO_EXPORT void
CFLOAT_logical_and(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 343
+#line 349
NPY_NO_EXPORT void
CFLOAT_logical_or(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2234,15 +2242,15 @@ CFLOAT_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY
NPY_NO_EXPORT void
CFLOAT_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 357
+#line 363
NPY_NO_EXPORT void
CFLOAT_isnan(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 357
+#line 363
NPY_NO_EXPORT void
CFLOAT_isinf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 357
+#line 363
NPY_NO_EXPORT void
CFLOAT_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2268,20 +2276,20 @@ CFLOAT__arg(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
NPY_NO_EXPORT void
CFLOAT_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 386
+#line 392
NPY_NO_EXPORT void
CFLOAT_maximum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 386
+#line 392
NPY_NO_EXPORT void
CFLOAT_minimum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 394
+#line 400
NPY_NO_EXPORT void
CFLOAT_fmax(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 394
+#line 400
NPY_NO_EXPORT void
CFLOAT_fmin(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2289,14 +2297,14 @@ CFLOAT_fmin(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
#define CFLOAT_true_divide CFLOAT_divide
-#line 310
-
#line 316
+
+#line 322
NPY_NO_EXPORT void
CDOUBLE_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 316
+#line 322
NPY_NO_EXPORT void
CDOUBLE_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2311,36 +2319,36 @@ CDOUBLE_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNU
NPY_NO_EXPORT void
CDOUBLE_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CDOUBLE_greater(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CDOUBLE_greater_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CDOUBLE_less(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CDOUBLE_less_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CDOUBLE_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CDOUBLE_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 343
+#line 349
NPY_NO_EXPORT void
CDOUBLE_logical_and(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 343
+#line 349
NPY_NO_EXPORT void
CDOUBLE_logical_or(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2350,15 +2358,15 @@ CDOUBLE_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void *NP
NPY_NO_EXPORT void
CDOUBLE_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 357
+#line 363
NPY_NO_EXPORT void
CDOUBLE_isnan(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 357
+#line 363
NPY_NO_EXPORT void
CDOUBLE_isinf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 357
+#line 363
NPY_NO_EXPORT void
CDOUBLE_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2384,20 +2392,20 @@ CDOUBLE__arg(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSE
NPY_NO_EXPORT void
CDOUBLE_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 386
+#line 392
NPY_NO_EXPORT void
CDOUBLE_maximum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 386
+#line 392
NPY_NO_EXPORT void
CDOUBLE_minimum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 394
+#line 400
NPY_NO_EXPORT void
CDOUBLE_fmax(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 394
+#line 400
NPY_NO_EXPORT void
CDOUBLE_fmin(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2405,14 +2413,14 @@ CDOUBLE_fmin(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSE
#define CDOUBLE_true_divide CDOUBLE_divide
-#line 310
-
#line 316
+
+#line 322
NPY_NO_EXPORT void
CLONGDOUBLE_add(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 316
+#line 322
NPY_NO_EXPORT void
CLONGDOUBLE_subtract(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2427,36 +2435,36 @@ CLONGDOUBLE_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY
NPY_NO_EXPORT void
CLONGDOUBLE_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CLONGDOUBLE_greater(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CLONGDOUBLE_greater_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CLONGDOUBLE_less(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CLONGDOUBLE_less_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CLONGDOUBLE_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 334
+#line 340
NPY_NO_EXPORT void
CLONGDOUBLE_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 343
+#line 349
NPY_NO_EXPORT void
CLONGDOUBLE_logical_and(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 343
+#line 349
NPY_NO_EXPORT void
CLONGDOUBLE_logical_or(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2466,15 +2474,15 @@ CLONGDOUBLE_logical_xor(char **args, npy_intp *dimensions, npy_intp *steps, void
NPY_NO_EXPORT void
CLONGDOUBLE_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 357
+#line 363
NPY_NO_EXPORT void
CLONGDOUBLE_isnan(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 357
+#line 363
NPY_NO_EXPORT void
CLONGDOUBLE_isinf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 357
+#line 363
NPY_NO_EXPORT void
CLONGDOUBLE_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2500,20 +2508,20 @@ CLONGDOUBLE__arg(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_U
NPY_NO_EXPORT void
CLONGDOUBLE_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 386
+#line 392
NPY_NO_EXPORT void
CLONGDOUBLE_maximum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 386
+#line 392
NPY_NO_EXPORT void
CLONGDOUBLE_minimum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 394
+#line 400
NPY_NO_EXPORT void
CLONGDOUBLE_fmax(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 394
+#line 400
NPY_NO_EXPORT void
CLONGDOUBLE_fmin(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2544,81 +2552,81 @@ TIMEDELTA_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY
NPY_NO_EXPORT void
TIMEDELTA_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 427
+#line 433
NPY_NO_EXPORT void
DATETIME__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
-#line 435
+#line 441
NPY_NO_EXPORT void
DATETIME_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 435
+#line 441
NPY_NO_EXPORT void
DATETIME_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 435
+#line 441
NPY_NO_EXPORT void
DATETIME_greater(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 435
+#line 441
NPY_NO_EXPORT void
DATETIME_greater_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 435
+#line 441
NPY_NO_EXPORT void
DATETIME_less(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 435
+#line 441
NPY_NO_EXPORT void
DATETIME_less_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 443
+#line 449
NPY_NO_EXPORT void
DATETIME_maximum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 443
+#line 449
NPY_NO_EXPORT void
DATETIME_minimum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 427
+#line 433
NPY_NO_EXPORT void
TIMEDELTA__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
-#line 435
+#line 441
NPY_NO_EXPORT void
TIMEDELTA_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 435
+#line 441
NPY_NO_EXPORT void
TIMEDELTA_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 435
+#line 441
NPY_NO_EXPORT void
TIMEDELTA_greater(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 435
+#line 441
NPY_NO_EXPORT void
TIMEDELTA_greater_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 435
+#line 441
NPY_NO_EXPORT void
TIMEDELTA_less(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 435
+#line 441
NPY_NO_EXPORT void
TIMEDELTA_less_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 443
+#line 449
NPY_NO_EXPORT void
TIMEDELTA_maximum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 443
+#line 449
NPY_NO_EXPORT void
TIMEDELTA_minimum(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -2683,27 +2691,27 @@ TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
*****************************************************************************
*/
-#line 511
+#line 517
NPY_NO_EXPORT void
OBJECT_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 511
+#line 517
NPY_NO_EXPORT void
OBJECT_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 511
+#line 517
NPY_NO_EXPORT void
OBJECT_greater(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 511
+#line 517
NPY_NO_EXPORT void
OBJECT_greater_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 511
+#line 517
NPY_NO_EXPORT void
OBJECT_less(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-#line 511
+#line 517
NPY_NO_EXPORT void
OBJECT_less_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index 87b47a754..a8a58c5de 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -181,6 +181,12 @@ U@TYPE@_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_
*****************************************************************************
*/
+/**begin repeat
+ * #TYPE = FLOAT, DOUBLE#
+ */
+NPY_NO_EXPORT void
+@TYPE@_sqrt(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+/**end repeat**/
/**begin repeat
* Float types
diff --git a/numpy/core/src/umath/operand_flag_tests.c.src b/numpy/core/src/umath/operand_flag_tests.c.src
new file mode 100644
index 000000000..7eaf1fbfc
--- /dev/null
+++ b/numpy/core/src/umath/operand_flag_tests.c.src
@@ -0,0 +1,105 @@
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include <math.h>
+#include <Python.h>
+#include <structmember.h>
+#include <numpy/arrayobject.h>
+#include <numpy/ufuncobject.h>
+#include "numpy/npy_3kcompat.h"
+
+
+static PyMethodDef TestMethods[] = {
+ {NULL, NULL, 0, NULL}
+};
+
+
+static void
+inplace_add(char **args, npy_intp *dimensions, npy_intp *steps, void *data)
+{
+ npy_intp i;
+ npy_intp n = dimensions[0];
+ char *in1 = args[0];
+ char *in2 = args[1];
+ npy_intp in1_step = steps[0];
+ npy_intp in2_step = steps[1];
+
+ for (i = 0; i < n; i++) {
+ (*(long *)in1) = *(long*)in1 + *(long*)in2;
+ in1 += in1_step;
+ in2 += in2_step;
+ }
+}
+
+
+/* This is a pointer to the above function. */
+PyUFuncGenericFunction funcs[1] = {&inplace_add};
+
+/* These are the input and return dtypes of inplace_add. */
+static char types[2] = {NPY_LONG, NPY_LONG};
+
+static void *data[1] = {NULL};
+
+#if defined(NPY_PY3K)
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "operand_flag_tests",
+ NULL,
+ -1,
+ TestMethods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+#define RETVAL m
+PyMODINIT_FUNC PyInit_operand_flag_tests(void)
+{
+#else
+#define RETVAL
+PyMODINIT_FUNC initoperand_flag_tests(void)
+{
+#endif
+ PyObject *m = NULL;
+ PyObject *ufunc;
+
+#if defined(NPY_PY3K)
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule("operand_flag_tests", TestMethods);
+#endif
+ if (m == NULL) {
+ goto fail;
+ }
+
+ import_array();
+ import_umath();
+
+ ufunc = PyUFunc_FromFuncAndData(funcs, data, types, 1, 2, 0,
+ PyUFunc_None, "inplace_add",
+ "inplace_add_docstring", 0);
+
+ /*
+ * Set flags to turn off buffering for the first input operand,
+ * so that the result can be written back to the input operand.
+ */
+ ((PyUFuncObject*)ufunc)->op_flags[0] = NPY_ITER_READWRITE;
+ ((PyUFuncObject*)ufunc)->iter_flags = NPY_ITER_REDUCE_OK;
+ PyModule_AddObject(m, "inplace_add", (PyObject*)ufunc);
+
+ return RETVAL;
+
+fail:
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "cannot load operand_flag_tests module.");
+ }
+#if defined(NPY_PY3K)
+ if (m) {
+ Py_DECREF(m);
+ m = NULL;
+ }
+#endif
+ return RETVAL;
+
+}
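Once this test module is imported, the ufunc acts as an in-place accumulator: because its first operand is NPY_ITER_READWRITE and unbuffered, the sum lands directly in the input array. A hedged sketch of driving it from C (not part of the patch):

/*
 * Hedged sketch: call inplace_add(a, b) on the imported module; on
 * return, `a` holds a + b element-wise because the result is written
 * back into the first operand.
 */
static int
call_inplace_add(PyObject *module, PyObject *a, PyObject *b)
{
    PyObject *res;
    PyObject *ufunc = PyObject_GetAttrString(module, "inplace_add");

    if (ufunc == NULL) {
        return -1;
    }
    res = PyObject_CallFunctionObjArgs(ufunc, a, b, NULL);
    Py_DECREF(ufunc);
    if (res == NULL) {
        return -1;
    }
    Py_DECREF(res);
    return 0;
}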
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 444682d57..3f2b94a4a 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -21,7 +21,7 @@
#include <numpy/arrayobject.h>
#include "npy_config.h"
-#include "numpy/npy_3kcompat.h"
+#include "npy_pycompat.h"
#include "lowlevel_strided_loops.h"
#include "reduction.h"
@@ -483,7 +483,8 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
if (op_view == NULL) {
goto fail;
}
- if (PyArray_SIZE(op_view) == 0) {
+ /* empty op_view signals no reduction; but 0-d arrays cannot be empty */
+ if ((PyArray_SIZE(op_view) == 0) || (PyArray_NDIM(operand) == 0)) {
Py_DECREF(op_view);
op_view = NULL;
goto finish;
@@ -513,7 +514,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
NPY_KEEPORDER, casting,
op_flags,
op_dtypes,
- 0, NULL, NULL, buffersize);
+ -1, NULL, NULL, buffersize);
if (iter == NULL) {
goto fail;
}
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
new file mode 100644
index 000000000..2f1c3055b
--- /dev/null
+++ b/numpy/core/src/umath/simd.inc.src
@@ -0,0 +1,848 @@
+/* -*- c -*- */
+
+/*
+ * This file contains the definitions of simd vectorized operations.
+ *
+ * It currently contains sse2 functions that are built on amd64, x32 or
+ * non-generic builds (CFLAGS=-march=...).
+ * In the future it may contain other instruction sets, like AVX or NEON,
+ * detected at runtime. In that case it needs to be included indirectly
+ * via a file compiled with special options (or via gcc target attributes)
+ * so that the binary stays portable.
+ */
+
+
+#ifndef __NPY_SIMD_INC
+#define __NPY_SIMD_INC
+
+#include "lowlevel_strided_loops.h"
+#include "npy_config.h"
+/* for NO_FLOATING_POINT_SUPPORT */
+#include "numpy/ufuncobject.h"
+#ifdef HAVE_EMMINTRIN_H
+#include <emmintrin.h>
+#endif
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h> /* for memcpy */
+
+int PyUFunc_getfperr(void);
+void PyUFunc_clearfperr(void);
+
+/*
+ * blockable when the stride equals the element size and the input and
+ * destination are either identical or do not overlap within one vector
+ * register
+ */
+#define IS_BLOCKABLE_UNARY(esize, vsize) \
+ (steps[0] == (esize) && steps[0] == steps[1] && \
+ (npy_is_aligned(args[0], esize) && npy_is_aligned(args[1], esize)) && \
+ ((abs(args[1] - args[0]) >= (vsize)) || ((abs(args[1] - args[0]) == 0))))
+
+#define IS_BLOCKABLE_REDUCE(esize, vsize) \
+ (steps[1] == (esize) && abs(args[1] - args[0]) >= (vsize))
+
+#define IS_BLOCKABLE_BINARY(esize, vsize) \
+ (steps[0] == steps[1] && steps[1] == steps[2] && steps[2] == (esize) && \
+ npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[1], (esize)) && \
+ npy_is_aligned(args[0], (esize)) && \
+ (abs(args[2] - args[0]) >= (vsize) || abs(args[2] - args[0]) == 0) && \
+     (abs(args[2] - args[1]) >= (vsize) || abs(args[2] - args[1]) == 0))
+
+#define IS_BLOCKABLE_BINARY_SCALAR1(esize, vsize) \
+ (steps[0] == 0 && steps[1] == steps[2] && steps[2] == (esize) && \
+ npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[1], (esize)) && \
+ ((abs(args[2] - args[1]) >= (vsize)) || (abs(args[2] - args[1]) == 0)) && \
+ abs(args[2] - args[0]) >= (esize))
+
+#define IS_BLOCKABLE_BINARY_SCALAR2(esize, vsize) \
+ (steps[1] == 0 && steps[0] == steps[2] && steps[2] == (esize) && \
+ npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[0], (esize)) && \
+ ((abs(args[2] - args[0]) >= (vsize)) || (abs(args[2] - args[0]) == 0)) && \
+ abs(args[2] - args[1]) >= (esize))
+
+#define IS_BLOCKABLE_BINARY_BOOL(esize, vsize) \
+ (steps[0] == (esize) && steps[0] == steps[1] && steps[2] == (1) && \
+ npy_is_aligned(args[1], (esize)) && \
+ npy_is_aligned(args[0], (esize)))
+
+#define IS_BLOCKABLE_BINARY_SCALAR1_BOOL(esize, vsize) \
+ (steps[0] == 0 && steps[1] == (esize) && steps[2] == (1) && \
+ npy_is_aligned(args[1], (esize)))
+
+#define IS_BLOCKABLE_BINARY_SCALAR2_BOOL(esize, vsize) \
+ (steps[0] == (esize) && steps[1] == 0 && steps[2] == (1) && \
+ npy_is_aligned(args[0], (esize)))
+
+/* align var to alignment */
+#define LOOP_BLOCK_ALIGN_VAR(var, type, alignment)\
+ npy_intp i, peel = npy_aligned_block_offset(var, sizeof(type),\
+ alignment, n);\
+ for(i = 0; i < peel; i++)
+
+#define LOOP_BLOCKED(type, vsize)\
+ for(; i < npy_blocked_end(peel, sizeof(type), vsize, n);\
+ i += (vsize / sizeof(type)))
+
+#define LOOP_BLOCKED_END\
+ for (; i < n; i++)
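+
+/*
+ * Sketch of how the three blocking macros above compose in a unary loop
+ * (illustrative only; scalar_op is a placeholder):
+ *
+ *     LOOP_BLOCK_ALIGN_VAR(op, npy_float, 16)
+ *         op[i] = scalar_op(ip[i]);      // peel until op is 16 byte aligned
+ *     LOOP_BLOCKED(npy_float, 16) {
+ *         // one 16 byte sse2 block covering op[i] .. op[i + 3]
+ *     }
+ *     LOOP_BLOCKED_END {
+ *         op[i] = scalar_op(ip[i]);      // scalar tail
+ *     }
+ */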
+
+/* fanout two bits to two bytes */
+static const npy_int16 fanout_2[] = {
+ 0x0000,
+ 0x0001,
+ 0x0100,
+ 0x0101,
+};
+
+/* fanout four bits to four bytes */
+static const npy_int32 fanout_4[] = {
+ 0x00000000,
+ 0x00000001,
+ 0x00000100,
+ 0x00000101,
+ 0x00010000,
+ 0x00010001,
+ 0x00010100,
+ 0x00010101,
+ 0x01000000,
+ 0x01000001,
+ 0x01000100,
+ 0x01000101,
+ 0x01010000,
+ 0x01010001,
+ 0x01010100,
+ 0x01010101
+};
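+
+/*
+ * Example (little endian, as implied by sse2): a 4 bit movemask result of
+ * 0b0101 (lanes 0 and 2 true) indexes fanout_4[5] == 0x00010001, whose
+ * bytes in memory are 01 00 01 00, i.e. exactly the four npy_bool values
+ * that the compare loops memcpy to the output.
+ */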
+
+
+/*
+ * Dispatcher functions
+ * decide whether the operation can be vectorized and, if so, run it;
+ * they return 1 if the vectorized loop was run and 0 if nothing was done
+ */
+
+/*
+ *****************************************************************************
+ ** FLOAT DISPATCHERS
+ *****************************************************************************
+ */
+
+/**begin repeat
+ * Float types
+ * #type = npy_float, npy_double, npy_longdouble#
+ * #TYPE = FLOAT, DOUBLE, LONGDOUBLE#
+ * #vector = 1, 1, 0#
+ */
+
+/**begin repeat1
+ * #func = sqrt, absolute, minimum, maximum#
+ * #check = IS_BLOCKABLE_UNARY, IS_BLOCKABLE_UNARY, IS_BLOCKABLE_REDUCE, IS_BLOCKABLE_REDUCE#
+ * #name = unary, unary, unary_reduce, unary_reduce#
+ */
+
+#if @vector@ && defined HAVE_EMMINTRIN_H
+
+/* prototypes */
+static void
+sse2_@func@_@TYPE@(@type@ *, @type@ *, const npy_intp n);
+
+#endif
+
+static NPY_INLINE int
+run_@name@_simd_@func@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if @vector@ && defined HAVE_EMMINTRIN_H
+ if (@check@(sizeof(@type@), 16)) {
+ sse2_@func@_@TYPE@((@type@*)args[1], (@type@*)args[0], dimensions[0]);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/**end repeat1**/
+
+/**begin repeat1
+ * Arithmetic
+ * # kind = add, subtract, multiply, divide#
+ */
+
+#if @vector@ && defined HAVE_EMMINTRIN_H
+
+/* prototypes */
+static void
+sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2,
+ npy_intp n);
+static void
+sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2,
+ npy_intp n);
+static void
+sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2,
+ npy_intp n);
+
+#endif
+
+static NPY_INLINE int
+run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if @vector@ && defined HAVE_EMMINTRIN_H
+ @type@ * ip1 = (@type@ *)args[0];
+ @type@ * ip2 = (@type@ *)args[1];
+ @type@ * op = (@type@ *)args[2];
+ npy_intp n = dimensions[0];
+ /* argument one scalar */
+ if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), 16)) {
+ sse2_binary_scalar1_@kind@_@TYPE@(op, ip1, ip2, n);
+ return 1;
+ }
+ /* argument two scalar */
+ else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), 16)) {
+ sse2_binary_scalar2_@kind@_@TYPE@(op, ip1, ip2, n);
+ return 1;
+ }
+ else if (IS_BLOCKABLE_BINARY(sizeof(@type@), 16)) {
+ sse2_binary_@kind@_@TYPE@(op, ip1, ip2, n);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/**end repeat1**/
+
+/**begin repeat1
+ * #kind = equal, not_equal, less, less_equal, greater, greater_equal,
+ * logical_and, logical_or#
+ * #simd = 1, 1, 1, 1, 1, 1, 0, 0#
+ */
+
+#if @vector@ && @simd@ && defined HAVE_EMMINTRIN_H
+
+/* prototypes */
+static void
+sse2_binary_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2,
+ npy_intp n);
+static void
+sse2_binary_scalar1_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2,
+ npy_intp n);
+static void
+sse2_binary_scalar2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2,
+ npy_intp n);
+
+#endif
+
+static NPY_INLINE int
+run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if @vector@ && @simd@ && defined HAVE_EMMINTRIN_H
+ @type@ * ip1 = (@type@ *)args[0];
+ @type@ * ip2 = (@type@ *)args[1];
+ npy_bool * op = (npy_bool *)args[2];
+ npy_intp n = dimensions[0];
+ /* argument one scalar */
+ if (IS_BLOCKABLE_BINARY_SCALAR1_BOOL(sizeof(@type@), 16)) {
+ sse2_binary_scalar1_@kind@_@TYPE@(op, ip1, ip2, n);
+ return 1;
+ }
+ /* argument two scalar */
+ else if (IS_BLOCKABLE_BINARY_SCALAR2_BOOL(sizeof(@type@), 16)) {
+ sse2_binary_scalar2_@kind@_@TYPE@(op, ip1, ip2, n);
+ return 1;
+ }
+ else if (IS_BLOCKABLE_BINARY_BOOL(sizeof(@type@), 16)) {
+ sse2_binary_@kind@_@TYPE@(op, ip1, ip2, n);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/**end repeat1**/
+
+/**end repeat**/
+
+/*
+ *****************************************************************************
+ ** BOOL DISPATCHERS
+ *****************************************************************************
+ */
+
+/**begin repeat
+ * # kind = logical_or, logical_and#
+ */
+
+static void
+sse2_binary_@kind@_BOOL(npy_bool * op, npy_bool * ip1, npy_bool * ip2,
+ npy_intp n);
+
+static NPY_INLINE int
+run_binary_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if defined HAVE_EMMINTRIN_H
+ if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_BINARY(sizeof(npy_bool), 16)) {
+ sse2_binary_@kind@_BOOL((npy_bool*)args[2], (npy_bool*)args[0],
+ (npy_bool*)args[1], dimensions[0]);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+
+static void
+sse2_reduce_@kind@_BOOL(npy_bool * op, npy_bool * ip, npy_intp n);
+
+static NPY_INLINE int
+run_reduce_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if defined HAVE_EMMINTRIN_H
+ if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_REDUCE(sizeof(npy_bool), 16)) {
+ sse2_reduce_@kind@_BOOL((npy_bool*)args[0], (npy_bool*)args[1],
+ dimensions[0]);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/**end repeat**/
+
+/**begin repeat
+ * # kind = absolute, logical_not#
+ */
+
+static void
+sse2_@kind@_BOOL(npy_bool *, npy_bool *, const npy_intp n);
+
+static NPY_INLINE int
+run_unary_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if defined HAVE_EMMINTRIN_H
+ if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_UNARY(sizeof(npy_bool), 16)) {
+ sse2_@kind@_BOOL((npy_bool*)args[1], (npy_bool*)args[0], dimensions[0]);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/**end repeat**/
+
+#ifdef HAVE_EMMINTRIN_H
+
+/*
+ * Vectorized operations
+ */
+/*
+ *****************************************************************************
+ ** FLOAT LOOPS
+ *****************************************************************************
+ */
+
+/**begin repeat
+* horizontal reductions on a vector
+* # VOP = min, max#
+*/
+
+static NPY_INLINE npy_float sse2_horizontal_@VOP@___m128(__m128 v)
+{
+ npy_float r;
+ __m128 tmp = _mm_movehl_ps(v, v); /* c d ... */
+ __m128 m = _mm_@VOP@_ps(v, tmp); /* m(ac) m(bd) ... */
+ tmp = _mm_shuffle_ps(m, m, _MM_SHUFFLE(1, 1, 1, 1));/* m(bd) m(bd) ... */
+ _mm_store_ss(&r, _mm_@VOP@_ps(tmp, m)); /* m(acbd) ... */
+ return r;
+}
+
+static NPY_INLINE npy_double sse2_horizontal_@VOP@___m128d(__m128d v)
+{
+ npy_double r;
+ __m128d tmp = _mm_unpackhi_pd(v, v); /* b b */
+ _mm_store_sd(&r, _mm_@VOP@_pd(tmp, v)); /* m(ab) m(bb) */
+ return r;
+}
+
+/**end repeat**/
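+
+/*
+ * Worked example for the single precision variant: with v = (a, b, c, d),
+ * movehl yields (c, d, c, d); the first min/max gives (m(a,c), m(b,d), ..);
+ * the shuffle broadcasts m(b,d); and the final min/max leaves m(a,b,c,d)
+ * in the lowest lane, which _mm_store_ss extracts.
+ */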
+
+/**begin repeat
+ * #type = npy_float, npy_double#
+ * #TYPE = FLOAT, DOUBLE#
+ * #scalarf = npy_sqrtf, npy_sqrt#
+ * #c = f, #
+ * #vtype = __m128, __m128d#
+ * #vpre = _mm, _mm#
+ * #vsuf = ps, pd#
+ * #vsufs = ss, sd#
+ * #nan = NPY_NANF, NPY_NAN#
+ * #mtype = npy_int32, npy_int16#
+ * #fanout = fanout_4, fanout_2#
+ */
+
+
+/**begin repeat1
+* Arithmetic
+* # kind = add, subtract, multiply, divide#
+* # OP = +, -, *, /#
+* # VOP = add, sub, mul, div#
+*/
+
+static void
+sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
+{
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ op[i] = ip1[i] @OP@ ip2[i];
+ /* lots of specializations, to squeeze out max performance */
+ if (npy_is_aligned(&ip1[i], 16) && npy_is_aligned(&ip2[i], 16)) {
+ if (ip1 == ip2) {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, a);
+ @vpre@_store_@vsuf@(&op[i], c);
+ }
+ }
+ else {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
+ @vtype@ b = @vpre@_load_@vsuf@(&ip2[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
+ @vpre@_store_@vsuf@(&op[i], c);
+ }
+ }
+ }
+ else if (npy_is_aligned(&ip1[i], 16)) {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
+ @vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
+ @vpre@_store_@vsuf@(&op[i], c);
+ }
+ }
+ else if (npy_is_aligned(&ip2[i], 16)) {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
+ @vtype@ b = @vpre@_load_@vsuf@(&ip2[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
+ @vpre@_store_@vsuf@(&op[i], c);
+ }
+ }
+ else {
+ if (ip1 == ip2) {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, a);
+ @vpre@_store_@vsuf@(&op[i], c);
+ }
+ }
+ else {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
+ @vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
+ @vpre@_store_@vsuf@(&op[i], c);
+ }
+ }
+ }
+ LOOP_BLOCKED_END {
+ op[i] = ip1[i] @OP@ ip2[i];
+ }
+}
+
+
+static void
+sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
+{
+ const @vtype@ a = @vpre@_set1_@vsuf@(ip1[0]);
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ op[i] = ip1[0] @OP@ ip2[i];
+ if (npy_is_aligned(&ip2[i], 16)) {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ b = @vpre@_load_@vsuf@(&ip2[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
+ @vpre@_store_@vsuf@(&op[i], c);
+ }
+ }
+ else {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
+ @vpre@_store_@vsuf@(&op[i], c);
+ }
+ }
+ LOOP_BLOCKED_END {
+ op[i] = ip1[0] @OP@ ip2[i];
+ }
+}
+
+
+static void
+sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
+{
+ const @vtype@ b = @vpre@_set1_@vsuf@(ip2[0]);
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ op[i] = ip1[i] @OP@ ip2[0];
+ if (npy_is_aligned(&ip1[i], 16)) {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
+ @vpre@_store_@vsuf@(&op[i], c);
+ }
+ }
+ else {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
+ @vpre@_store_@vsuf@(&op[i], c);
+ }
+ }
+ LOOP_BLOCKED_END {
+ op[i] = ip1[i] @OP@ ip2[0];
+ }
+}
+
+/**end repeat1**/
+
+/**begin repeat1
+ * #kind = equal, not_equal, less, less_equal, greater, greater_equal#
+ * #OP = ==, !=, <, <=, >, >=#
+ * #VOP = cmpeq, cmpneq, cmplt, cmple, cmpgt, cmpge#
+*/
+
+/* sets invalid fpu flag on QNaN for consistency with packed compare */
+static NPY_INLINE int
+sse2_ordered_cmp_@kind@_@TYPE@(const @type@ a, const @type@ b)
+{
+ @type@ tmp;
+ @vtype@ v = @vpre@_@VOP@_@vsufs@(@vpre@_load_@vsufs@(&a),
+ @vpre@_load_@vsufs@(&b));
+ @vpre@_store_@vsufs@(&tmp, v);
+ return sizeof(@type@) == 4 ?
+ (*(npy_uint32 *)&tmp) & 1 : (*(npy_uint64 *)&tmp) & 1;
+}
+
+static void
+sse2_binary_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n)
+{
+ npy_bool * r;
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[i]);
+ }
+ r = &op[i];
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
+ @vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
+ @mtype@ ir = @fanout@[@vpre@_movemask_@vsuf@(c)];
+ /* may be unaligned */
+ memcpy(r, &ir, sizeof(ir));
+ r += sizeof(ir);
+ }
+ LOOP_BLOCKED_END {
+ op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[i]);
+ }
+}
+
+
+static void
+sse2_binary_scalar1_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n)
+{
+ npy_bool * r;
+ @vtype@ a = @vpre@_set1_@vsuf@(ip1[0]);
+ LOOP_BLOCK_ALIGN_VAR(ip2, @type@, 16) {
+ op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[0], ip2[i]);
+ }
+ r = &op[i];
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ b = @vpre@_load_@vsuf@(&ip2[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
+ @mtype@ ir = @fanout@[@vpre@_movemask_@vsuf@(c)];
+ /* may be unaligned */
+ memcpy(r, &ir, sizeof(ir));
+ r += sizeof(ir);
+ }
+ LOOP_BLOCKED_END {
+ op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[0], ip2[i]);
+ }
+}
+
+
+static void
+sse2_binary_scalar2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n)
+{
+ npy_bool * r;
+ @vtype@ b = @vpre@_set1_@vsuf@(ip2[0]);
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[0]);
+ }
+ r = &op[i];
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
+ @vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
+ @mtype@ ir = @fanout@[@vpre@_movemask_@vsuf@(c)];
+ /* may be unaligned */
+ memcpy(r, &ir, sizeof(ir));
+ r += sizeof(ir);
+ }
+ LOOP_BLOCKED_END {
+ op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[0]);
+ }
+}
+/**end repeat1**/
+
+static void
+sse2_sqrt_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n)
+{
+ /* align output to 16 bytes */
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, 16) {
+ op[i] = @scalarf@(ip[i]);
+ }
+ assert(npy_is_aligned(&op[i], 16));
+ if (npy_is_aligned(&ip[i], 16)) {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ d = @vpre@_load_@vsuf@(&ip[i]);
+ @vpre@_store_@vsuf@(&op[i], @vpre@_sqrt_@vsuf@(d));
+ }
+ }
+ else {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ d = @vpre@_loadu_@vsuf@(&ip[i]);
+ @vpre@_store_@vsuf@(&op[i], @vpre@_sqrt_@vsuf@(d));
+ }
+ }
+ LOOP_BLOCKED_END {
+ op[i] = @scalarf@(ip[i]);
+ }
+}
+
+
+static void
+sse2_absolute_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n)
+{
+    /*
+     * -0.0 has only the sign bit set, so andnot(mask, a) computes
+     * a & 0x7FFF... (everything but the sign bit) and thus removes the
+     * sign; this is equivalent to how the compiler implements fabs on amd64
+     */
+ const @vtype@ mask = @vpre@_set1_@vsuf@(-0.@c@);
+
+ /* align output to 16 bytes */
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, 16) {
+ const @type@ tmp = ip[i] > 0 ? ip[i]: -ip[i];
+ /* add 0 to clear -0.0 */
+ op[i] = tmp + 0;
+ }
+ assert(npy_is_aligned(&op[i], 16));
+ if (npy_is_aligned(&ip[i], 16)) {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip[i]);
+ @vpre@_store_@vsuf@(&op[i], @vpre@_andnot_@vsuf@(mask, a));
+ }
+ }
+ else {
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_loadu_@vsuf@(&ip[i]);
+ @vpre@_store_@vsuf@(&op[i], @vpre@_andnot_@vsuf@(mask, a));
+ }
+ }
+ LOOP_BLOCKED_END {
+ const @type@ tmp = ip[i] > 0 ? ip[i]: -ip[i];
+ /* add 0 to clear -0.0 */
+ op[i] = tmp + 0;
+ }
+}
+
+
+/**begin repeat1
+ * #kind = maximum, minimum#
+ * #VOP = max, min#
+ * #OP = >=, <=#
+ **/
+/*
+ * argument order is swapped: a unary reduce takes (ip, op), the reverse
+ * of the (op, ip) order used by the plain unary loops
+ */
+static void
+sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
+{
+ LOOP_BLOCK_ALIGN_VAR(ip, @type@, 16) {
+ *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
+ }
+ assert(npy_is_aligned(&ip[i], 16));
+ if (i + 2 * 16 / sizeof(@type@) <= n) {
+ /* load the first elements */
+ @vtype@ c = @vpre@_load_@vsuf@((@type@*)&ip[i]);
+#ifdef NO_FLOATING_POINT_SUPPORT
+ @vtype@ cnan = @vpre@_cmpneq_@vsuf@(c, c);
+#else
+ /* minps/minpd will set invalid flag if nan is encountered */
+ PyUFunc_clearfperr();
+#endif
+ i += 16 / sizeof(@type@);
+
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ v = @vpre@_load_@vsuf@((@type@*)&ip[i]);
+ c = @vpre@_@VOP@_@vsuf@(c, v);
+#ifdef NO_FLOATING_POINT_SUPPORT
+            /* check for nan; breaking the loop early would slow the non-nan case */
+ cnan = @vpre@_or_@vsuf@(@vpre@_cmpneq_@vsuf@(v, v), cnan);
+ }
+
+ if (@vpre@_movemask_@vsuf@(cnan)) {
+ *op = @nan@;
+ return;
+ }
+#else
+ }
+#endif
+ {
+ @type@ tmp = sse2_horizontal_@VOP@_@vtype@(c);
+ if (PyUFunc_getfperr() & UFUNC_FPE_INVALID)
+ *op = @nan@;
+ else
+ *op = (*op @OP@ tmp || npy_isnan(*op)) ? *op : tmp;
+ }
+ }
+ LOOP_BLOCKED_END {
+ *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
+ }
+}
+/**end repeat1**/
+
+/**end repeat**/
+
+/*
+ *****************************************************************************
+ ** BOOL LOOPS
+ *****************************************************************************
+ */
+
+/**begin repeat
+ * # kind = logical_or, logical_and#
+ * # and = 0, 1#
+ * # op = ||, &&#
+ * # sc = !=, ==#
+ * # vpre = _mm*2#
+ * # vsuf = si128*2#
+ * # vtype = __m128i*2#
+ * # type = npy_bool*2#
+ * # vload = _mm_load_si128*2#
+ * # vloadu = _mm_loadu_si128*2#
+ * # vstore = _mm_store_si128*2#
+ */
+
+/*
+ * convert any nonzero byte to boolean true so that vectorized and normal
+ * operations are consistent; this should not be required if bool is used
+ * correctly everywhere, but you never know
+ */
+#if !@and@
+static NPY_INLINE @vtype@ byte_to_true(@vtype@ v)
+{
+ const @vtype@ zero = @vpre@_setzero_@vsuf@();
+ const @vtype@ truemask = @vpre@_set1_epi8(1 == 1);
+ /* get 0xFF for zeros */
+ @vtype@ tmp = @vpre@_cmpeq_epi8(v, zero);
+ /* filled with 0xFF/0x00, negate and mask to boolean true */
+ return @vpre@_andnot_@vsuf@(tmp, truemask);
+}
+#endif
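+
+/*
+ * Worked example for byte_to_true: an input byte of 0x02 compares
+ * non-equal to zero, so cmpeq yields 0x00 and andnot(0x00, 0x01) == 0x01
+ * (boolean true); a zero input byte yields cmpeq 0xFF, and
+ * andnot(0xFF, 0x01) == 0x00 (boolean false).
+ */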
+
+static void
+sse2_binary_@kind@_BOOL(npy_bool * op, npy_bool * ip1, npy_bool * ip2, npy_intp n)
+{
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ op[i] = ip1[i] @op@ ip2[i];
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vloadu@((@vtype@*)&ip1[i]);
+ @vtype@ b = @vloadu@((@vtype@*)&ip2[i]);
+#if @and@
+ const @vtype@ zero = @vpre@_setzero_@vsuf@();
+        /* get 0xFF for zero bytes of a, 0x00 for non-zeros */
+        @vtype@ tmp = @vpre@_cmpeq_epi8(a, zero);
+        /* andnot negates: 0x00 for a's zeros, 0xFF for non-zeros, & with ip2 */
+        tmp = @vpre@_andnot_@vsuf@(tmp, b);
+#else
+ @vtype@ tmp = @vpre@_or_@vsuf@(a, b);
+#endif
+
+ @vstore@((@vtype@*)&op[i], byte_to_true(tmp));
+ }
+ LOOP_BLOCKED_END {
+ op[i] = (ip1[i] @op@ ip2[i]);
+ }
+}
+
+
+static void
+sse2_reduce_@kind@_BOOL(npy_bool * op, npy_bool * ip, const npy_intp n)
+{
+ const @vtype@ zero = @vpre@_setzero_@vsuf@();
+ LOOP_BLOCK_ALIGN_VAR(ip, npy_bool, 16) {
+ *op = *op @op@ ip[i];
+ if (*op @sc@ 0) {
+ return;
+ }
+ }
+ /* unrolled once to replace a slow movmsk with a fast pmaxb */
+ LOOP_BLOCKED(npy_bool, 32) {
+ @vtype@ v = @vload@((@vtype@*)&ip[i]);
+ @vtype@ v2 = @vload@((@vtype@*)&ip[i + 16]);
+ v = @vpre@_cmpeq_epi8(v, zero);
+ v2 = @vpre@_cmpeq_epi8(v2, zero);
+#if @and@
+ if ((@vpre@_movemask_epi8(@vpre@_max_epu8(v, v2)) != 0)) {
+ *op = 0;
+#else
+ if ((@vpre@_movemask_epi8(@vpre@_min_epu8(v, v2)) != 0xFFFF)) {
+ *op = 1;
+#endif
+ return;
+ }
+ }
+ LOOP_BLOCKED_END {
+ *op = *op @op@ ip[i];
+ if (*op @sc@ 0) {
+ return;
+ }
+ }
+}
+
+/**end repeat**/
+
+/**begin repeat
+ * # kind = absolute, logical_not#
+ * # op = !=, ==#
+ * # not = 0, 1#
+ * # vpre = _mm*2#
+ * # vsuf = si128*2#
+ * # vtype = __m128i*2#
+ * # type = npy_bool*2#
+ * # vloadu = _mm_loadu_si128*2#
+ * # vstore = _mm_store_si128*2#
+ */
+
+static void
+sse2_@kind@_BOOL(@type@ * op, @type@ * ip, const npy_intp n)
+{
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ op[i] = (ip[i] @op@ 0);
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vloadu@((@vtype@*)&ip[i]);
+#if @not@
+ const @vtype@ zero = @vpre@_setzero_@vsuf@();
+ const @vtype@ truemask = @vpre@_set1_epi8(1 == 1);
+ /* equivalent to byte_to_true but can skip the negation */
+ a = @vpre@_cmpeq_epi8(a, zero);
+ a = @vpre@_and_@vsuf@(a, truemask);
+#else
+    /* abs is kind of pointless here, but it doubles as byte_to_true */
+ a = byte_to_true(a);
+#endif
+ @vstore@((@vtype@*)&op[i], a);
+ }
+ LOOP_BLOCKED_END {
+ op[i] = (ip[i] @op@ 0);
+ }
+}
+
+/**end repeat**/
+
+#endif /* HAVE_EMMINTRIN_H */
+
+#endif
diff --git a/numpy/core/src/umath/struct_ufunc_test.c.src b/numpy/core/src/umath/struct_ufunc_test.c.src
new file mode 100644
index 000000000..517925570
--- /dev/null
+++ b/numpy/core/src/umath/struct_ufunc_test.c.src
@@ -0,0 +1,122 @@
+#include "Python.h"
+#include "math.h"
+#include "numpy/ndarraytypes.h"
+#include "numpy/ufuncobject.h"
+#include "numpy/npy_3kcompat.h"
+
+
+/*
+ * struct_ufunc_test.c.src
+ * This is the C code for creating your own
+ * Numpy ufunc for a structured array dtype.
+ *
+ * Details explaining the Python-C API can be found under
+ * 'Extending and Embedding' and 'Python/C API' at
+ * docs.python.org.
+ */
+
+static PyMethodDef StructUfuncTestMethods[] = {
+ {NULL, NULL, 0, NULL}
+};
+
+/* The loop definition must precede the PyMODINIT_FUNC. */
+
+static void add_uint64_triplet(char **args, npy_intp *dimensions,
+ npy_intp* steps, void* data)
+{
+ npy_intp i;
+ npy_intp is1=steps[0];
+ npy_intp is2=steps[1];
+ npy_intp os=steps[2];
+ npy_intp n=dimensions[0];
+ npy_uint64 *x, *y, *z;
+
+ char *i1=args[0];
+ char *i2=args[1];
+ char *op=args[2];
+
+ for (i = 0; i < n; i++) {
+
+ x = (npy_uint64*)i1;
+ y = (npy_uint64*)i2;
+ z = (npy_uint64*)op;
+
+ z[0] = x[0] + y[0];
+ z[1] = x[1] + y[1];
+ z[2] = x[2] + y[2];
+
+ i1 += is1;
+ i2 += is2;
+ op += os;
+ }
+}
+
+#if defined(NPY_PY3K)
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "struct_ufunc_test",
+ NULL,
+ -1,
+ StructUfuncTestMethods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+#endif
+
+#if defined(NPY_PY3K)
+PyMODINIT_FUNC PyInit_struct_ufunc_test(void)
+#else
+PyMODINIT_FUNC initstruct_ufunc_test(void)
+#endif
+{
+ PyObject *m, *add_triplet, *d;
+ PyObject *dtype_dict;
+ PyArray_Descr *dtype;
+ PyArray_Descr *dtypes[3];
+
+#if defined(NPY_PY3K)
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule("struct_ufunc_test", StructUfuncTestMethods);
+#endif
+
+ if (m == NULL) {
+#if defined(NPY_PY3K)
+ return NULL;
+#else
+ return;
+#endif
+ }
+
+ import_array();
+ import_umath();
+
+ add_triplet = PyUFunc_FromFuncAndData(NULL, NULL, NULL, 0, 2, 1,
+ PyUFunc_None, "add_triplet",
+ "add_triplet_docstring", 0);
+
+ dtype_dict = Py_BuildValue("[(s, s), (s, s), (s, s)]",
+ "f0", "u8", "f1", "u8", "f2", "u8");
+ PyArray_DescrConverter(dtype_dict, &dtype);
+ Py_DECREF(dtype_dict);
+
+ dtypes[0] = dtype;
+ dtypes[1] = dtype;
+ dtypes[2] = dtype;
+
+ PyUFunc_RegisterLoopForDescr(add_triplet,
+ dtype,
+ &add_uint64_triplet,
+ dtypes,
+ NULL);
+
+ d = PyModule_GetDict(m);
+
+ PyDict_SetItemString(d, "add_triplet", add_triplet);
+ Py_DECREF(add_triplet);
+#if defined(NPY_PY3K)
+ return m;
+#endif
+}
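+
+/*
+ * Illustrative use from Python (a sketch; assumes the module is built as
+ * numpy.core.struct_ufunc_test):
+ *
+ *     dt = np.dtype([('f0', 'u8'), ('f1', 'u8'), ('f2', 'u8')])
+ *     a = np.zeros(3, dt)
+ *     b = np.zeros(3, dt)
+ *     struct_ufunc_test.add_triplet(a, b)   # adds the three u8 fields
+ */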
diff --git a/numpy/core/src/umath/test_rational.c.src b/numpy/core/src/umath/test_rational.c.src
new file mode 100644
index 000000000..fa3f903b4
--- /dev/null
+++ b/numpy/core/src/umath/test_rational.c.src
@@ -0,0 +1,1389 @@
+/* Fixed size rational numbers exposed to Python */
+
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include <math.h>
+#include <Python.h>
+#include <structmember.h>
+#include <numpy/arrayobject.h>
+#include <numpy/ufuncobject.h>
+#include <numpy/npy_3kcompat.h>
+
+/* Relevant arithmetic exceptions */
+
+/* Uncomment the following line to work around a bug in numpy */
+/* #define ACQUIRE_GIL */
+
+static void
+set_overflow(void) {
+#ifdef ACQUIRE_GIL
+ /* Need to grab the GIL to dodge a bug in numpy */
+ PyGILState_STATE state = PyGILState_Ensure();
+#endif
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_OverflowError,
+ "overflow in rational arithmetic");
+ }
+#ifdef ACQUIRE_GIL
+ PyGILState_Release(state);
+#endif
+}
+
+static void
+set_zero_divide(void) {
+#ifdef ACQUIRE_GIL
+ /* Need to grab the GIL to dodge a bug in numpy */
+ PyGILState_STATE state = PyGILState_Ensure();
+#endif
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ZeroDivisionError,
+ "zero divide in rational arithmetic");
+ }
+#ifdef ACQUIRE_GIL
+ PyGILState_Release(state);
+#endif
+}
+
+/* Integer arithmetic utilities */
+
+static NPY_INLINE npy_int32
+safe_neg(npy_int32 x) {
+    if (x==NPY_MIN_INT32) {
+ set_overflow();
+ }
+ return -x;
+}
+
+static NPY_INLINE npy_int32
+safe_abs32(npy_int32 x) {
+ npy_int32 nx;
+ if (x>=0) {
+ return x;
+ }
+ nx = -x;
+ if (nx<0) {
+ set_overflow();
+ }
+ return nx;
+}
+
+static NPY_INLINE npy_int64
+safe_abs64(npy_int64 x) {
+ npy_int64 nx;
+ if (x>=0) {
+ return x;
+ }
+ nx = -x;
+ if (nx<0) {
+ set_overflow();
+ }
+ return nx;
+}
+
+static NPY_INLINE npy_int64
+gcd(npy_int64 x, npy_int64 y) {
+ x = safe_abs64(x);
+ y = safe_abs64(y);
+ if (x < y) {
+ npy_int64 t = x;
+ x = y;
+ y = t;
+ }
+ while (y) {
+ npy_int64 t;
+ x = x%y;
+ t = x;
+ x = y;
+ y = t;
+ }
+ return x;
+}
+
+static NPY_INLINE npy_int64
+lcm(npy_int64 x, npy_int64 y) {
+ npy_int64 lcm;
+ if (!x || !y) {
+ return 0;
+ }
+ x /= gcd(x,y);
+ lcm = x*y;
+ if (lcm/y!=x) {
+ set_overflow();
+ }
+ return safe_abs64(lcm);
+}
+
+/* Fixed precision rational numbers */
+
+typedef struct {
+ /* numerator */
+ npy_int32 n;
+ /*
+ * denominator minus one: numpy.zeros() uses memset(0) for non-object
+ * types, so need to ensure that rational(0) has all zero bytes
+ */
+ npy_int32 dmm;
+} rational;
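+
+/*
+ * Example encoding: {n = 3, dmm = 1} represents 3/2, and the all-zero
+ * bytes produced by memset(0) decode as {n = 0, dmm = 0}, i.e. 0/1.
+ */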
+
+static NPY_INLINE rational
+make_rational_int(npy_int64 n) {
+ rational r = {(npy_int32)n,0};
+ if (r.n != n) {
+ set_overflow();
+ }
+ return r;
+}
+
+static rational
+make_rational_slow(npy_int64 n_, npy_int64 d_) {
+ rational r = {0};
+ if (!d_) {
+ set_zero_divide();
+ }
+ else {
+ npy_int64 g = gcd(n_,d_);
+ npy_int32 d;
+ n_ /= g;
+ d_ /= g;
+ r.n = (npy_int32)n_;
+ d = (npy_int32)d_;
+ if (r.n!=n_ || d!=d_) {
+ set_overflow();
+ }
+ else {
+ if (d <= 0) {
+ d = -d;
+ r.n = safe_neg(r.n);
+ }
+ r.dmm = d-1;
+ }
+ }
+ return r;
+}
+
+static NPY_INLINE npy_int32
+d(rational r) {
+ return r.dmm+1;
+}
+
+/* Assumes d_ > 0 */
+static rational
+make_rational_fast(npy_int64 n_, npy_int64 d_) {
+ npy_int64 g = gcd(n_,d_);
+ rational r;
+ n_ /= g;
+ d_ /= g;
+ r.n = (npy_int32)n_;
+ r.dmm = (npy_int32)(d_-1);
+ if (r.n!=n_ || r.dmm+1!=d_) {
+ set_overflow();
+ }
+ return r;
+}
+
+static NPY_INLINE rational
+rational_negative(rational r) {
+ rational x;
+ x.n = safe_neg(r.n);
+ x.dmm = r.dmm;
+ return x;
+}
+
+static NPY_INLINE rational
+rational_add(rational x, rational y) {
+    /*
+     * Note that the numerator computation can never overflow npy_int64,
+     * since each term is strictly under 2**62 = 2**64/4 (as d > 0), so
+     * the sum stays below 2**63.
+     */
+ return make_rational_fast((npy_int64)x.n*d(y)+(npy_int64)d(x)*y.n,
+ (npy_int64)d(x)*d(y));
+}
+
+static NPY_INLINE rational
+rational_subtract(rational x, rational y) {
+ /* We're safe from overflow as with + */
+ return make_rational_fast((npy_int64)x.n*d(y)-(npy_int64)d(x)*y.n,
+ (npy_int64)d(x)*d(y));
+}
+
+static NPY_INLINE rational
+rational_multiply(rational x, rational y) {
+ /* We're safe from overflow as with + */
+ return make_rational_fast((npy_int64)x.n*y.n,(npy_int64)d(x)*d(y));
+}
+
+static NPY_INLINE rational
+rational_divide(rational x, rational y) {
+ return make_rational_slow((npy_int64)x.n*d(y),(npy_int64)d(x)*y.n);
+}
+
+static NPY_INLINE npy_int64
+rational_floor(rational x) {
+ /* Always round down */
+ if (x.n>=0) {
+ return x.n/d(x);
+ }
+ /*
+ * This can be done without casting up to 64 bits, but it requires
+ * working out all the sign cases
+ */
+ return -((-(npy_int64)x.n+d(x)-1)/d(x));
+}
+
+static NPY_INLINE npy_int64
+rational_ceil(rational x) {
+ return -rational_floor(rational_negative(x));
+}
+
+static NPY_INLINE rational
+rational_remainder(rational x, rational y) {
+ return rational_subtract(x, rational_multiply(y,make_rational_int(
+ rational_floor(rational_divide(x,y)))));
+}
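+
+/*
+ * Worked example: rational_remainder(7/2, 2) computes
+ * 7/2 - 2*floor((7/2)/2) = 7/2 - 2*floor(7/4) = 7/2 - 2 = 3/2.
+ */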
+
+static NPY_INLINE rational
+rational_abs(rational x) {
+ rational y;
+ y.n = safe_abs32(x.n);
+ y.dmm = x.dmm;
+ return y;
+}
+
+static NPY_INLINE npy_int64
+rational_rint(rational x) {
+    /*
+     * Round towards the nearest integer, moving exact half integers
+     * away from zero (e.g. 1/2 -> 1, -1/2 -> -1)
+     */
+ npy_int32 d_ = d(x);
+ return (2*(npy_int64)x.n+(x.n<0?-d_:d_))/(2*(npy_int64)d_);
+}
+
+static NPY_INLINE int
+rational_sign(rational x) {
+ return x.n<0?-1:x.n==0?0:1;
+}
+
+static NPY_INLINE rational
+rational_inverse(rational x) {
+ rational y = {0};
+ if (!x.n) {
+ set_zero_divide();
+ }
+ else {
+ npy_int32 d_;
+ y.n = d(x);
+ d_ = x.n;
+ if (d_ <= 0) {
+ d_ = safe_neg(d_);
+ y.n = -y.n;
+ }
+ y.dmm = d_-1;
+ }
+ return y;
+}
+
+static NPY_INLINE int
+rational_eq(rational x, rational y) {
+ /*
+ * Since we enforce d > 0, and store fractions in reduced form,
+ * equality is easy.
+ */
+ return x.n==y.n && x.dmm==y.dmm;
+}
+
+static NPY_INLINE int
+rational_ne(rational x, rational y) {
+ return !rational_eq(x,y);
+}
+
+static NPY_INLINE int
+rational_lt(rational x, rational y) {
+ return (npy_int64)x.n*d(y) < (npy_int64)y.n*d(x);
+}
+
+static NPY_INLINE int
+rational_gt(rational x, rational y) {
+ return rational_lt(y,x);
+}
+
+static NPY_INLINE int
+rational_le(rational x, rational y) {
+ return !rational_lt(y,x);
+}
+
+static NPY_INLINE int
+rational_ge(rational x, rational y) {
+ return !rational_lt(x,y);
+}
+
+static NPY_INLINE npy_int32
+rational_int(rational x) {
+ return x.n/d(x);
+}
+
+static NPY_INLINE double
+rational_double(rational x) {
+ return (double)x.n/d(x);
+}
+
+static NPY_INLINE int
+rational_nonzero(rational x) {
+ return x.n!=0;
+}
+
+static int
+scan_rational(const char** s, rational* x) {
+ long n,d;
+ int offset;
+ const char* ss;
+ if (sscanf(*s,"%ld%n",&n,&offset)<=0) {
+ return 0;
+ }
+ ss = *s+offset;
+ if (*ss!='/') {
+ *s = ss;
+ *x = make_rational_int(n);
+ return 1;
+ }
+ ss++;
+ if (sscanf(ss,"%ld%n",&d,&offset)<=0 || d<=0) {
+ return 0;
+ }
+ *s = ss+offset;
+ *x = make_rational_slow(n,d);
+ return 1;
+}
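+
+/*
+ * Examples: "5" scans to 5/1 and advances *s past the digits; "22/7"
+ * scans to the reduced fraction 22/7; "1/0" fails the d<=0 check and
+ * returns 0 to signal a parse error.
+ */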
+
+/* Expose rational to Python as a numpy scalar */
+
+typedef struct {
+ PyObject_HEAD
+ rational r;
+} PyRational;
+
+static PyTypeObject PyRational_Type;
+
+static NPY_INLINE int
+PyRational_Check(PyObject* object) {
+ return PyObject_IsInstance(object,(PyObject*)&PyRational_Type);
+}
+
+static PyObject*
+PyRational_FromRational(rational x) {
+ PyRational* p = (PyRational*)PyRational_Type.tp_alloc(&PyRational_Type,0);
+ if (p) {
+ p->r = x;
+ }
+ return (PyObject*)p;
+}
+
+static PyObject*
+pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
+ Py_ssize_t size;
+ PyObject* x[2];
+ long n[2]={0,1};
+ int i;
+ rational r;
+ if (kwds && PyDict_Size(kwds)) {
+ PyErr_SetString(PyExc_TypeError,
+ "constructor takes no keyword arguments");
+ return 0;
+ }
+ size = PyTuple_GET_SIZE(args);
+ if (size>2) {
+ PyErr_SetString(PyExc_TypeError,
+ "expected rational or numerator and optional denominator");
+ return 0;
+ }
+    /* PyTuple_GET_ITEM does no bounds checking, so guard the reads */
+    x[0] = size>0 ? PyTuple_GET_ITEM(args,0) : 0;
+    x[1] = size>1 ? PyTuple_GET_ITEM(args,1) : 0;
+ if (size==1) {
+ if (PyRational_Check(x[0])) {
+ Py_INCREF(x[0]);
+ return x[0];
+ }
+ else if (PyString_Check(x[0])) {
+ const char* s = PyString_AS_STRING(x[0]);
+ rational x;
+ if (scan_rational(&s,&x)) {
+ const char* p;
+ for (p = s; *p; p++) {
+ if (!isspace(*p)) {
+ goto bad;
+ }
+ }
+ return PyRational_FromRational(x);
+ }
+ bad:
+ PyErr_Format(PyExc_ValueError,
+ "invalid rational literal '%s'",s);
+ return 0;
+ }
+ }
+ for (i=0;i<size;i++) {
+ PyObject* y;
+ int eq;
+ n[i] = PyInt_AsLong(x[i]);
+ if (n[i]==-1 && PyErr_Occurred()) {
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Format(PyExc_TypeError,
+ "expected integer %s, got %s",
+ (i ? "denominator" : "numerator"),
+ x[i]->ob_type->tp_name);
+ }
+ return 0;
+ }
+ /* Check that we had an exact integer */
+ y = PyInt_FromLong(n[i]);
+ if (!y) {
+ return 0;
+ }
+ eq = PyObject_RichCompareBool(x[i],y,Py_EQ);
+ Py_DECREF(y);
+ if (eq<0) {
+ return 0;
+ }
+ if (!eq) {
+ PyErr_Format(PyExc_TypeError,
+ "expected integer %s, got %s",
+ (i ? "denominator" : "numerator"),
+ x[i]->ob_type->tp_name);
+ return 0;
+ }
+ }
+ r = make_rational_slow(n[0],n[1]);
+ if (PyErr_Occurred()) {
+ return 0;
+ }
+ return PyRational_FromRational(r);
+}
+
+/*
+ * Returns Py_NotImplemented on most conversion failures, or raises an
+ * overflow error for ints that are too large
+ */
+#define AS_RATIONAL(dst,object) \
+ { \
+ dst.n = 0; \
+ if (PyRational_Check(object)) { \
+ dst = ((PyRational*)object)->r; \
+ } \
+ else { \
+ PyObject* y_; \
+ int eq_; \
+ long n_ = PyInt_AsLong(object); \
+ if (n_==-1 && PyErr_Occurred()) { \
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) { \
+ PyErr_Clear(); \
+ Py_INCREF(Py_NotImplemented); \
+ return Py_NotImplemented; \
+ } \
+ return 0; \
+ } \
+ y_ = PyInt_FromLong(n_); \
+ if (!y_) { \
+ return 0; \
+ } \
+ eq_ = PyObject_RichCompareBool(object,y_,Py_EQ); \
+ Py_DECREF(y_); \
+ if (eq_<0) { \
+ return 0; \
+ } \
+ if (!eq_) { \
+ Py_INCREF(Py_NotImplemented); \
+ return Py_NotImplemented; \
+ } \
+ dst = make_rational_int(n_); \
+ } \
+ }
+
+static PyObject*
+pyrational_richcompare(PyObject* a, PyObject* b, int op) {
+ rational x, y;
+ int result = 0;
+ AS_RATIONAL(x,a);
+ AS_RATIONAL(y,b);
+ #define OP(py,op) case py: result = rational_##op(x,y); break;
+ switch (op) {
+ OP(Py_LT,lt)
+ OP(Py_LE,le)
+ OP(Py_EQ,eq)
+ OP(Py_NE,ne)
+ OP(Py_GT,gt)
+ OP(Py_GE,ge)
+ };
+ #undef OP
+ return PyBool_FromLong(result);
+}
+
+static PyObject*
+pyrational_repr(PyObject* self) {
+ rational x = ((PyRational*)self)->r;
+ if (d(x)!=1) {
+ return PyUString_FromFormat(
+ "rational(%ld,%ld)",(long)x.n,(long)d(x));
+ }
+ else {
+ return PyUString_FromFormat(
+ "rational(%ld)",(long)x.n);
+ }
+}
+
+static PyObject*
+pyrational_str(PyObject* self) {
+ rational x = ((PyRational*)self)->r;
+ if (d(x)!=1) {
+ return PyString_FromFormat(
+ "%ld/%ld",(long)x.n,(long)d(x));
+ }
+ else {
+ return PyString_FromFormat(
+ "%ld",(long)x.n);
+ }
+}
+
+static long
+pyrational_hash(PyObject* self) {
+ rational x = ((PyRational*)self)->r;
+ /* Use a fairly weak hash as Python expects */
+ long h = 131071*x.n+524287*x.dmm;
+ /* Never return the special error value -1 */
+ return h==-1?2:h;
+}
+
+#define RATIONAL_BINOP_2(name,exp) \
+ static PyObject* \
+ pyrational_##name(PyObject* a, PyObject* b) { \
+ rational x, y, z; \
+ AS_RATIONAL(x,a); \
+ AS_RATIONAL(y,b); \
+ z = exp; \
+ if (PyErr_Occurred()) { \
+ return 0; \
+ } \
+ return PyRational_FromRational(z); \
+ }
+#define RATIONAL_BINOP(name) RATIONAL_BINOP_2(name,rational_##name(x,y))
+RATIONAL_BINOP(add)
+RATIONAL_BINOP(subtract)
+RATIONAL_BINOP(multiply)
+RATIONAL_BINOP(divide)
+RATIONAL_BINOP(remainder)
+RATIONAL_BINOP_2(floor_divide,
+ make_rational_int(rational_floor(rational_divide(x,y))))
+
+#define RATIONAL_UNOP(name,type,exp,convert) \
+ static PyObject* \
+ pyrational_##name(PyObject* self) { \
+ rational x = ((PyRational*)self)->r; \
+ type y = exp; \
+ if (PyErr_Occurred()) { \
+ return 0; \
+ } \
+ return convert(y); \
+ }
+RATIONAL_UNOP(negative,rational,rational_negative(x),PyRational_FromRational)
+RATIONAL_UNOP(absolute,rational,rational_abs(x),PyRational_FromRational)
+RATIONAL_UNOP(int,long,rational_int(x),PyInt_FromLong)
+RATIONAL_UNOP(float,double,rational_double(x),PyFloat_FromDouble)
+
+static PyObject*
+pyrational_positive(PyObject* self) {
+ Py_INCREF(self);
+ return self;
+}
+
+static int
+pyrational_nonzero(PyObject* self) {
+ rational x = ((PyRational*)self)->r;
+ return rational_nonzero(x);
+}
+
+static PyNumberMethods pyrational_as_number = {
+ pyrational_add, /* nb_add */
+ pyrational_subtract, /* nb_subtract */
+ pyrational_multiply, /* nb_multiply */
+ pyrational_divide, /* nb_divide */
+ pyrational_remainder, /* nb_remainder */
+ 0, /* nb_divmod */
+ 0, /* nb_power */
+ pyrational_negative, /* nb_negative */
+ pyrational_positive, /* nb_positive */
+ pyrational_absolute, /* nb_absolute */
+ pyrational_nonzero, /* nb_nonzero */
+ 0, /* nb_invert */
+ 0, /* nb_lshift */
+ 0, /* nb_rshift */
+ 0, /* nb_and */
+ 0, /* nb_xor */
+ 0, /* nb_or */
+ 0, /* nb_coerce */
+ pyrational_int, /* nb_int */
+ pyrational_int, /* nb_long */
+ pyrational_float, /* nb_float */
+ 0, /* nb_oct */
+ 0, /* nb_hex */
+
+ 0, /* nb_inplace_add */
+ 0, /* nb_inplace_subtract */
+ 0, /* nb_inplace_multiply */
+ 0, /* nb_inplace_divide */
+ 0, /* nb_inplace_remainder */
+ 0, /* nb_inplace_power */
+ 0, /* nb_inplace_lshift */
+ 0, /* nb_inplace_rshift */
+ 0, /* nb_inplace_and */
+ 0, /* nb_inplace_xor */
+ 0, /* nb_inplace_or */
+
+ pyrational_floor_divide, /* nb_floor_divide */
+ pyrational_divide, /* nb_true_divide */
+ 0, /* nb_inplace_floor_divide */
+ 0, /* nb_inplace_true_divide */
+ 0, /* nb_index */
+};
+
+static PyObject*
+pyrational_n(PyObject* self, void* closure) {
+ return PyInt_FromLong(((PyRational*)self)->r.n);
+}
+
+static PyObject*
+pyrational_d(PyObject* self, void* closure) {
+ return PyInt_FromLong(d(((PyRational*)self)->r));
+}
+
+static PyGetSetDef pyrational_getset[] = {
+ {(char*)"n",pyrational_n,0,(char*)"numerator",0},
+ {(char*)"d",pyrational_d,0,(char*)"denominator",0},
+ {0} /* sentinel */
+};
+
+static PyTypeObject PyRational_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
+ PyObject_HEAD_INIT(NULL)
+ 0, /* ob_size */
+#endif
+ "rational", /* tp_name */
+ sizeof(PyRational), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ 0, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
+#endif
+ pyrational_repr, /* tp_repr */
+ &pyrational_as_number, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ pyrational_hash, /* tp_hash */
+ 0, /* tp_call */
+ pyrational_str, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+ "Fixed precision rational numbers", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ pyrational_richcompare, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ pyrational_getset, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ pyrational_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
+ 0, /* tp_version_tag */
+};
+
+/* Numpy support */
+
+static PyObject*
+npyrational_getitem(void* data, void* arr) {
+ rational r;
+ memcpy(&r,data,sizeof(rational));
+ return PyRational_FromRational(r);
+}
+
+static int
+npyrational_setitem(PyObject* item, void* data, void* arr) {
+ rational r;
+ if (PyRational_Check(item)) {
+ r = ((PyRational*)item)->r;
+ }
+ else {
+ long n = PyInt_AsLong(item);
+ PyObject* y;
+ int eq;
+ if (n==-1 && PyErr_Occurred()) {
+ return -1;
+ }
+ y = PyInt_FromLong(n);
+ if (!y) {
+ return -1;
+ }
+ eq = PyObject_RichCompareBool(item,y,Py_EQ);
+ Py_DECREF(y);
+ if (eq<0) {
+ return -1;
+ }
+ if (!eq) {
+ PyErr_Format(PyExc_TypeError,
+ "expected rational, got %s", item->ob_type->tp_name);
+ return -1;
+ }
+ r = make_rational_int(n);
+ }
+ memcpy(data,&r,sizeof(rational));
+ return 0;
+}
+
+static NPY_INLINE void
+byteswap(npy_int32* x) {
+ char* p = (char*)x;
+ size_t i;
+ for (i = 0; i < sizeof(*x)/2; i++) {
+ size_t j = sizeof(*x)-1-i;
+ char t = p[i];
+ p[i] = p[j];
+ p[j] = t;
+ }
+}
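+
+/*
+ * Example: byteswap turns 0x01020304 into 0x04030201 by swapping bytes
+ * 0 <-> 3 and 1 <-> 2.
+ */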
+
+static void
+npyrational_copyswapn(void* dst_, npy_intp dstride, void* src_,
+ npy_intp sstride, npy_intp n, int swap, void* arr) {
+ char *dst = (char*)dst_, *src = (char*)src_;
+ npy_intp i;
+ if (!src) {
+ return;
+ }
+ if (swap) {
+ for (i = 0; i < n; i++) {
+ rational* r = (rational*)(dst+dstride*i);
+ memcpy(r,src+sstride*i,sizeof(rational));
+ byteswap(&r->n);
+ byteswap(&r->dmm);
+ }
+ }
+ else if (dstride == sizeof(rational) && sstride == sizeof(rational)) {
+ memcpy(dst, src, n*sizeof(rational));
+ }
+ else {
+ for (i = 0; i < n; i++) {
+ memcpy(dst + dstride*i, src + sstride*i, sizeof(rational));
+ }
+ }
+}
+
+static void
+npyrational_copyswap(void* dst, void* src, int swap, void* arr) {
+ rational* r;
+ if (!src) {
+ return;
+ }
+ r = (rational*)dst;
+ memcpy(r,src,sizeof(rational));
+ if (swap) {
+ byteswap(&r->n);
+ byteswap(&r->dmm);
+ }
+}
+
+static int
+npyrational_compare(const void* d0, const void* d1, void* arr) {
+ rational x = *(rational*)d0,
+ y = *(rational*)d1;
+ return rational_lt(x,y)?-1:rational_eq(x,y)?0:1;
+}
+
+#define FIND_EXTREME(name,op) \
+ static int \
+ npyrational_##name(void* data_, npy_intp n, \
+ npy_intp* max_ind, void* arr) { \
+ const rational* data; \
+ npy_intp best_i; \
+ rational best_r; \
+ npy_intp i; \
+ if (!n) { \
+ return 0; \
+ } \
+ data = (rational*)data_; \
+ best_i = 0; \
+ best_r = data[0]; \
+ for (i = 1; i < n; i++) { \
+ if (rational_##op(data[i],best_r)) { \
+ best_i = i; \
+ best_r = data[i]; \
+ } \
+ } \
+ *max_ind = best_i; \
+ return 0; \
+ }
+FIND_EXTREME(argmin,lt)
+FIND_EXTREME(argmax,gt)
+
+static void
+npyrational_dot(void* ip0_, npy_intp is0, void* ip1_, npy_intp is1,
+ void* op, npy_intp n, void* arr) {
+ rational r = {0};
+ const char *ip0 = (char*)ip0_, *ip1 = (char*)ip1_;
+ npy_intp i;
+ for (i = 0; i < n; i++) {
+ r = rational_add(r,rational_multiply(*(rational*)ip0,*(rational*)ip1));
+ ip0 += is0;
+ ip1 += is1;
+ }
+ *(rational*)op = r;
+}
+
+static npy_bool
+npyrational_nonzero(void* data, void* arr) {
+ rational r;
+ memcpy(&r,data,sizeof(r));
+ return rational_nonzero(r)?NPY_TRUE:NPY_FALSE;
+}
+
+static int
+npyrational_fill(void* data_, npy_intp length, void* arr) {
+ rational* data = (rational*)data_;
+ rational delta = rational_subtract(data[1],data[0]);
+ rational r = data[1];
+ npy_intp i;
+ for (i = 2; i < length; i++) {
+ r = rational_add(r,delta);
+ data[i] = r;
+ }
+ return 0;
+}
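+
+/*
+ * Example: a buffer seeded with {1/2, 1} fills as {1/2, 1, 3/2, 2, ...};
+ * the first two elements define the constant step delta = 1/2.
+ */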
+
+static int
+npyrational_fillwithscalar(void* buffer_, npy_intp length,
+ void* value, void* arr) {
+ rational r = *(rational*)value;
+ rational* buffer = (rational*)buffer_;
+ npy_intp i;
+ for (i = 0; i < length; i++) {
+ buffer[i] = r;
+ }
+ return 0;
+}
+
+static PyArray_ArrFuncs npyrational_arrfuncs;
+
+typedef struct { char c; rational r; } align_test;
+
+PyArray_Descr npyrational_descr = {
+ PyObject_HEAD_INIT(0)
+ &PyRational_Type, /* typeobj */
+ 'V', /* kind */
+ 'r', /* type */
+ '=', /* byteorder */
+ /*
+ * For now, we need NPY_NEEDS_PYAPI in order to make numpy detect our
+ * exceptions. This isn't technically necessary,
+ * since we're careful about thread safety, and hopefully future
+ * versions of numpy will recognize that.
+ */
+ NPY_NEEDS_PYAPI | NPY_USE_GETITEM | NPY_USE_SETITEM, /* hasobject */
+ 0, /* type_num */
+ sizeof(rational), /* elsize */
+ offsetof(align_test,r), /* alignment */
+ 0, /* subarray */
+ 0, /* fields */
+ 0, /* names */
+ &npyrational_arrfuncs, /* f */
+};
+
+#define DEFINE_CAST(From,To,statement) \
+ static void \
+ npycast_##From##_##To(void* from_, void* to_, npy_intp n, \
+ void* fromarr, void* toarr) { \
+ const From* from = (From*)from_; \
+ To* to = (To*)to_; \
+ npy_intp i; \
+ for (i = 0; i < n; i++) { \
+ From x = from[i]; \
+ statement \
+ to[i] = y; \
+ } \
+ }
+#define DEFINE_INT_CAST(bits) \
+ DEFINE_CAST(npy_int##bits,rational,rational y = make_rational_int(x);) \
+ DEFINE_CAST(rational,npy_int##bits,npy_int32 z = rational_int(x); \
+ npy_int##bits y = z; if (y != z) set_overflow();)
+DEFINE_INT_CAST(8)
+DEFINE_INT_CAST(16)
+DEFINE_INT_CAST(32)
+DEFINE_INT_CAST(64)
+DEFINE_CAST(rational,float,double y = rational_double(x);)
+DEFINE_CAST(rational,double,double y = rational_double(x);)
+DEFINE_CAST(npy_bool,rational,rational y = make_rational_int(x);)
+DEFINE_CAST(rational,npy_bool,npy_bool y = rational_nonzero(x);)
+
+#define BINARY_UFUNC(name,intype0,intype1,outtype,exp) \
+ void name(char** args, npy_intp* dimensions, \
+ npy_intp* steps, void* data) { \
+ npy_intp is0 = steps[0], is1 = steps[1], \
+ os = steps[2], n = *dimensions; \
+ char *i0 = args[0], *i1 = args[1], *o = args[2]; \
+ int k; \
+ for (k = 0; k < n; k++) { \
+ intype0 x = *(intype0*)i0; \
+ intype1 y = *(intype1*)i1; \
+ *(outtype*)o = exp; \
+ i0 += is0; i1 += is1; o += os; \
+ } \
+ }
+#define RATIONAL_BINARY_UFUNC(name,type,exp) \
+ BINARY_UFUNC(rational_ufunc_##name,rational,rational,type,exp)
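+/*
+ * For example, RATIONAL_BINARY_UFUNC(add,rational,rational_add(x,y)) below
+ * expands to rational_ufunc_add(), a loop that walks two strided rational
+ * inputs and stores rational_add(x,y) through the output stride.
+ */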
+RATIONAL_BINARY_UFUNC(add,rational,rational_add(x,y))
+RATIONAL_BINARY_UFUNC(subtract,rational,rational_subtract(x,y))
+RATIONAL_BINARY_UFUNC(multiply,rational,rational_multiply(x,y))
+RATIONAL_BINARY_UFUNC(divide,rational,rational_divide(x,y))
+RATIONAL_BINARY_UFUNC(remainder,rational,rational_remainder(x,y))
+RATIONAL_BINARY_UFUNC(floor_divide,rational,
+ make_rational_int(rational_floor(rational_divide(x,y))))
+PyUFuncGenericFunction rational_ufunc_true_divide = rational_ufunc_divide;
+RATIONAL_BINARY_UFUNC(minimum,rational,rational_lt(x,y)?x:y)
+RATIONAL_BINARY_UFUNC(maximum,rational,rational_lt(x,y)?y:x)
+RATIONAL_BINARY_UFUNC(equal,npy_bool,rational_eq(x,y))
+RATIONAL_BINARY_UFUNC(not_equal,npy_bool,rational_ne(x,y))
+RATIONAL_BINARY_UFUNC(less,npy_bool,rational_lt(x,y))
+RATIONAL_BINARY_UFUNC(greater,npy_bool,rational_gt(x,y))
+RATIONAL_BINARY_UFUNC(less_equal,npy_bool,rational_le(x,y))
+RATIONAL_BINARY_UFUNC(greater_equal,npy_bool,rational_ge(x,y))
+
+BINARY_UFUNC(gcd_ufunc,npy_int64,npy_int64,npy_int64,gcd(x,y))
+BINARY_UFUNC(lcm_ufunc,npy_int64,npy_int64,npy_int64,lcm(x,y))
+
+#define UNARY_UFUNC(name,type,exp) \
+ void rational_ufunc_##name(char** args, npy_intp* dimensions, \
+ npy_intp* steps, void* data) { \
+ npy_intp is = steps[0], os = steps[1], n = *dimensions; \
+ char *i = args[0], *o = args[1]; \
+ int k; \
+ for (k = 0; k < n; k++) { \
+ rational x = *(rational*)i; \
+ *(type*)o = exp; \
+ i += is; o += os; \
+ } \
+ }
+UNARY_UFUNC(negative,rational,rational_negative(x))
+UNARY_UFUNC(absolute,rational,rational_abs(x))
+UNARY_UFUNC(floor,rational,make_rational_int(rational_floor(x)))
+UNARY_UFUNC(ceil,rational,make_rational_int(rational_ceil(x)))
+UNARY_UFUNC(trunc,rational,make_rational_int(x.n/d(x)))
+UNARY_UFUNC(square,rational,rational_multiply(x,x))
+UNARY_UFUNC(rint,rational,make_rational_int(rational_rint(x)))
+UNARY_UFUNC(sign,rational,make_rational_int(rational_sign(x)))
+UNARY_UFUNC(reciprocal,rational,rational_inverse(x))
+UNARY_UFUNC(numerator,npy_int64,x.n)
+UNARY_UFUNC(denominator,npy_int64,d(x))
+
+static NPY_INLINE void
+rational_matrix_multiply(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+ /* pointers to data for input and output arrays */
+ char *ip1 = args[0];
+ char *ip2 = args[1];
+ char *op = args[2];
+
+ /* lengths of core dimensions */
+ npy_intp dm = dimensions[0];
+ npy_intp dn = dimensions[1];
+ npy_intp dp = dimensions[2];
+
+ /* striding over core dimensions */
+ npy_intp is1_m = steps[0];
+ npy_intp is1_n = steps[1];
+ npy_intp is2_n = steps[2];
+ npy_intp is2_p = steps[3];
+ npy_intp os_m = steps[4];
+ npy_intp os_p = steps[5];
+
+ /* core dimensions counters */
+ npy_intp m, p;
+
+ /* calculate dot product for each row/column vector pair */
+ for (m = 0; m < dm; m++) {
+ for (p = 0; p < dp; p++) {
+ npyrational_dot(ip1, is1_n, ip2, is2_n, op, dn, NULL);
+
+ /* advance to next column of 2nd input array and output array */
+ ip2 += is2_p;
+ op += os_p;
+ }
+
+ /* reset to first column of 2nd input array and output array */
+ ip2 -= is2_p * p;
+ op -= os_p * p;
+
+ /* advance to next row of 1st input array and output array */
+ ip1 += is1_m;
+ op += os_m;
+ }
+}
+
+
+static void
+rational_gufunc_matrix_multiply(char **args, npy_intp *dimensions,
+ npy_intp *steps, void *NPY_UNUSED(func))
+{
+ /* outer dimensions counter */
+ npy_intp N_;
+
+ /* length of flattened outer dimensions */
+ npy_intp dN = dimensions[0];
+
+ /* striding over flattened outer dimensions for input and output arrays */
+ npy_intp s0 = steps[0];
+ npy_intp s1 = steps[1];
+ npy_intp s2 = steps[2];
+
+ /*
+ * loop through outer dimensions, performing matrix multiply on
+ * core dimensions for each loop
+ */
+ for (N_ = 0; N_ < dN; N_++, args[0] += s0, args[1] += s1, args[2] += s2) {
+ rational_matrix_multiply(args, dimensions+1, steps+3);
+ }
+}
+
+
+static void
+rational_ufunc_test_add(char** args, npy_intp* dimensions,
+ npy_intp* steps, void* data) {
+ npy_intp is0 = steps[0], is1 = steps[1], os = steps[2], n = *dimensions;
+ char *i0 = args[0], *i1 = args[1], *o = args[2];
+ int k;
+ for (k = 0; k < n; k++) {
+ npy_int64 x = *(npy_int64*)i0;
+ npy_int64 y = *(npy_int64*)i1;
+ *(rational*)o = rational_add(make_rational_fast(x, 1),
+ make_rational_fast(y, 1));
+ i0 += is0; i1 += is1; o += os;
+ }
+}
+
+
+static void
+rational_ufunc_test_add_rationals(char** args, npy_intp* dimensions,
+ npy_intp* steps, void* data) {
+ npy_intp is0 = steps[0], is1 = steps[1], os = steps[2], n = *dimensions;
+ char *i0 = args[0], *i1 = args[1], *o = args[2];
+ int k;
+ for (k = 0; k < n; k++) {
+ rational x = *(rational*)i0;
+ rational y = *(rational*)i1;
+ *(rational*)o = rational_add(x, y);
+ i0 += is0; i1 += is1; o += os;
+ }
+}
+
+
+PyMethodDef module_methods[] = {
+ {0} /* sentinel */
+};
+
+#if defined(NPY_PY3K)
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "test_rational",
+ NULL,
+ -1,
+ module_methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+#endif
+
+#if defined(NPY_PY3K)
+#define RETVAL m
+PyMODINIT_FUNC PyInit_test_rational(void) {
+#else
+#define RETVAL
+PyMODINIT_FUNC inittest_rational(void) {
+#endif
+
+ PyObject *m = NULL;
+ PyObject* numpy_str;
+ PyObject* numpy;
+ int npy_rational;
+
+ import_array();
+ if (PyErr_Occurred()) {
+ goto fail;
+ }
+ import_umath();
+ if (PyErr_Occurred()) {
+ goto fail;
+ }
+ numpy_str = PyUString_FromString("numpy");
+ if (!numpy_str) {
+ goto fail;
+ }
+ numpy = PyImport_Import(numpy_str);
+ Py_DECREF(numpy_str);
+ if (!numpy) {
+ goto fail;
+ }
+
+ /* Can't set this until we import numpy */
+ PyRational_Type.tp_base = &PyGenericArrType_Type;
+
+ /* Initialize rational type object */
+ if (PyType_Ready(&PyRational_Type) < 0) {
+ goto fail;
+ }
+
+ /* Initialize rational descriptor */
+ PyArray_InitArrFuncs(&npyrational_arrfuncs);
+ npyrational_arrfuncs.getitem = npyrational_getitem;
+ npyrational_arrfuncs.setitem = npyrational_setitem;
+ npyrational_arrfuncs.copyswapn = npyrational_copyswapn;
+ npyrational_arrfuncs.copyswap = npyrational_copyswap;
+ npyrational_arrfuncs.compare = npyrational_compare;
+ npyrational_arrfuncs.argmin = npyrational_argmin;
+ npyrational_arrfuncs.argmax = npyrational_argmax;
+ npyrational_arrfuncs.dotfunc = npyrational_dot;
+ npyrational_arrfuncs.nonzero = npyrational_nonzero;
+ npyrational_arrfuncs.fill = npyrational_fill;
+ npyrational_arrfuncs.fillwithscalar = npyrational_fillwithscalar;
+ /* Left undefined: scanfunc, fromstr, sort, argsort */
+ Py_TYPE(&npyrational_descr) = &PyArrayDescr_Type;
+ npy_rational = PyArray_RegisterDataType(&npyrational_descr);
+ if (npy_rational<0) {
+ goto fail;
+ }
+
+ /* Support dtype(rational) syntax */
+ if (PyDict_SetItemString(PyRational_Type.tp_dict, "dtype",
+ (PyObject*)&npyrational_descr) < 0) {
+ goto fail;
+ }
+
+ /* Register casts to and from rational */
+ #define REGISTER_CAST(From,To,from_descr,to_typenum,safe) { \
+ PyArray_Descr* from_descr_##From##_##To = (from_descr); \
+ if (PyArray_RegisterCastFunc(from_descr_##From##_##To, \
+ (to_typenum), \
+ npycast_##From##_##To) < 0) { \
+ goto fail; \
+ } \
+ if (safe && PyArray_RegisterCanCast(from_descr_##From##_##To, \
+ (to_typenum), \
+ NPY_NOSCALAR) < 0) { \
+ goto fail; \
+ } \
+ }
+ #define REGISTER_INT_CASTS(bits) \
+ REGISTER_CAST(npy_int##bits, rational, \
+ PyArray_DescrFromType(NPY_INT##bits), npy_rational, 1) \
+ REGISTER_CAST(rational, npy_int##bits, &npyrational_descr, \
+ NPY_INT##bits, 0)
+ REGISTER_INT_CASTS(8)
+ REGISTER_INT_CASTS(16)
+ REGISTER_INT_CASTS(32)
+ REGISTER_INT_CASTS(64)
+ REGISTER_CAST(rational,float,&npyrational_descr,NPY_FLOAT,0)
+ REGISTER_CAST(rational,double,&npyrational_descr,NPY_DOUBLE,1)
+ REGISTER_CAST(npy_bool,rational, PyArray_DescrFromType(NPY_BOOL),
+ npy_rational,1)
+ REGISTER_CAST(rational,npy_bool,&npyrational_descr,NPY_BOOL,0)
+
+ /* Register ufuncs */
+ #define REGISTER_UFUNC(name,...) { \
+ PyUFuncObject* ufunc = \
+ (PyUFuncObject*)PyObject_GetAttrString(numpy, #name); \
+ int _types[] = __VA_ARGS__; \
+ if (!ufunc) { \
+ goto fail; \
+ } \
+ if (sizeof(_types)/sizeof(int)!=ufunc->nargs) { \
+ PyErr_Format(PyExc_AssertionError, \
+ "ufunc %s takes %d arguments, our loop takes %ld", \
+                         #name, ufunc->nargs, (long)(sizeof(_types)/sizeof(int))); \
+ goto fail; \
+ } \
+ if (PyUFunc_RegisterLoopForType((PyUFuncObject*)ufunc, npy_rational, \
+ rational_ufunc_##name, _types, 0) < 0) { \
+ goto fail; \
+ } \
+ }
+ #define REGISTER_UFUNC_BINARY_RATIONAL(name) \
+ REGISTER_UFUNC(name, {npy_rational, npy_rational, npy_rational})
+ #define REGISTER_UFUNC_BINARY_COMPARE(name) \
+ REGISTER_UFUNC(name, {npy_rational, npy_rational, NPY_BOOL})
+ #define REGISTER_UFUNC_UNARY(name) \
+ REGISTER_UFUNC(name, {npy_rational, npy_rational})
+ /* Binary */
+ REGISTER_UFUNC_BINARY_RATIONAL(add)
+ REGISTER_UFUNC_BINARY_RATIONAL(subtract)
+ REGISTER_UFUNC_BINARY_RATIONAL(multiply)
+ REGISTER_UFUNC_BINARY_RATIONAL(divide)
+ REGISTER_UFUNC_BINARY_RATIONAL(remainder)
+ REGISTER_UFUNC_BINARY_RATIONAL(true_divide)
+ REGISTER_UFUNC_BINARY_RATIONAL(floor_divide)
+ REGISTER_UFUNC_BINARY_RATIONAL(minimum)
+ REGISTER_UFUNC_BINARY_RATIONAL(maximum)
+ /* Comparisons */
+ REGISTER_UFUNC_BINARY_COMPARE(equal)
+ REGISTER_UFUNC_BINARY_COMPARE(not_equal)
+ REGISTER_UFUNC_BINARY_COMPARE(less)
+ REGISTER_UFUNC_BINARY_COMPARE(greater)
+ REGISTER_UFUNC_BINARY_COMPARE(less_equal)
+ REGISTER_UFUNC_BINARY_COMPARE(greater_equal)
+ /* Unary */
+ REGISTER_UFUNC_UNARY(negative)
+ REGISTER_UFUNC_UNARY(absolute)
+ REGISTER_UFUNC_UNARY(floor)
+ REGISTER_UFUNC_UNARY(ceil)
+ REGISTER_UFUNC_UNARY(trunc)
+ REGISTER_UFUNC_UNARY(rint)
+ REGISTER_UFUNC_UNARY(square)
+ REGISTER_UFUNC_UNARY(reciprocal)
+ REGISTER_UFUNC_UNARY(sign)
+
+ /* Create module */
+#if defined(NPY_PY3K)
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule("test_rational", module_methods);
+#endif
+
+ if (!m) {
+ goto fail;
+ }
+
+ /* Add rational type */
+ Py_INCREF(&PyRational_Type);
+ PyModule_AddObject(m,"rational",(PyObject*)&PyRational_Type);
+
+ /* Create matrix multiply generalized ufunc */
+ {
+ int types2[3] = {npy_rational,npy_rational,npy_rational};
+ PyObject* gufunc = PyUFunc_FromFuncAndDataAndSignature(0,0,0,0,2,1,
+ PyUFunc_None,(char*)"matrix_multiply",
+ (char*)"return result of multiplying two matrices of rationals",
+ 0,"(m,n),(n,p)->(m,p)");
+ if (!gufunc) {
+ goto fail;
+ }
+ if (PyUFunc_RegisterLoopForType((PyUFuncObject*)gufunc, npy_rational,
+ rational_gufunc_matrix_multiply, types2, 0) < 0) {
+ goto fail;
+ }
+ PyModule_AddObject(m,"matrix_multiply",(PyObject*)gufunc);
+ }
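A generalized ufunc loop sees extra entries in dimensions and steps:
dimensions[0] is the outer (broadcast) count, dimensions[1..] hold the
unique core sizes in signature order, and the core strides follow the
nop operand strides in steps. A skeleton for the "(m,n),(n,p)->(m,p)"
signature above (illustrative only, not the loop from this file):

    static void
    gufunc_matmul_skeleton(char **args, npy_intp *dimensions,
                           npy_intp *steps, void *data)
    {
        npy_intp outer, N = dimensions[0];               /* broadcast count */
        npy_intp m = dimensions[1], n = dimensions[2], p = dimensions[3];
        char *a = args[0], *b = args[1], *c = args[2];
        /* steps[0..2]: outer strides of a, b, c;
         * steps[3..4]: a's strides over (m, n);
         * steps[5..6]: b's strides over (n, p);
         * steps[7..8]: c's strides over (m, p). */
        for (outer = 0; outer < N;
                 outer++, a += steps[0], b += steps[1], c += steps[2]) {
            /* ... m*p inner products of length n, elided ... */
        }
    }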
+
+ /* Create test ufunc with built in input types and rational output type */
+ {
+ int types3[3] = {NPY_INT64,NPY_INT64,npy_rational};
+
+ PyObject* ufunc = PyUFunc_FromFuncAndData(0,0,0,0,2,1,
+ PyUFunc_None,(char*)"test_add",
+ (char*)"add two matrices of int64 and return rational matrix",0);
+ if (!ufunc) {
+ goto fail;
+ }
+ if (PyUFunc_RegisterLoopForType((PyUFuncObject*)ufunc, npy_rational,
+ rational_ufunc_test_add, types3, 0) < 0) {
+ goto fail;
+ }
+ PyModule_AddObject(m,"test_add",(PyObject*)ufunc);
+ }
+
+ /* Create test ufunc with rational types using RegisterLoopForDescr */
+ {
+ PyObject* ufunc = PyUFunc_FromFuncAndData(0,0,0,0,2,1,
+ PyUFunc_None,(char*)"test_add_rationals",
+ (char*)"add two matrices of rationals and return rational matrix",0);
+ PyArray_Descr* types[3] = {&npyrational_descr,
+ &npyrational_descr,
+ &npyrational_descr};
+
+ if (!ufunc) {
+ goto fail;
+ }
+ if (PyUFunc_RegisterLoopForDescr((PyUFuncObject*)ufunc, &npyrational_descr,
+ rational_ufunc_test_add_rationals, types, 0) < 0) {
+ goto fail;
+ }
+ PyModule_AddObject(m,"test_add_rationals",(PyObject*)ufunc);
+ }
+
+ /* Create numerator and denominator ufuncs */
+ #define NEW_UNARY_UFUNC(name,type,doc) { \
+ int types[2] = {npy_rational,type}; \
+ PyObject* ufunc = PyUFunc_FromFuncAndData(0,0,0,0,1,1, \
+ PyUFunc_None,(char*)#name,(char*)doc,0); \
+ if (!ufunc) { \
+ goto fail; \
+ } \
+ if (PyUFunc_RegisterLoopForType((PyUFuncObject*)ufunc, \
+ npy_rational,rational_ufunc_##name,types,0)<0) { \
+ goto fail; \
+ } \
+ PyModule_AddObject(m,#name,(PyObject*)ufunc); \
+ }
+ NEW_UNARY_UFUNC(numerator,NPY_INT64,"rational number numerator");
+ NEW_UNARY_UFUNC(denominator,NPY_INT64,"rational number denominator");
+
+ /* Create gcd and lcm ufuncs */
+ #define GCD_LCM_UFUNC(name,type,doc) { \
+ static const PyUFuncGenericFunction func[1] = {name##_ufunc}; \
+ static const char types[3] = {type,type,type}; \
+ static void* data[1] = {0}; \
+ PyObject* ufunc = PyUFunc_FromFuncAndData( \
+ (PyUFuncGenericFunction*)func, data,(char*)types, \
+ 1,2,1,PyUFunc_One,(char*)#name,(char*)doc,0); \
+ if (!ufunc) { \
+ goto fail; \
+ } \
+ PyModule_AddObject(m,#name,(PyObject*)ufunc); \
+ }
+    GCD_LCM_UFUNC(gcd,NPY_INT64,"greatest common divisor of two integers");
+ GCD_LCM_UFUNC(lcm,NPY_INT64,"least common multiple of two integers");
+
+ return RETVAL;
+
+fail:
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "cannot load test_rational module.");
+ }
+#if defined(NPY_PY3K)
+ if (m) {
+ Py_DECREF(m);
+ m = NULL;
+ }
+#endif
+ return RETVAL;
+}
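RETVAL and moduledef above follow NumPy's usual dual Python 2/3 module
init pattern; their definitions are assumed to appear earlier in the
file, roughly:

    #if defined(NPY_PY3K)
    #define RETVAL m
    static struct PyModuleDef moduledef = {
        PyModuleDef_HEAD_INIT,
        "test_rational",      /* m_name */
        NULL,                 /* m_doc */
        -1,                   /* m_size */
        module_methods,       /* m_methods */
    };
    #else
    #define RETVAL
    #endif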
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 2712f0474..7ec5b977f 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -71,6 +71,12 @@
static int
_does_loop_use_arrays(void *data);
+static int
+assign_reduce_identity_zero(PyArrayObject *result);
+
+static int
+assign_reduce_identity_one(PyArrayObject *result);
+
/*
* fpstatus is the ufunc_formatted hardware status
* errmask is the handling mask specified by the user.
@@ -486,6 +492,13 @@ _has_reflected_op(PyObject *op, char *name)
_GETATTR_(bitwise_and, rand);
_GETATTR_(bitwise_xor, rxor);
_GETATTR_(bitwise_or, ror);
+ /* Comparisons */
+ _GETATTR_(equal, eq);
+ _GETATTR_(not_equal, ne);
+ _GETATTR_(greater, lt);
+ _GETATTR_(less, gt);
+ _GETATTR_(greater_equal, le);
+ _GETATTR_(less_equal, ge);
return 0;
}
@@ -714,12 +727,13 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc,
int *out_subok,
PyArrayObject **out_wheremask)
{
- int i, nargs, nin = ufunc->nin, nout = ufunc->nout;
+ int i, nargs, nin = ufunc->nin;
PyObject *obj, *context;
PyObject *str_key_obj = NULL;
char *ufunc_name;
+ int type_num;
- int any_flexible = 0, any_object = 0;
+ int any_flexible = 0, any_object = 0, any_flexible_userloops = 0;
ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
@@ -739,42 +753,81 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc,
/* Get input arguments */
for (i = 0; i < nin; ++i) {
obj = PyTuple_GET_ITEM(args, i);
- if (!PyArray_Check(obj) && !PyArray_IsScalar(obj, Generic)) {
- /*
- * TODO: There should be a comment here explaining what
- * context does.
- */
- context = Py_BuildValue("OOi", ufunc, args, i);
- if (context == NULL) {
- return -1;
- }
+
+ if (PyArray_Check(obj)) {
+ out_op[i] = (PyArrayObject *)PyArray_FromArray(obj,NULL,0);
}
else {
- context = NULL;
- }
- out_op[i] = (PyArrayObject *)PyArray_FromAny(obj,
+ if (!PyArray_IsScalar(obj, Generic)) {
+ /*
+ * TODO: There should be a comment here explaining what
+ * context does.
+ */
+ context = Py_BuildValue("OOi", ufunc, args, i);
+ if (context == NULL) {
+ return -1;
+ }
+ }
+ else {
+ context = NULL;
+ }
+ out_op[i] = (PyArrayObject *)PyArray_FromAny(obj,
NULL, 0, 0, 0, context);
- Py_XDECREF(context);
+ Py_XDECREF(context);
+ }
+
if (out_op[i] == NULL) {
return -1;
}
+
+ type_num = PyArray_DESCR(out_op[i])->type_num;
if (!any_flexible &&
- PyTypeNum_ISFLEXIBLE(PyArray_DESCR(out_op[i])->type_num)) {
+ PyTypeNum_ISFLEXIBLE(type_num)) {
any_flexible = 1;
}
if (!any_object &&
- PyTypeNum_ISOBJECT(PyArray_DESCR(out_op[i])->type_num)) {
+ PyTypeNum_ISOBJECT(type_num)) {
any_object = 1;
}
+
+ /*
+ * If any operand is a flexible dtype, check to see if any
+     * struct dtype ufuncs are registered. A loop has been registered
+     * for a struct dtype if its arg_dtypes array is not NULL.
+ */
+ if (PyTypeNum_ISFLEXIBLE(type_num) &&
+ !any_flexible_userloops &&
+ ufunc->userloops != NULL) {
+ PyUFunc_Loop1d *funcdata;
+ PyObject *key, *obj;
+ key = PyInt_FromLong(type_num);
+ if (key == NULL) {
+ continue;
+ }
+ obj = PyDict_GetItem(ufunc->userloops, key);
+ Py_DECREF(key);
+ if (obj == NULL) {
+ continue;
+ }
+ funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
+ while (funcdata != NULL) {
+ if (funcdata->arg_dtypes != NULL) {
+ any_flexible_userloops = 1;
+ break;
+ }
+ funcdata = funcdata->next;
+ }
+ }
}
/*
* Indicate not implemented if there are flexible objects (structured
- * type or string) but no object types.
+ * type or string) but no object types and no registered struct
+ * dtype ufuncs.
*
* Not sure - adding this increased to 246 errors, 150 failures.
*/
- if (any_flexible && !any_object) {
+ if (any_flexible && !any_flexible_userloops && !any_object) {
return -2;
}
@@ -1045,7 +1098,7 @@ trivial_two_operand_loop(PyArrayObject **op,
NPY_UF_DBG_PRINT1("two operand loop count %d\n", (int)count[0]);
if (!needs_api) {
- NPY_BEGIN_THREADS;
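+        /*
+         * Only release the GIL when the element count is large
+         * enough to amortize the cost of the thread transition.
+         */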
+ NPY_BEGIN_THREADS_THRESHOLDED(count[0]);
}
innerloop(data, count, stride, innerloopdata);
@@ -1078,7 +1131,7 @@ trivial_three_operand_loop(PyArrayObject **op,
NPY_UF_DBG_PRINT1("three operand loop count %d\n", (int)count[0]);
if (!needs_api) {
- NPY_BEGIN_THREADS;
+ NPY_BEGIN_THREADS_THRESHOLDED(count[0]);
}
innerloop(data, count, stride, innerloopdata);
@@ -1175,37 +1228,49 @@ iterator_loop(PyUFuncObject *ufunc,
npy_intp *count_ptr;
PyArrayObject **op_it;
+ npy_uint32 iter_flags;
NPY_BEGIN_THREADS_DEF;
/* Set up the flags */
for (i = 0; i < nin; ++i) {
- op_flags[i] = NPY_ITER_READONLY|
+ op_flags[i] = NPY_ITER_READONLY |
NPY_ITER_ALIGNED;
+ /*
+ * If READWRITE flag has been set for this operand,
+ * then clear default READONLY flag
+ */
+ op_flags[i] |= ufunc->op_flags[i];
+ if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
+ op_flags[i] &= ~NPY_ITER_READONLY;
+ }
}
for (i = nin; i < nop; ++i) {
- op_flags[i] = NPY_ITER_WRITEONLY|
- NPY_ITER_ALIGNED|
- NPY_ITER_ALLOCATE|
- NPY_ITER_NO_BROADCAST|
+ op_flags[i] = NPY_ITER_WRITEONLY |
+ NPY_ITER_ALIGNED |
+ NPY_ITER_ALLOCATE |
+ NPY_ITER_NO_BROADCAST |
NPY_ITER_NO_SUBTYPE;
}
+ iter_flags = ufunc->iter_flags |
+ NPY_ITER_EXTERNAL_LOOP |
+ NPY_ITER_REFS_OK |
+ NPY_ITER_ZEROSIZE_OK |
+ NPY_ITER_BUFFERED |
+ NPY_ITER_GROWINNER |
+ NPY_ITER_DELAY_BUFALLOC;
+
/*
* Allocate the iterator. Because the types of the inputs
* were already checked, we use the casting rule 'unsafe' which
* is faster to calculate.
*/
iter = NpyIter_AdvancedNew(nop, op,
- NPY_ITER_EXTERNAL_LOOP|
- NPY_ITER_REFS_OK|
- NPY_ITER_ZEROSIZE_OK|
- NPY_ITER_BUFFERED|
- NPY_ITER_GROWINNER|
- NPY_ITER_DELAY_BUFALLOC,
+ iter_flags,
order, NPY_UNSAFE_CASTING,
op_flags, dtype,
- 0, NULL, NULL, buffersize);
+ -1, NULL, NULL, buffersize);
if (iter == NULL) {
return -1;
}
@@ -1456,6 +1521,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
npy_intp *countptr;
PyArrayObject **op_it;
+ npy_uint32 iter_flags;
NPY_BEGIN_THREADS_DEF;
@@ -1475,6 +1541,14 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
op_flags[i] = default_op_in_flags |
NPY_ITER_READONLY |
NPY_ITER_ALIGNED;
+ /*
+ * If READWRITE flag has been set for this operand,
+ * then clear default READONLY flag
+ */
+ op_flags[i] |= ufunc->op_flags[i];
+ if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
+ op_flags[i] &= ~NPY_ITER_READONLY;
+ }
}
for (i = nin; i < nop; ++i) {
op_flags[i] = default_op_out_flags |
@@ -1490,20 +1564,23 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
NPY_UF_DBG_PRINT("Making iterator\n");
+ iter_flags = ufunc->iter_flags |
+ NPY_ITER_EXTERNAL_LOOP |
+ NPY_ITER_REFS_OK |
+ NPY_ITER_ZEROSIZE_OK |
+ NPY_ITER_BUFFERED |
+ NPY_ITER_GROWINNER;
+
/*
* Allocate the iterator. Because the types of the inputs
* were already checked, we use the casting rule 'unsafe' which
* is faster to calculate.
*/
iter = NpyIter_AdvancedNew(nop + ((wheremask != NULL) ? 1 : 0), op,
- NPY_ITER_EXTERNAL_LOOP |
- NPY_ITER_REFS_OK |
- NPY_ITER_ZEROSIZE_OK |
- NPY_ITER_BUFFERED |
- NPY_ITER_GROWINNER,
+ iter_flags,
order, NPY_UNSAFE_CASTING,
op_flags, dtypes,
- 0, NULL, NULL, buffersize);
+ -1, NULL, NULL, buffersize);
if (iter == NULL) {
return -1;
}
@@ -1643,7 +1720,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
PyArrayObject **op)
{
int nin, nout;
- int i, idim, nop;
+ int i, j, idim, nop;
char *ufunc_name;
int retval = -1, subok = 1;
int needs_api = 0;
@@ -1651,13 +1728,14 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
PyArray_Descr *dtypes[NPY_MAXARGS];
/* Use remapped axes for generalized ufunc */
- int broadcast_ndim, op_ndim;
+ int broadcast_ndim, iter_ndim;
int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS];
int *op_axes[NPY_MAXARGS];
npy_uint32 op_flags[NPY_MAXARGS];
-
+ npy_intp iter_shape[NPY_MAXARGS];
NpyIter *iter = NULL;
+ npy_uint32 iter_flags;
/* These parameters come from extobj= or from a TLS global */
int buffersize = 0, errormask = 0;
@@ -1672,8 +1750,9 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* The strides which get passed to the inner loop */
npy_intp *inner_strides = NULL;
- npy_intp *inner_strides_tmp, *ax_strides_tmp[NPY_MAXDIMS];
- int core_dim_ixs_size, *core_dim_ixs;
+ /* The sizes of the core dimensions (# entries is ufunc->core_num_dim_ix) */
+ npy_intp *core_dim_sizes = inner_dimensions + 1;
+ int core_dim_ixs_size;
/* The __array_prepare__ function to call for each output */
PyObject *arr_prep[NPY_MAXARGS];
@@ -1719,7 +1798,11 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
goto fail;
}
- /* Figure out the number of dimensions needed by the iterator */
+ /*
+ * Figure out the number of iteration dimensions, which
+ * is the broadcast result of all the input non-core
+ * dimensions.
+ */
broadcast_ndim = 0;
for (i = 0; i < nin; ++i) {
int n = PyArray_NDIM(op[i]) - ufunc->core_num_dims[i];
@@ -1727,8 +1810,18 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
broadcast_ndim = n;
}
}
- op_ndim = broadcast_ndim + ufunc->core_num_dim_ix;
- if (op_ndim > NPY_MAXDIMS) {
+
+ /*
+ * Figure out the number of iterator creation dimensions,
+ * which is the broadcast dimensions + all the core dimensions of
+ * the outputs, so that the iterator can allocate those output
+ * dimensions following the rules of order='F', for example.
+ */
+ iter_ndim = broadcast_ndim;
+ for (i = nin; i < nop; ++i) {
+ iter_ndim += ufunc->core_num_dims[i];
+ }
+ if (iter_ndim > NPY_MAXDIMS) {
PyErr_Format(PyExc_ValueError,
"too many dimensions for generalized ufunc %s",
ufunc_name);
@@ -1736,9 +1829,76 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
goto fail;
}
+ /*
+ * Validate the core dimensions of all the operands,
+ * and collect all of the labeled core dimension sizes
+     * into the array 'core_dim_sizes'. Initialize them to 1, since
+     * an entry may never be visited, for example when an operand
+     * broadcasts onto a core dimension.
+ */
+ for (i = 0; i < ufunc->core_num_dim_ix; ++i) {
+ core_dim_sizes[i] = 1;
+ }
+ for (i = 0; i < nop; ++i) {
+ if (op[i] != NULL) {
+ int dim_offset = ufunc->core_offsets[i];
+ int num_dims = ufunc->core_num_dims[i];
+ int core_start_dim = PyArray_NDIM(op[i]) - num_dims;
+ /* Make sure any output operand has enough dimensions */
+ if (i >= nin && core_start_dim < 0) {
+ PyErr_Format(PyExc_ValueError,
+ "%s: Output operand %d does not have enough dimensions "
+ "(has %d, gufunc core with signature %s "
+ "requires %d)",
+ ufunc_name, i - nin, PyArray_NDIM(op[i]),
+ ufunc->core_signature, num_dims);
+ retval = -1;
+ goto fail;
+ }
+
+ /*
+ * Make sure each core dimension matches all other core
+ * dimensions with the same label
+ *
+ * NOTE: For input operands, core_start_dim may be negative.
+ * In that case, the operand is being broadcast onto
+ * core dimensions. For example, a scalar will broadcast
+ * to fit any core signature.
+ */
+ if (core_start_dim >= 0) {
+ idim = 0;
+ } else {
+ idim = -core_start_dim;
+ }
+ for (; idim < num_dims; ++idim) {
+ int core_dim_index = ufunc->core_dim_ixs[dim_offset + idim];
+ npy_intp op_dim_size =
+ PyArray_SHAPE(op[i])[core_start_dim + idim];
+ if (core_dim_sizes[core_dim_index] == 1) {
+ core_dim_sizes[core_dim_index] = op_dim_size;
+ } else if ((i >= nin || op_dim_size != 1) &&
+ core_dim_sizes[core_dim_index] != op_dim_size) {
+ PyErr_Format(PyExc_ValueError,
+ "%s: Operand %d has a mismatch in its core "
+ "dimension %d, with gufunc signature %s "
+ "(size %zd is different from %zd)",
+ ufunc_name, i, idim, ufunc->core_signature,
+ op_dim_size, core_dim_sizes[core_dim_index]);
+ retval = -1;
+ goto fail;
+ }
+ }
+ }
+ }
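+    /*
+     * Worked example (illustrative): for a "(m,n),(n,p)->(m,p)"
+     * gufunc called with inputs of shape (3,5,4) and (4,6), the
+     * non-core dimensions give broadcast_ndim == 1 and the loop
+     * above yields core_dim_sizes of {m: 5, n: 4, p: 6}, so
+     * iter_shape below becomes (-1, 5, 6), where -1 marks an
+     * axis left for the iterator to broadcast.
+     */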
+
+ /* Fill in the initial part of 'iter_shape' */
+ for (idim = 0; idim < broadcast_ndim; ++idim) {
+ iter_shape[idim] = -1;
+ }
+
/* Fill in op_axes for all the operands */
+ j = broadcast_ndim;
core_dim_ixs_size = 0;
- core_dim_ixs = ufunc->core_dim_ixs;
for (i = 0; i < nop; ++i) {
int n;
if (op[i]) {
@@ -1760,22 +1920,27 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
op_axes_arrays[i][idim] = -1;
}
}
- /* Use the signature information for the rest */
- for (idim = broadcast_ndim; idim < op_ndim; ++idim) {
+
+ /* Any output core dimensions shape should be ignored */
+ for (idim = broadcast_ndim; idim < iter_ndim; ++idim) {
op_axes_arrays[i][idim] = -1;
}
- for (idim = 0; idim < ufunc->core_num_dims[i]; ++idim) {
- if (n + idim >= 0) {
- op_axes_arrays[i][broadcast_ndim + core_dim_ixs[idim]] =
- n + idim;
- }
- else {
- op_axes_arrays[i][broadcast_ndim + core_dim_ixs[idim]] = -1;
+
+ /* Except for when it belongs to this output */
+ if (i >= nin) {
+ int dim_offset = ufunc->core_offsets[i];
+ int num_dims = ufunc->core_num_dims[i];
+ /* Fill in 'iter_shape' and 'op_axes' for this output */
+ for (idim = 0; idim < num_dims; ++idim) {
+ iter_shape[j] = core_dim_sizes[
+ ufunc->core_dim_ixs[dim_offset + idim]];
+ op_axes_arrays[i][j] = n + idim;
+ ++j;
}
}
- core_dim_ixs_size += ufunc->core_num_dims[i];
- core_dim_ixs += ufunc->core_num_dims[i];
+
op_axes[i] = op_axes_arrays[i];
+ core_dim_ixs_size += ufunc->core_num_dims[i];
}
/* Get the buffersize, errormask, and error object globals */
@@ -1869,9 +2034,17 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
* can't do buffering, so must COPY or UPDATEIFCOPY.
*/
for (i = 0; i < nin; ++i) {
- op_flags[i] = NPY_ITER_READONLY|
- NPY_ITER_COPY|
+ op_flags[i] = NPY_ITER_READONLY |
+ NPY_ITER_COPY |
NPY_ITER_ALIGNED;
+ /*
+ * If READWRITE flag has been set for this operand,
+ * then clear default READONLY flag
+ */
+ op_flags[i] |= ufunc->op_flags[i];
+ if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
+ op_flags[i] &= ~NPY_ITER_READONLY;
+ }
}
for (i = nin; i < nop; ++i) {
op_flags[i] = NPY_ITER_READWRITE|
@@ -1881,12 +2054,17 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
NPY_ITER_NO_BROADCAST;
}
+ iter_flags = ufunc->iter_flags |
+ NPY_ITER_MULTI_INDEX |
+ NPY_ITER_REFS_OK |
+ NPY_ITER_REDUCE_OK |
+ NPY_ITER_ZEROSIZE_OK;
+
/* Create the iterator */
- iter = NpyIter_AdvancedNew(nop, op, NPY_ITER_MULTI_INDEX|
- NPY_ITER_REFS_OK|
- NPY_ITER_REDUCE_OK,
+ iter = NpyIter_AdvancedNew(nop, op, iter_flags,
order, NPY_UNSAFE_CASTING, op_flags,
- dtypes, op_ndim, op_axes, NULL, 0);
+ dtypes, iter_ndim,
+ op_axes, iter_shape, 0);
if (iter == NULL) {
retval = -1;
goto fail;
@@ -1906,37 +2084,37 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
*/
inner_strides = (npy_intp *)PyArray_malloc(
NPY_SIZEOF_INTP * (nop+core_dim_ixs_size));
- /* The strides after the first nop match core_dim_ixs */
- core_dim_ixs = ufunc->core_dim_ixs;
- inner_strides_tmp = inner_strides + nop;
- for (idim = 0; idim < ufunc->core_num_dim_ix; ++idim) {
- ax_strides_tmp[idim] = NpyIter_GetAxisStrideArray(iter,
- broadcast_ndim+idim);
- if (ax_strides_tmp[idim] == NULL) {
- retval = -1;
- goto fail;
- }
- }
+ /* Copy the strides after the first nop */
+ idim = nop;
for (i = 0; i < nop; ++i) {
- for (idim = 0; idim < ufunc->core_num_dims[i]; ++idim) {
- inner_strides_tmp[idim] = ax_strides_tmp[core_dim_ixs[idim]][i];
+ int num_dims = ufunc->core_num_dims[i];
+ int core_start_dim = PyArray_NDIM(op[i]) - num_dims;
+ /*
+ * Need to use the arrays in the iterator, not op, because
+ * a copy with a different-sized type may have been made.
+ */
+ PyArrayObject *arr = NpyIter_GetOperandArray(iter)[i];
+ npy_intp *shape = PyArray_SHAPE(arr);
+ npy_intp *strides = PyArray_STRIDES(arr);
+ for (j = 0; j < num_dims; ++j) {
+ if (core_start_dim + j >= 0) {
+ /*
+                     * Force the stride to zero when the shape is 1, so
+ * that the broadcasting works right.
+ */
+ if (shape[core_start_dim + j] != 1) {
+ inner_strides[idim++] = strides[core_start_dim + j];
+ } else {
+ inner_strides[idim++] = 0;
+ }
+ } else {
+ inner_strides[idim++] = 0;
+ }
}
-
- core_dim_ixs += ufunc->core_num_dims[i];
- inner_strides_tmp += ufunc->core_num_dims[i];
}
- /* Set up the inner dimensions array */
- if (NpyIter_GetShape(iter, inner_dimensions) != NPY_SUCCEED) {
- retval = -1;
- goto fail;
- }
- /* Move the core dimensions to start at the second element */
- memmove(&inner_dimensions[1], &inner_dimensions[broadcast_ndim],
- NPY_SIZEOF_INTP * ufunc->core_num_dim_ix);
-
- /* Remove all the core dimensions from the iterator */
- for (i = 0; i < ufunc->core_num_dim_ix; ++i) {
+ /* Remove all the core output dimensions from the iterator */
+ for (i = broadcast_ndim; i < iter_ndim; ++i) {
if (NpyIter_RemoveAxis(iter, broadcast_ndim) != NPY_SUCCEED) {
retval = -1;
goto fail;
@@ -1971,8 +2149,8 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
NPY_UF_DBG_PRINT("Executing inner loop\n");
- /* Do the ufunc loop */
if (NpyIter_GetIterSize(iter) != 0) {
+ /* Do the ufunc loop */
NpyIter_IterNextFunc *iternext;
char **dataptr;
npy_intp *count_ptr;
@@ -1991,6 +2169,36 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
inner_dimensions[0] = *count_ptr;
innerloop(dataptr, inner_dimensions, inner_strides, innerloopdata);
} while (iternext(iter));
+ } else {
+        /*
+ * For each output operand, check if it has non-zero size,
+ * and assign the identity if it does. For example, a dot
+ * product of two zero-length arrays will be a scalar,
+ * which has size one.
+ */
+ for (i = nin; i < nop; ++i) {
+ if (PyArray_SIZE(op[i]) != 0) {
+ switch (ufunc->identity) {
+ case PyUFunc_Zero:
+ assign_reduce_identity_zero(op[i]);
+ break;
+ case PyUFunc_One:
+ assign_reduce_identity_one(op[i]);
+ break;
+ case PyUFunc_None:
+ case PyUFunc_ReorderableNone:
+ PyErr_Format(PyExc_ValueError,
+ "ufunc %s ",
+ ufunc_name);
+ goto fail;
+ default:
+ PyErr_Format(PyExc_ValueError,
+ "ufunc %s has an invalid identity for reduction",
+ ufunc_name);
+ goto fail;
+ }
+ }
+ }
}
/* Check whether any errors occurred during the loop */
@@ -2433,13 +2641,13 @@ reduce_type_resolver(PyUFuncObject *ufunc, PyArrayObject *arr,
}
static int
-assign_reduce_identity_zero(PyArrayObject *result, void *data)
+assign_reduce_identity_zero(PyArrayObject *result)
{
return PyArray_FillWithScalar(result, PyArrayScalar_False);
}
static int
-assign_reduce_identity_one(PyArrayObject *result, void *data)
+assign_reduce_identity_one(PyArrayObject *result)
{
return PyArray_FillWithScalar(result, PyArrayScalar_True);
}
@@ -3129,12 +3337,40 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
op[1] = arr;
op[2] = ind;
- /* Likewise with accumulate, must do UPDATEIFCOPY */
if (out != NULL || ndim > 1 || !PyArray_ISALIGNED(arr) ||
!PyArray_EquivTypes(op_dtypes[0], PyArray_DESCR(arr))) {
need_outer_iterator = 1;
}
+ /* Special case when the index array's size is zero */
+ if (ind_size == 0) {
+ if (out == NULL) {
+ npy_intp out_shape[NPY_MAXDIMS];
+ memcpy(out_shape, PyArray_SHAPE(arr),
+ PyArray_NDIM(arr) * NPY_SIZEOF_INTP);
+ out_shape[axis] = 0;
+ Py_INCREF(op_dtypes[0]);
+ op[0] = out = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, op_dtypes[0],
+ PyArray_NDIM(arr), out_shape, NULL, NULL,
+ 0, NULL);
+ if (out == NULL) {
+ goto fail;
+ }
+ }
+ else {
+ /* Allow any zero-sized output array in this case */
+ if (PyArray_SIZE(out) != 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "output operand shape for reduceat is "
+ "incompatible with index array of shape (0,)");
+ goto fail;
+ }
+ }
+
+ goto finish;
+ }
+
if (need_outer_iterator) {
npy_uint32 flags = NPY_ITER_ZEROSIZE_OK|
NPY_ITER_REFS_OK|
@@ -3142,7 +3378,8 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
/*
* The way reduceat is set up, we can't do buffering,
- * so make a copy instead when necessary.
+ * so make a copy instead when necessary using
+ * the UPDATEIFCOPY flag
*/
/* The per-operand flags for the outer loop */
@@ -3556,31 +3793,16 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
* 'prod', et al, also allow a reduction where axis=0, even
* though this is technically incorrect.
*/
- if (operation == UFUNC_REDUCE &&
- (naxes == 0 || (naxes == 1 && axes[0] == 0))) {
+ naxes = 0;
+
+ if (!(operation == UFUNC_REDUCE &&
+ (naxes == 0 || (naxes == 1 && axes[0] == 0)))) {
+ PyErr_Format(PyExc_TypeError, "cannot %s on a scalar",
+ _reduce_type[operation]);
Py_XDECREF(otype);
- /* If there's an output parameter, copy the value */
- if (out != NULL) {
- if (PyArray_CopyInto(out, mp) < 0) {
- Py_DECREF(mp);
- return NULL;
- }
- else {
- Py_DECREF(mp);
- Py_INCREF(out);
- return (PyObject *)out;
- }
- }
- /* Otherwise return the array unscathed */
- else {
- return PyArray_Return(mp);
- }
+ Py_DECREF(mp);
+ return NULL;
}
- PyErr_Format(PyExc_TypeError, "cannot %s on a scalar",
- _reduce_type[operation]);
- Py_XDECREF(otype);
- Py_DECREF(mp);
- return NULL;
}
/*
@@ -4114,6 +4336,14 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data,
}
ufunc->doc = doc;
+ ufunc->op_flags = PyArray_malloc(sizeof(npy_uint32)*ufunc->nargs);
+ if (ufunc->op_flags == NULL) {
+ return PyErr_NoMemory();
+ }
+ memset(ufunc->op_flags, 0, sizeof(npy_uint32)*ufunc->nargs);
+
+ ufunc->iter_flags = 0;
+
/* generalized ufunc */
ufunc->core_enabled = 0;
ufunc->core_num_dim_ix = 0;
@@ -4200,9 +4430,19 @@ cmp_arg_types(int *arg1, int *arg2, int n)
static NPY_INLINE void
_free_loop1d_list(PyUFunc_Loop1d *data)
{
+ int i;
+
while (data != NULL) {
PyUFunc_Loop1d *next = data->next;
PyArray_free(data->arg_types);
+
+ if (data->arg_dtypes != NULL) {
+ for (i = 0; i < data->nargs; i++) {
+ Py_DECREF(data->arg_dtypes[i]);
+ }
+ PyArray_free(data->arg_dtypes);
+ }
+
PyArray_free(data);
data = next;
}
@@ -4225,6 +4465,112 @@ _loop1d_list_free(void *ptr)
#endif
+/*
+ * This function allows the user to register a 1-d loop with an already
+ * created ufunc. This function is similar to RegisterLoopForType except
+ * that it allows a 1-d loop to be registered with PyArray_Descr objects
+ * instead of dtype type num values. This allows a 1-d loop to be registered
+ * for a structured array dtype or a custom dtype. The ufunc is called
+ * whenever any of its input arguments match the user_dtype argument.
+ * ufunc - ufunc object created from call to PyUFunc_FromFuncAndData
+ * user_dtype - dtype that ufunc will be registered with
+ * function - 1-d loop function pointer
+ * arg_dtypes - array of dtype objects describing the ufunc operands
+ * data - arbitrary data pointer passed in to loop function
+ */
+/*UFUNC_API*/
+NPY_NO_EXPORT int
+PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
+ PyArray_Descr *user_dtype,
+ PyUFuncGenericFunction function,
+ PyArray_Descr **arg_dtypes,
+ void *data)
+{
+ int i;
+ int result = 0;
+ int *arg_typenums;
+ PyObject *key, *cobj;
+
+ if (user_dtype == NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "unknown user defined struct dtype");
+ return -1;
+ }
+
+ key = PyInt_FromLong((long) user_dtype->type_num);
+ if (key == NULL) {
+ return -1;
+ }
+
+ arg_typenums = PyArray_malloc(ufunc->nargs * sizeof(int));
+ if (arg_typenums == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+ if (arg_dtypes != NULL) {
+ for (i = 0; i < ufunc->nargs; i++) {
+ arg_typenums[i] = arg_dtypes[i]->type_num;
+ }
+ }
+ else {
+ for (i = 0; i < ufunc->nargs; i++) {
+ arg_typenums[i] = user_dtype->type_num;
+ }
+ }
+
+ result = PyUFunc_RegisterLoopForType(ufunc, user_dtype->type_num,
+ function, arg_typenums, data);
+
+ if (result == 0) {
+ cobj = PyDict_GetItem(ufunc->userloops, key);
+ if (cobj == NULL) {
+ PyErr_SetString(PyExc_KeyError,
+ "userloop for user dtype not found");
+ result = -1;
+ }
+ else {
+ PyUFunc_Loop1d *current, *prev = NULL;
+ int cmp = 1;
+ current = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(cobj);
+ while (current != NULL) {
+ cmp = cmp_arg_types(current->arg_types,
+ arg_typenums, ufunc->nargs);
+ if (cmp >= 0 && current->arg_dtypes == NULL) {
+ break;
+ }
+ prev = current;
+ current = current->next;
+ }
+ if (cmp == 0 && current->arg_dtypes == NULL) {
+ current->arg_dtypes = PyArray_malloc(ufunc->nargs *
+ sizeof(PyArray_Descr*));
+ if (arg_dtypes != NULL) {
+ for (i = 0; i < ufunc->nargs; i++) {
+ current->arg_dtypes[i] = arg_dtypes[i];
+ Py_INCREF(current->arg_dtypes[i]);
+ }
+ }
+ else {
+ for (i = 0; i < ufunc->nargs; i++) {
+ current->arg_dtypes[i] = user_dtype;
+ Py_INCREF(current->arg_dtypes[i]);
+ }
+ }
+ current->nargs = ufunc->nargs;
+ }
+ else {
+ result = -1;
+ }
+ }
+ }
+
+ PyArray_free(arg_typenums);
+
+ Py_DECREF(key);
+
+ return result;
+}
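A hedged usage sketch of the new API for a structured dtype (the
add_ufunc object and add_triplet inner loop are illustrative
assumptions, not part of this patch):

    /* Register a loop on an existing ufunc for a 'u8,u8,u8' dtype. */
    PyObject *spec = PyUnicode_FromString("u8,u8,u8");
    PyArray_Descr *dt = NULL;
    PyArray_Descr *dtypes[3];

    if (spec == NULL || PyArray_DescrConverter(spec, &dt) != NPY_SUCCEED) {
        Py_XDECREF(spec);
        return -1;            /* error handling abbreviated */
    }
    Py_DECREF(spec);
    dtypes[0] = dtypes[1] = dtypes[2] = dt;
    if (PyUFunc_RegisterLoopForDescr((PyUFuncObject *)add_ufunc, dt,
                                     add_triplet, dtypes, NULL) < 0) {
        return -1;
    }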
+
/*UFUNC_API*/
NPY_NO_EXPORT int
PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
@@ -4240,7 +4586,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
int *newtypes=NULL;
descr=PyArray_DescrFromType(usertype);
- if ((usertype < NPY_USERDEF) || (descr==NULL)) {
+ if ((usertype < NPY_USERDEF && usertype != NPY_VOID) || (descr==NULL)) {
PyErr_SetString(PyExc_TypeError, "unknown user-defined type");
return -1;
}
@@ -4276,6 +4622,8 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
funcdata->arg_types = newtypes;
funcdata->data = data;
funcdata->next = NULL;
+ funcdata->arg_dtypes = NULL;
+ funcdata->nargs = 0;
/* Get entry for this user-defined type*/
cobj = PyDict_GetItem(ufunc->userloops, key);
@@ -4363,6 +4711,9 @@ ufunc_dealloc(PyUFuncObject *ufunc)
if (ufunc->ptr) {
PyArray_free(ufunc->ptr);
}
+ if (ufunc->op_flags) {
+ PyArray_free(ufunc->op_flags);
+ }
Py_XDECREF(ufunc->userloops);
Py_XDECREF(ufunc->obj);
PyArray_free(ufunc);
@@ -4786,9 +5137,7 @@ NPY_NO_EXPORT PyTypeObject PyUFunc_Type = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
/* End of code for ufunc objects */
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 5b090e88e..6a16692b0 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -1179,9 +1179,17 @@ find_userloop(PyUFuncObject *ufunc,
/* Use this to try to avoid repeating the same userdef loop search */
int last_userdef = -1;
- for (i = 0; i < nin; ++i) {
- int type_num = dtypes[i]->type_num;
- if (type_num != last_userdef && PyTypeNum_ISUSERDEF(type_num)) {
+ for (i = 0; i < nargs; ++i) {
+ int type_num;
+
+ /* no more ufunc arguments to check */
+ if (dtypes[i] == NULL) {
+ break;
+ }
+
+ type_num = dtypes[i]->type_num;
+ if (type_num != last_userdef &&
+ (PyTypeNum_ISUSERDEF(type_num) || type_num == NPY_VOID)) {
PyObject *key, *obj;
last_userdef = type_num;
@@ -1429,7 +1437,7 @@ ufunc_loop_matches(PyUFuncObject *self,
NPY_CASTING output_casting,
int any_object,
int use_min_scalar,
- int *types,
+ int *types, PyArray_Descr **dtypes,
int *out_no_castable_output,
char *out_err_src_typecode,
char *out_err_dst_typecode)
@@ -1456,7 +1464,18 @@ ufunc_loop_matches(PyUFuncObject *self,
return 0;
}
- tmp = PyArray_DescrFromType(types[i]);
+ /*
+ * If type num is NPY_VOID and struct dtypes have been passed in,
+ * use struct dtype object. Otherwise create new dtype object
+ * from type num.
+ */
+ if (types[i] == NPY_VOID && dtypes != NULL) {
+ tmp = dtypes[i];
+ Py_INCREF(tmp);
+ }
+ else {
+ tmp = PyArray_DescrFromType(types[i]);
+ }
if (tmp == NULL) {
return -1;
}
@@ -1518,7 +1537,7 @@ ufunc_loop_matches(PyUFuncObject *self,
static int
set_ufunc_loop_data_types(PyUFuncObject *self, PyArrayObject **op,
PyArray_Descr **out_dtypes,
- int *type_nums)
+ int *type_nums, PyArray_Descr **dtypes)
{
int i, nin = self->nin, nop = nin + self->nout;
@@ -1529,11 +1548,16 @@ set_ufunc_loop_data_types(PyUFuncObject *self, PyArrayObject **op,
* instead of creating a new one, similarly to preserve metadata.
**/
for (i = 0; i < nop; ++i) {
+ if (dtypes != NULL) {
+ out_dtypes[i] = dtypes[i];
+ Py_XINCREF(out_dtypes[i]);
/*
* Copy the dtype from 'op' if the type_num matches,
* to preserve metadata.
*/
- if (op[i] != NULL && PyArray_DESCR(op[i])->type_num == type_nums[i]) {
+ }
+ else if (op[i] != NULL &&
+ PyArray_DESCR(op[i])->type_num == type_nums[i]) {
out_dtypes[i] = ensure_dtype_nbo(PyArray_DESCR(op[i]));
Py_XINCREF(out_dtypes[i]);
}
@@ -1581,15 +1605,23 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
char *out_err_src_typecode,
char *out_err_dst_typecode)
{
- npy_intp i, nin = self->nin;
+ npy_intp i, nop = self->nin + self->nout;
PyUFunc_Loop1d *funcdata;
/* Use this to try to avoid repeating the same userdef loop search */
int last_userdef = -1;
- for (i = 0; i < nin; ++i) {
- int type_num = PyArray_DESCR(op[i])->type_num;
- if (type_num != last_userdef && PyTypeNum_ISUSERDEF(type_num)) {
+ for (i = 0; i < nop; ++i) {
+ int type_num;
+
+ /* no more ufunc arguments to check */
+ if (op[i] == NULL) {
+ break;
+ }
+
+ type_num = PyArray_DESCR(op[i])->type_num;
+ if (type_num != last_userdef &&
+ (PyTypeNum_ISUSERDEF(type_num) || type_num == NPY_VOID)) {
PyObject *key, *obj;
last_userdef = type_num;
@@ -1609,7 +1641,7 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
switch (ufunc_loop_matches(self, op,
input_casting, output_casting,
any_object, use_min_scalar,
- types,
+ types, funcdata->arg_dtypes,
out_no_castable_output, out_err_src_typecode,
out_err_dst_typecode)) {
/* Error */
@@ -1617,7 +1649,7 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
return -1;
/* Found a match */
case 1:
- set_ufunc_loop_data_types(self, op, out_dtype, types);
+ set_ufunc_loop_data_types(self, op, out_dtype, types, funcdata->arg_dtypes);
return 1;
}
@@ -1693,12 +1725,13 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
switch (ufunc_loop_matches(self, op,
casting, casting,
any_object, use_min_scalar,
- types,
+ types, NULL,
&no_castable_output, &err_src_typecode,
&err_dst_typecode)) {
/* It works */
case 1:
- set_ufunc_loop_data_types(self, op, out_dtype, types);
+ set_ufunc_loop_data_types(self, op,
+ out_dtype, types, NULL);
return 1;
/* Didn't match */
case 0:
@@ -1861,7 +1894,7 @@ linear_search_type_resolver(PyUFuncObject *self,
switch (ufunc_loop_matches(self, op,
input_casting, output_casting,
any_object, use_min_scalar,
- types,
+ types, NULL,
&no_castable_output, &err_src_typecode,
&err_dst_typecode)) {
/* Error */
@@ -1869,7 +1902,7 @@ linear_search_type_resolver(PyUFuncObject *self,
return -1;
/* Found a match */
case 1:
- set_ufunc_loop_data_types(self, op, out_dtype, types);
+ set_ufunc_loop_data_types(self, op, out_dtype, types, NULL);
return 0;
}
}
@@ -2067,7 +2100,7 @@ type_tuple_type_resolver(PyUFuncObject *self,
switch (ufunc_loop_matches(self, op,
casting, casting,
any_object, use_min_scalar,
- types,
+ types, NULL,
&no_castable_output, &err_src_typecode,
&err_dst_typecode)) {
/* Error */
@@ -2075,7 +2108,7 @@ type_tuple_type_resolver(PyUFuncObject *self,
return -1;
/* It worked */
case 1:
- set_ufunc_loop_data_types(self, op, out_dtype, types);
+ set_ufunc_loop_data_types(self, op, out_dtype, types, NULL);
return 0;
/* Didn't work */
case 0:
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 97f6f042c..0b789de26 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -123,6 +123,12 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
self->core_dim_ixs = NULL;
self->core_offsets = NULL;
self->core_signature = NULL;
+ self->op_flags = PyArray_malloc(sizeof(npy_uint32)*self->nargs);
+ if (self->op_flags == NULL) {
+ return PyErr_NoMemory();
+ }
+ memset(self->op_flags, 0, sizeof(npy_uint32)*self->nargs);
+ self->iter_flags = 0;
self->type_resolver = &object_ufunc_type_resolver;
self->legacy_inner_loop_selector = &object_ufunc_loop_selector;
@@ -382,7 +388,9 @@ PyMODINIT_FUNC initumath(void)
PyDict_SetItemString(d, "pi", s = PyFloat_FromDouble(NPY_PI));
Py_DECREF(s);
- PyDict_SetItemString(d, "e", s = PyFloat_FromDouble(exp(1.0)));
+ PyDict_SetItemString(d, "e", s = PyFloat_FromDouble(NPY_E));
+ Py_DECREF(s);
+ PyDict_SetItemString(d, "euler_gamma", s = PyFloat_FromDouble(NPY_EULER));
Py_DECREF(s);
#define ADDCONST(str) PyModule_AddIntConstant(m, #str, UFUNC_##str)
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index df26dd07a..653b2eeb9 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -1,9 +1,163 @@
+from __future__ import division, absolute_import, print_function
+
import sys
+import warnings
import numpy as np
from numpy.testing import *
-from numpy.testing.utils import WarningManager
-import warnings
+from numpy.compat import sixu
+
+# Detect whether the new NPY_RELAXED_STRIDES_CHECKING behaviour is in effect.
+NPY_RELAXED_STRIDES_CHECKING = np.ones((10, 1), order='C').flags.f_contiguous
+
+
+def test_array_array():
+ obj = object()
+ tobj = type(object)
+ ones11 = np.ones((1, 1), np.float64)
+ tndarray = type(ones11)
+    # Test is_ndarray
+ assert_equal(np.array(ones11, dtype=np.float64), ones11)
+ old_refcount = sys.getrefcount(tndarray)
+ np.array(ones11)
+ assert_equal(old_refcount, sys.getrefcount(tndarray))
+
+ # test None
+ assert_equal(np.array(None, dtype=np.float64),
+ np.array(np.nan, dtype=np.float64))
+ old_refcount = sys.getrefcount(tobj)
+ np.array(None, dtype=np.float64)
+ assert_equal(old_refcount, sys.getrefcount(tobj))
+
+ # test scalar
+ assert_equal(np.array(1.0, dtype=np.float64),
+ np.ones((), dtype=np.float64))
+ old_refcount = sys.getrefcount(np.float64)
+ np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
+ assert_equal(old_refcount, sys.getrefcount(np.float64))
+
+ # test string
+ S2 = np.dtype((str, 2))
+ S3 = np.dtype((str, 3))
+ S5 = np.dtype((str, 5))
+ assert_equal(np.array("1.0", dtype=np.float64),
+ np.ones((), dtype=np.float64))
+ assert_equal(np.array("1.0").dtype, S3)
+ assert_equal(np.array("1.0", dtype=str).dtype, S3)
+ assert_equal(np.array("1.0", dtype=S2), np.array("1."))
+ assert_equal(np.array("1", dtype=S5), np.ones((), dtype=S5))
+
+ # test unicode
+ _unicode = globals().get("unicode")
+ if _unicode:
+ U2 = np.dtype((_unicode, 2))
+ U3 = np.dtype((_unicode, 3))
+ U5 = np.dtype((_unicode, 5))
+ assert_equal(np.array(_unicode("1.0"), dtype=np.float64),
+ np.ones((), dtype=np.float64))
+ assert_equal(np.array(_unicode("1.0")).dtype, U3)
+ assert_equal(np.array(_unicode("1.0"), dtype=_unicode).dtype, U3)
+ assert_equal(np.array(_unicode("1.0"), dtype=U2),
+ np.array(_unicode("1.")))
+ assert_equal(np.array(_unicode("1"), dtype=U5),
+ np.ones((), dtype=U5))
+
+ builtins = getattr(__builtins__, '__dict__', __builtins__)
+ assert_(isinstance(builtins, dict))
+
+ # test buffer
+ _buffer = builtins.get("buffer")
+ if _buffer and sys.version_info[:3] >= (2, 7, 5):
+ # This test fails for earlier versions of Python.
+ # Evidently a bug got fixed in 2.7.5.
+ dat = np.array(_buffer('1.0'), dtype=np.float64)
+ assert_equal(dat, [49.0, 46.0, 48.0])
+ assert_(dat.dtype.type is np.float64)
+
+ dat = np.array(_buffer(b'1.0'))
+ assert_equal(dat, [49, 46, 48])
+ assert_(dat.dtype.type is np.uint8)
+
+ # test memoryview, new version of buffer
+ _memoryview = builtins.get("memoryview")
+ if _memoryview:
+ dat = np.array(_memoryview(b'1.0'), dtype=np.float64)
+ assert_equal(dat, [49.0, 46.0, 48.0])
+ assert_(dat.dtype.type is np.float64)
+
+ dat = np.array(_memoryview(b'1.0'))
+ assert_equal(dat, [49, 46, 48])
+ assert_(dat.dtype.type is np.uint8)
+
+ # test array interface
+ a = np.array(100.0, dtype=np.float64)
+ o = type("o", (object,),
+ dict(__array_interface__=a.__array_interface__))
+ assert_equal(np.array(o, dtype=np.float64), a)
+
+ # test array_struct interface
+ a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
+ dtype=[('f0', int), ('f1', float), ('f2', str)])
+ o = type("o", (object,),
+ dict(__array_struct__=a.__array_struct__))
+    ## Unclear whether np.array(o) is supposed to equal a here;
+    ## instead we get an array([...], dtype=">V18")
+ assert_equal(str(np.array(o).data), str(a.data))
+
+ # test array
+ o = type("o", (object,),
+ dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
+ assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))
+
+ # test recursion
+ nested = 1.5
+ for i in range(np.MAXDIMS):
+ nested = [nested]
+
+ # no error
+ np.array(nested)
+
+ # Exceeds recursion limit
+ assert_raises(ValueError, np.array, [nested], dtype=np.float64)
+
+ # Try with lists...
+ assert_equal(np.array([None] * 10, dtype=np.float64),
+ np.empty((10,), dtype=np.float64) + np.nan)
+ assert_equal(np.array([[None]] * 10, dtype=np.float64),
+ np.empty((10, 1), dtype=np.float64) + np.nan)
+ assert_equal(np.array([[None] * 10], dtype=np.float64),
+ np.empty((1, 10), dtype=np.float64) + np.nan)
+ assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
+ np.empty((10, 10), dtype=np.float64) + np.nan)
+
+ assert_equal(np.array([1.0] * 10, dtype=np.float64),
+ np.ones((10,), dtype=np.float64))
+ assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
+ np.ones((10, 1), dtype=np.float64))
+ assert_equal(np.array([[1.0] * 10], dtype=np.float64),
+ np.ones((1, 10), dtype=np.float64))
+ assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
+ np.ones((10, 10), dtype=np.float64))
+
+ # Try with tuples
+ assert_equal(np.array((None,) * 10, dtype=np.float64),
+ np.empty((10,), dtype=np.float64) + np.nan)
+ assert_equal(np.array([(None,)] * 10, dtype=np.float64),
+ np.empty((10, 1), dtype=np.float64) + np.nan)
+ assert_equal(np.array([(None,) * 10], dtype=np.float64),
+ np.empty((1, 10), dtype=np.float64) + np.nan)
+ assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
+ np.empty((10, 10), dtype=np.float64) + np.nan)
+
+ assert_equal(np.array((1.0,) * 10, dtype=np.float64),
+ np.ones((10,), dtype=np.float64))
+ assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
+ np.ones((10, 1), dtype=np.float64))
+ assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
+ np.ones((1, 10), dtype=np.float64))
+ assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
+ np.ones((10, 10), dtype=np.float64))
+
def test_fastCopyAndTranspose():
# 0D array
@@ -82,7 +236,44 @@ def test_array_astype():
b = a.astype('f4', subok=False, copy=False)
assert_equal(a, b)
assert_(not (a is b))
- assert_(type(b) != np.matrix)
+ assert_(type(b) is not np.matrix)
+
+ # Make sure converting from string object to fixed length string
+ # does not truncate.
+ a = np.array([b'a'*100], dtype='O')
+ b = a.astype('S')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('S100'))
+ a = np.array([sixu('a')*100], dtype='O')
+ b = a.astype('U')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('U100'))
+
+ # Same test as above but for strings shorter than 64 characters
+ a = np.array([b'a'*10], dtype='O')
+ b = a.astype('S')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('S10'))
+ a = np.array([sixu('a')*10], dtype='O')
+ b = a.astype('U')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('U10'))
+
+ a = np.array(123456789012345678901234567890, dtype='O').astype('S')
+ assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+ a = np.array(123456789012345678901234567890, dtype='O').astype('U')
+ assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30'))
+
+ a = np.array([123456789012345678901234567890], dtype='O').astype('S')
+ assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+ a = np.array([123456789012345678901234567890], dtype='O').astype('U')
+ assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30'))
+
+ a = np.array(123456789012345678901234567890, dtype='S')
+ assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+ a = np.array(123456789012345678901234567890, dtype='U')
+ assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30'))
+
def test_copyto_fromscalar():
a = np.arange(6, dtype='f4').reshape(2,3)
@@ -147,10 +338,13 @@ def test_copy_order():
assert_equal(x, y)
assert_equal(res.flags.c_contiguous, ccontig)
assert_equal(res.flags.f_contiguous, fcontig)
- if strides:
- assert_equal(x.strides, y.strides)
- else:
- assert_(x.strides != y.strides)
+    # This check is only possible when NPY_RELAXED_STRIDES_CHECKING
+    # is not in effect, because it actively changes the strides
+ if not NPY_RELAXED_STRIDES_CHECKING:
+ if strides:
+ assert_equal(x.strides, y.strides)
+ else:
+ assert_(x.strides != y.strides)
# Validate the initial state of a, b, and c
assert_(a.flags.c_contiguous)
@@ -204,7 +398,8 @@ def test_copy_order():
def test_contiguous_flags():
a = np.ones((4,4,1))[::2,:,:]
- a.strides = a.strides[:2] + (-123,)
+ if NPY_RELAXED_STRIDES_CHECKING:
+ a.strides = a.strides[:2] + (-123,)
b = np.ones((2,2,1,2,2)).swapaxes(3,4)
def check_contig(a, ccontig, fcontig):
@@ -214,8 +409,12 @@ def test_contiguous_flags():
# Check if new arrays are correct:
check_contig(a, False, False)
check_contig(b, False, False)
- check_contig(np.empty((2,2,0,2,2)), True, True)
- check_contig(np.array([[[1],[2]]], order='F'), True, True)
+ if NPY_RELAXED_STRIDES_CHECKING:
+ check_contig(np.empty((2,2,0,2,2)), True, True)
+ check_contig(np.array([[[1],[2]]], order='F'), True, True)
+ else:
+ check_contig(np.empty((2,2,0,2,2)), True, False)
+ check_contig(np.array([[[1],[2]]], order='F'), False, True)
check_contig(np.empty((2,2)), True, False)
check_contig(np.empty((2,2), order='F'), False, True)
@@ -224,15 +423,30 @@ def test_contiguous_flags():
check_contig(np.array(a, copy=False, order='C'), True, False)
check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
- # Check slicing update of flags and :
- check_contig(a[0], True, True)
- check_contig(a[None,::4,...,None], True, True)
- check_contig(b[0,0,...], False, True)
- check_contig(b[:,:,0:0,:,:], True, True)
+ if NPY_RELAXED_STRIDES_CHECKING:
+ # Check slicing update of flags and :
+ check_contig(a[0], True, True)
+ check_contig(a[None,::4,...,None], True, True)
+ check_contig(b[0,0,...], False, True)
+ check_contig(b[:,:,0:0,:,:], True, True)
+ else:
+ # Check slicing update of flags:
+ check_contig(a[0], True, False)
+ # Would be nice if this was C-Contiguous:
+ check_contig(a[None,0,...,None], False, False)
+ check_contig(b[0,0,0,...], False, True)
# Test ravel and squeeze.
check_contig(a.ravel(), True, True)
check_contig(np.ones((1,3,1)).squeeze(), True, True)
+def test_broadcast_arrays():
+ # Test user defined dtypes
+ a = np.array([(1,2,3)], dtype='u4,u4,u4')
+ b = np.array([(1,2,3),(4,5,6),(7,8,9)], dtype='u4,u4,u4')
+ result = np.broadcast_arrays(a, b)
+ assert_equal(result[0], np.array([(1,2,3),(1,2,3),(1,2,3)], dtype='u4,u4,u4'))
+ assert_equal(result[1], np.array([(1,2,3),(4,5,6),(7,8,9)], dtype='u4,u4,u4'))
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 2a2a97336..44bf5f397 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -1,8 +1,11 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+from __future__ import division, absolute_import, print_function
+
import sys
import numpy as np
from numpy.testing import *
+from numpy.compat import sixu
class TestArrayRepr(object):
def test_nan_inf(self):
@@ -155,7 +158,7 @@ def test_unicode_object_array():
expected = "array(['é'], dtype=object)"
else:
expected = "array([u'\\xe9'], dtype=object)"
- x = np.array([u'\xe9'], dtype=object)
+ x = np.array([sixu('\xe9')], dtype=object)
assert_equal(repr(x), expected)
diff --git a/numpy/core/tests/test_blasdot.py b/numpy/core/tests/test_blasdot.py
index 73c3c4a05..2e99cf5b0 100644
--- a/numpy/core/tests/test_blasdot.py
+++ b/numpy/core/tests/test_blasdot.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import numpy as np
import sys
from numpy.core import zeros, float64
@@ -47,7 +49,7 @@ def test_dot_3args():
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
- for i in xrange(12):
+ for i in range(12):
np.dot(f,v,r)
assert_equal(sys.getrefcount(r), 2)
r2 = np.dot(f,v,out=None)
@@ -88,4 +90,64 @@ def test_dot_3args_errors():
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, np.dot, f, v, r)
-
+def test_dot_array_order():
+ """ Test numpy dot with different order C, F
+
+ Comparing results with multiarray dot.
+    Double- and single-precision arrays are compared using a relative
+    precision of 7 and 5 decimals respectively.
+    Use 30 decimals when comparing exact operations like:
+ (a.b)' = b'.a'
+ """
+ _dot = np.core.multiarray.dot
+ a_dim, b_dim, c_dim = 10, 4, 7
+ orders = ["C", "F"]
+ dtypes_prec = {np.float64: 7, np.float32: 5}
+ np.random.seed(7)
+
+ for arr_type, prec in dtypes_prec.items():
+ for a_order in orders:
+ a = np.asarray(np.random.randn(a_dim, a_dim),
+ dtype=arr_type, order=a_order)
+ assert_array_equal(np.dot(a, a), a.dot(a))
+ # (a.a)' = a'.a', note that mse~=1e-31 needs almost_equal
+ assert_almost_equal(a.dot(a), a.T.dot(a.T).T, decimal=prec)
+
+ #
+ # Check with making explicit copy
+ #
+ a_T = a.T.copy(order=a_order)
+ assert_almost_equal(a_T.dot(a_T), a.T.dot(a.T), decimal=prec)
+ assert_almost_equal(a.dot(a_T), a.dot(a.T), decimal=prec)
+ assert_almost_equal(a_T.dot(a), a.T.dot(a), decimal=prec)
+
+ #
+ # Compare with multiarray dot
+ #
+ assert_almost_equal(a.dot(a), _dot(a, a), decimal=prec)
+ assert_almost_equal(a.T.dot(a), _dot(a.T, a), decimal=prec)
+ assert_almost_equal(a.dot(a.T), _dot(a, a.T), decimal=prec)
+ assert_almost_equal(a.T.dot(a.T), _dot(a.T, a.T), decimal=prec)
+ for res in a.dot(a), a.T.dot(a), a.dot(a.T), a.T.dot(a.T):
+ assert res.flags.c_contiguous
+
+ for b_order in orders:
+ b = np.asarray(np.random.randn(a_dim, b_dim),
+ dtype=arr_type, order=b_order)
+ b_T = b.T.copy(order=b_order)
+ assert_almost_equal(a_T.dot(b), a.T.dot(b), decimal=prec)
+ assert_almost_equal(b_T.dot(a), b.T.dot(a), decimal=prec)
+ # (b'.a)' = a'.b
+ assert_almost_equal(b.T.dot(a), a.T.dot(b).T, decimal=prec)
+ assert_almost_equal(a.dot(b), _dot(a, b), decimal=prec)
+ assert_almost_equal(b.T.dot(a), _dot(b.T, a), decimal=prec)
+
+
+ for c_order in orders:
+ c = np.asarray(np.random.randn(b_dim, c_dim),
+ dtype=arr_type, order=c_order)
+ c_T = c.T.copy(order=c_order)
+ assert_almost_equal(c.T.dot(b.T), c_T.dot(b_T), decimal=prec)
+ assert_almost_equal(c.T.dot(b.T).T, b.dot(c), decimal=prec)
+ assert_almost_equal(b.dot(c), _dot(b, c), decimal=prec)
+ assert_almost_equal(c.T.dot(b.T), _dot(c.T, b.T), decimal=prec)
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index fbde86b57..4ba0c048a 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os, pickle
import numpy
import numpy as np
@@ -48,12 +50,11 @@ class TestDateTime(TestCase):
assert_raises(TypeError, np.dtype, 'm8[badunit]')
assert_raises(TypeError, np.dtype, 'M8[YY]')
assert_raises(TypeError, np.dtype, 'm8[YY]')
- assert_raises(TypeError, np.dtype, 'M4')
- assert_raises(TypeError, np.dtype, 'm4')
- assert_raises(TypeError, np.dtype, 'M7')
- assert_raises(TypeError, np.dtype, 'm7')
- assert_raises(TypeError, np.dtype, 'M16')
- assert_raises(TypeError, np.dtype, 'm16')
+ assert_warns(DeprecationWarning, np.dtype, 'm4')
+ assert_warns(DeprecationWarning, np.dtype, 'M7')
+ assert_warns(DeprecationWarning, np.dtype, 'm7')
+ assert_warns(DeprecationWarning, np.dtype, 'M16')
+ assert_warns(DeprecationWarning, np.dtype, 'm16')
def test_datetime_casting_rules(self):
# Cannot cast safely/same_kind between timedelta and datetime
diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py
index 85bb623a4..4656c842f 100644
--- a/numpy/core/tests/test_defchararray.py
+++ b/numpy/core/tests/test_defchararray.py
@@ -1,10 +1,12 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
from numpy.core import *
import numpy as np
import sys
from numpy.core.multiarray import _vec_string
-from numpy.compat import asbytes, asbytes_nested
+from numpy.compat import asbytes, asbytes_nested, sixu
kw_unicode_true = {'unicode': True} # make 2to3 work properly
kw_unicode_false = {'unicode': False}
@@ -19,12 +21,12 @@ class TestBasic(TestCase):
['long', '0123456789']]))
def test_from_object_array_unicode(self):
- A = np.array([['abc', u'Sigma \u03a3'],
+ A = np.array([['abc', sixu('Sigma \u03a3')],
['long ', '0123456789']], dtype='O')
self.assertRaises(ValueError, np.char.array, (A,))
B = np.char.array(A, **kw_unicode_true)
assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
- assert_array_equal(B, [['abc', u'Sigma \u03a3'],
+ assert_array_equal(B, [['abc', sixu('Sigma \u03a3')],
['long', '0123456789']])
def test_from_string_array(self):
@@ -45,7 +47,7 @@ class TestBasic(TestCase):
assert_(C[0,0] == A[0,0])
def test_from_unicode_array(self):
- A = np.array([['abc', u'Sigma \u03a3'],
+ A = np.array([['abc', sixu('Sigma \u03a3')],
['long ', '0123456789']])
assert_equal(A.dtype.type, np.unicode_)
B = np.char.array(A)
@@ -62,7 +64,7 @@ class TestBasic(TestCase):
def test_unicode_upconvert(self):
A = np.char.array(['abc'])
- B = np.char.array([u'\u03a3'])
+ B = np.char.array([sixu('\u03a3')])
assert_(issubclass((A + B).dtype.type, np.unicode_))
def test_from_string(self):
@@ -72,7 +74,7 @@ class TestBasic(TestCase):
assert_(issubclass(A.dtype.type, np.string_))
def test_from_unicode(self):
- A = np.char.array(u'\u03a3')
+ A = np.char.array(sixu('\u03a3'))
assert_equal(len(A), 1)
assert_equal(len(A[0]), 1)
assert_equal(A.itemsize, 4)
@@ -184,9 +186,9 @@ class TestInformation(TestCase):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
- self.B = np.array([[u' \u03a3 ', u''],
- [u'12345', u'MixedCase'],
- [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
+ self.B = np.array([[sixu(' \u03a3 '), sixu('')],
+ [sixu('12345'), sixu('MixedCase')],
+ [sixu('123 \t 345 \0 '), sixu('UPPER')]]).view(np.chararray)
def test_len(self):
assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
@@ -283,9 +285,9 @@ class TestMethods(TestCase):
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']],
dtype='S').view(np.chararray)
- self.B = np.array([[u' \u03a3 ', u''],
- [u'12345', u'MixedCase'],
- [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
+ self.B = np.array([[sixu(' \u03a3 '), sixu('')],
+ [sixu('12345'), sixu('MixedCase')],
+ [sixu('123 \t 345 \0 '), sixu('UPPER')]]).view(np.chararray)
def test_capitalize(self):
assert_(issubclass(self.A.capitalize().dtype.type, np.string_))
@@ -295,7 +297,7 @@ class TestMethods(TestCase):
['123 \t 345 \0 ', 'Upper']]))
assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_))
assert_array_equal(self.B.capitalize(), [
- [u' \u03c3 ', ''],
+ [sixu(' \u03c3 '), ''],
['12345', 'Mixedcase'],
['123 \t 345 \0 ', 'Upper']])
@@ -323,7 +325,7 @@ class TestMethods(TestCase):
def test_encode(self):
B = self.B.encode('unicode_escape')
- assert_(B[0][0] == asbytes(r' \u03a3 '))
+ assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))
def test_expandtabs(self):
T = self.A.expandtabs()
@@ -371,9 +373,9 @@ class TestMethods(TestCase):
['123 \t 345 \0 ', 'upper']]))
assert_(issubclass(self.B.lower().dtype.type, np.unicode_))
assert_array_equal(self.B.lower(), [
- [u' \u03c3 ', u''],
- [u'12345', u'mixedcase'],
- [u'123 \t 345 \0 ', u'upper']])
+ [sixu(' \u03c3 '), sixu('')],
+ [sixu('12345'), sixu('mixedcase')],
+ [sixu('123 \t 345 \0 '), sixu('upper')]])
def test_lstrip(self):
assert_(issubclass(self.A.lstrip().dtype.type, np.string_))
@@ -388,18 +390,17 @@ class TestMethods(TestCase):
['23 \t 345 \x00', 'UPPER']]))
assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_))
assert_array_equal(self.B.lstrip(), [
- [u'\u03a3 ', ''],
+ [sixu('\u03a3 '), ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']])
def test_partition(self):
- if sys.version_info >= (2, 5):
- P = self.A.partition(asbytes_nested(['3', 'M']))
- assert_(issubclass(P.dtype.type, np.string_))
- assert_array_equal(P, asbytes_nested([
- [(' abc ', '', ''), ('', '', '')],
- [('12', '3', '45'), ('', 'M', 'ixedCase')],
- [('12', '3', ' \t 345 \0 '), ('UPPER', '', '')]]))
+ P = self.A.partition(asbytes_nested(['3', 'M']))
+ assert_(issubclass(P.dtype.type, np.string_))
+ assert_array_equal(P, asbytes_nested([
+ [(' abc ', '', ''), ('', '', '')],
+ [('12', '3', '45'), ('', 'M', 'ixedCase')],
+ [('12', '3', ' \t 345 \0 '), ('UPPER', '', '')]]))
def test_replace(self):
R = self.A.replace(asbytes_nested(['3', 'a']),
@@ -412,11 +413,11 @@ class TestMethods(TestCase):
if sys.version_info[0] < 3:
# NOTE: b'abc'.replace(b'a', 'b') is not allowed on Py3
- R = self.A.replace(asbytes('a'), u'\u03a3')
+ R = self.A.replace(asbytes('a'), sixu('\u03a3'))
assert_(issubclass(R.dtype.type, np.unicode_))
assert_array_equal(R, [
- [u' \u03a3bc ', ''],
- ['12345', u'MixedC\u03a3se'],
+ [sixu(' \u03a3bc '), ''],
+ ['12345', sixu('MixedC\u03a3se')],
['123 \t 345 \x00', 'UPPER']])
def test_rjust(self):
@@ -435,13 +436,12 @@ class TestMethods(TestCase):
[' FOO', ' FOO']]))
def test_rpartition(self):
- if sys.version_info >= (2, 5):
- P = self.A.rpartition(asbytes_nested(['3', 'M']))
- assert_(issubclass(P.dtype.type, np.string_))
- assert_array_equal(P, asbytes_nested([
- [('', '', ' abc '), ('', '', '')],
- [('12', '3', '45'), ('', 'M', 'ixedCase')],
- [('123 \t ', '3', '45 \0 '), ('', '', 'UPPER')]]))
+ P = self.A.rpartition(asbytes_nested(['3', 'M']))
+ assert_(issubclass(P.dtype.type, np.string_))
+ assert_array_equal(P, asbytes_nested([
+ [('', '', ' abc '), ('', '', '')],
+ [('12', '3', '45'), ('', 'M', 'ixedCase')],
+ [('123 \t ', '3', '45 \0 '), ('', '', 'UPPER')]]))
def test_rsplit(self):
A = self.A.rsplit(asbytes('3'))
@@ -464,7 +464,7 @@ class TestMethods(TestCase):
['123 \t 345 \x00', 'UPP']]))
assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_))
assert_array_equal(self.B.rstrip(), [
- [u' \u03a3', ''],
+ [sixu(' \u03a3'), ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']])
@@ -481,7 +481,7 @@ class TestMethods(TestCase):
['23 \t 345 \x00', 'UPP']]))
assert_(issubclass(self.B.strip().dtype.type, np.unicode_))
assert_array_equal(self.B.strip(), [
- [u'\u03a3', ''],
+ [sixu('\u03a3'), ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']])
@@ -507,9 +507,9 @@ class TestMethods(TestCase):
['123 \t 345 \0 ', 'upper']]))
assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_))
assert_array_equal(self.B.swapcase(), [
- [u' \u03c3 ', u''],
- [u'12345', u'mIXEDcASE'],
- [u'123 \t 345 \0 ', u'upper']])
+ [sixu(' \u03c3 '), sixu('')],
+ [sixu('12345'), sixu('mIXEDcASE')],
+ [sixu('123 \t 345 \0 '), sixu('upper')]])
def test_title(self):
assert_(issubclass(self.A.title().dtype.type, np.string_))
@@ -519,9 +519,9 @@ class TestMethods(TestCase):
['123 \t 345 \0 ', 'Upper']]))
assert_(issubclass(self.B.title().dtype.type, np.unicode_))
assert_array_equal(self.B.title(), [
- [u' \u03a3 ', u''],
- [u'12345', u'Mixedcase'],
- [u'123 \t 345 \0 ', u'Upper']])
+ [sixu(' \u03a3 '), sixu('')],
+ [sixu('12345'), sixu('Mixedcase')],
+ [sixu('123 \t 345 \0 '), sixu('Upper')]])
def test_upper(self):
assert_(issubclass(self.A.upper().dtype.type, np.string_))
@@ -531,9 +531,9 @@ class TestMethods(TestCase):
['123 \t 345 \0 ', 'UPPER']]))
assert_(issubclass(self.B.upper().dtype.type, np.unicode_))
assert_array_equal(self.B.upper(), [
- [u' \u03a3 ', u''],
- [u'12345', u'MIXEDCASE'],
- [u'123 \t 345 \0 ', u'UPPER']])
+ [sixu(' \u03a3 '), sixu('')],
+ [sixu('12345'), sixu('MIXEDCASE')],
+ [sixu('123 \t 345 \0 '), sixu('UPPER')]])
def test_isnumeric(self):
def fail():
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
new file mode 100644
index 000000000..2bedbca09
--- /dev/null
+++ b/numpy/core/tests/test_deprecations.py
@@ -0,0 +1,292 @@
+"""
+Tests related to deprecation warnings. Also a convenient place
+to document how deprecations should eventually be turned into errors.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import operator
+import warnings
+from nose.plugins.skip import SkipTest
+
+import numpy as np
+from numpy.testing import dec, run_module_suite, assert_raises
+
+
+class _DeprecationTestCase(object):
+ # Just a warning: the warnings module uses re.match, so the start of
+ # this message must match.
+ message = ''
+
+ def setUp(self):
+ self.warn_ctx = warnings.catch_warnings(record=True)
+ self.log = self.warn_ctx.__enter__()
+
+ # Do *not* ignore other DeprecationWarnings. Ignoring warnings
+ # can give very confusing results because of
+ # http://bugs.python.org/issue4180, and it is probably simplest to
+ # keep the tests cleanly giving only the right warning type.
+ # (While checking with the filter set to "error", those are ignored
+ # anyway.) We still have them show up, because otherwise they would
+ # be raised.
+ warnings.filterwarnings("always", category=DeprecationWarning)
+ warnings.filterwarnings("always", message=self.message,
+ category=DeprecationWarning)
+
+
+ def tearDown(self):
+ self.warn_ctx.__exit__()
+
+
+ def assert_deprecated(self, function, num=1, ignore_others=False,
+ function_fails=False,
+ exceptions=(DeprecationWarning,), args=(), kwargs={}):
+ """Test if DeprecationWarnings are given and raised.
+
+ This first checks if the function when called gives `num`
+ DeprecationWarnings, after that it tries to raise these
+ DeprecationWarnings and compares them with `exceptions`.
+ The exceptions can be different for cases where this code path
+ is simply not anticipated and the exception is replaced.
+
+ Parameters
+ ----------
+ function : callable
+ The function to test
+ num : int
+ Number of DeprecationWarnings to expect. This should normally be 1.
+ ignore_others : bool
+ Whether warnings of the wrong type should be ignored (note that
+ the message is not checked)
+ function_fails : bool
+ If the function would normally fail, setting this will check for
+ warnings inside a try/except block.
+ exceptions : Exception or tuple of Exceptions
+ Exception to expect when turning the warnings into an error.
+ The default checks for DeprecationWarnings. If exceptions is
+ empty the function is expected to run successfully.
+ args : tuple
+ Arguments for `function`
+ kwargs : dict
+ Keyword arguments for `function`
+ """
+ # reset the log
+ self.log[:] = []
+
+ try:
+ function(*args, **kwargs)
+ except (Exception if function_fails else tuple()):
+ pass
+ # count the DeprecationWarnings that were recorded
+ num_found = 0
+ for warning in self.log:
+ if warning.category is DeprecationWarning:
+ num_found += 1
+ elif not ignore_others:
+ raise AssertionError("expected DeprecationWarning but %s given"
+ % warning.category)
+ if num_found != num:
+ raise AssertionError("%i warnings found but %i expected"
+ % (num_found, num))
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error", message=self.message,
+ category=DeprecationWarning)
+
+ try:
+ function(*args, **kwargs)
+ if exceptions != tuple():
+ raise AssertionError("No error raised during function call")
+ except exceptions:
+ if exceptions == tuple():
+ raise AssertionError("Error raised during function call")
+
+
+ def assert_not_deprecated(self, function, args=(), kwargs={}):
+ """Test that no DeprecationWarning is given or raised.
+
+ This is just a shorthand for:
+
+ self.assert_deprecated(function, num=0, ignore_others=True,
+ exceptions=tuple(), args=args, kwargs=kwargs)
+ """
+ self.assert_deprecated(function, num=0, ignore_others=True,
+ exceptions=tuple(), args=args, kwargs=kwargs)
+
+
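To make the two-phase check concrete, here is a minimal usage sketch (not part of the patch; the subclass and its warning-emitting stand-in are invented for illustration):

    import warnings

    class TestSketchDeprecation(_DeprecationTestCase):
        message = "this feature is deprecated"

        def test_sketch(self):
            def emits():
                warnings.warn("this feature is deprecated", DeprecationWarning)
            # phase 1 counts exactly one DeprecationWarning; phase 2 re-runs
            # with the filter set to "error" and expects it to be raised
            self.assert_deprecated(emits)
            # a clean code path must give no warning at all
            self.assert_not_deprecated(lambda: None)

    t = TestSketchDeprecation()
    t.setUp(); t.test_sketch(); t.tearDown()   # passes silently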
+class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase):
+ """
+ These test that a ``DeprecationWarning`` is given when you try to use
+ non-integers as arguments for indexing and slicing, e.g. ``a[0.0:5]``
+ and ``a[0.5]``, or in other functions like ``array.reshape(1., -1)``.
+
+ After deprecation, changes need to be done inside conversion_utils.c
+ in PyArray_PyIntAsIntp and possibly PyArray_IntpConverter.
+ In iterators.c the function slice_GetIndices could be removed in favor
+ of its python equivalent and in mapping.c the function _tuple_of_integers
+ can be simplified (if ``np.array([1]).__index__()`` is also deprecated).
+
+ As for the deprecation time-frame: via Ralf Gommers,
+
+ "Hard to put that as a version number, since we don't know if the
+ version after 1.8 will be 6 months or 2 years after. I'd say 2
+ years is reasonable."
+
+ I interpret this to mean 2 years after the 1.8 release, possibly
+ preceded by a PendingDeprecationWarning (which is not shown
+ by default).
+
+ """
+ message = "using a non-integer number instead of an integer " \
+ "will result in an error in the future"
+
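For reference, a sketch of the deprecated behavior itself, assuming 1.8-era semantics (later releases turn these into errors):

    import warnings
    import numpy as np

    a = np.arange(10)
    with warnings.catch_warnings(record=True) as log:
        warnings.simplefilter("always")
        a[2.0]              # scalar float index: works, but is deprecated
        a[1.0:8:2]          # float slice bound: deprecated
        a.reshape(5.0, 2)   # float shape argument: deprecated
    # on 1.8 each statement above appends a DeprecationWarning to `log`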
+ def test_indexing(self):
+ a = np.array([[[5]]])
+ def assert_deprecated(*args, **kwargs):
+ self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
+
+ assert_deprecated(lambda: a[0.0])
+ assert_deprecated(lambda: a[0, 0.0])
+ assert_deprecated(lambda: a[0.0, 0])
+ assert_deprecated(lambda: a[0.0, :])
+ assert_deprecated(lambda: a[:, 0.0])
+ assert_deprecated(lambda: a[:, 0.0, :])
+ assert_deprecated(lambda: a[0.0, :, :], num=2) # [1]
+ assert_deprecated(lambda: a[0, 0, 0.0])
+ assert_deprecated(lambda: a[0.0, 0, 0])
+ assert_deprecated(lambda: a[0, 0.0, 0])
+ assert_deprecated(lambda: a[-1.4])
+ assert_deprecated(lambda: a[0, -1.4])
+ assert_deprecated(lambda: a[-1.4, 0])
+ assert_deprecated(lambda: a[-1.4, :])
+ assert_deprecated(lambda: a[:, -1.4])
+ assert_deprecated(lambda: a[:, -1.4, :])
+ assert_deprecated(lambda: a[-1.4, :, :], num=2) # [1]
+ assert_deprecated(lambda: a[0, 0, -1.4])
+ assert_deprecated(lambda: a[-1.4, 0, 0])
+ assert_deprecated(lambda: a[0, -1.4, 0])
+ # [1] These are duplicated because of the _tuple_of_integers quick check
+
+ # Test that the slice parameter deprecation warning doesn't mask
+ # the scalar index warning.
+ assert_deprecated(lambda: a[0.0:, 0.0], num=2)
+ assert_deprecated(lambda: a[0.0:, 0.0, :], num=2)
+
+
+ def test_valid_indexing(self):
+ a = np.array([[[5]]])
+ assert_not_deprecated = self.assert_not_deprecated
+
+ assert_not_deprecated(lambda: a[np.array([0])])
+ assert_not_deprecated(lambda: a[[0, 0]])
+ assert_not_deprecated(lambda: a[:, [0, 0]])
+ assert_not_deprecated(lambda: a[:, 0, :])
+ assert_not_deprecated(lambda: a[:, :, :])
+
+
+ def test_slicing(self):
+ a = np.array([[5]])
+ def assert_deprecated(*args, **kwargs):
+ self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
+
+ # start as float.
+ assert_deprecated(lambda: a[0.0:])
+ assert_deprecated(lambda: a[0:, 0.0:2])
+ assert_deprecated(lambda: a[0.0::2, :0])
+ assert_deprecated(lambda: a[0.0:1:2, :])
+ assert_deprecated(lambda: a[:, 0.0:])
+ # stop as float.
+ assert_deprecated(lambda: a[:0.0])
+ assert_deprecated(lambda: a[:0, 1:2.0])
+ assert_deprecated(lambda: a[:0.0:2, :0])
+ assert_deprecated(lambda: a[:0.0, :])
+ assert_deprecated(lambda: a[:, 0:4.0:2])
+ # step as float.
+ assert_deprecated(lambda: a[::1.0])
+ assert_deprecated(lambda: a[0:, :2:2.0])
+ assert_deprecated(lambda: a[1::4.0, :0])
+ assert_deprecated(lambda: a[::5.0, :])
+ assert_deprecated(lambda: a[:, 0:4:2.0])
+ # mixed.
+ assert_deprecated(lambda: a[1.0:2:2.0], num=2)
+ assert_deprecated(lambda: a[1.0::2.0], num=2)
+ assert_deprecated(lambda: a[0:, :2.0:2.0], num=2)
+ assert_deprecated(lambda: a[1.0:1:4.0, :0], num=2)
+ assert_deprecated(lambda: a[1.0:5.0:5.0, :], num=3)
+ assert_deprecated(lambda: a[:, 0.4:4.0:2.0], num=3)
+ # should still get the DeprecationWarning if step = 0.
+ assert_deprecated(lambda: a[::0.0], function_fails=True)
+
+
+ def test_valid_slicing(self):
+ a = np.array([[[5]]])
+ assert_not_deprecated = self.assert_not_deprecated
+
+ assert_not_deprecated(lambda: a[::])
+ assert_not_deprecated(lambda: a[0:])
+ assert_not_deprecated(lambda: a[:2])
+ assert_not_deprecated(lambda: a[0:2])
+ assert_not_deprecated(lambda: a[::2])
+ assert_not_deprecated(lambda: a[1::2])
+ assert_not_deprecated(lambda: a[:2:2])
+ assert_not_deprecated(lambda: a[1:2:2])
+
+
+ def test_non_integer_argument_deprecations(self):
+ a = np.array([[5]])
+
+ self.assert_deprecated(np.reshape, args=(a, (1., 1., -1)), num=2)
+ self.assert_deprecated(np.reshape, args=(a, (np.array(1.), -1)))
+ self.assert_deprecated(np.take, args=(a, [0], 1.))
+ self.assert_deprecated(np.take, args=(a, [0], np.float64(1.)))
+
+
+class TestBooleanArgumentDeprecation(_DeprecationTestCase):
+ """This tests that using a boolean as an integer argument or index is
+ deprecated.
+
+ This should be kept in sync with TestFloatNonIntegerArgumentDeprecation
+ and with how it is handled in PyArray_PyIntAsIntp.
+ """
+ message = "using a boolean instead of an integer " \
+ "will result in an error in the future"
+
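A companion sketch for the boolean case, under the same 1.8-era assumption:

    # True/False are silently treated as 1/0, with a DeprecationWarning:
    np.reshape(np.arange(4), (True, -1))   # shape (1, 4) on 1.8
    np.take(np.arange(4), [0], False)      # axis False -> axis 0 on 1.8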
+ def test_bool_as_int_argument(self):
+ a = np.array([[[1]]])
+
+ self.assert_deprecated(np.reshape, args=(a, (True, -1)))
+ self.assert_deprecated(np.reshape, args=(a, (np.bool_(True), -1)))
+ # Note that operator.index(np.array(True)) does not work; a boolean
+ # array is thus also deprecated, but not with the same message:
+ assert_raises(TypeError, operator.index, np.array(True))
+ self.assert_deprecated(np.take, args=(a, [0], False))
+ self.assert_deprecated(lambda: a[False:True:True], exceptions=IndexError, num=3)
+ self.assert_deprecated(lambda: a[False,0], exceptions=IndexError)
+ self.assert_deprecated(lambda: a[False,0,0], exceptions=IndexError)
+
+
+class TestArrayToIndexDeprecation(_DeprecationTestCase):
+ """This tests that creating an index from an array is deprecated
+ if the array is not 0d.
+
+ This can probably be deprecated somewhat faster than the integer
+ deprecations. The deprecation period started with NumPy 1.8.
+ Turning it into an error requires changing array_index in number.c.
+ """
+ message = "converting an array with ndim \> 0 to an index will result " \
+ "in an error in the future"
+
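A sketch of the boundary being tested, again assuming 1.8-era behavior:

    import operator
    operator.index(np.array(2))    # 0-d array: fine, gives 2
    operator.index(np.array([2]))  # ndim > 0: DeprecationWarning on 1.8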
+ def test_array_to_index_deprecation(self):
+ # This drops into the non-integer deprecation, which is ignored here,
+ # so no exception is expected. The raising is effectively tested above.
+ a = np.array([[[1]]])
+
+ self.assert_deprecated(operator.index, args=(np.array([1]),))
+ self.assert_deprecated(np.reshape, args=(a, (a, -1)), exceptions=())
+ self.assert_deprecated(np.take, args=(a, [0], a), exceptions=())
+ # Check slicing. Normal indexing checks arrays specifically.
+ self.assert_deprecated(lambda: a[a:a:a], exceptions=(), num=3)
+
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 5645a0824..7493c20e2 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
import numpy as np
from numpy.testing import *
@@ -47,13 +49,38 @@ class TestBuiltin(TestCase):
self.assertTrue(hash(left) == hash(right))
def test_invalid_types(self):
- # Make sure invalid type strings raise exceptions
- for typestr in ['O3', 'O5', 'O7', 'b3', 'h4', 'I5', 'l4', 'l8',
- 'L4', 'L8', 'q8', 'q16', 'Q8', 'Q16', 'e3',
- 'f5', 'd8', 't8', 'g12', 'g16',
- 'NA[u4,0xffffffff]']:
- #print typestr
- assert_raises(TypeError, np.dtype, typestr)
+ # Make sure invalid type strings raise a warning.
+ # For now, display a deprecation warning for invalid
+ # type sizes. In the future this should be changed
+ # to an exception.
+
+ assert_warns(DeprecationWarning, np.dtype, 'O3')
+ assert_warns(DeprecationWarning, np.dtype, 'O5')
+ assert_warns(DeprecationWarning, np.dtype, 'O7')
+ assert_warns(DeprecationWarning, np.dtype, 'b3')
+ assert_warns(DeprecationWarning, np.dtype, 'h4')
+ assert_warns(DeprecationWarning, np.dtype, 'I5')
+ assert_warns(DeprecationWarning, np.dtype, 'e3')
+ assert_warns(DeprecationWarning, np.dtype, 'f5')
+
+ if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
+ assert_warns(DeprecationWarning, np.dtype, 'g12')
+ elif np.dtype('g').itemsize == 12:
+ assert_warns(DeprecationWarning, np.dtype, 'g16')
+
+ if np.dtype('l').itemsize == 8:
+ assert_warns(DeprecationWarning, np.dtype, 'l4')
+ assert_warns(DeprecationWarning, np.dtype, 'L4')
+ else:
+ assert_warns(DeprecationWarning, np.dtype, 'l8')
+ assert_warns(DeprecationWarning, np.dtype, 'L8')
+
+ if np.dtype('q').itemsize == 8:
+ assert_warns(DeprecationWarning, np.dtype, 'q4')
+ assert_warns(DeprecationWarning, np.dtype, 'Q4')
+ else:
+ assert_warns(DeprecationWarning, np.dtype, 'q8')
+ assert_warns(DeprecationWarning, np.dtype, 'Q8')
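A sketch of the behavior these assertions rely on, assuming the 1.8-era semantics this hunk targets (earlier releases raised TypeError outright):

    import warnings
    import numpy as np

    with warnings.catch_warnings(record=True) as log:
        warnings.simplefilter("always")
        np.dtype('f5')   # there is no 5-byte float
    assert log[0].category is DeprecationWarning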
def test_bad_param(self):
# Can't give a size that's too small
@@ -413,11 +440,14 @@ class TestString(TestCase):
assert_equal(repr(dt),
"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
- @dec.skipif(sys.version_info[0] > 2)
+ @dec.skipif(sys.version_info[0] >= 3)
def test_dtype_str_with_long_in_shape(self):
# Pull request #376
dt = np.dtype('(1L,)i4')
+ def test_base_dtype_with_object_type(self):
+ # Issue gh-2798
+ a = np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
class TestDtypeAttributeDeletion(object):
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index fb7ceb0db..e7480a269 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -1,10 +1,11 @@
+from __future__ import division, absolute_import, print_function
+
import sys
+import warnings
from decimal import Decimal
import numpy as np
from numpy.testing import *
-from numpy.testing.utils import WarningManager
-import warnings
class TestEinSum(TestCase):
def test_einsum_errors(self):
@@ -239,6 +240,7 @@ class TestEinSum(TestCase):
assert_equal(np.einsum(a, [0,0]), np.trace(a).astype(dtype))
# multiply(a, b)
+ assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
for n in range(1,17):
a = np.arange(3*n, dtype=dtype).reshape(3,n)
b = np.arange(2*3*n, dtype=dtype).reshape(2,3,n)
@@ -269,9 +271,7 @@ class TestEinSum(TestCase):
assert_equal(np.einsum(a, [0], b, [1]), np.outer(a, b))
# Suppress the complex warnings for the 'as f8' tests
- ctx = WarningManager()
- ctx.__enter__()
- try:
+ with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# matvec(a,b) / a.dot(b) where a is matrix, b is vector
@@ -376,8 +376,6 @@ class TestEinSum(TestCase):
dtype='f8', casting='unsafe')
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1,0],[0,1])).astype(dtype))
- finally:
- ctx.__exit__()
# logical_and(logical_and(a!=0, b!=0), c!=0)
a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype)
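For reference, the replacement pattern shown standalone; WarningManager, removed above, was numpy.testing's backport of warnings.catch_warnings for pre-2.6 Pythons, and the standard context manager now does the job:

    import warnings
    import numpy as np

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', np.ComplexWarning)
        # casting complex to float discards the imaginary part and would
        # otherwise emit a ComplexWarning
        np.array([1 + 2j]).astype('f8')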
diff --git a/numpy/core/tests/test_errstate.py b/numpy/core/tests/test_errstate.py
index a9588463b..7eb0aba2e 100644
--- a/numpy/core/tests/test_errstate.py
+++ b/numpy/core/tests/test_errstate.py
@@ -1,37 +1,32 @@
-# The following exec statement (or something like it) is needed to
-# prevent SyntaxError on Python < 2.5. Even though this is a test,
-# SyntaxErrors are not acceptable; on Debian systems, they block
-# byte-compilation during install and thus cause the package to fail
-# to install.
-
-import sys
-if sys.version_info[:2] >= (2, 5):
- exec """
-from __future__ import with_statement
-from numpy.core import *
-from numpy.random import rand, randint
-from numpy.testing import *
+from __future__ import division, absolute_import, print_function
+
+import platform
+
+import numpy as np
+from numpy.testing import TestCase, assert_, run_module_suite, dec
+
class TestErrstate(TestCase):
+ @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_invalid(self):
- with errstate(all='raise', under='ignore'):
- a = -arange(3)
+ with np.errstate(all='raise', under='ignore'):
+ a = -np.arange(3)
# This should work
- with errstate(invalid='ignore'):
- sqrt(a)
+ with np.errstate(invalid='ignore'):
+ np.sqrt(a)
# While this should fail!
try:
- sqrt(a)
+ np.sqrt(a)
except FloatingPointError:
pass
else:
self.fail("Did not raise an invalid error")
def test_divide(self):
- with errstate(all='raise', under='ignore'):
- a = -arange(3)
+ with np.errstate(all='raise', under='ignore'):
+ a = -np.arange(3)
# This should work
- with errstate(divide='ignore'):
+ with np.errstate(divide='ignore'):
a // 0
# While this should fail!
try:
@@ -44,14 +39,13 @@ class TestErrstate(TestCase):
def test_errcall(self):
def foo(*args):
print(args)
- olderrcall = geterrcall()
- with errstate(call=foo):
- assert_(geterrcall() is foo, 'call is not foo')
- with errstate(call=None):
- assert_(geterrcall() is None, 'call is not None')
- assert_(geterrcall() is olderrcall, 'call is not olderrcall')
-
-"""
+ olderrcall = np.geterrcall()
+ with np.errstate(call=foo):
+ assert_(np.geterrcall() is foo, 'call is not foo')
+ with np.errstate(call=None):
+ assert_(np.geterrcall() is None, 'call is not None')
+ assert_(np.geterrcall() is olderrcall, 'call is not olderrcall')
+
if __name__ == "__main__":
run_module_suite()
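The np.errstate context manager that the rewritten tests lean on, shown in isolation:

    import numpy as np

    with np.errstate(divide='raise'):
        try:
            np.array([1.0]) / 0.0
        except FloatingPointError:
            pass   # raised here instead of the default RuntimeWarning
    # on exit the previous error state is restored automatically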
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index f1a08d9d5..5de733d4c 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
from numpy.testing import *
from numpy import logspace, linspace
diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py
index 8cbc79cf9..96ca66b10 100644
--- a/numpy/core/tests/test_getlimits.py
+++ b/numpy/core/tests/test_getlimits.py
@@ -1,5 +1,7 @@
""" Test functions for limits module.
+
"""
+from __future__ import division, absolute_import, print_function
from numpy.testing import *
@@ -41,10 +43,10 @@ class TestLongdouble(TestCase):
class TestIinfo(TestCase):
def test_basic(self):
- dts = zip(['i1', 'i2', 'i4', 'i8',
+ dts = list(zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
[np.int8, np.int16, np.int32, np.int64,
- np.uint8, np.uint16, np.uint32, np.uint64])
+ np.uint8, np.uint16, np.uint32, np.uint64]))
for dt1, dt2 in dts:
assert_equal(iinfo(dt1).min, iinfo(dt2).min)
assert_equal(iinfo(dt1).max, iinfo(dt2).max)
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index cbb21d4ab..e5f2eaf9b 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -1,13 +1,17 @@
-import warnings
+from __future__ import division, absolute_import, print_function
+
+import platform
+
import numpy as np
from numpy import uint16, float16, float32, float64
-from numpy.testing import TestCase, run_module_suite, assert_, assert_equal
+from numpy.testing import TestCase, run_module_suite, assert_, assert_equal, \
+ dec
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
- except FloatingPointError, exc:
+ except FloatingPointError as exc:
assert_(str(exc).find(strmatch) >= 0,
"Did not raise floating point %s error" % strmatch)
else:
@@ -67,8 +71,7 @@ class TestHalf(TestCase):
assert_equal(i_int,j)
def test_nans_infs(self):
- oldsettings = np.seterr(all='ignore')
- try:
+ with np.errstate(all='ignore'):
# Check some of the ufuncs
assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
@@ -96,9 +99,6 @@ class TestHalf(TestCase):
assert_(not (self.all_f16 >= nan).any())
assert_(not (nan >= self.all_f16).any())
- finally:
- np.seterr(**oldsettings)
-
def test_half_values(self):
"""Confirms a small number of known half values"""
@@ -357,10 +357,9 @@ class TestHalf(TestCase):
assert_equal(np.power(b32,a16).dtype, float16)
assert_equal(np.power(b32,b16).dtype, float32)
+ @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_half_fpe(self):
- """Test that half raises the correct underflows and overflows"""
- oldsettings = np.seterr(all='raise')
- try:
+ with np.errstate(all='raise'):
sx16 = np.array((1e-4,),dtype=float16)
bx16 = np.array((1e4,),dtype=float16)
sy16 = float16(1e-4)
@@ -422,8 +421,6 @@ class TestHalf(TestCase):
float16(-2**-14)/float16(2**10)
float16(2**-14+2**-23)/float16(2)
float16(-2**-14-2**-23)/float16(2)
- finally:
- np.seterr(**oldsettings)
def test_half_array_interface(self):
"""Test that half is compatible with __array_interface__"""
diff --git a/numpy/core/tests/test_indexerrors.py b/numpy/core/tests/test_indexerrors.py
index af10df9b3..263c505de 100644
--- a/numpy/core/tests/test_indexerrors.py
+++ b/numpy/core/tests/test_indexerrors.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_raises, assert_equal, assert_
import sys
@@ -10,6 +12,8 @@ class TestIndexErrors(TestCase):
x = np.empty((2, 3, 0, 4))
assert_raises(IndexError, x.take, [0], axis=2)
assert_raises(IndexError, x.take, [1], axis=2)
+ assert_raises(IndexError, x.take, [0], axis=2, mode='wrap')
+ assert_raises(IndexError, x.take, [0], axis=2, mode='clip')
def test_take_from_object(self):
# Check exception taking from object array
@@ -21,6 +25,8 @@ class TestIndexErrors(TestCase):
assert_raises(IndexError, d.take, [1], axis=1)
assert_raises(IndexError, d.take, [0], axis=1)
assert_raises(IndexError, d.take, [0])
+ assert_raises(IndexError, d.take, [0], mode='wrap')
+ assert_raises(IndexError, d.take, [0], mode='clip')
def test_multiindex_exceptions(self):
a = np.empty(5, dtype=object)
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index da19d2a2a..3c74bc466 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -1,25 +1,518 @@
+from __future__ import division, absolute_import, print_function
+
import numpy as np
+from itertools import product
from numpy.compat import asbytes
from numpy.testing import *
import sys, warnings
-# The C implementation of fancy indexing is relatively complicated,
-# and has many seeming inconsistencies. It also appears to lack any
-# kind of test suite, making any changes to the underlying code difficult
-# because of its fragility.
-# This file is to remedy the test suite part a little bit,
-# but hopefully NumPy indexing can be changed to be more systematic
-# at some point in the future.
+class TestIndexing(TestCase):
+ def test_none_index(self):
+ # `None` index adds newaxis
+ a = np.array([1, 2, 3])
+ assert_equal(a[None], a[np.newaxis])
+ assert_equal(a[None].ndim, a.ndim + 1)
+
+ def test_empty_tuple_index(self):
+ # Empty tuple index creates a view
+ a = np.array([1, 2, 3])
+ assert_equal(a[()], a)
+ assert_(a[()].base is a)
+ a = np.array(0)
+ assert_(isinstance(a[()], np.int_))
+
+ def test_empty_fancy_index(self):
+ # Empty list index creates an empty array
+ # with the same dtype (but with weird shape)
+ a = np.array([1, 2, 3])
+ assert_equal(a[[]], [])
+ assert_equal(a[[]].dtype, a.dtype)
+
+ b = np.array([], dtype=np.intp)
+ assert_equal(a[[]], [])
+ assert_equal(a[[]].dtype, a.dtype)
+
+ b = np.array([])
+ assert_raises(IndexError, a.__getitem__, b)
+
+ def test_ellipsis_index(self):
+ # Ellipsis index does not create a view
+ a = np.array([[1, 2, 3],
+ [4 ,5, 6],
+ [7, 8, 9]])
+ assert_equal(a[...], a)
+ assert_(a[...] is a)
+
+ # Slicing with ellipsis can skip an
+ # arbitrary number of dimensions
+ assert_equal(a[0, ...], a[0])
+ assert_equal(a[0, ...], a[0, :])
+ assert_equal(a[..., 0], a[:, 0])
+
+ # Slicing with ellipsis always results
+ # in an array, not a scalar
+ assert_equal(a[0, ..., 1], np.array(2))
+
+ def test_single_int_index(self):
+ # Single integer index selects one row
+ a = np.array([[1, 2, 3],
+ [4 ,5, 6],
+ [7, 8, 9]])
+
+ assert_equal(a[0], [1, 2, 3])
+ assert_equal(a[-1], [7, 8, 9])
+
+ # Index out of bounds produces IndexError
+ assert_raises(IndexError, a.__getitem__, 1<<30)
+ # Index overflow produces IndexError
+ assert_raises(IndexError, a.__getitem__, 1<<64)
+
+ def test_single_bool_index(self):
+ # Single boolean index
+ a = np.array([[1, 2, 3],
+ [4 ,5, 6],
+ [7, 8, 9]])
+
+ # Python boolean converts to integer
+ # These are being deprecated (and tested in test_deprecations)
+ #assert_equal(a[True], a[1])
+ #assert_equal(a[False], a[0])
+
+ # Same with NumPy boolean scalar
+ assert_equal(a[np.array(True)], a[1])
+ assert_equal(a[np.array(False)], a[0])
+
+ def test_boolean_indexing_onedim(self):
+ # Indexing a 2-dimensional array with
+ # boolean array of length one
+ a = np.array([[ 0., 0., 0.]])
+ b = np.array([ True], dtype=bool)
+ assert_equal(a[b], a)
+ # boolean assignment
+ a[b] = 1.
+ assert_equal(a, [[1., 1., 1.]])
+
+ def test_boolean_assignment_value_mismatch(self):
+ # A boolean assignment should fail when the shape of the values
+ # cannot be broadcast to the subscription. (See also gh-3458.)
+ a = np.arange(4)
+ def f(a, v):
+ a[a > -1] = v
+
+ assert_raises(ValueError, f, a, [])
+ assert_raises(ValueError, f, a, [1, 2, 3])
+ assert_raises(ValueError, f, a[:1], [1, 2, 3])
+
+ def test_boolean_indexing_twodim(self):
+ # Indexing a 2-dimensional array with
+ # 2-dimensional boolean array
+ a = np.array([[1, 2, 3],
+ [4 ,5, 6],
+ [7, 8, 9]])
+ b = np.array([[ True, False, True],
+ [False, True, False],
+ [ True, False, True]])
+ assert_equal(a[b], [1, 3, 5, 7, 9])
+ assert_equal(a[b[1]], [[4, 5, 6]])
+ assert_equal(a[b[0]], a[b[2]])
+
+ # boolean assignment
+ a[b] = 0
+ assert_equal(a, [[0, 2, 0],
+ [4, 0, 6],
+ [0, 8, 0]])
+
+
+class TestMultiIndexingAutomated(TestCase):
+ """
+ These tests use code to mimic the C-code indexing for selection.
+
+ NOTE: * This still lacks tests for complex item setting.
+ * If you change the behavior of indexing, you might want to modify
+ these tests to try more combinations.
+ * Behavior was written to match numpy version 1.8 (though a
+ first version matched 1.7).
+ * Only tuple indices are supported by the mimicking code
+ (and tested as of writing this).
+ * Error types should match most of the time as long as there
+ is only one error. For multiple errors, what gets raised
+ will usually not be the same one. They are *not* tested.
+ """
+ def setUp(self):
+ self.a = np.arange(np.prod([3,1,5,6])).reshape(3,1,5,6)
+ self.b = np.empty((3,0,5,6))
+ self.complex_indices = ['skip', Ellipsis,
+ 0,
+ # Boolean indices, up to 3-d for some special cases of eating up
+ # dimensions, also need to test all False
+ np.array(False),
+ np.array([True, False, False]),
+ np.array([[True, False], [False, True]]),
+ np.array([[[False, False], [False, False]]]),
+ # Some slices:
+ slice(-5, 5, 2),
+ slice(1, 1, 100),
+ slice(4, -1, -2),
+ slice(None,None,-3),
+ # Some Fancy indexes:
+ np.empty((0,1,1), dtype=np.intp), # empty broadcastable
+ np.array([0,1,-2]),
+ np.array([[2],[0],[1]]),
+ np.array([[0,-1], [0,1]]),
+ np.array([2,-1]),
+ np.zeros([1]*31, dtype=int), # trigger too large array.
+ np.array([0., 1.])] # invalid datatype
+ # Some simpler indices that still cover a bit more
+ self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip']
+ # Very simple ones to fill the rest:
+ self.fill_indices = [slice(None,None), 0]
+
+
+ def _get_multi_index(self, arr, indices):
+ """Mimic multi dimensional indexing.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Array to be indexed.
+ indices : tuple of index objects
+
+ Returns
+ -------
+ out : ndarray
+ An array equivalent to the indexing operation (but always a copy).
+ `arr[indices]` should be identical.
+ no_copy : bool
+ Whether the indexing operation requires a copy. If this is `True`,
+ `np.may_share_memory(arr, arr[indices])` should be `True` (with
+ some exceptions for scalars and possibly 0-d arrays).
+
+ Notes
+ -----
+ While the function may mostly match the errors of normal indexing, this
+ is not guaranteed in general.
+ """
+ in_indices = list(indices)
+ indices = []
+ # if False, this is a fancy or boolean index
+ no_copy = True
+ # number of fancy/scalar indexes that are not consecutive
+ num_fancy = 0
+ # number of dimensions indexed by a "fancy" index
+ fancy_dim = 0
+ # NOTE: This is a funny twist (and probably OK to change).
+ # The boolean array has illegal indexes, but this is
+ # allowed if the broadcasted fancy-indices are 0-sized.
+ # This variable is to catch that case.
+ error_unless_broadcast_to_empty = False
+
+ # We need to handle Ellipsis and make arrays from indices, also
+ # check if this is fancy indexing (set no_copy).
+ ndim = 0
+ ellipsis_pos = None # define here mostly to replace all but first.
+ for i, indx in enumerate(in_indices):
+ if indx is None:
+ continue
+ if isinstance(indx, np.ndarray) and indx.dtype == bool:
+ no_copy = False
+ if indx.ndim == 0:
+ raise IndexError
+ # boolean indices can have higher dimensions
+ ndim += indx.ndim
+ fancy_dim += indx.ndim
+ continue
+ if indx is Ellipsis:
+ if ellipsis_pos is None:
+ ellipsis_pos = i
+ continue # do not increment ndim counter
+ in_indices[i] = slice(None,None)
+ ndim += 1
+ continue
+ if isinstance(indx, slice):
+ ndim += 1
+ continue
+ if not isinstance(indx, np.ndarray):
+ # This could be open for changes in numpy.
+ # numpy should maybe raise an error if casting to intp
+ # is not safe. It rejects np.array([1., 2.]) but not
+ # [1., 2.] as index (same for e.g. np.take).
+ # (Note the importance of empty lists if changing this here)
+ indx = np.array(indx, dtype=np.intp)
+ in_indices[i] = indx
+ elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
+ raise IndexError('arrays used as indices must be of integer (or boolean) type')
+ if indx.ndim != 0:
+ no_copy = False
+ ndim += 1
+ fancy_dim += 1
+
+ if arr.ndim - ndim < 0:
+ # we can't take more dimensions than we have, not even for 0-d arrays,
+ # since a[()] makes sense, but not a[(),]. We will raise an error
+ # later on, unless a broadcasting error occurs first.
+ raise IndexError
+
+ if ndim == 0 and not None in in_indices:
+ # We have no indices, or only an Ellipsis; this is legal.
+ return arr.copy(), no_copy
+
+ if ellipsis_pos is not None:
+ in_indices[ellipsis_pos:ellipsis_pos+1] = [slice(None,None)] * (arr.ndim - ndim)
+
+ for ax, indx in enumerate(in_indices):
+ if isinstance(indx, slice):
+ # convert to an index array anyway:
+ indx = np.arange(*indx.indices(arr.shape[ax]))
+ indices.append(['s', indx])
+ continue
+ elif indx is None:
+ # this is like taking a slice with one element from a new axis:
+ indices.append(['n', np.array([0], dtype=np.intp)])
+ arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
+ continue
+ if isinstance(indx, np.ndarray) and indx.dtype == bool:
+ # This may be open for improvement in numpy.
+ # numpy should probably cast boolean lists to boolean indices
+ # instead of intp!
+
+ # NumPy supports a boolean index with
+ # non-matching shape as long as the True values are not
+ # out of bounds. NumPy maybe should not allow this
+ # (at least not arrays that are larger than the original one).
+ try:
+ flat_indx = np.ravel_multi_index(np.nonzero(indx),
+ arr.shape[ax:ax+indx.ndim], mode='raise')
+ except:
+ error_unless_broadcast_to_empty = True
+ # fill with 0s instead, and raise error later
+ flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
+ # concatenate axis into a single one:
+ if indx.ndim != 0:
+ arr = arr.reshape((arr.shape[:ax]
+ + (np.prod(arr.shape[ax:ax+indx.ndim]),)
+ + arr.shape[ax+indx.ndim:]))
+ indx = flat_indx
+ else:
+ # This could be changed; a 0-d boolean index can
+ # make sense (even outside the 0-d indexed array case).
+ # Note that originally this could be interpreted as
+ # an integer in the full integer special case.
+ raise IndexError
+ if len(indices) > 0 and indices[-1][0] == 'f' and ax != ellipsis_pos:
+ # NOTE: There could still have been a 0-sized Ellipsis
+ # between them. Checked that with ellipsis_pos.
+ indices[-1].append(indx)
+ else:
+ # We have a fancy index that is not after an existing one.
+ # NOTE: A 0-d array triggers this as well, while
+ # one may expect it to not trigger it, since a scalar
+ # would not be considered fancy indexing.
+ num_fancy += 1
+ indices.append(['f', indx])
+
+ if num_fancy > 1 and not no_copy:
+ # We have to flush the fancy indexes left
+ new_indices = indices[:]
+ axes = list(range(arr.ndim))
+ fancy_axes = []
+ new_indices.insert(0, ['f'])
+ ni = 0
+ ai = 0
+ for indx in indices:
+ ni += 1
+ if indx[0] == 'f':
+ new_indices[0].extend(indx[1:])
+ del new_indices[ni]
+ ni -= 1
+ for ax in range(ai, ai + len(indx[1:])):
+ fancy_axes.append(ax)
+ axes.remove(ax)
+ ai += len(indx) - 1 # axis we are at
+ indices = new_indices
+ # and now we need to transpose arr:
+ arr = arr.transpose(*(fancy_axes + axes))
+
+ # We only have one 'f' index now and arr is transposed accordingly.
+ # Now handle newaxes by reshaping...
+ ax = 0
+ for indx in indices:
+ if indx[0] == 'f':
+ if len(indx) == 1:
+ continue
+ # First of all, reshape arr to combine fancy axes into one:
+ orig_shape = arr.shape
+ orig_slice = orig_shape[ax:ax + len(indx[1:])]
+ arr = arr.reshape((arr.shape[:ax]
+ + (np.prod(orig_slice).astype(int),)
+ + arr.shape[ax + len(indx[1:]):]))
+
+ # Check if broadcasting works
+ if len(indx[1:]) != 1:
+ res = np.broadcast(*indx[1:]) # raises ValueError...
+ else:
+ res = indx[1]
+ # unfortunately the indices might be out of bounds. So check
+ # that first, and then use mode='wrap'. However, only if
+ # there are any indices...
+ if res.size != 0:
+ if error_unless_broadcast_to_empty:
+ raise IndexError
+ for _indx, _size in zip(indx[1:], orig_slice):
+ if _indx.size == 0:
+ continue
+ if np.any(_indx >= _size) or np.any(_indx < -_size):
+ raise IndexError
+ if len(indx[1:]) == len(orig_slice):
+ if np.product(orig_slice) == 0:
+ # Workaround for a crash or IndexError with 'wrap'
+ # in some 0-sized cases.
+ try:
+ mi = np.ravel_multi_index(indx[1:], orig_slice, mode='raise')
+ except:
+ # This happens with 0-sized orig_slice (sometimes?)
+ # here it is a ValueError, but indexing gives an IndexError:
+ raise IndexError('invalid index into 0-sized')
+ else:
+ mi = np.ravel_multi_index(indx[1:], orig_slice, mode='wrap')
+ else:
+ # Maybe never happens...
+ raise ValueError
+ arr = arr.take(mi.ravel(), axis=ax)
+ arr = arr.reshape((arr.shape[:ax]
+ + mi.shape
+ + arr.shape[ax+1:]))
+ ax += mi.ndim
+ continue
+
+ # If we are here, we have a 1D array for take:
+ arr = arr.take(indx[1], axis=ax)
+ ax += 1
+
+ return arr, no_copy
+
+
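A sketch (not part of the patch) of how the mimic is exercised; the comparison proper lives in _check_multi_index below, and assert_/assert_array_equal come from numpy.testing:

    t = TestMultiIndexingAutomated('test_1d')   # any valid test name works
    arr = np.arange(30).reshape(3, 10)
    index = (slice(None), np.array([0, -1]))
    mimic, no_copy = t._get_multi_index(arr, index)
    assert_array_equal(mimic, arr[index])   # must agree with the C result
    assert_(no_copy is False)               # a fancy index forces a copy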
+ def _check_multi_index(self, arr, index):
+ """Check a multi index item getting and simple setting.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Array to be indexed, must be a reshaped arange.
+ index : tuple of indexing objects
+ Index being tested.
+ """
+ # Test item getting
+ try:
+ mimic_get, no_copy = self._get_multi_index(arr, index)
+ except Exception as e:
+ assert_raises(Exception, arr.__getitem__, index)
+ assert_raises(Exception, arr.__setitem__, index, 0)
+ return
+
+ self._compare_index_result(arr, index, mimic_get, no_copy)
+
+
+ def _check_single_index(self, arr, index):
+ """Check a single index item getting and simple setting.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Array to be indexed, must be an arange.
+ index : indexing object
+ Index being tested. Must be a single index and not a tuple
+ of indexing objects (see also `_check_multi_index`).
+ """
+ try:
+ mimic_get, no_copy = self._get_multi_index(arr, (index,))
+ except Exception as e:
+ assert_raises(Exception, arr.__getitem__, index)
+ assert_raises(Exception, arr.__setitem__, index, 0)
+ return
+
+ self._compare_index_result(arr, index, mimic_get, no_copy)
+
+
+ def _compare_index_result(self, arr, index, mimic_get, no_copy):
+ """Compare mimicked result to indexing result.
+ """
+ arr = arr.copy()
+ indexed_arr = arr[index]
+ assert_array_equal(indexed_arr, mimic_get)
+ # Check if we got a view, unless it is a 0-sized or 0-d array
+ # (then it is not a view, and that does not matter).
+ if indexed_arr.size != 0 and indexed_arr.ndim != 0:
+ assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
+ # Check reference count of the original array
+ if no_copy:
+ # refcount increases by one:
+ assert_equal(sys.getrefcount(arr), 3)
+ else:
+ assert_equal(sys.getrefcount(arr), 2)
+
+ # Test non-broadcast setitem:
+ b = arr.copy()
+ b[index] = mimic_get + 1000
+ if b.size == 0:
+ return # nothing to compare here...
+ if no_copy and indexed_arr.ndim != 0:
+ # change indexed_arr in-place to manipulate original:
+ indexed_arr += 1000
+ assert_array_equal(arr, b)
+ return
+ # Use the fact that the array is originally an arange:
+ arr.flat[indexed_arr.ravel()] += 1000
+ assert_array_equal(arr, b)
+
+
+ def test_boolean(self):
+ a = np.array(5)
+ assert_equal(a[np.array(True)], 5)
+ a[np.array(True)] = 1
+ assert_equal(a, 1)
+ # NOTE: This is different from normal broadcasting, as
+ # arr[boolean_array] works like a multi index, which means
+ # it is aligned to the left. This is probably correct for
+ # consistency with arr[boolean_array,]; also, no broadcasting
+ # is done at all.
+ self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool),))
+ self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[...,0],))
+ self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[None,...],))
+
+
+ def test_multidim(self):
+ # Automatically test combinations with complex indexes on 2nd (or 1st)
+ # spot and the simple ones in one other spot.
+ with warnings.catch_warnings():
+ # This is so that np.array(True) is not accepted in a full integer
+ # index, when running the file separately.
+ warnings.filterwarnings('error', '', DeprecationWarning)
+ for simple_pos in [0,2,3]:
+ tocheck = [self.fill_indices, self.complex_indices,
+ self.fill_indices, self.fill_indices]
+ tocheck[simple_pos] = self.simple_indices
+ for index in product(*tocheck):
+ index = tuple(i for i in index if i != 'skip')
+ self._check_multi_index(self.a, index)
+ self._check_multi_index(self.b, index)
+ # Check very simple item getting:
+ self._check_multi_index(self.a, (0,0,0,0))
+ self._check_multi_index(self.b, (0,0,0,0))
+ # Also check (simple cases of) too many indices:
+ assert_raises(IndexError, self.a.__getitem__, (0,0,0,0,0))
+ assert_raises(IndexError, self.a.__setitem__, (0,0,0,0,0), 0)
+ assert_raises(IndexError, self.a.__getitem__, (0,0,[1],0,0))
+ assert_raises(IndexError, self.a.__setitem__, (0,0,[1],0,0), 0)
+
-def test_boolean_indexing():
- # Indexing a 2-dimensional array with a length-1 array of 'True'
- a = np.array([[ 0., 0., 0.]])
- b = np.array([ True], dtype=bool)
- assert_equal(a[b], a)
+ def test_1d(self):
+ a = np.arange(10)
+ with warnings.catch_warnings():
+ warnings.filterwarnings('error', '', DeprecationWarning)
+ for index in self.complex_indices:
+ self._check_single_index(a, index)
- a[b] = 1.
- assert_equal(a, [[1., 1., 1.]])
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py
new file mode 100644
index 000000000..425b1d893
--- /dev/null
+++ b/numpy/core/tests/test_item_selection.py
@@ -0,0 +1,65 @@
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+from numpy.testing import *
+import sys, warnings
+
+
+class TestTake(TestCase):
+ def test_simple(self):
+ a = [[1, 2], [3, 4]]
+ a_str = [[b'1', b'2'],[b'3', b'4']]
+ modes = ['raise', 'wrap', 'clip']
+ indices = [-1, 4]
+ index_arrays = [np.empty(0, dtype=np.intp),
+ np.empty(tuple(), dtype=np.intp),
+ np.empty((1,1), dtype=np.intp)]
+ real_indices = {}
+ real_indices['raise'] = {-1:1, 4:IndexError}
+ real_indices['wrap'] = {-1:1, 4:0}
+ real_indices['clip'] = {-1:0, 4:1}
+ # Currently all types but object use the same generated functions,
+ # so it should not be necessary to test all of them. However, also
+ # test a non-refcounted struct in addition to object.
+ types = np.int, np.object, np.dtype([('', 'i', 2)])
+ for t in types:
+ # ta works, even if the array may be odd if the buffer interface is used
+ ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
+ tresult = list(ta.T.copy())
+ for index_array in index_arrays:
+ if index_array.size != 0:
+ tresult[0].shape = (2,) + index_array.shape
+ tresult[1].shape = (2,) + index_array.shape
+ for mode in modes:
+ for index in indices:
+ real_index = real_indices[mode][index]
+ if real_index is IndexError and index_array.size != 0:
+ index_array.put(0, index)
+ assert_raises(IndexError, ta.take, index_array,
+ mode=mode, axis=1)
+ elif index_array.size != 0:
+ index_array.put(0, index)
+ res = ta.take(index_array, mode=mode, axis=1)
+ assert_array_equal(res, tresult[real_index])
+ else:
+ res = ta.take(index_array, mode=mode, axis=1)
+ assert_(res.shape == (2,) + index_array.shape)
+
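For reference, the mode semantics that the real_indices table above encodes:

    a = np.arange(2)             # valid indices are 0 and 1
    a.take([-1], mode='raise')   # -1 wraps to 1; 4 would raise IndexError
    a.take([4], mode='wrap')     # 4 % 2 == 0 -> array([0])
    a.take([4], mode='clip')     # clipped to the last index -> array([1])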
+
+ def test_refcounting(self):
+ objects = [object() for i in range(10)]
+ for mode in ('raise', 'clip', 'wrap'):
+ a = np.array(objects)
+ b = np.array([2, 2, 4, 5, 3, 5])
+ a.take(b, out=a[:6])
+ del a
+ assert_(all(sys.getrefcount(o) == 3 for o in objects))
+ # not contiguous, example:
+ a = np.array(objects * 2)[::2]
+ a.take(b, out=a[:6])
+ del a
+ assert_(all(sys.getrefcount(o) == 3 for o in objects))
+
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy/core/tests/test_machar.py b/numpy/core/tests/test_machar.py
index 4175ceeac..8d858c28b 100644
--- a/numpy/core/tests/test_machar.py
+++ b/numpy/core/tests/test_machar.py
@@ -1,8 +1,10 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
from numpy.core.machar import MachAr
import numpy.core.numerictypes as ntypes
-from numpy import seterr, array
+from numpy import errstate, array
class TestMachAr(TestCase):
def _run_machar_highprec(self):
@@ -17,14 +19,11 @@ class TestMachAr(TestCase):
def test_underlow(self):
"""Regression testing for #759: instanciating MachAr for dtype =
np.float96 raises spurious warning."""
- serrstate = seterr(all='raise')
- try:
+ with errstate(all='raise'):
try:
self._run_machar_highprec()
- except FloatingPointError, e:
+ except FloatingPointError as e:
self.fail("Caught %s exception, should not have been raised." % e)
- finally:
- seterr(**serrstate)
if __name__ == "__main__":
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index 7f134ee91..97afb68a2 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -1,8 +1,11 @@
-from tempfile import NamedTemporaryFile, mktemp
+from __future__ import division, absolute_import, print_function
+
+import sys
+from tempfile import NamedTemporaryFile, TemporaryFile, mktemp
import os
from numpy import memmap
-from numpy import arange, allclose
+from numpy import arange, allclose, asarray
from numpy.testing import *
class TestMemmap(TestCase):
@@ -37,6 +40,11 @@ class TestMemmap(TestCase):
del fp
os.unlink(tmpname)
+ def test_unnamed_file(self):
+ with TemporaryFile() as f:
+ fp = memmap(f, dtype=self.dtype, shape=self.shape)
+ del fp
+
def test_attributes(self):
offset = 1
mode = "w+"
@@ -64,6 +72,7 @@ class TestMemmap(TestCase):
shape=self.shape)
self.assertEqual(fp.filename, self.tmpfp.name)
+ @dec.knownfailureif(sys.platform=='gnu0', "This test is known to fail on hurd")
def test_flush(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
@@ -104,5 +113,14 @@ class TestMemmap(TestCase):
shape=self.shape)
assert fp[:2, :2]._mmap is fp._mmap
+ def test_view(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ new1 = fp.view()
+ new2 = new1.view()
+ assert(new1.base is fp)
+ assert(new2.base is fp)
+ new_array = asarray(fp)
+ assert(new_array.base is fp)
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 29e28ff26..d37bcd331 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -1,13 +1,14 @@
+from __future__ import division, absolute_import, print_function
+
import tempfile
import sys
import os
import warnings
+
import numpy as np
from nose import SkipTest
from numpy.core import *
-from numpy.compat import asbytes
-from numpy.testing.utils import WarningManager
-from numpy.compat import asbytes, getexception, strchar
+from numpy.compat import asbytes, getexception, strchar, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
@@ -17,7 +18,8 @@ from numpy.core.multiarray_tests import (
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
- assert_array_almost_equal, assert_allclose, runstring, dec
+ assert_array_almost_equal, assert_allclose,
+ assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
@@ -29,7 +31,7 @@ if sys.version_info[:2] > (3, 2):
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
-else:
+else:
EMPTY = None
@@ -94,19 +96,36 @@ class TestAttributes(TestCase):
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
+ def test_int_subclassing(self):
+ # Regression test for https://github.com/numpy/numpy/pull/3526
+
+ numpy_int = np.int_(0)
+
+ if sys.version_info[0] >= 3:
+ # On Py3k int_ should not inherit from int, because the builtin int is not fixed-width anymore
+ assert_equal(isinstance(numpy_int, int), False)
+ else:
+ # Otherwise, it should inherit from int...
+ assert_equal(isinstance(numpy_int, int), True)
+
+ # ... and fast-path checks on C-API level should also work
+ from numpy.core.multiarray_tests import test_int_subclass
+ assert_equal(test_int_subclass(numpy_int), True)
+
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
- return ndarray([size], buffer=x, dtype=int,
+ return ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
- #self.assertRaises(ValueError, make_array, 8, 3, 0)
- #self.assertRaises(ValueError, lambda: ndarray([1], strides=4))
-
+ assert_equal(make_array(8, 3, 0), np.array([3]*8))
+ # Check behavior reported in gh-2503:
+ self.assertRaises(ValueError, make_array, (2, 3), 5, array([-2, -3]))
+ make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
@@ -122,7 +141,20 @@ class TestAttributes(TestCase):
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
- #self.assertRaises(ValueError, make_array, 8, 3, 0)
+ # Check that the true extent of the array is used.
+ # Test relies on as_strided base not exposing a buffer.
+ x = np.lib.stride_tricks.as_strided(arange(1), (10,10), (0,0))
+ def set_strides(arr, strides):
+ arr.strides = strides
+ self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
+
+ # Test for offset calculations:
+ x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
+ shape=(10,), strides=(-1,))
+ self.assertRaises(ValueError, set_strides, x[::-1], -1)
+ a = x[::-1]
+ a.strides = 1
+ a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
@@ -132,9 +164,17 @@ class TestAttributes(TestCase):
y[...] = 1
assert_equal(x,y)
+ def test_fill_struct_array(self):
+ # Filling from a scalar
x = array([(0,0.0), (1,1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
+ # Filling from a tuple that can be converted
+ # to a scalar
+ x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
+ x.fill((3.5, -2))
+ assert_array_equal(x['a'], [3.5, 3.5])
+ assert_array_equal(x['b'], [-2, -2])
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
@@ -563,7 +603,7 @@ class TestMethods(TestCase):
# test object array sorts.
a = np.empty((101,), dtype=np.object)
- a[:] = range(101)
+ a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "object sort, kind=%s" % kind
@@ -626,6 +666,30 @@ class TestMethods(TestCase):
d.sort()
assert_equal(d, c, "test sort with default axis")
+ def test_copy(self):
+ def assert_fortran(arr):
+ assert_(arr.flags.fortran)
+ assert_(arr.flags.f_contiguous)
+ assert_(not arr.flags.c_contiguous)
+
+ def assert_c(arr):
+ assert_(not arr.flags.fortran)
+ assert_(not arr.flags.f_contiguous)
+ assert_(arr.flags.c_contiguous)
+
+ a = np.empty((2, 2), order='F')
+ # Test copying a Fortran array
+ assert_c(a.copy())
+ assert_c(a.copy('C'))
+ assert_fortran(a.copy('F'))
+ assert_fortran(a.copy('A'))
+
+ # Now test starting with a C array.
+ a = np.empty((2, 2), order='C')
+ assert_c(a.copy())
+ assert_c(a.copy('C'))
+ assert_fortran(a.copy('F'))
+ assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
@@ -713,7 +777,7 @@ class TestMethods(TestCase):
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
- a[:] = range(101)
+ a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
@@ -866,6 +930,293 @@ class TestMethods(TestCase):
assert_equal(a.searchsorted(k, side='l', sorter=s), [0, 20, 40, 60, 80])
assert_equal(a.searchsorted(k, side='r', sorter=s), [20, 40, 60, 80, 100])
+
+ def test_partition(self):
+ d = np.arange(10)
+ assert_raises(TypeError, np.partition, d, 2, kind=1)
+ assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
+ assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
+ assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
+ assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
+ for k in ("introselect",):
+ d = np.array([])
+ assert_array_equal(np.partition(d, 0, kind=k), d)
+ assert_array_equal(np.argpartition(d, 0, kind=k), d)
+ d = np.ones((1))
+ assert_array_equal(np.partition(d, 0, kind=k)[0], d)
+ assert_array_equal(d[np.argpartition(d, 0, kind=k)],
+ np.partition(d, 0, kind=k))
+
+ # kth not modified
+ kth = np.array([30, 15, 5])
+ okth = kth.copy()
+ np.partition(np.arange(40), kth)
+ assert_array_equal(kth, okth)
+
+ for r in ([2, 1], [1, 2], [1, 1]):
+ d = np.array(r)
+ tgt = np.sort(d)
+ assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
+ assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
+ assert_array_equal(d[np.argpartition(d, 0, kind=k)],
+ np.partition(d, 0, kind=k))
+ assert_array_equal(d[np.argpartition(d, 1, kind=k)],
+ np.partition(d, 1, kind=k))
+ for i in range(d.size):
+ d[i:].partition(0, kind=k)
+ assert_array_equal(d, tgt)
+
+ for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
+ [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
+ d = np.array(r)
+ tgt = np.sort(d)
+ assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
+ assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
+ assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
+ assert_array_equal(d[np.argpartition(d, 0, kind=k)],
+ np.partition(d, 0, kind=k))
+ assert_array_equal(d[np.argpartition(d, 1, kind=k)],
+ np.partition(d, 1, kind=k))
+ assert_array_equal(d[np.argpartition(d, 2, kind=k)],
+ np.partition(d, 2, kind=k))
+ for i in range(d.size):
+ d[i:].partition(0, kind=k)
+ assert_array_equal(d, tgt)
+
+ d = np.ones((50))
+ assert_array_equal(np.partition(d, 0, kind=k), d)
+ assert_array_equal(d[np.argpartition(d, 0, kind=k)],
+ np.partition(d, 0, kind=k))
+
+ # sorted
+ d = np.arange((49))
+ self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
+ self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
+ assert_array_equal(d[np.argpartition(d, 5, kind=k)],
+ np.partition(d, 5, kind=k))
+ assert_array_equal(d[np.argpartition(d, 15, kind=k)],
+ np.partition(d, 15, kind=k))
+
+ # rsorted
+ d = np.arange((47))[::-1]
+ self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
+ self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
+ assert_array_equal(d[np.argpartition(d, 6, kind=k)],
+ np.partition(d, 6, kind=k))
+ assert_array_equal(d[np.argpartition(d, 16, kind=k)],
+ np.partition(d, 16, kind=k))
+
+ assert_array_equal(np.partition(d, -6, kind=k),
+ np.partition(d, 41, kind=k))
+ assert_array_equal(np.partition(d, -16, kind=k),
+ np.partition(d, 31, kind=k))
+ assert_array_equal(d[np.argpartition(d, -6, kind=k)],
+ np.partition(d, 41, kind=k))
+
+ # equal elements
+ d = np.arange((47)) % 7
+ tgt = np.sort(np.arange((47)) % 7)
+ np.random.shuffle(d)
+ for i in range(d.size):
+ self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
+ assert_array_equal(d[np.argpartition(d, 6, kind=k)],
+ np.partition(d, 6, kind=k))
+ assert_array_equal(d[np.argpartition(d, 16, kind=k)],
+ np.partition(d, 16, kind=k))
+ for i in range(d.size):
+ d[i:].partition(0, kind=k)
+ assert_array_equal(d, tgt)
+
+ d = np.array([2, 1])
+ d.partition(0, kind=k)
+ assert_raises(ValueError, d.partition, 2)
+ assert_raises(ValueError, d.partition, 3, axis=1)
+ assert_raises(ValueError, np.partition, d, 2)
+ assert_raises(ValueError, np.partition, d, 2, axis=1)
+ assert_raises(ValueError, d.argpartition, 2)
+ assert_raises(ValueError, d.argpartition, 3, axis=1)
+ assert_raises(ValueError, np.argpartition, d, 2)
+ assert_raises(ValueError, np.argpartition, d, 2, axis=1)
+ d = np.arange(10).reshape((2, 5))
+ d.partition(1, axis=0, kind=k)
+ d.partition(4, axis=1, kind=k)
+ np.partition(d, 1, axis=0, kind=k)
+ np.partition(d, 4, axis=1, kind=k)
+ np.partition(d, 1, axis=None, kind=k)
+ np.partition(d, 9, axis=None, kind=k)
+ d.argpartition(1, axis=0, kind=k)
+ d.argpartition(4, axis=1, kind=k)
+ np.argpartition(d, 1, axis=0, kind=k)
+ np.argpartition(d, 4, axis=1, kind=k)
+ np.argpartition(d, 1, axis=None, kind=k)
+ np.argpartition(d, 9, axis=None, kind=k)
+ assert_raises(ValueError, d.partition, 2, axis=0)
+ assert_raises(ValueError, d.partition, 11, axis=1)
+ assert_raises(TypeError, d.partition, 2, axis=None)
+ assert_raises(ValueError, np.partition, d, 9, axis=1)
+ assert_raises(ValueError, np.partition, d, 11, axis=None)
+ assert_raises(ValueError, d.argpartition, 2, axis=0)
+ assert_raises(ValueError, d.argpartition, 11, axis=1)
+ assert_raises(ValueError, np.argpartition, d, 9, axis=1)
+ assert_raises(ValueError, np.argpartition, d, 11, axis=None)
+
+ td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
+ for s in (9, 16)]
+ for dt, s in td:
+ aae = assert_array_equal
+ at = self.assertTrue
+
+ d = np.arange(s, dtype=dt)
+ np.random.shuffle(d)
+ d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
+ list(map(np.random.shuffle, d1))  # list() so the shuffle actually runs on Py3
+ d0 = np.transpose(d1)
+ for i in range(d.size):
+ p = np.partition(d, i, kind=k)
+ self.assertEqual(p[i], i)
+ # all before are smaller
+ assert_array_less(p[:i], p[i])
+ # all after are larger
+ assert_array_less(p[i], p[i + 1:])
+ aae(p, d[np.argpartition(d, i, kind=k)])
+
+ p = np.partition(d1, i, axis=1, kind=k)
+ aae(p[:,i], np.array([i] * d1.shape[0], dtype=dt))
+ # array_less does not seem to work right
+ at((p[:, :i].T <= p[:, i]).all(),
+ msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
+ at((p[:, i + 1:].T > p[:, i]).all(),
+ msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
+ aae(p, d1[np.arange(d1.shape[0])[:,None],
+ np.argpartition(d1, i, axis=1, kind=k)])
+
+ p = np.partition(d0, i, axis=0, kind=k)
+ aae(p[i, :], np.array([i] * d1.shape[0],
+ dtype=dt))
+ # array_less does not seem to work right
+ at((p[:i, :] <= p[i, :]).all(),
+ msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
+ at((p[i + 1:, :] > p[i, :]).all(),
+ msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
+ aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
+ np.arange(d0.shape[1])[None,:]])
+
+ # check inplace
+ dc = d.copy()
+ dc.partition(i, kind=k)
+ assert_equal(dc, np.partition(d, i, kind=k))
+ dc = d0.copy()
+ dc.partition(i, axis=0, kind=k)
+ assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
+ dc = d1.copy()
+ dc.partition(i, axis=1, kind=k)
+ assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
+
+
+ def assert_partitioned(self, d, kth):
+ prev = 0
+ for k in np.sort(kth):
+ assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
+ assert_((d[k:] >= d[k]).all(),
+ msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
+ prev = k + 1
+
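A minimal sketch (not part of the patch) of the invariant `assert_partitioned` encodes, assuming NumPy's documented partition semantics: after `np.partition(d, k)` the element at index k is the kth order statistic, everything before it compares <=, and everything after compares >=.

    import numpy as np

    d = np.random.permutation(20)
    k = 7
    p = np.partition(d, k)
    # the kth element lands in its sorted position
    assert p[k] == np.sort(d)[k]
    # left side all <= p[k], right side all >= p[k]
    assert (p[:k] <= p[k]).all() and (p[k + 1:] >= p[k]).all()
    # argpartition yields indices producing an equally valid partition
    ap = d[np.argpartition(d, k)]
    assert ap[k] == p[k]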
+
+ def test_partition_iterative(self):
+ d = np.arange(17)
+ kth = (0, 1, 2, 429, 231)
+ assert_raises(ValueError, d.partition, kth)
+ assert_raises(ValueError, d.argpartition, kth)
+ d = np.arange(10).reshape((2, 5))
+ assert_raises(ValueError, d.partition, kth, axis=0)
+ assert_raises(ValueError, d.partition, kth, axis=1)
+ assert_raises(ValueError, np.partition, d, kth, axis=1)
+ assert_raises(ValueError, np.partition, d, kth, axis=None)
+
+ d = np.array([3, 4, 2, 1])
+ p = np.partition(d, (0, 3))
+ self.assert_partitioned(p, (0, 3))
+ self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
+
+ assert_array_equal(p, np.partition(d, (-3, -1)))
+ assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
+
+ d = np.arange(17)
+ np.random.shuffle(d)
+ d.partition(range(d.size))
+ assert_array_equal(np.arange(17), d)
+ np.random.shuffle(d)
+ assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
+
+ # test unsorted kth
+ d = np.arange(17)
+ np.random.shuffle(d)
+ keys = np.array([1, 3, 8, -2])
+ np.random.shuffle(d)
+ p = np.partition(d, keys)
+ self.assert_partitioned(p, keys)
+ p = d[np.argpartition(d, keys)]
+ self.assert_partitioned(p, keys)
+ np.random.shuffle(keys)
+ assert_array_equal(np.partition(d, keys), p)
+ assert_array_equal(d[np.argpartition(d, keys)], p)
+
+ # equal kth
+ d = np.arange(20)[::-1]
+ self.assert_partitioned(np.partition(d, [5]*4), [5])
+ self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
+ [5]*4 + [6, 13])
+ self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
+ self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
+ [5]*4 + [6, 13])
+
+ d = np.arange(12)
+ np.random.shuffle(d)
+ d1 = np.tile(np.arange(12), (4, 1))
+ # map() is lazy on Python 3, so shuffle each row explicitly
+ for row in d1:
+ np.random.shuffle(row)
+ d0 = np.transpose(d1)
+
+ kth = (1, 6, 7, -1)
+ p = np.partition(d1, kth, axis=1)
+ pa = d1[np.arange(d1.shape[0])[:,None],
+ d1.argpartition(kth, axis=1)]
+ assert_array_equal(p, pa)
+ for i in range(d1.shape[0]):
+ self.assert_partitioned(p[i, :], kth)
+ p = np.partition(d0, kth, axis=0)
+ pa = d0[np.argpartition(d0, kth, axis=0),
+ np.arange(d0.shape[1])[None,:]]
+ assert_array_equal(p, pa)
+ for i in range(d0.shape[1]):
+ self.assert_partitioned(p[:, i], kth)
+
+
+ def test_partition_cdtype(self):
+ d = array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+ ('Lancelot', 1.9, 38)],
+ dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+ tgt = np.sort(d, order=['age', 'height'])
+ assert_array_equal(np.partition(d, range(d.size),
+ order=['age', 'height']),
+ tgt)
+ assert_array_equal(d[np.argpartition(d, range(d.size),
+ order=['age', 'height'])],
+ tgt)
+ for k in range(d.size):
+ assert_equal(np.partition(d, k, order=['age', 'height'])[k],
+ tgt[k])
+ assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
+ tgt[k])
+
+ d = array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
+ tgt = np.sort(d)
+ assert_array_equal(np.partition(d, range(d.size)), tgt)
+ for k in range(d.size):
+ assert_equal(np.partition(d, k)[k], tgt[k])
+ assert_equal(d[np.argpartition(d, k)][k], tgt[k])
+
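The `order=` keyword exercised above also gives a cheap way to pull a single order statistic out of a structured array without a full sort. A hedged sketch, reusing the record layout from the test:

    import numpy as np

    people = np.array([('Galahad', 38), ('Arthur', 41), ('Lancelot', 38)],
                      dtype=[('name', 'S10'), ('age', '<i4')])
    mid = len(people) // 2
    # median record by age, via partition instead of a full sort
    rec = np.partition(people, mid, order=['age'])[mid]
    assert rec['age'] == np.sort(people, order=['age'])[mid]['age']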
+
def test_flatten(self):
x0 = np.array([[1,2,3],[4,5,6]], np.int32)
x1 = np.array([[[1,2],[3,4]],[[5,6],[7,8]]], np.int32)
@@ -888,6 +1239,16 @@ class TestMethods(TestCase):
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
+ # test passing in an output array
+ c = np.zeros_like(a)
+ a.dot(b,c)
+ assert_equal(c, np.dot(a,b))
+
+ # test keyword args
+ c = np.zeros_like(a)
+ a.dot(b=b,out=c)
+ assert_equal(c, np.dot(a,b))
+
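A short sketch of the `out` behaviour these new assertions cover, assuming the usual NumPy `out` contract (the target array must already have the matching shape and dtype):

    import numpy as np

    a = np.arange(4.0).reshape(2, 2)
    b = np.eye(2)
    c = np.empty_like(a)
    a.dot(b, c)                 # positional out, as in the test above
    np.testing.assert_equal(c, np.dot(a, b))
    a.dot(b=b, out=c)           # keyword form, also exercised above
    np.testing.assert_equal(c, np.dot(a, b))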
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
@@ -908,17 +1269,13 @@ class TestMethods(TestCase):
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_deprecation(self):
- import warnings
- from numpy.testing.utils import WarningManager
+
def collect_warning_types(f, *args, **kwargs):
- ctx = WarningManager(record=True)
- warning_log = ctx.__enter__()
- warnings.simplefilter("always")
- try:
+ with warnings.catch_warnings(record=True) as log:
+ warnings.simplefilter("always")
f(*args, **kwargs)
- finally:
- ctx.__exit__()
- return [w.category for w in warning_log]
+ return [w.category for w in log]
+
a = np.arange(9).reshape(3, 3)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
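The refactor above (and the identical one in `test_field_names_deprecation` further down) replaces the old `WarningManager` helper with the stdlib context manager. A minimal sketch of the pattern, using only the standard library:

    import warnings

    with warnings.catch_warnings(record=True) as log:
        warnings.simplefilter("always")           # record every warning
        warnings.warn("demo", DeprecationWarning)
    assert [w.category for w in log] == [DeprecationWarning]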
@@ -1033,7 +1390,7 @@ class TestMethods(TestCase):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
- for i in xrange(100):
+ for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
@@ -1181,7 +1538,7 @@ class TestFancyIndexing(TestCase):
x[m] = 5
assert_array_equal(x, array([1,5,3,4]))
- def test_assign_mask(self):
+ def test_assign_mask2(self):
xorig = array([[1,2,3,4],[5,6,7,8]])
m = array([0,1],bool)
m2 = array([[0,1],[1,0]],bool)
@@ -1220,8 +1577,8 @@ class TestStringCompare(TestCase):
def test_unicode(self):
- g1 = array([u"This",u"is",u"example"])
- g2 = array([u"This",u"was",u"example"])
+ g1 = array([sixu("This"),sixu("is"),sixu("example")])
+ g2 = array([sixu("This"),sixu("was"),sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0,1,2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0,1,2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0,1,2]])
@@ -1285,10 +1642,10 @@ class TestArgmax(TestCase):
def test_all(self):
a = np.random.normal(0,1,(4,5,6,7,8))
- for i in xrange(a.ndim):
+ for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
- axes = range(a.ndim)
+ axes = list(range(a.ndim))
axes.remove(i)
assert_(all(amax == aargmax.choose(*a.transpose(i,*axes))))
@@ -1353,10 +1710,10 @@ class TestArgmin(TestCase):
def test_all(self):
a = np.random.normal(0,1,(4,5,6,7,8))
- for i in xrange(a.ndim):
+ for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
- axes = range(a.ndim)
+ axes = list(range(a.ndim))
axes.remove(i)
assert_(all(amin == aargmin.choose(*a.transpose(i,*axes))))
@@ -1476,7 +1833,7 @@ class TestPutmask(object):
mask = x < 40
for val in [-100,0,15]:
- for types in np.sctypes.itervalues():
+ for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic,x.copy().astype(T),T,mask,val
@@ -1515,7 +1872,7 @@ class TestPutmask(object):
class TestTake(object):
def tst_basic(self,x):
- ind = range(x.shape[0])
+ ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
@@ -1523,7 +1880,7 @@ class TestTake(object):
x = np.random.random(24)*100
x.shape = 2,3,4
- for types in np.sctypes.itervalues():
+ for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic,x.copy().astype(T)
@@ -1974,21 +2331,17 @@ class TestRecord(TestCase):
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
- assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
- assert_raises(ValueError, a.__getitem__, u'\u03e0')
+ assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
+ assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
- import warnings
- from numpy.testing.utils import WarningManager
+
def collect_warning_types(f, *args, **kwargs):
- ctx = WarningManager(record=True)
- warning_log = ctx.__enter__()
- warnings.simplefilter("always")
- try:
+ with warnings.catch_warnings(record=True) as log:
+ warnings.simplefilter("always")
f(*args, **kwargs)
- finally:
- ctx.__exit__()
- return [w.category for w in warning_log]
+ return [w.category for w in log]
+
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
@@ -2046,7 +2399,178 @@ class TestView(TestCase):
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
+def _mean(a, **args):
+ return a.mean(**args)
+
+def _var(a, **args):
+ return a.var(**args)
+
+def _std(a, **args):
+ return a.std(**args)
+
class TestStats(TestCase):
+
+ funcs = [_mean, _var, _std]
+
+ def setUp(self):
+ np.random.seed(range(3))
+ self.rmat = np.random.random((4, 5))
+ self.cmat = self.rmat + 1j * self.rmat
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for f in self.funcs:
+ for axis in [0, 1]:
+ res = f(mat, axis=axis, keepdims=True)
+ assert_(res.ndim == mat.ndim)
+ assert_(res.shape[axis] == 1)
+ for axis in [None]:
+ res = f(mat, axis=axis, keepdims=True)
+ assert_(res.shape == (1, 1))
+
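A sketch of the `keepdims` contract being asserted: the reduced axis is retained with length 1, so the result broadcasts back against the input without reshaping.

    import numpy as np

    mat = np.arange(12.0).reshape(3, 4)
    m = mat.mean(axis=1, keepdims=True)   # shape (3, 1) rather than (3,)
    assert m.shape == (3, 1)
    assert (mat - m).shape == mat.shape   # broadcasts cleanly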
+ def test_out(self):
+ mat = np.eye(3)
+ for f in self.funcs:
+ out = np.zeros(3)
+ tgt = f(mat, axis=1)
+ res = f(mat, axis=1, out=out)
+ assert_almost_equal(res, out)
+ assert_almost_equal(res, tgt)
+ out = np.empty(2)
+ assert_raises(ValueError, f, mat, axis=1, out=out)
+ out = np.empty((2, 2))
+ assert_raises(ValueError, f, mat, axis=1, out=out)
+
+ def test_dtype_from_input(self):
+ icodes = np.typecodes['AllInteger']
+ fcodes = np.typecodes['AllFloat']
+
+ # integer types
+ for f in self.funcs:
+ for c in icodes:
+ mat = np.eye(3, dtype=c)
+ tgt = np.float64
+ res = f(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ res = f(mat, axis=None).dtype.type
+ assert_(res is tgt)
+ # mean for float types
+ for f in [_mean]:
+ for c in fcodes:
+ mat = np.eye(3, dtype=c)
+ tgt = mat.dtype.type
+ res = f(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ res = f(mat, axis=None).dtype.type
+ assert_(res is tgt)
+ # var, std for float types
+ for f in [_var, _std]:
+ for c in fcodes:
+ mat = np.eye(3, dtype=c)
+ tgt = mat.real.dtype.type
+ res = f(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ res = f(mat, axis=None).dtype.type
+ assert_(res is tgt)
+
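A compact restatement of the dtype rules these loops verify, assuming default NumPy promotion: integer input is accumulated in float64; for complex input, mean keeps the complex dtype while var and std return the matching real dtype.

    import numpy as np

    assert np.arange(3, dtype=np.int32).mean().dtype == np.float64
    c = np.ones(3, dtype=np.complex64)
    assert c.mean().dtype == np.complex64
    assert c.var().dtype == np.float32    # real counterpart of complex64
    assert c.std().dtype == np.float32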
+ def test_dtype_from_dtype(self):
+ icodes = np.typecodes['AllInteger']
+ fcodes = np.typecodes['AllFloat']
+ mat = np.eye(3)
+
+ # stats for integer types
+ # fixme:
+ # this needs a definition, since there are many places along the
+ # line where type casting may take place.
+ #for f in self.funcs:
+ #for c in icodes:
+ #tgt = np.dtype(c).type
+ #res = f(mat, axis=1, dtype=c).dtype.type
+ #assert_(res is tgt)
+ ## scalar case
+ #res = f(mat, axis=None, dtype=c).dtype.type
+ #assert_(res is tgt)
+
+ # stats for float types
+ for f in self.funcs:
+ for c in fcodes:
+ tgt = np.dtype(c).type
+ res = f(mat, axis=1, dtype=c).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ res = f(mat, axis=None, dtype=c).dtype.type
+ assert_(res is tgt)
+
+ def test_ddof(self):
+ for f in [_var]:
+ for ddof in range(3):
+ dim = self.rmat.shape[1]
+ tgt = f(self.rmat, axis=1) * dim
+ res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
+ assert_almost_equal(res, tgt)
+ for f in [_std]:
+ for ddof in range(3):
+ dim = self.rmat.shape[1]
+ tgt = f(self.rmat, axis=1) * np.sqrt(dim)
+ res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
+ assert_almost_equal(res, tgt)
+
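The ddof loops rely on the identity that the sum of squared deviations is independent of the divisor: var(ddof) * (n - ddof) equals var(ddof=0) * n, and likewise with square roots for std. A quick check:

    import numpy as np

    x = np.random.random(10)
    n = x.size
    for ddof in range(3):
        np.testing.assert_allclose(x.var(ddof=ddof) * (n - ddof), x.var() * n)
        np.testing.assert_allclose(x.std(ddof=ddof) * np.sqrt(n - ddof),
                                   x.std() * np.sqrt(n))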
+ def test_ddof_too_big(self):
+ dim = self.rmat.shape[1]
+ for f in [_var, _std]:
+ for ddof in range(dim, dim + 2):
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(self.rmat, axis=1, ddof=ddof)
+ assert_(not (res < 0).any())
+ assert_(len(w) > 0)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+ def test_empty(self):
+ A = np.zeros((0,3))
+ for f in self.funcs:
+ for axis in [0, None]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_(np.isnan(f(A, axis=axis)).all())
+ assert_(len(w) > 0)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+ for axis in [1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_equal(f(A, axis=axis), np.zeros([]))
+
+ def test_mean_values(self):
+ for mat in [self.rmat, self.cmat]:
+ for axis in [0, 1]:
+ tgt = mat.sum(axis=axis)
+ res = _mean(mat, axis=axis) * mat.shape[axis]
+ assert_almost_equal(res, tgt)
+ for axis in [None]:
+ tgt = mat.sum(axis=axis)
+ res = _mean(mat, axis=axis) * np.prod(mat.shape)
+ assert_almost_equal(res, tgt)
+
+ def test_var_values(self):
+ for mat in [self.rmat, self.cmat]:
+ for axis in [0, 1, None]:
+ msqr = _mean(mat * mat.conj(), axis=axis)
+ mean = _mean(mat, axis=axis)
+ tgt = msqr - mean * mean.conj()
+ res = _var(mat, axis=axis)
+ assert_almost_equal(res, tgt)
+
+ def test_std_values(self):
+ for mat in [self.rmat, self.cmat]:
+ for axis in [0, 1, None]:
+ tgt = np.sqrt(_var(mat, axis=axis))
+ res = _std(mat, axis=axis)
+ assert_almost_equal(res, tgt)
+
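test_var_values and test_std_values pin down the definitions used even for complex input: var(x) = mean(x * conj(x)) - mean(x) * conj(mean(x)), which is real, and std = sqrt(var). A sketch:

    import numpy as np

    x = np.random.random(8) + 1j * np.random.random(8)
    tgt = (x * x.conj()).mean() - x.mean() * np.conj(x.mean())
    np.testing.assert_allclose(x.var(), tgt.real)
    np.testing.assert_allclose(x.std(), np.sqrt(x.var()))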
+
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
@@ -2056,6 +2580,7 @@ class TestStats(TestCase):
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
+
dat = TestArray([[1,2,3,4],[5,6,7,8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
@@ -2083,7 +2608,7 @@ class TestDot(TestCase):
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
- for i in xrange(12):
+ for i in range(12):
dot(f,v,r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f,v,out=None)
@@ -2450,388 +2975,390 @@ class TestStackedNeighborhoodIter(TestCase):
assert_array_equal(l, r)
class TestWarnings(object):
+
def test_complex_warning(self):
x = np.array([1,2])
y = np.array([1-2j,1+2j])
- warn_ctx = WarningManager()
- warn_ctx.__enter__()
- try:
+ with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1,2])
- finally:
- warn_ctx.__exit__()
class TestMinScalarType(object):
+
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
+
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
- assert_equal(wanted, dt)
+ assert_equal(wanted, dt)
+
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
- assert_equal(wanted, dt)
+ assert_equal(wanted, dt)
+
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
- assert_equal(wanted, dt)
+ assert_equal(wanted, dt)
+
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
-
-if sys.version_info >= (2, 6):
-
- if sys.version_info[:2] == (2, 6):
- from numpy.core.multiarray import memorysimpleview as memoryview
-
- from numpy.core._internal import _dtype_from_pep3118
-
- class TestPEP3118Dtype(object):
- def _check(self, spec, wanted):
- dt = np.dtype(wanted)
- if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
- if wanted[-1][0] == '':
- names = list(dt.names)
- names[-1] = ''
- dt.names = tuple(names)
- assert_equal(_dtype_from_pep3118(spec), dt,
- err_msg="spec %r != dtype %r" % (spec, wanted))
-
- def test_native_padding(self):
- align = np.dtype('i').alignment
- for j in xrange(8):
- if j == 0:
- s = 'bi'
- else:
- s = 'b%dxi' % j
- self._check('@'+s, {'f0': ('i1', 0),
- 'f1': ('i', align*(1 + j//align))})
- self._check('='+s, {'f0': ('i1', 0),
- 'f1': ('i', 1+j)})
-
- def test_native_padding_2(self):
- # Native padding should work also for structs and sub-arrays
- self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
- self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
-
- def test_trailing_padding(self):
- # Trailing padding should be included, *and*, the item size
- # should match the alignment if in aligned mode
- align = np.dtype('i').alignment
- def VV(n):
- return 'V%d' % (align*(1 + (n-1)//align))
-
- self._check('ix', [('f0', 'i'), ('', VV(1))])
- self._check('ixx', [('f0', 'i'), ('', VV(2))])
- self._check('ixxx', [('f0', 'i'), ('', VV(3))])
- self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
- self._check('i7x', [('f0', 'i'), ('', VV(7))])
-
- self._check('^ix', [('f0', 'i'), ('', 'V1')])
- self._check('^ixx', [('f0', 'i'), ('', 'V2')])
- self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
- self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
- self._check('^i7x', [('f0', 'i'), ('', 'V7')])
-
- def test_native_padding_3(self):
- dt = np.dtype(
- [('a', 'b'), ('b', 'i'),
- ('sub', np.dtype('b,i')), ('c', 'i')],
- align=True)
- self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
-
- dt = np.dtype(
- [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
- ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
- self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
-
- def test_padding_with_array_inside_struct(self):
- dt = np.dtype(
- [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
- ('d', 'i')],
- align=True)
- self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
-
- def test_byteorder_inside_struct(self):
- # The byte order after @T{=i} should be '=', not '@'.
- # Check this by noting the absence of native alignment.
- self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
- 'f1': ('i', 5)})
-
- def test_intra_padding(self):
- # Natively aligned sub-arrays may require some internal padding
- align = np.dtype('i').alignment
- def VV(n):
- return 'V%d' % (align*(1 + (n-1)//align))
-
- self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
-
- class TestNewBufferProtocol(object):
- def _check_roundtrip(self, obj):
- obj = np.asarray(obj)
- x = memoryview(obj)
- y = np.asarray(x)
- y2 = np.array(x)
- assert_(not y.flags.owndata)
- assert_(y2.flags.owndata)
- assert_equal(y.dtype, obj.dtype)
- assert_array_equal(obj, y)
- assert_equal(y2.dtype, obj.dtype)
- assert_array_equal(obj, y2)
-
- def test_roundtrip(self):
- x = np.array([1,2,3,4,5], dtype='i4')
- self._check_roundtrip(x)
- x = np.array([[1,2],[3,4]], dtype=np.float64)
- self._check_roundtrip(x)
+if sys.version_info[:2] == (2, 6):
+ from numpy.core.multiarray import memorysimpleview as memoryview
- x = np.zeros((3,3,3), dtype=np.float32)[:,0,:]
- self._check_roundtrip(x)
+from numpy.core._internal import _dtype_from_pep3118
- dt = [('a', 'b'),
- ('b', 'h'),
- ('c', 'i'),
- ('d', 'l'),
- ('dx', 'q'),
- ('e', 'B'),
- ('f', 'H'),
- ('g', 'I'),
- ('h', 'L'),
- ('hx', 'Q'),
- ('i', np.single),
- ('j', np.double),
- ('k', np.longdouble),
- ('ix', np.csingle),
- ('jx', np.cdouble),
- ('kx', np.clongdouble),
- ('l', 'S4'),
- ('m', 'U4'),
- ('n', 'V3'),
- ('o', '?'),
- ('p', np.half),
- ]
- x = np.array(
- [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
- dtype=dt)
- self._check_roundtrip(x)
+class TestPEP3118Dtype(object):
+ def _check(self, spec, wanted):
+ dt = np.dtype(wanted)
+ if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
+ if wanted[-1][0] == '':
+ names = list(dt.names)
+ names[-1] = ''
+ dt.names = tuple(names)
+ assert_equal(_dtype_from_pep3118(spec), dt,
+ err_msg="spec %r != dtype %r" % (spec, wanted))
- x = np.array(([[1,2],[3,4]],), dtype=[('a', (int, (2,2)))])
- self._check_roundtrip(x)
+ def test_native_padding(self):
+ align = np.dtype('i').alignment
+ for j in range(8):
+ if j == 0:
+ s = 'bi'
+ else:
+ s = 'b%dxi' % j
+ self._check('@'+s, {'f0': ('i1', 0),
+ 'f1': ('i', align*(1 + j//align))})
+ self._check('='+s, {'f0': ('i1', 0),
+ 'f1': ('i', 1+j)})
+
+ def test_native_padding_2(self):
+ # Native padding should work also for structs and sub-arrays
+ self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
+ self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
+
+ def test_trailing_padding(self):
+ # Trailing padding should be included, *and*, the item size
+ # should match the alignment if in aligned mode
+ align = np.dtype('i').alignment
+ def VV(n):
+ return 'V%d' % (align*(1 + (n-1)//align))
+
+ self._check('ix', [('f0', 'i'), ('', VV(1))])
+ self._check('ixx', [('f0', 'i'), ('', VV(2))])
+ self._check('ixxx', [('f0', 'i'), ('', VV(3))])
+ self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
+ self._check('i7x', [('f0', 'i'), ('', VV(7))])
+
+ self._check('^ix', [('f0', 'i'), ('', 'V1')])
+ self._check('^ixx', [('f0', 'i'), ('', 'V2')])
+ self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
+ self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
+ self._check('^i7x', [('f0', 'i'), ('', 'V7')])
+
+ def test_native_padding_3(self):
+ dt = np.dtype(
+ [('a', 'b'), ('b', 'i'),
+ ('sub', np.dtype('b,i')), ('c', 'i')],
+ align=True)
+ self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
+
+ dt = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
+ ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
+ self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
+
+ def test_padding_with_array_inside_struct(self):
+ dt = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
+ ('d', 'i')],
+ align=True)
+ self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
+
+ def test_byteorder_inside_struct(self):
+ # The byte order after @T{=i} should be '=', not '@'.
+ # Check this by noting the absence of native alignment.
+ self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
+ 'f1': ('i', 5)})
+
+ def test_intra_padding(self):
+ # Natively aligned sub-arrays may require some internal padding
+ align = np.dtype('i').alignment
+ def VV(n):
+ return 'V%d' % (align*(1 + (n-1)//align))
+
+ self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
+
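Before the class below, a minimal sketch of the round trip `_check_roundtrip` automates: export an array through the buffer protocol with memoryview, re-import with np.asarray, and the dtype and contents survive while the re-import stays a view over the exported buffer.

    import numpy as np

    x = np.array([1, 2, 3], dtype='i4')
    y = np.asarray(memoryview(x))
    assert y.dtype == x.dtype
    assert (y == x).all()
    assert not y.flags.owndata      # view, not a copy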
+class TestNewBufferProtocol(object):
+ def _check_roundtrip(self, obj):
+ obj = np.asarray(obj)
+ x = memoryview(obj)
+ y = np.asarray(x)
+ y2 = np.array(x)
+ assert_(not y.flags.owndata)
+ assert_(y2.flags.owndata)
+ assert_equal(y.dtype, obj.dtype)
+ assert_array_equal(obj, y)
+ assert_equal(y2.dtype, obj.dtype)
+ assert_array_equal(obj, y2)
- x = np.array([1,2,3], dtype='>i2')
+ def test_roundtrip(self):
+ x = np.array([1,2,3,4,5], dtype='i4')
+ self._check_roundtrip(x)
+
+ x = np.array([[1,2],[3,4]], dtype=np.float64)
+ self._check_roundtrip(x)
+
+ x = np.zeros((3,3,3), dtype=np.float32)[:,0,:]
+ self._check_roundtrip(x)
+
+ dt = [('a', 'b'),
+ ('b', 'h'),
+ ('c', 'i'),
+ ('d', 'l'),
+ ('dx', 'q'),
+ ('e', 'B'),
+ ('f', 'H'),
+ ('g', 'I'),
+ ('h', 'L'),
+ ('hx', 'Q'),
+ ('i', np.single),
+ ('j', np.double),
+ ('k', np.longdouble),
+ ('ix', np.csingle),
+ ('jx', np.cdouble),
+ ('kx', np.clongdouble),
+ ('l', 'S4'),
+ ('m', 'U4'),
+ ('n', 'V3'),
+ ('o', '?'),
+ ('p', np.half),
+ ]
+ x = np.array(
+ [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
+ dtype=dt)
+ self._check_roundtrip(x)
+
+ x = np.array(([[1,2],[3,4]],), dtype=[('a', (int, (2,2)))])
+ self._check_roundtrip(x)
+
+ x = np.array([1,2,3], dtype='>i2')
+ self._check_roundtrip(x)
+
+ x = np.array([1,2,3], dtype='<i2')
+ self._check_roundtrip(x)
+
+ x = np.array([1,2,3], dtype='>i4')
+ self._check_roundtrip(x)
+
+ x = np.array([1,2,3], dtype='<i4')
+ self._check_roundtrip(x)
+
+ # Native-only data types can be passed through the buffer interface
+ # only in native byte order
+ if sys.byteorder == 'little':
+ x = np.array([1,2,3], dtype='>q')
+ assert_raises(ValueError, self._check_roundtrip, x)
+ x = np.array([1,2,3], dtype='<q')
self._check_roundtrip(x)
-
- x = np.array([1,2,3], dtype='<i2')
+ else:
+ x = np.array([1,2,3], dtype='>q')
self._check_roundtrip(x)
+ x = np.array([1,2,3], dtype='<q')
+ assert_raises(ValueError, self._check_roundtrip, x)
+
+ def test_roundtrip_half(self):
+ half_list = [
+ 1.0,
+ -2.0,
+ 6.5504 * 10**4, # (max half precision)
+ 2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
+ 2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
+ 0.0,
+ -0.0,
+ float('+inf'),
+ float('-inf'),
+ 0.333251953125, # ~= 1/3
+ ]
- x = np.array([1,2,3], dtype='>i4')
- self._check_roundtrip(x)
+ x = np.array(half_list, dtype='>e')
+ self._check_roundtrip(x)
+ x = np.array(half_list, dtype='<e')
+ self._check_roundtrip(x)
+
+ def test_export_simple_1d(self):
+ x = np.array([1,2,3,4,5], dtype='i')
+ y = memoryview(x)
+ assert_equal(y.format, 'i')
+ assert_equal(y.shape, (5,))
+ assert_equal(y.ndim, 1)
+ assert_equal(y.strides, (4,))
+ assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.itemsize, 4)
+
+ def test_export_simple_nd(self):
+ x = np.array([[1,2],[3,4]], dtype=np.float64)
+ y = memoryview(x)
+ assert_equal(y.format, 'd')
+ assert_equal(y.shape, (2, 2))
+ assert_equal(y.ndim, 2)
+ assert_equal(y.strides, (16, 8))
+ assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.itemsize, 8)
+
+ def test_export_discontiguous(self):
+ x = np.zeros((3,3,3), dtype=np.float32)[:,0,:]
+ y = memoryview(x)
+ assert_equal(y.format, 'f')
+ assert_equal(y.shape, (3, 3))
+ assert_equal(y.ndim, 2)
+ assert_equal(y.strides, (36, 4))
+ assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.itemsize, 4)
+
+ def test_export_record(self):
+ dt = [('a', 'b'),
+ ('b', 'h'),
+ ('c', 'i'),
+ ('d', 'l'),
+ ('dx', 'q'),
+ ('e', 'B'),
+ ('f', 'H'),
+ ('g', 'I'),
+ ('h', 'L'),
+ ('hx', 'Q'),
+ ('i', np.single),
+ ('j', np.double),
+ ('k', np.longdouble),
+ ('ix', np.csingle),
+ ('jx', np.cdouble),
+ ('kx', np.clongdouble),
+ ('l', 'S4'),
+ ('m', 'U4'),
+ ('n', 'V3'),
+ ('o', '?'),
+ ('p', np.half),
+ ]
+ x = np.array(
+ [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
+ dtype=dt)
+ y = memoryview(x)
+ assert_equal(y.shape, (1,))
+ assert_equal(y.ndim, 1)
+ assert_equal(y.suboffsets, EMPTY)
+
+ sz = sum([dtype(b).itemsize for a, b in dt])
+ if dtype('l').itemsize == 4:
+ assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:^q:dx:B:e:@H:f:=I:g:L:h:^Q:hx:=f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+ else:
+ assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:^q:dx:B:e:@H:f:=I:g:Q:h:^Q:hx:=f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+ # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
+ if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
+ assert_equal(y.strides, (sz,))
+ assert_equal(y.itemsize, sz)
+
+ def test_export_subarray(self):
+ x = np.array(([[1,2],[3,4]],), dtype=[('a', ('i', (2,2)))])
+ y = memoryview(x)
+ assert_equal(y.format, 'T{(2,2)i:a:}')
+ assert_equal(y.shape, EMPTY)
+ assert_equal(y.ndim, 0)
+ assert_equal(y.strides, EMPTY)
+ assert_equal(y.suboffsets, EMPTY)
+ assert_equal(y.itemsize, 16)
+
+ def test_export_endian(self):
+ x = np.array([1,2,3], dtype='>i')
+ y = memoryview(x)
+ if sys.byteorder == 'little':
+ assert_equal(y.format, '>i')
+ else:
+ assert_equal(y.format, 'i')
- x = np.array([1,2,3], dtype='<i4')
- self._check_roundtrip(x)
+ x = np.array([1,2,3], dtype='<i')
+ y = memoryview(x)
+ if sys.byteorder == 'little':
+ assert_equal(y.format, 'i')
+ else:
+ assert_equal(y.format, '<i')
- # Native-only data types can be passed through the buffer interface
- # only in native byte order
- if sys.byteorder == 'little':
- x = np.array([1,2,3], dtype='>q')
- assert_raises(ValueError, self._check_roundtrip, x)
- x = np.array([1,2,3], dtype='<q')
- self._check_roundtrip(x)
- else:
- x = np.array([1,2,3], dtype='>q')
- self._check_roundtrip(x)
- x = np.array([1,2,3], dtype='<q')
- assert_raises(ValueError, self._check_roundtrip, x)
-
- def test_roundtrip_half(self):
- half_list = [
- 1.0,
- -2.0,
- 6.5504 * 10**4, # (max half precision)
- 2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
- 2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
- 0.0,
- -0.0,
- float('+inf'),
- float('-inf'),
- 0.333251953125, # ~= 1/3
- ]
-
- x = np.array(half_list, dtype='>e')
- self._check_roundtrip(x)
- x = np.array(half_list, dtype='<e')
+ def test_padding(self):
+ for j in range(8):
+ x = np.array([(1,),(2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
- def test_export_simple_1d(self):
- x = np.array([1,2,3,4,5], dtype='i')
- y = memoryview(x)
- assert_equal(y.format, 'i')
- assert_equal(y.shape, (5,))
- assert_equal(y.ndim, 1)
- assert_equal(y.strides, (4,))
- assert_equal(y.suboffsets, EMPTY)
- assert_equal(y.itemsize, 4)
-
- def test_export_simple_nd(self):
- x = np.array([[1,2],[3,4]], dtype=np.float64)
- y = memoryview(x)
- assert_equal(y.format, 'd')
- assert_equal(y.shape, (2, 2))
- assert_equal(y.ndim, 2)
- assert_equal(y.strides, (16, 8))
- assert_equal(y.suboffsets, EMPTY)
- assert_equal(y.itemsize, 8)
-
- def test_export_discontiguous(self):
- x = np.zeros((3,3,3), dtype=np.float32)[:,0,:]
- y = memoryview(x)
- assert_equal(y.format, 'f')
- assert_equal(y.shape, (3, 3))
- assert_equal(y.ndim, 2)
- assert_equal(y.strides, (36, 4))
- assert_equal(y.suboffsets, EMPTY)
- assert_equal(y.itemsize, 4)
-
- def test_export_record(self):
- dt = [('a', 'b'),
- ('b', 'h'),
- ('c', 'i'),
- ('d', 'l'),
- ('dx', 'q'),
- ('e', 'B'),
- ('f', 'H'),
- ('g', 'I'),
- ('h', 'L'),
- ('hx', 'Q'),
- ('i', np.single),
- ('j', np.double),
- ('k', np.longdouble),
- ('ix', np.csingle),
- ('jx', np.cdouble),
- ('kx', np.clongdouble),
- ('l', 'S4'),
- ('m', 'U4'),
- ('n', 'V3'),
- ('o', '?'),
- ('p', np.half),
- ]
- x = np.array(
- [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
- dtype=dt)
- y = memoryview(x)
- assert_equal(y.shape, (1,))
- assert_equal(y.ndim, 1)
- assert_equal(y.suboffsets, EMPTY)
-
- sz = sum([dtype(b).itemsize for a, b in dt])
- if dtype('l').itemsize == 4:
- assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:^q:dx:B:e:@H:f:=I:g:L:h:^Q:hx:=f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
- else:
- assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:^q:dx:B:e:@H:f:=I:g:Q:h:^Q:hx:=f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
- assert_equal(y.strides, (sz,))
- assert_equal(y.itemsize, sz)
-
- def test_export_subarray(self):
- x = np.array(([[1,2],[3,4]],), dtype=[('a', ('i', (2,2)))])
- y = memoryview(x)
- assert_equal(y.format, 'T{(2,2)i:a:}')
- assert_equal(y.shape, EMPTY)
- assert_equal(y.ndim, 0)
- assert_equal(y.strides, EMPTY)
- assert_equal(y.suboffsets, EMPTY)
- assert_equal(y.itemsize, 16)
-
- def test_export_endian(self):
- x = np.array([1,2,3], dtype='>i')
- y = memoryview(x)
- if sys.byteorder == 'little':
- assert_equal(y.format, '>i')
- else:
- assert_equal(y.format, 'i')
-
- x = np.array([1,2,3], dtype='<i')
- y = memoryview(x)
- if sys.byteorder == 'little':
- assert_equal(y.format, 'i')
- else:
- assert_equal(y.format, '<i')
-
- def test_padding(self):
- for j in xrange(8):
- x = np.array([(1,),(2,)], dtype={'f0': (int, j)})
- self._check_roundtrip(x)
-
- def test_reference_leak(self):
- count_1 = sys.getrefcount(np.core._internal)
- a = np.zeros(4)
- b = memoryview(a)
- c = np.asarray(b)
- count_2 = sys.getrefcount(np.core._internal)
- assert_equal(count_1, count_2)
-
- def test_padded_struct_array(self):
- dt1 = np.dtype(
- [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
- align=True)
- x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
- self._check_roundtrip(x1)
-
- dt2 = np.dtype(
- [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
- align=True)
- x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
- self._check_roundtrip(x2)
-
- dt3 = np.dtype(
- [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
- ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
- x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
- self._check_roundtrip(x3)
-
-
- class TestArrayAttributeDeletion(object):
-
- def test_multiarray_writable_attributes_deletion(self):
- """ticket #2046, should not seqfault, raise AttributeError"""
- a = np.ones(2)
- attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
- for s in attr:
- assert_raises(AttributeError, delattr, a, s)
-
-
- def test_multiarray_not_writable_attributes_deletion(self):
- a = np.ones(2)
- attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
- "ctypes", "T", "__array_interface__", "__array_struct__",
- "__array_priority__", "__array_finalize__"]
- for s in attr:
- assert_raises(AttributeError, delattr, a, s)
-
-
- def test_multiarray_flags_writable_attribute_deletion(self):
- a = np.ones(2).flags
- attr = ['updateifcopy', 'aligned', 'writeable']
- for s in attr:
- assert_raises(AttributeError, delattr, a, s)
-
-
- def test_multiarray_flags_not_writable_attribute_deletion(self):
- a = np.ones(2).flags
- attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
- "owndata", "fnc", "forc", "behaved", "carray", "farray",
- "num"]
- for s in attr:
- assert_raises(AttributeError, delattr, a, s)
+ def test_reference_leak(self):
+ count_1 = sys.getrefcount(np.core._internal)
+ a = np.zeros(4)
+ b = memoryview(a)
+ c = np.asarray(b)
+ count_2 = sys.getrefcount(np.core._internal)
+ assert_equal(count_1, count_2)
+
+ def test_padded_struct_array(self):
+ dt1 = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
+ align=True)
+ x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
+ self._check_roundtrip(x1)
+
+ dt2 = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
+ align=True)
+ x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
+ self._check_roundtrip(x2)
+
+ dt3 = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
+ ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
+ x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
+ self._check_roundtrip(x3)
+
+
+class TestArrayAttributeDeletion(object):
+
+ def test_multiarray_writable_attributes_deletion(self):
+ """ticket #2046, should not seqfault, raise AttributeError"""
+ a = np.ones(2)
+ attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
+
+
+ def test_multiarray_not_writable_attributes_deletion(self):
+ a = np.ones(2)
+ attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
+ "ctypes", "T", "__array_interface__", "__array_struct__",
+ "__array_priority__", "__array_finalize__"]
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
+
+
+ def test_multiarray_flags_writable_attribute_deletion(self):
+ a = np.ones(2).flags
+ attr = ['updateifcopy', 'aligned', 'writeable']
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
+
+
+ def test_multiarray_flags_not_writable_attribute_deletion(self):
+ a = np.ones(2).flags
+ attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
+ "owndata", "fnc", "forc", "behaved", "carray", "farray",
+ "num"]
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
@@ -2859,6 +3386,13 @@ def test_array_interface():
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
+ # test scalar with no shape
+ class ArrayLike(object):
+ array = np.array(1)
+ __array_interface__ = array.__array_interface__
+ assert_equal(np.array(ArrayLike()), 1)
+
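A sketch of the scalar case just added: an object exposing a 0-d `__array_interface__` coerces to a 0-d array. Note the underlying array must be kept alive, since the interface dict carries a raw data pointer.

    import numpy as np

    class Scalarish(object):
        _base = np.array(7)                       # keeps the buffer alive
        __array_interface__ = _base.__array_interface__

    assert np.array(Scalarish()) == 7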
+
def test_flat_element_deletion():
it = np.ones(3).flat
try:
@@ -2894,6 +3428,158 @@ class TestMapIter(TestCase):
[ 104., 5., 6., 7.,],
[ 8., 9., 40., 11.,]])
+ b = arange(6).astype(float)
+ index = (array([1,2,0]),)
+ vals = [50,4,100.1]
+ test_inplace_increment(b, index, vals)
+ assert_equal(b, [ 100.1, 51., 6., 3., 4., 5. ])
+
+
+class PriorityNdarray(object):
+ __array_priority__ = 1000
+
+ def __init__(self, array):
+ self.array = array
+
+ def __lt__(self, array):
+ if isinstance(array, PriorityNdarray):
+ array = array.array
+ return PriorityNdarray(self.array < array)
+
+ def __gt__(self, array):
+ if isinstance(array, PriorityNdarray):
+ array = array.array
+ return PriorityNdarray(self.array > array)
+
+ def __le__(self, array):
+ if isinstance(array, PriorityNdarray):
+ array = array.array
+ return PriorityNdarray(self.array <= array)
+
+ def __ge__(self, array):
+ if isinstance(array, PriorityNdarray):
+ array = array.array
+ return PriorityNdarray(self.array >= array)
+
+ def __eq__(self, array):
+ if isinstance(array, PriorityNdarray):
+ array = array.array
+ return PriorityNdarray(self.array == array)
+
+ def __ne__(self, array):
+ if isinstance(array, PriorityNdarray):
+ array = array.array
+ return PriorityNdarray(self.array != array)
+
+
+class TestArrayPriority(TestCase):
+ def test_lt(self):
+ l = np.asarray([0., -1., 1.], dtype=dtype)
+ r = np.asarray([0., 1., -1.], dtype=dtype)
+ lp = PriorityNdarray(l)
+ rp = PriorityNdarray(r)
+ res1 = l < r
+ res2 = l < rp
+ res3 = lp < r
+ res4 = lp < rp
+
+ assert_array_equal(res1, res2.array)
+ assert_array_equal(res1, res3.array)
+ assert_array_equal(res1, res4.array)
+ assert_(isinstance(res1, np.ndarray))
+ assert_(isinstance(res2, PriorityNdarray))
+ assert_(isinstance(res3, PriorityNdarray))
+ assert_(isinstance(res4, PriorityNdarray))
+
+ def test_gt(self):
+ l = np.asarray([0., -1., 1.], dtype=dtype)
+ r = np.asarray([0., 1., -1.], dtype=dtype)
+ lp = PriorityNdarray(l)
+ rp = PriorityNdarray(r)
+ res1 = l > r
+ res2 = l > rp
+ res3 = lp > r
+ res4 = lp > rp
+
+ assert_array_equal(res1, res2.array)
+ assert_array_equal(res1, res3.array)
+ assert_array_equal(res1, res4.array)
+ assert_(isinstance(res1, np.ndarray))
+ assert_(isinstance(res2, PriorityNdarray))
+ assert_(isinstance(res3, PriorityNdarray))
+ assert_(isinstance(res4, PriorityNdarray))
+
+ def test_le(self):
+ l = np.asarray([0., -1., 1.], dtype=dtype)
+ r = np.asarray([0., 1., -1.], dtype=dtype)
+ lp = PriorityNdarray(l)
+ rp = PriorityNdarray(r)
+ res1 = l <= r
+ res2 = l <= rp
+ res3 = lp <= r
+ res4 = lp <= rp
+
+ assert_array_equal(res1, res2.array)
+ assert_array_equal(res1, res3.array)
+ assert_array_equal(res1, res4.array)
+ assert_(isinstance(res1, np.ndarray))
+ assert_(isinstance(res2, PriorityNdarray))
+ assert_(isinstance(res3, PriorityNdarray))
+ assert_(isinstance(res4, PriorityNdarray))
+
+ def test_ge(self):
+ l = np.asarray([0., -1., 1.], dtype=dtype)
+ r = np.asarray([0., 1., -1.], dtype=dtype)
+ lp = PriorityNdarray(l)
+ rp = PriorityNdarray(r)
+ res1 = l >= r
+ res2 = l >= rp
+ res3 = lp >= r
+ res4 = lp >= rp
+
+ assert_array_equal(res1, res2.array)
+ assert_array_equal(res1, res3.array)
+ assert_array_equal(res1, res4.array)
+ assert_(isinstance(res1, np.ndarray))
+ assert_(isinstance(res2, PriorityNdarray))
+ assert_(isinstance(res3, PriorityNdarray))
+ assert_(isinstance(res4, PriorityNdarray))
+
+ def test_eq(self):
+ l = np.asarray([0., -1., 1.], dtype=dtype)
+ r = np.asarray([0., 1., -1.], dtype=dtype)
+ lp = PriorityNdarray(l)
+ rp = PriorityNdarray(r)
+ res1 = l == r
+ res2 = l == rp
+ res3 = lp == r
+ res4 = lp == rp
+
+ assert_array_equal(res1, res2.array)
+ assert_array_equal(res1, res3.array)
+ assert_array_equal(res1, res4.array)
+ assert_(isinstance(res1, np.ndarray))
+ assert_(isinstance(res2, PriorityNdarray))
+ assert_(isinstance(res3, PriorityNdarray))
+ assert_(isinstance(res4, PriorityNdarray))
+
+ def test_ne(self):
+ l = np.asarray([0., -1., 1.], dtype=dtype)
+ r = np.asarray([0., 1., -1.], dtype=dtype)
+ lp = PriorityNdarray(l)
+ rp = PriorityNdarray(r)
+ res1 = l != r
+ res2 = l != rp
+ res3 = lp != r
+ res4 = lp != rp
+
+ assert_array_equal(res1, res2.array)
+ assert_array_equal(res1, res3.array)
+ assert_array_equal(res1, res4.array)
+ assert_(isinstance(res1, np.ndarray))
+ assert_(isinstance(res2, PriorityNdarray))
+ assert_(isinstance(res3, PriorityNdarray))
+ assert_(isinstance(res4, PriorityNdarray))
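What `__array_priority__ = 1000` buys the wrapper above, assuming the comparison-dispatch behaviour this patch introduces: with the ndarray on the left, the comparison defers to the higher-priority operand, so Python falls back to the wrapper's reflected method. A hedged sketch:

    import numpy as np

    class Wrapped(object):
        __array_priority__ = 1000
        def __init__(self, a):
            self.a = np.asarray(a)
        def __gt__(self, other):
            # reflected handler for `ndarray < Wrapped`
            return Wrapped(self.a > np.asarray(other))

    res = np.array([0.0, 2.0]) < Wrapped([1.0, 1.0])
    assert isinstance(res, Wrapped)               # wrapper won the dispatch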
if __name__ == "__main__":
diff --git a/numpy/core/tests/test_multiarray_assignment.py b/numpy/core/tests/test_multiarray_assignment.py
index 29ddcf906..65a09086b 100644
--- a/numpy/core/tests/test_multiarray_assignment.py
+++ b/numpy/core/tests/test_multiarray_assignment.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.testing import TestCase
@@ -43,7 +45,7 @@ def _indices(ndims):
# no itertools.product available in Py2.4
res = [[]]
- for i in xrange(ndims):
+ for i in range(ndims):
newres = []
for elem in ind:
for others in res:
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 4abe5e2ff..76d85618f 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -1,10 +1,13 @@
+from __future__ import division, absolute_import, print_function
+
+import sys, warnings
+
import numpy as np
from numpy import array, arange, nditer, all
-from numpy.compat import asbytes
+from numpy.compat import asbytes, sixu
from numpy.testing import *
-import sys, warnings
-import warnings
+
def iter_multi_index(i):
ret = []
@@ -598,6 +601,8 @@ def test_iter_itershape():
[['readonly'], ['writeonly','allocate']],
op_axes=[[0,1,None], None],
itershape=(-1,1,4))
+ # Regression test: when itershape is given but op_axes is not, the
+ # internal op_axes must be NULLed correctly
+ i = np.nditer([np.ones(2), None, None], itershape=(2,))
def test_iter_broadcasting_errors():
# Check that errors are thrown for bad broadcasting shapes
@@ -632,7 +637,7 @@ def test_iter_broadcasting_errors():
[],
[['readonly'], ['readonly'], ['writeonly','no_broadcast']])
assert_(False, 'Should have raised a broadcast error')
- except ValueError, e:
+ except ValueError as e:
msg = str(e)
# The message should contain the shape of the 3rd operand
assert_(msg.find('(2,3)') >= 0,
@@ -647,7 +652,7 @@ def test_iter_broadcasting_errors():
op_axes=[[0,1], [0,np.newaxis]],
itershape=(4,3))
assert_(False, 'Should have raised a broadcast error')
- except ValueError, e:
+ except ValueError as e:
msg = str(e)
# The message should contain "shape->remappedshape" for each operand
assert_(msg.find('(2,3)->(2,3)') >= 0,
@@ -664,7 +669,7 @@ def test_iter_broadcasting_errors():
[],
[['writeonly','no_broadcast'], ['readonly']])
assert_(False, 'Should have raised a broadcast error')
- except ValueError, e:
+ except ValueError as e:
msg = str(e)
# The message should contain the shape of the bad operand
assert_(msg.find('(2,1,1)') >= 0,
@@ -1206,7 +1211,8 @@ def test_iter_copy():
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterrange = (2,18)
- i.next(); i.next()
+ next(i)
+ next(i)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
@@ -1431,32 +1437,32 @@ def test_iter_iterindex():
a = arange(24).reshape(4,3,2)
for flags in ([], ['buffered']):
i = nditer(a, flags, buffersize=buffersize)
- assert_equal(iter_iterindices(i), range(24))
+ assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 2
- assert_equal(iter_iterindices(i), range(2,24))
+ assert_equal(iter_iterindices(i), list(range(2,24)))
i = nditer(a, flags, order='F', buffersize=buffersize)
- assert_equal(iter_iterindices(i), range(24))
+ assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 5
- assert_equal(iter_iterindices(i), range(5,24))
+ assert_equal(iter_iterindices(i), list(range(5,24)))
i = nditer(a[::-1], flags, order='F', buffersize=buffersize)
- assert_equal(iter_iterindices(i), range(24))
+ assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 9
- assert_equal(iter_iterindices(i), range(9,24))
+ assert_equal(iter_iterindices(i), list(range(9,24)))
i = nditer(a[::-1,::-1], flags, order='C', buffersize=buffersize)
- assert_equal(iter_iterindices(i), range(24))
+ assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 13
- assert_equal(iter_iterindices(i), range(13,24))
+ assert_equal(iter_iterindices(i), list(range(13,24)))
i = nditer(a[::1,::-1], flags, buffersize=buffersize)
- assert_equal(iter_iterindices(i), range(24))
+ assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 23
- assert_equal(iter_iterindices(i), range(23,24))
+ assert_equal(iter_iterindices(i), list(range(23,24)))
i.reset()
i.iterindex = 2
- assert_equal(iter_iterindices(i), range(2,24))
+ assert_equal(iter_iterindices(i), list(range(2,24)))
def test_iter_iterrange():
# Make sure getting and resetting the iterrange works
@@ -1568,7 +1574,7 @@ def test_iter_buffering_delayed_alloc():
assert_equal(i[0], 0)
i[1] = 1
assert_equal(i[0:2], [0,1])
- assert_equal([[x[0][()],x[1][()]] for x in i], zip(range(6), [1]*6))
+ assert_equal([[x[0][()],x[1][()]] for x in i], list(zip(range(6), [1]*6)))
def test_iter_buffered_cast_simple():
# Test that buffering can handle a simple cast
@@ -1800,7 +1806,7 @@ def test_iter_buffered_cast_subarray():
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
- for x, count in zip(i, range(6)):
+ for x, count in zip(i, list(range(6))):
assert_(np.all(x['a'] == count))
# one element -> many -> back (copies it to all)
@@ -2002,7 +2008,7 @@ def test_iter_buffering_string():
assert_raises(TypeError,nditer,a,['buffered'],['readonly'],
op_dtypes='U2')
i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6')
- assert_equal(i[0], u'abc')
+ assert_equal(i[0], sixu('abc'))
assert_equal(i[0].dtype, np.dtype('U6'))
def test_iter_buffering_growinner():
@@ -2013,6 +2019,57 @@ def test_iter_buffering_growinner():
# Should end up with just one inner loop here
assert_equal(i[0].size, a.size)
+
+@dec.slow
+def test_iter_buffered_reduce_reuse():
+ # large enough array for all views, including negative strides.
+ a = np.arange(2*3**5)[3**5:3**5+1]
+ flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok']
+ op_flags = [('readonly',), ('readwrite','allocate')]
+ op_axes_list = [[(0,1,2), (0,1,-1)], [(0,1,2), (0,-1,-1)]]
+ # wrong dtype to force buffering
+ op_dtypes = [np.float, a.dtype]
+
+ def get_params():
+ for xs in range(-3**2, 3**2 + 1):
+ for ys in range(xs, 3**2 + 1):
+ for op_axes in op_axes_list:
+ # the last stride is the inner, reduced stride, so its value
+ # does not matter for this test.
+ strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize)
+ arr = np.lib.stride_tricks.as_strided(a, (3,3,3), strides)
+
+ for skip in [0, 1]:
+ yield arr, op_axes, skip
+
+ for arr, op_axes, skip in get_params():
+ nditer2 = np.nditer([arr.copy(), None],
+ op_axes=op_axes, flags=flags, op_flags=op_flags,
+ op_dtypes=op_dtypes)
+ nditer2.operands[-1][...] = 0
+ nditer2.reset()
+ nditer2.iterindex = skip
+
+ for (a2_in, b2_in) in nditer2:
+ b2_in += a2_in.astype(np.int_)
+
+ comp_res = nditer2.operands[-1]
+
+ for bufsize in range(0, 3**3):
+ nditer1 = np.nditer([arr, None],
+ op_axes=op_axes, flags=flags, op_flags=op_flags,
+ buffersize=bufsize, op_dtypes=op_dtypes)
+ nditer1.operands[-1][...] = 0
+ nditer1.reset()
+ nditer1.iterindex = skip
+
+ for (a1_in, b1_in) in nditer1:
+ b1_in += a1_in.astype(np.int_)
+
+ res = nditer1.operands[-1]
+ assert_array_equal(res, comp_res)
+
+
def test_iter_no_broadcast():
# Test that the no_broadcast flag works
a = np.arange(24).reshape(2,3,4)
@@ -2468,5 +2525,59 @@ def test_iter_allocated_array_dtypes():
c[1,1] = a / b
assert_equal(it.operands[2], [[8, 12], [20, 5]])
+
+def test_0d_iter():
+ # Basic test for iteration of 0-d arrays:
+ i = nditer([2, 3], ['multi_index'], [['readonly']]*2)
+ assert_equal(i.ndim, 0)
+ assert_equal(next(i), (2, 3))
+ assert_equal(i.multi_index, ())
+ assert_equal(i.iterindex, 0)
+ assert_raises(StopIteration, next, i)
+ # test reset:
+ i.reset()
+ assert_equal(next(i), (2, 3))
+ assert_raises(StopIteration, next, i)
+
+ # test forcing to 0-d
+ i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
+ assert_equal(i.ndim, 0)
+ assert_equal(len(i), 1)
+ # note that itershape=() still behaves like None due to the conversions
+
+ # Test a more complex buffered casting case (same as another test above)
+ sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2,3)), ('d', 'O')]
+ a = np.array(0.5, dtype='f4')
+ i = nditer(a, ['buffered','refs_ok'], ['readonly'],
+ casting='unsafe', op_dtypes=sdt)
+ vals = next(i)
+ assert_equal(vals['a'], 0.5)
+ assert_equal(vals['b'], 0)
+ assert_equal(vals['c'], [[0.5]*3]*2)
+ assert_equal(vals['d'], 0.5)
+
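A sketch of the 0-d iteration contract tested above: a 0-d nditer has exactly one item, and its multi-index is the empty tuple.

    import numpy as np

    it = np.nditer(np.array(5), ['multi_index'], [['readonly']])
    assert it.ndim == 0
    assert len(it) == 1
    assert it.multi_index == ()
    assert next(it)[()] == 5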
+
+def test_0d_nested_iter():
+ a = np.arange(12).reshape(2,3,2)
+ i, j = np.nested_iters(a, [[],[1,0,2]])
+ vals = []
+ for x in i:
+ vals.append([y for y in j])
+ assert_equal(vals, [[0,1,2,3,4,5,6,7,8,9,10,11]])
+
+ i, j = np.nested_iters(a, [[1,0,2],[]])
+ vals = []
+ for x in i:
+ vals.append([y for y in j])
+ assert_equal(vals, [[0],[1],[2],[3],[4],[5],[6],[7],[8],[9],[10],[11]])
+
+ i, j, k = np.nested_iters(a, [[2,0], [] ,[1]])
+ vals = []
+ for x in i:
+ for y in j:
+ vals.append([z for z in k])
+ assert_equal(vals, [[0,2,4],[1,3,5],[6,8,10],[7,9,11]])
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 480b43811..341884f04 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -1,13 +1,16 @@
+from __future__ import division, absolute_import, print_function
+
import sys
+import platform
from decimal import Decimal
+import warnings
import numpy as np
from numpy.core import *
from numpy.random import rand, randint, randn
from numpy.testing import *
-from numpy.testing.utils import WarningManager
from numpy.core.multiarray import dot as dot_
-import warnings
+
class Vec(object):
def __init__(self,sequence=None):
@@ -175,18 +178,33 @@ class TestNonarrayArgs(TestCase):
assert_(all(mean(A,0) == array([2.5,3.5,4.5])))
assert_(all(mean(A,1) == array([2.,5.])))
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_(isnan(mean([])))
+ assert_(w[0].category is RuntimeWarning)
+
def test_std(self):
A = [[1,2,3],[4,5,6]]
assert_almost_equal(std(A), 1.707825127659933)
assert_almost_equal(std(A,0), array([1.5, 1.5, 1.5]))
assert_almost_equal(std(A,1), array([0.81649658, 0.81649658]))
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_(isnan(std([])))
+ assert_(w[0].category is RuntimeWarning)
+
def test_var(self):
A = [[1,2,3],[4,5,6]]
assert_almost_equal(var(A), 2.9166666666666665)
assert_almost_equal(var(A,0), array([2.25, 2.25, 2.25]))
assert_almost_equal(var(A,1), array([0.66666667, 0.66666667]))
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_(isnan(var([])))
+ assert_(w[0].category is RuntimeWarning)
+
class TestBoolScalar(TestCase):
def test_logical(self):
@@ -221,6 +239,140 @@ class TestBoolScalar(TestCase):
self.assertTrue((f ^ f) is f)
+class TestBoolArray(TestCase):
+ def setUp(self):
+ # offset for SIMD tests
+ self.t = array([True] * 41, dtype=np.bool)[1::]
+ self.f = array([False] * 41, dtype=np.bool)[1::]
+ self.o = array([False] * 42, dtype=np.bool)[2::]
+ self.nm = self.f.copy()
+ self.im = self.t.copy()
+ self.nm[3] = True
+ self.nm[-2] = True
+ self.im[3] = False
+ self.im[-2] = False
+
+ def test_all_any(self):
+ self.assertTrue(self.t.all())
+ self.assertTrue(self.t.any())
+ self.assertFalse(self.f.all())
+ self.assertFalse(self.f.any())
+ self.assertTrue(self.nm.any())
+ self.assertTrue(self.im.any())
+ self.assertFalse(self.nm.all())
+ self.assertFalse(self.im.all())
+ # check bad element in all positions
+ for i in range(256 - 7):
+ d = array([False] * 256, dtype=np.bool)[7::]
+ d[i] = True
+ self.assertTrue(np.any(d))
+ e = array([True] * 256, dtype=np.bool)[7::]
+ e[i] = False
+ self.assertFalse(np.all(e))
+ assert_array_equal(e, ~d)
+ # big array test for blocked libc loops
+ for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
+ d = array([False] * 100043, dtype=np.bool)
+ d[i] = True
+ self.assertTrue(np.any(d), msg="%r" % i)
+ e = array([True] * 100043, dtype=np.bool)
+ e[i] = False
+ self.assertFalse(np.all(e), msg="%r" % i)
+
+ def test_logical_not_abs(self):
+ assert_array_equal(~self.t, self.f)
+ assert_array_equal(np.abs(~self.t), self.f)
+ assert_array_equal(np.abs(~self.f), self.t)
+ assert_array_equal(np.abs(self.f), self.f)
+ assert_array_equal(~np.abs(self.f), self.t)
+ assert_array_equal(~np.abs(self.t), self.f)
+ assert_array_equal(np.abs(~self.nm), self.im)
+ np.logical_not(self.t, out=self.o)
+ assert_array_equal(self.o, self.f)
+ np.abs(self.t, out=self.o)
+ assert_array_equal(self.o, self.t)
+
+ def test_logical_and_or_xor(self):
+ assert_array_equal(self.t | self.t, self.t)
+ assert_array_equal(self.f | self.f, self.f)
+ assert_array_equal(self.t | self.f, self.t)
+ assert_array_equal(self.f | self.t, self.t)
+ np.logical_or(self.t, self.t, out=self.o)
+ assert_array_equal(self.o, self.t)
+ assert_array_equal(self.t & self.t, self.t)
+ assert_array_equal(self.f & self.f, self.f)
+ assert_array_equal(self.t & self.f, self.f)
+ assert_array_equal(self.f & self.t, self.f)
+ np.logical_and(self.t, self.t, out=self.o)
+ assert_array_equal(self.o, self.t)
+ assert_array_equal(self.t ^ self.t, self.f)
+ assert_array_equal(self.f ^ self.f, self.f)
+ assert_array_equal(self.t ^ self.f, self.t)
+ assert_array_equal(self.f ^ self.t, self.t)
+ np.logical_xor(self.t, self.t, out=self.o)
+ assert_array_equal(self.o, self.f)
+
+ assert_array_equal(self.nm & self.t, self.nm)
+ assert_array_equal(self.im & self.f, False)
+ assert_array_equal(self.nm & True, self.nm)
+ assert_array_equal(self.im & False, self.f)
+ assert_array_equal(self.nm | self.t, self.t)
+ assert_array_equal(self.im | self.f, self.im)
+ assert_array_equal(self.nm | True, self.t)
+ assert_array_equal(self.im | False, self.im)
+ assert_array_equal(self.nm ^ self.t, self.im)
+ assert_array_equal(self.im ^ self.f, self.im)
+ assert_array_equal(self.nm ^ True, self.im)
+ assert_array_equal(self.im ^ False, self.im)
+
+
+class TestBoolCmp(TestCase):
+ def setUp(self):
+ self.f = ones(256, dtype=np.float32)
+ self.ef = ones(self.f.size, dtype=np.bool)
+ self.d = ones(128, dtype=np.float64)
+ self.ed = ones(self.d.size, dtype=np.bool)
+ # generate values for all permutations of 256-bit SIMD vectors
+ s = 0
+ for i in range(32):
+ self.f[s:s+8] = [i & 2**x for x in range(8)]
+ self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
+ s += 8
+ s = 0
+ for i in range(16):
+ self.d[s:s+4] = [i & 2**x for x in range(4)]
+ self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
+ s += 4
+
+ def test_float(self):
+ # offset for alignment test
+ for i in range(4):
+ assert_array_equal(self.f[i:] != 0, self.ef[i:])
+ assert_array_equal(self.f[i:] > 0, self.ef[i:])
+ assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
+ assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
+ assert_array_equal(-self.f[i:] < 0, self.ef[i:])
+ assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
+
+ assert_array_equal(0 != self.f[i:], self.ef[i:])
+ assert_array_equal(np.zeros_like(self.f)[i:] != self.f[i:],
+ self.ef[i:])
+
+ def test_double(self):
+ # offset for alignment test
+ for i in range(2):
+ assert_array_equal(self.d[i:] != 0, self.ed[i:])
+ assert_array_equal(self.d[i:] > 0, self.ed[i:])
+ assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
+ assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
+ assert_array_equal(-self.d[i:] < 0, self.ed[i:])
+ assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
+
+ assert_array_equal(0 != self.d[i:], self.ed[i:])
+ assert_array_equal(np.zeros_like(self.d)[i:] != self.d[i:],
+ self.ed[i:])
+
+
class TestSeterr(TestCase):
def test_default(self):
err = geterr()
@@ -232,8 +384,8 @@ class TestSeterr(TestCase):
))
def test_set(self):
- err = seterr()
- try:
+ with np.errstate():
+ err = seterr()
old = seterr(divide='print')
self.assertTrue(err == old)
new = seterr()
@@ -243,12 +395,10 @@ class TestSeterr(TestCase):
self.assertTrue(new['divide'] == 'print')
seterr(**old)
self.assertTrue(geterr() == old)
- finally:
- seterr(**err)
+ @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_divide_err(self):
- err = seterr(divide='raise')
- try:
+ with errstate(divide='raise'):
try:
array([1.]) / array([0.])
except FloatingPointError:
@@ -257,8 +407,6 @@ class TestSeterr(TestCase):
self.fail()
seterr(divide='ignore')
array([1.]) / array([0.])
- finally:
- seterr(**err)
class TestFloatExceptions(TestCase):
@@ -268,18 +416,17 @@ class TestFloatExceptions(TestCase):
flop(x, y)
assert_(False,
"Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
- except FloatingPointError, exc:
+ except FloatingPointError as exc:
assert_(str(exc).find(fpeerr) >= 0,
"Type %s raised wrong fpe error '%s'." % (ftype, exc))
def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
- """Check that fpe exception is raised.
-
- Given a floating operation `flop` and two scalar values, check that
- the operation raises the floating point exception specified by
- `fpeerr`. Tests all variants with 0-d array scalars as well.
+ # Check that fpe exception is raised.
+ #
+ # Given a floating operation `flop` and two scalar values, check that
+ # the operation raises the floating point exception specified by
+ # `fpeerr`. Tests all variants with 0-d array scalars as well.
- """
self.assert_raises_fpe(fpeerr, flop, sc1, sc2);
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2);
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]);
@@ -287,9 +434,8 @@ class TestFloatExceptions(TestCase):
@dec.knownfailureif(True, "See ticket 1755")
def test_floating_exceptions(self):
- """Test basic arithmetic function errors"""
- oldsettings = np.seterr(all='raise')
- try:
+ # Test basic arithmetic function errors
+ with np.errstate(all='raise'):
# Test for all real and complex float types
for typecode in np.typecodes['AllFloat']:
ftype = np.obj2sctype(typecode)
@@ -340,12 +486,10 @@ class TestFloatExceptions(TestCase):
lambda a,b:a+b, ftype(np.inf), ftype(-np.inf))
self.assert_raises_fpe(invalid,
lambda a,b:a*b, ftype(0), ftype(np.inf))
- finally:
- np.seterr(**oldsettings)
class TestTypes(TestCase):
def check_promotion_cases(self, promote_func):
- """Tests that the scalars get coerced correctly."""
+ # Tests that the scalars get coerced correctly.
b = np.bool_(0)
i8, i16, i32, i64 = int8(0), int16(0), int32(0), int64(0)
u8, u16, u32, u64 = uint8(0), uint16(0), uint32(0), uint64(0)
@@ -403,9 +547,11 @@ class TestTypes(TestCase):
assert_equal(promote_func(array([b]),u64), np.dtype(uint64))
assert_equal(promote_func(array([i8]),f64), np.dtype(float64))
assert_equal(promote_func(array([u16]),f64), np.dtype(float64))
+
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(array([u16]), i32), np.dtype(uint16))
+
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
@@ -464,6 +610,7 @@ class TestTypes(TestCase):
def test_result_type(self):
self.check_promotion_cases(np.result_type)
+ assert_(np.result_type(None) == np.dtype(None))
def test_promote_types_endian(self):
# promote_types should always return native-endian types
@@ -514,9 +661,14 @@ class TestTypes(TestCase):
assert_raises(TypeError, np.can_cast, 'i4', None)
assert_raises(TypeError, np.can_cast, None, 'i4')
+
+# Custom exception class to test exception propagation in fromiter
+class NIterError(Exception): pass
+
+
class TestFromiter(TestCase):
def makegen(self):
- for x in xrange(24):
+ for x in range(24):
yield x**2
def test_types(self):
@@ -533,12 +685,8 @@ class TestFromiter(TestCase):
a20 = fromiter(self.makegen(), int, 20)
self.assertTrue(len(a) == len(expected))
self.assertTrue(len(a20) == 20)
- try:
- fromiter(self.makegen(), int, len(expected) + 10)
- except ValueError:
- pass
- else:
- self.fail()
+ self.assertRaises(ValueError, fromiter,
+ self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = array(list(self.makegen()))
@@ -547,6 +695,28 @@ class TestFromiter(TestCase):
self.assertTrue(alltrue(a == expected,axis=0))
self.assertTrue(alltrue(a20 == expected[:20],axis=0))
+ def load_data(self, n, eindex):
+ # Utility method for the issue 2592 tests.
+ # Raise an exception at the desired index in the iterator.
+ for e in range(n):
+ if e == eindex:
+ raise NIterError('error at index %s' % eindex)
+ yield e
+
+ def test_2592(self):
+ # Test iteration exceptions are correctly raised.
+ count, eindex = 10, 5
+ self.assertRaises(NIterError, np.fromiter,
+ self.load_data(count, eindex), dtype=int, count=count)
+
+ def test_2592_edge(self):
+ # Test iteration exceptions, edge case (exception at end of iterator).
+ count = 10
+ eindex = count-1
+ self.assertRaises(NIterError, np.fromiter,
+ self.load_data(count, eindex), dtype=int, count=count)
+
+
class TestNonzero(TestCase):
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(array([])), 0)
@@ -652,6 +822,12 @@ class TestArrayComparisons(TestCase):
res = array_equal(array([1,2]), array([1,3]))
assert_(not res)
assert_(type(res) is bool)
+ res = array_equal(array(['a'], dtype='S1'), array(['a'], dtype='S1'))
+ assert_(res)
+ assert_(type(res) is bool)
+ res = array_equal(array([('a',1)], dtype='S1,u4'), array([('a',1)], dtype='S1,u4'))
+ assert_(res)
+ assert_(type(res) is bool)
def test_array_equiv(self):
res = array_equiv(array([1,2]), array([1,2]))
@@ -740,7 +916,7 @@ class TestClip(TestCase):
# Now the real test cases
def test_simple_double(self):
- """Test native double input with scalar min/max."""
+ # Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.1
M = 0.6
@@ -749,7 +925,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_simple_int(self):
- """Test native int input with scalar min/max."""
+ # Test native int input with scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int)
m = -2
@@ -759,7 +935,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_array_double(self):
- """Test native double input with array min/max."""
+ # Test native double input with array min/max.
a = self._generate_data(self.nr, self.nc)
m = zeros(a.shape)
M = m + 0.5
@@ -768,8 +944,8 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_simple_nonnative(self):
- """Test non native double input with scalar min/max.
- Test native double input with non native double scalar min/max."""
+ # Test non-native double input with scalar min/max.
+ # Test native double input with non-native double scalar min/max.
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
@@ -777,7 +953,7 @@ class TestClip(TestCase):
act = self.clip(a, m, M)
assert_array_equal(ac, act)
- "Test native double input with non native double scalar min/max."
+ # Test native double input with non-native double scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
@@ -787,9 +963,8 @@ class TestClip(TestCase):
assert_array_equal(ac, act)
def test_simple_complex(self):
- """Test native complex input with native double scalar min/max.
- Test native input with complex double scalar min/max.
- """
+ # Test native complex input with native double scalar min/max.
+ # Test native input with complex double scalar min/max.
a = 3 * self._generate_data_complex(self.nr, self.nc)
m = -0.5
M = 1.
@@ -797,7 +972,7 @@ class TestClip(TestCase):
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
- "Test native input with complex double scalar min/max."
+ # Test native input with complex double scalar min/max.
a = 3 * self._generate_data(self.nr, self.nc)
m = -0.5 + 1.j
M = 1. + 2.j
@@ -806,7 +981,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_clip_non_contig(self):
- """Test clip for non contiguous native input and native scalar min/max."""
+ # Test clip for non-contiguous native input and native scalar min/max.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
@@ -816,7 +991,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_simple_out(self):
- """Test native double input with scalar min/max."""
+ # Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
@@ -827,7 +1002,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_simple_int32_inout(self):
- """Test native int32 input with double min/max and int32 out."""
+ # Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = float64(0)
M = float64(2)
@@ -838,7 +1013,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_simple_int64_out(self):
- """Test native int32 input with int32 scalar min/max and int64 out."""
+ # Test native int32 input with int32 scalar min/max and int64 out.
a = self._generate_int32_data(self.nr, self.nc)
m = int32(-1)
M = int32(1)
@@ -849,7 +1024,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_simple_int64_inout(self):
- """Test native int32 input with double array min/max and int32 out."""
+ # Test native int32 input with double array min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = zeros(a.shape, float64)
M = float64(1)
@@ -860,7 +1035,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_simple_int32_out(self):
- """Test native double input with scalar min/max and int out."""
+ # Test native double input with scalar min/max and int out.
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
@@ -871,7 +1046,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_simple_inplace_01(self):
- """Test native double input with array min/max in-place."""
+ # Test native double input with array min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = zeros(a.shape)
@@ -881,7 +1056,7 @@ class TestClip(TestCase):
assert_array_strict_equal(a, ac)
def test_simple_inplace_02(self):
- """Test native double input with scalar min/max in-place."""
+ # Test native double input with scalar min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
@@ -891,7 +1066,7 @@ class TestClip(TestCase):
assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
- """Test non contiguous double input with double scalar min/max in-place."""
+ # Test non-contiguous double input with double scalar min/max in-place.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
@@ -904,7 +1079,7 @@ class TestClip(TestCase):
assert_array_equal(a, ac)
def test_type_cast_01(self):
- "Test native double input with scalar min/max."
+ # Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
@@ -913,7 +1088,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_type_cast_02(self):
- "Test native int32 input with int32 scalar min/max."
+ # Test native int32 input with int32 scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int32)
m = -2
@@ -923,7 +1098,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_type_cast_03(self):
- "Test native int32 input with float64 scalar min/max."
+ # Test native int32 input with float64 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = -2
M = 4
@@ -932,7 +1107,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_type_cast_04(self):
- "Test native int32 input with float32 scalar min/max."
+ # Test native int32 input with float32 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = float32(-2)
M = float32(4)
@@ -941,7 +1116,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_type_cast_05(self):
- "Test native int32 with double arrays min/max."
+ # Test native int32 with double arrays min/max.
a = self._generate_int_data(self.nr, self.nc)
m = -0.5
M = 1.
@@ -950,7 +1125,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_type_cast_06(self):
- "Test native with NON native scalar min/max."
+ # Test native with NON-native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.5
m_s = self._neg_byteorder(m)
@@ -960,7 +1135,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_type_cast_07(self):
- "Test NON native with native array min/max."
+ # Test NON-native with native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * ones(a.shape)
M = 1.
@@ -971,7 +1146,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_type_cast_08(self):
- "Test NON native with native scalar min/max."
+ # Test NON-native with native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 1.
@@ -982,7 +1157,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_type_cast_09(self):
- "Test native with NON native array min/max."
+ # Test native with NON-native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * ones(a.shape)
M = 1.
@@ -993,7 +1168,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_type_cast_10(self):
- """Test native int32 with float min/max and float out for output argument."""
+ # Test native int32 with float min/max and float out for output argument.
a = self._generate_int_data(self.nr, self.nc)
b = zeros(a.shape, dtype = float32)
m = float32(-0.5)
@@ -1003,7 +1178,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_type_cast_11(self):
- "Test non native with native scalar, min/max, out non native"
+ # Test non-native input with native scalar min/max and non-native out.
a = self._generate_non_native_data(self.nr, self.nc)
b = a.copy()
b = b.astype(b.dtype.newbyteorder('>'))
@@ -1015,7 +1190,7 @@ class TestClip(TestCase):
assert_array_strict_equal(b, bt)
def test_type_cast_12(self):
- "Test native int32 input and min/max and float out"
+ # Test native int32 input with int32 scalar min/max and float32 out.
a = self._generate_int_data(self.nr, self.nc)
b = zeros(a.shape, dtype = float32)
m = int32(0)
@@ -1025,7 +1200,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple(self):
- "Test native double input with scalar min/max"
+ # Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
@@ -1036,7 +1211,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple2(self):
- "Test native int32 input with double min/max and int32 out"
+ # Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = float64(0)
M = float64(2)
@@ -1047,7 +1222,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple_int32(self):
- "Test native int32 input with int32 scalar min/max and int64 out"
+ # Test native int32 input with int32 scalar min/max and int64 out.
a = self._generate_int32_data(self.nr, self.nc)
m = int32(-1)
M = int32(1)
@@ -1058,7 +1233,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_int32(self):
- "Test native int32 input with double array min/max and int32 out"
+ # Test native int32 input with double array min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = zeros(a.shape, float64)
M = float64(1)
@@ -1069,7 +1244,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_outint32(self):
- "Test native double input with scalar min/max and int out"
+ # Test native double input with scalar min/max and int out.
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
@@ -1080,7 +1255,7 @@ class TestClip(TestCase):
assert_array_strict_equal(ac, act)
def test_clip_inplace_array(self):
- "Test native double input with array min/max"
+ # Test native double input with array min/max.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = zeros(a.shape)
@@ -1090,7 +1265,7 @@ class TestClip(TestCase):
assert_array_strict_equal(a, ac)
def test_clip_inplace_simple(self):
- "Test native double input with scalar min/max"
+ # Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
@@ -1100,8 +1275,7 @@ class TestClip(TestCase):
assert_array_strict_equal(a, ac)
def test_clip_func_takes_out(self):
- """ Ensure that the clip() function takes an out= argument.
- """
+ # Ensure that the clip() function takes an out= argument.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
@@ -1129,7 +1303,7 @@ class TestAllclose(object):
assert_(not allclose(x,y), "%s and %s shouldn't be close" % (x,y))
def test_ip_allclose(self):
- """Parametric test factory."""
+ # Parametric test factory.
arr = array([100,1000])
aran = arange(125).reshape((5,5,5))
@@ -1149,7 +1323,7 @@ class TestAllclose(object):
yield (self.tst_allclose,x,y)
def test_ip_not_allclose(self):
- """Parametric test factory."""
+ # Parametric test factory.
aran = arange(125).reshape((5,5,5))
atol = self.atol
@@ -1268,14 +1442,14 @@ class TestIsclose(object):
def test_masked_arrays(self):
x = np.ma.masked_where([True, True, False], np.arange(3))
- assert_(type(x) == type(isclose(2, x)))
+ assert_(type(x) is type(isclose(2, x)))
x = np.ma.masked_where([True, True, False], [nan, inf, nan])
- assert_(type(x) == type(isclose(inf, x)))
+ assert_(type(x) is type(isclose(inf, x)))
x = np.ma.masked_where([True, True, False], [nan, nan, nan])
y = isclose(nan, x, equal_nan=True)
- assert_(type(x) == type(y))
+ assert_(type(x) is type(y))
# Ensure that the mask isn't modified...
assert_array_equal([True, True, False], y.mask)
@@ -1289,7 +1463,6 @@ class TestIsclose(object):
assert_array_equal(x, array([inf, 1]))
assert_array_equal(y, array([0, inf]))
-
class TestStdVar(TestCase):
def setUp(self):
self.A = array([1,-1,1,-1])
@@ -1311,7 +1484,6 @@ class TestStdVar(TestCase):
assert_almost_equal(std(self.A,ddof=2)**2,
self.real_var*len(self.A)/float(len(self.A)-2))
-
class TestStdVarComplex(TestCase):
def test_basic(self):
A = array([1,1.j,-1,-1.j])
@@ -1320,8 +1492,63 @@ class TestStdVarComplex(TestCase):
assert_almost_equal(std(A)**2,real_var)
+class TestCreationFuncs(TestCase):
+
+ '''Test ones, zeros, empty and full'''
+
+ def setUp(self):
+ self.dtypes = ('b', 'i', 'u', 'f', 'c', 'S', 'a', 'U', 'V')
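+ # one-character dtype kind codes; check_function combines each code
+ # with a byte size below to build concrete dtypes such as 'f8' or 'i4'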
+ self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
+ self.ndims = 10
+
+ def check_function(self, func, fill_value=None):
+ fill_kwarg = {}
+ if fill_value is not None:
+ fill_kwarg = {'fill_value': fill_value}
+ for size in (0, 1, 2): # size in one dimension
+ for ndims in range(self.ndims): # [], [size], [size, size] etc.
+ shape = ndims * [size]
+ for order in self.orders: # C, F
+ for tp in self.dtypes: # bool, int, float etc.
+ for nbytes in 2**np.arange(9): # byte size for type
+ try:
+ dtype = np.dtype('{0}{1}'.format(tp, nbytes))
+ except TypeError: # dtype combination does not exist
+ continue
+ else:
+ # do not fill void type
+ if fill_value is not None and tp in 'V':
+ continue
+
+ arr = func(shape, order=order, dtype=dtype,
+ **fill_kwarg)
+
+ assert arr.dtype == dtype
+ assert getattr(arr.flags, self.orders[order])
+
+ if fill_value is not None:
+ if dtype.str.startswith('|S'):
+ val = str(fill_value)
+ else:
+ val = fill_value
+ assert_equal(arr, dtype.type(val))
+
+ def test_zeros(self):
+ self.check_function(np.zeros)
+
+ def test_ones(self):
+ self.check_function(np.ones)
+
+ def test_empty(self):
+ self.check_function(np.empty)
+
+ def test_full(self):
+ self.check_function(np.full, 0)
+ self.check_function(np.full, 1)
+
+
class TestLikeFuncs(TestCase):
- '''Test ones_like, zeros_like, and empty_like'''
+ '''Test ones_like, zeros_like, empty_like and full_like'''
def setUp(self):
self.data = [
@@ -1348,10 +1575,26 @@ class TestLikeFuncs(TestCase):
(arange(24).reshape(4,3,2).swapaxes(0,1), '?'),
]
- def check_like_function(self, like_function, value):
+ def compare_array_value(self, dz, value, fill_value):
+ if value is not None:
+ if fill_value:
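+ # full_like casts the fill value to the target dtype; skip
+ # combinations that cannot be cast (e.g. np.inf into an integer)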
+ try:
+ z = dz.dtype.type(value)
+ except OverflowError:
+ pass
+ else:
+ assert_(all(dz == z))
+ else:
+ assert_(all(dz == value))
+
+ def check_like_function(self, like_function, value, fill_value=False):
+ if fill_value:
+ fill_kwarg = {'fill_value': value}
+ else:
+ fill_kwarg = {}
for d, dtype in self.data:
# default (K) order, dtype
- dz = like_function(d, dtype=dtype)
+ dz = like_function(d, dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_equal(array(dz.strides)*d.dtype.itemsize,
array(d.strides)*dz.dtype.itemsize)
@@ -1361,33 +1604,30 @@ class TestLikeFuncs(TestCase):
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
- if not value is None:
- assert_(all(dz == value))
+ self.compare_array_value(dz, value, fill_value)
# C order, default dtype
- dz = like_function(d, order='C', dtype=dtype)
+ dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
- if not value is None:
- assert_(all(dz == value))
+ self.compare_array_value(dz, value, fill_value)
# F order, default dtype
- dz = like_function(d, order='F', dtype=dtype)
+ dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.f_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
- if not value is None:
- assert_(all(dz == value))
+ self.compare_array_value(dz, value, fill_value)
# A order
- dz = like_function(d, order='A', dtype=dtype)
+ dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
if d.flags.f_contiguous:
assert_(dz.flags.f_contiguous)
@@ -1397,17 +1637,16 @@ class TestLikeFuncs(TestCase):
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
- if not value is None:
- assert_(all(dz == value))
+ self.compare_array_value(dz, value, fill_value)
# Test the 'subok' parameter
a = np.matrix([[1,2],[3,4]])
- b = like_function(a)
+ b = like_function(a, **fill_kwarg)
assert_(type(b) is np.matrix)
- b = like_function(a, subok=False)
- assert_(not (type(b) is np.matrix))
+ b = like_function(a, subok=False, **fill_kwarg)
+ assert_(type(b) is not np.matrix)
def test_ones_like(self):
self.check_like_function(np.ones_like, 1)
@@ -1418,6 +1657,13 @@ class TestLikeFuncs(TestCase):
def test_empty_like(self):
self.check_like_function(np.empty_like, None)
+ def test_full_like(self):
+ self.check_like_function(np.full_like, 0, True)
+ self.check_like_function(np.full_like, 1, True)
+ self.check_like_function(np.full_like, 1000, True)
+ self.check_like_function(np.full_like, 123.456, True)
+ self.check_like_function(np.full_like, np.inf, True)
+
class _TestCorrelate(TestCase):
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
@@ -1502,5 +1748,27 @@ class TestStringFunction(object):
np.set_string_function(None, repr=False)
assert_equal(str(a), "[1]")
+class TestRoll(TestCase):
+ def test_roll1d(self):
+ x = np.arange(10)
+ xr = np.roll(x, 2)
+ assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))
+
+ def test_roll2d(self):
+ x2 = np.reshape(np.arange(10), (2,5))
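+ # without an axis, roll shifts the flattened array and reshapes back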
+ x2r = np.roll(x2, 1)
+ assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, 1, axis=0)
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, 1, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ def test_roll_empty(self):
+ x = np.array([])
+ assert_equal(np.roll(x, 1), np.array([]))
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index 98a6e07aa..173ebbf9c 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
from numpy.testing import *
from numpy.compat import asbytes, asunicode
diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py
index d40275ef4..342ca27c6 100644
--- a/numpy/core/tests/test_print.py
+++ b/numpy/core/tests/test_print.py
@@ -1,10 +1,16 @@
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.testing import *
import nose
import locale
import sys
-from StringIO import StringIO
+
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
@@ -18,11 +24,7 @@ def check_float_type(tp):
assert_equal(str(tp(1e10)), str(float('1e10')),
err_msg='Failed str formatting for type %s' % tp)
else:
- if sys.platform == 'win32' and sys.version_info[0] <= 2 and \
- sys.version_info[1] <= 5:
- ref = '1e+010'
- else:
- ref = '1e+10'
+ ref = '1e+10'
assert_equal(str(tp(1e10)), ref,
err_msg='Failed str formatting for type %s' % tp)
@@ -66,11 +68,7 @@ def check_complex_type(tp):
assert_equal(str(tp(1e10)), str(complex(1e10)),
err_msg='Failed str formatting for type %s' % tp)
else:
- if sys.platform == 'win32' and sys.version_info[0] <= 2 and \
- sys.version_info[1] <= 5:
- ref = '(1e+010+0j)'
- else:
- ref = '(1e+10+0j)'
+ ref = '(1e+10+0j)'
assert_equal(str(tp(1e10)), ref,
err_msg='Failed str formatting for type %s' % tp)
@@ -87,44 +85,24 @@ def test_complex_types():
def test_complex_inf_nan():
"""Check inf/nan formatting of complex types."""
- if sys.version_info[:2] >= (2, 6):
- TESTS = {
- complex(np.inf, 0): "(inf+0j)",
- complex(0, np.inf): "inf*j",
- complex(-np.inf, 0): "(-inf+0j)",
- complex(0, -np.inf): "-inf*j",
- complex(np.inf, 1): "(inf+1j)",
- complex(1, np.inf): "(1+inf*j)",
- complex(-np.inf, 1): "(-inf+1j)",
- complex(1, -np.inf): "(1-inf*j)",
- complex(np.nan, 0): "(nan+0j)",
- complex(0, np.nan): "nan*j",
- complex(-np.nan, 0): "(nan+0j)",
- complex(0, -np.nan): "nan*j",
- complex(np.nan, 1): "(nan+1j)",
- complex(1, np.nan): "(1+nan*j)",
- complex(-np.nan, 1): "(nan+1j)",
- complex(1, -np.nan): "(1+nan*j)",
- }
- else:
- TESTS = {
- complex(np.inf, 0): "(inf+0j)",
- complex(0, np.inf): "infj",
- complex(-np.inf, 0): "(-inf+0j)",
- complex(0, -np.inf): "-infj",
- complex(np.inf, 1): "(inf+1j)",
- complex(1, np.inf): "(1+infj)",
- complex(-np.inf, 1): "(-inf+1j)",
- complex(1, -np.inf): "(1-infj)",
- complex(np.nan, 0): "(nan+0j)",
- complex(0, np.nan): "nanj",
- complex(-np.nan, 0): "(nan+0j)",
- complex(0, -np.nan): "nanj",
- complex(np.nan, 1): "(nan+1j)",
- complex(1, np.nan): "(1+nanj)",
- complex(-np.nan, 1): "(nan+1j)",
- complex(1, -np.nan): "(1+nanj)",
- }
+ TESTS = {
+ complex(np.inf, 0): "(inf+0j)",
+ complex(0, np.inf): "inf*j",
+ complex(-np.inf, 0): "(-inf+0j)",
+ complex(0, -np.inf): "-inf*j",
+ complex(np.inf, 1): "(inf+1j)",
+ complex(1, np.inf): "(1+inf*j)",
+ complex(-np.inf, 1): "(-inf+1j)",
+ complex(1, -np.inf): "(1-inf*j)",
+ complex(np.nan, 0): "(nan+0j)",
+ complex(0, np.nan): "nan*j",
+ complex(-np.nan, 0): "(nan+0j)",
+ complex(0, -np.nan): "nan*j",
+ complex(np.nan, 1): "(nan+1j)",
+ complex(1, np.nan): "(1+nan*j)",
+ complex(-np.nan, 1): "(nan+1j)",
+ complex(1, -np.nan): "(1+nan*j)",
+ }
for tp in [np.complex64, np.cdouble, np.clongdouble]:
for c, s in TESTS.items():
yield _check_complex_inf_nan, c, s, tp
@@ -139,12 +117,12 @@ def _test_redirected_print(x, tp, ref=None):
stdout = sys.stdout
try:
sys.stdout = file_tp
- print tp(x)
+ print(tp(x))
sys.stdout = file
if ref:
- print ref
+ print(ref)
else:
- print x
+ print(x)
finally:
sys.stdout = stdout
@@ -161,11 +139,7 @@ def check_float_type_print(tp):
if tp(1e10).itemsize > 4:
_test_redirected_print(float(1e10), tp)
else:
- if sys.platform == 'win32' and sys.version_info[0] <= 2 and \
- sys.version_info[1] <= 5:
- ref = '1e+010'
- else:
- ref = '1e+10'
+ ref = '1e+10'
_test_redirected_print(float(1e10), tp, ref)
def check_complex_type_print(tp):
@@ -177,11 +151,7 @@ def check_complex_type_print(tp):
if tp(1e10).itemsize > 8:
_test_redirected_print(complex(1e10), tp)
else:
- if sys.platform == 'win32' and sys.version_info[0] <= 2 and \
- sys.version_info[1] <= 5:
- ref = '(1e+010+0j)'
- else:
- ref = '(1e+10+0j)'
+ ref = '(1e+10+0j)'
_test_redirected_print(complex(1e10), tp, ref)
_test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
@@ -198,7 +168,6 @@ def test_complex_type_print():
for t in [np.complex64, np.cdouble, np.clongdouble] :
yield check_complex_type_print, t
-@dec.skipif(sys.version_info[:2] < (2, 6))
def test_scalar_format():
"""Test the str.format method with NumPy scalar types"""
tests = [('{0}', True, np.bool_),
@@ -225,7 +194,7 @@ def test_scalar_format():
try:
assert_equal(fmat.format(val), fmat.format(valtype(val)),
"failed with val %s, type %s" % (val, valtype))
- except ValueError, e:
+ except ValueError as e:
assert_(False,
"format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
(fmat, repr(val), repr(valtype), str(e)))
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 87c661938..8c9ce5c70 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -1,9 +1,13 @@
+from __future__ import division, absolute_import, print_function
+
from os import path
import numpy as np
from numpy.testing import *
from numpy.compat import asbytes, asunicode
import warnings
+import collections
+import pickle
class TestFromrecords(TestCase):
@@ -51,11 +55,11 @@ class TestFromrecords(TestCase):
b = np.zeros(count, dtype='f8')
c = np.zeros(count, dtype='f8')
for i in range(len(a)):
- a[i] = range(1, 10)
+ a[i] = list(range(1, 10))
mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
for i in range(len(a)):
- assert_((mine.date[i] == range(1, 10)))
+ assert_((mine.date[i] == list(range(1, 10))))
assert_((mine.data1[i] == 0.0))
assert_((mine.data2[i] == 0.0))
@@ -78,7 +82,7 @@ class TestFromrecords(TestCase):
names='c1, c2, c3, c4')
assert_(ra.dtype == pa.dtype)
assert_(ra.shape == pa.shape)
- for k in xrange(len(ra)):
+ for k in range(len(ra)):
assert_(ra[k].item() == pa[k].item())
def test_recarray_conflict_fields(self):
@@ -94,7 +98,7 @@ class TestFromrecords(TestCase):
assert_array_equal(ra['shape'], [['A', 'B', 'C']])
ra.field = 5
assert_array_equal(ra['field'], [[5, 5, 5]])
- assert_(callable(ra.field))
+ assert_(isinstance(ra.field, collections.Callable))
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
@@ -143,6 +147,17 @@ class TestRecord(TestCase):
y = self.data[['col2', 'col1']]
assert_equal(x[0][0], y[0][1])
+ def test_pickle_1(self):
+ # Issue #1529
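+ # dtype with a zero-sized field; pickling must round-trip it intact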
+ a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
+ assert_equal(a, pickle.loads(pickle.dumps(a)))
+ assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
+
+ def test_pickle_2(self):
+ a = self.data
+ assert_equal(a, pickle.loads(pickle.dumps(a)))
+ assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
+
def test_find_duplicate():
l1 = [1, 2, 3, 4, 5, 6]
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 0f268fce8..86b0e48a2 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import pickle
import sys
import platform
@@ -5,20 +7,17 @@ import gc
import copy
import warnings
import tempfile
-from StringIO import StringIO
from os import path
+from io import BytesIO
+
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, assert_warns, dec
)
-from numpy.testing.utils import _assert_valid_refcount, WarningManager
-from numpy.compat import asbytes, asunicode, asbytes_nested
-
-if sys.version_info[0] >= 3:
- import io
- StringIO = io.BytesIO
+from numpy.testing.utils import _assert_valid_refcount
+from numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu
rlevel = 1
@@ -35,7 +34,7 @@ class TestRegression(TestCase):
def test_pickle_transposed(self,level=rlevel):
"""Ticket #16"""
a = np.transpose(np.array([[2,9],[7,0],[3,8]]))
- f = StringIO()
+ f = BytesIO()
pickle.dump(a,f)
f.seek(0)
b = pickle.load(f)
@@ -88,7 +87,7 @@ class TestRegression(TestCase):
def test_char_dump(self,level=rlevel):
"""Ticket #50"""
- f = StringIO()
+ f = BytesIO()
ca = np.char.array(np.arange(1000,1010),itemsize=4)
ca.dump(f)
f.seek(0)
@@ -139,7 +138,7 @@ class TestRegression(TestCase):
def test_unicode_swapping(self,level=rlevel):
"""Ticket #79"""
ulen = 1
- ucs_value = u'\U0010FFFF'
+ ucs_value = sixu('\U0010FFFF')
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua2 = ua.newbyteorder()
@@ -203,15 +202,21 @@ class TestRegression(TestCase):
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
- print buf[0]['head']
assert_(buf[0]['head'] == 1)
-
def test_mem_dot(self,level=rlevel):
"""Ticket #106"""
x = np.random.randn(0,1)
y = np.random.randn(10,1)
- z = np.dot(x, np.transpose(y))
+ # Dummy array to detect bad memory access:
+ _z = np.ones(10)
+ _dummy = np.empty((0, 10))
+ z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
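+ # z is a (0, 10) view into _z's buffer, so a stray write through the
+ # out= argument would corrupt _z and fail the assertions below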
+ np.dot(x, np.transpose(y), out=z)
+ assert_equal(_z, np.ones(10))
+ # Do the same for the built-in dot:
+ np.core.multiarray.dot(x, np.transpose(y), out=z)
+ assert_equal(_z, np.ones(10))
def test_arange_endian(self,level=rlevel):
"""Ticket #111"""
@@ -232,7 +237,7 @@ class TestRegression(TestCase):
def test_argmax(self,level=rlevel):
"""Ticket #119"""
a = np.random.normal(0,1,(4,5,6,7,8))
- for i in xrange(a.ndim):
+ for i in range(a.ndim):
aargmax = a.argmax(i)
def test_mem_divmod(self,level=rlevel):
@@ -258,6 +263,11 @@ class TestRegression(TestCase):
"""Ticket #143"""
assert_equal(0,np.add.identity)
+ def test_numpy_float_python_long_addition(self):
+ # Check that numpy float and python longs can be added correctly.
+ a = np.float_(23.) + 2**135
+ assert_equal(a, 23. + 2**135)
+
def test_binary_repr_0(self,level=rlevel):
"""Ticket #151"""
assert_equal('0',np.binary_repr(0))
@@ -298,10 +308,23 @@ class TestRegression(TestCase):
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
+ def test_nonarray_assignment(self):
+ # See also Issue gh-2870, test for nonarray assignment
+ # and equivalent unsafe casted array assignment
+ a = np.arange(10)
+ b = np.ones(10, dtype=bool)
+ r = np.arange(10)
+ def assign(a, b, c):
+ a[b] = c
+ assert_raises(ValueError, assign, a, b, np.nan)
+ a[b] = np.array(np.nan) # but not this.
+ assert_raises(ValueError, assign, a, r, np.nan)
+ a[r] = np.array(np.nan)
+
def test_unpickle_dtype_with_object(self,level=rlevel):
"""Implemented in r2840"""
dt = np.dtype([('x',int),('y',np.object_),('z','O')])
- f = StringIO()
+ f = BytesIO()
pickle.dump(dt,f)
f.seek(0)
dt_ = pickle.load(f)
@@ -365,7 +388,6 @@ class TestRegression(TestCase):
def test_pickle_dtype(self,level=rlevel):
"""Ticket #251"""
- import pickle
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
@@ -526,6 +548,18 @@ class TestRegression(TestCase):
a = np.ones((0,2))
a.shape = (-1,2)
+ # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
+ # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.
+ @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
+ def test_reshape_trailing_ones_strides(self):
+ # Github issue gh-2949, bad strides for trailing ones of new shape
+ a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
+ strides_c = (16, 8, 8, 8)
+ strides_f = (8, 24, 48, 48)
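+ # itemsize 4 with step 2 gives an 8 byte base stride; the trailing
+ # 1-sized dimensions must inherit sensible strides rather than garbage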
+ assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
+ assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
+ assert_equal(np.array(0, dtype=np.int32).reshape(1,1).strides, (4,4))
+
def test_repeat_discont(self, level=rlevel):
"""Ticket #352"""
a = np.arange(12).reshape(4,3)[:,2]
@@ -607,14 +641,22 @@ class TestRegression(TestCase):
np.take(x,[0,2],axis=1,out=b)
assert_array_equal(a,b)
+ def test_take_object_fail(self):
+ # Issue gh-3001
+ d = 123.
+ a = np.array([d, 1], dtype=object)
+ ref_d = sys.getrefcount(d)
+ try:
+ a.take([0, 100])
+ except IndexError:
+ pass
+ assert_(ref_d == sys.getrefcount(d))
+
def test_array_str_64bit(self, level=rlevel):
"""Ticket #501"""
s = np.array([1, np.nan],dtype=np.float64)
- errstate = np.seterr(all='raise')
- try:
+ with np.errstate(all='raise'):
sstr = np.array_str(s)
- finally:
- np.seterr(**errstate)
def test_frompyfunc_endian(self, level=rlevel):
"""Ticket #503"""
@@ -634,7 +676,7 @@ class TestRegression(TestCase):
def test_arr_transpose(self, level=rlevel):
"""Ticket #516"""
x = np.random.rand(*(2,)*16)
- y = x.transpose(range(16))
+ y = x.transpose(list(range(16)))
def test_string_mergesort(self, level=rlevel):
"""Ticket #540"""
@@ -681,10 +723,9 @@ class TestRegression(TestCase):
def test_unicode_scalar(self, level=rlevel):
"""Ticket #600"""
- import cPickle
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
- new = cPickle.loads(cPickle.dumps(el))
+ new = pickle.loads(pickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
@@ -758,6 +799,10 @@ class TestRegression(TestCase):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
+ # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.
+ # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
+ # 0-sized reshape itself is tested elsewhere.
+ @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
def test_copy_detection_corner_case2(self, level=rlevel):
"""Ticket #771: strides are not set correctly when reshaping 0-sized
arrays"""
@@ -1049,6 +1094,11 @@ class TestRegression(TestCase):
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
+ def test_nonscalar_item_method(self):
+ # Make sure that .item() fails gracefully when it should
+ a = np.arange(5)
+ assert_raises(ValueError, a.item)
+
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1','2','3']))
@@ -1059,7 +1109,7 @@ class TestRegression(TestCase):
for i in range(1,9) :
msg = 'unicode offset: %d chars'%i
t = np.dtype([('a','S%d'%i),('b','U2')])
- x = np.array([(asbytes('a'),u'b')], dtype=t)
+ x = np.array([(asbytes('a'),sixu('b'))], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
@@ -1067,14 +1117,11 @@ class TestRegression(TestCase):
def test_sign_for_complex_nan(self, level=rlevel):
"""Ticket 794."""
- olderr = np.seterr(invalid='ignore')
- try:
+ with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
- finally:
- np.seterr(**olderr)
def test_for_equal_names(self, level=rlevel):
"""Ticket #674"""
@@ -1116,8 +1163,7 @@ class TestRegression(TestCase):
def test_errobj_reference_leak(self, level=rlevel):
"""Ticket #955"""
- old_err = np.seterr(all="ignore")
- try:
+ with np.errstate(all="ignore"):
z = int(0)
p = np.int32(-1)
@@ -1127,8 +1173,6 @@ class TestRegression(TestCase):
gc.collect()
n_after = len(gc.get_objects())
assert_(n_before >= n_after, (n_before, n_after))
- finally:
- np.seterr(**old_err)
def test_void_scalar_with_titles(self, level=rlevel):
"""No ticket"""
@@ -1138,11 +1182,34 @@ class TestRegression(TestCase):
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
+ def test_void_scalar_constructor(self):
+ # Issue #1550
+
+ # Create test string data, construct void scalar from data and assert
+ # that void scalar contains original data.
+ test_string = np.array("test")
+ test_string_void_scalar = np.core.multiarray.scalar(
+ np.dtype(("V",test_string.dtype.itemsize)), test_string.tostring())
+
+ assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
+
+ # Create record scalar, construct from data and assert that
+ # reconstructed scalar is correct.
+ test_record = np.ones((), "i,i")
+ test_record_void_scalar = np.core.multiarray.scalar(
+ test_record.dtype, test_record.tostring())
+
+ assert_(test_record_void_scalar == test_record)
+
+ # Test pickle and unpickle of void and record scalars
+ assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
+ assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
+
def test_blasdot_uninitialized_memory(self):
"""Ticket #950"""
for m in [0, 1, 2]:
for n in [0, 1, 2]:
- for k in xrange(3):
+ for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
@@ -1161,10 +1228,10 @@ class TestRegression(TestCase):
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
- except ValueError, e:
+ except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
- except Exception, e:
+ except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
@@ -1175,16 +1242,16 @@ class TestRegression(TestCase):
try:
a = np.arange(sz)
self.assertTrue(np.size == sz)
- except ValueError, e:
+ except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
- except Exception, e:
+ except Exception as e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
"""Ticket #1058"""
- a = np.fromiter(range(10), dtype='b')
- b = np.fromiter(range(10), dtype='B')
+ a = np.fromiter(list(range(10)), dtype='b')
+ b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9])))
assert_(np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9])))
@@ -1243,21 +1310,24 @@ class TestRegression(TestCase):
def test_unicode_to_string_cast(self):
"""Ticket #1240."""
- a = np.array([[u'abc', u'\u03a3'], [u'asdf', u'erw']], dtype='U')
+ a = np.array(
+ [ [sixu('abc'), sixu('\u03a3')],
+ [sixu('asdf'), sixu('erw')]
+ ], dtype='U')
def fail():
b = np.array(a, 'S4')
self.assertRaises(UnicodeEncodeError, fail)
def test_mixed_string_unicode_array_creation(self):
- a = np.array(['1234', u'123'])
+ a = np.array(['1234', sixu('123')])
assert_(a.itemsize == 16)
- a = np.array([u'123', '1234'])
+ a = np.array([sixu('123'), '1234'])
assert_(a.itemsize == 16)
- a = np.array(['1234', u'123', '12345'])
+ a = np.array(['1234', sixu('123'), '12345'])
assert_(a.itemsize == 20)
- a = np.array([u'123', '1234', u'12345'])
+ a = np.array([sixu('123'), '1234', sixu('12345')])
assert_(a.itemsize == 20)
- a = np.array([u'123', '1234', u'1234'])
+ a = np.array([sixu('123'), '1234', sixu('1234')])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
@@ -1290,16 +1360,20 @@ class TestRegression(TestCase):
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
- """Ticket #1259"""
- z = np.array([-1j], '<c8')
- x = z[0] # always native-endian
- y = x.byteswap()
- if x.dtype.byteorder == z.dtype.byteorder:
- # little-endian machine
- assert_equal(x, np.fromstring(y.tostring(), dtype='>c8'))
- else:
- # big-endian machine
- assert_equal(x, np.fromstring(y.tostring(), dtype='<c8'))
+ """Ticket #1259 and gh-441"""
+ for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
+ z = np.array([2.2-1.1j], dtype)
+ x = z[0] # always native-endian
+ y = x.byteswap()
+ if x.dtype.byteorder == z.dtype.byteorder:
+ # little-endian machine
+ assert_equal(x, np.fromstring(y.tostring(), dtype=dtype.newbyteorder()))
+ else:
+ # big-endian machine
+ assert_equal(x, np.fromstring(y.tostring(), dtype=dtype))
+ # double check real and imaginary parts:
+ assert_equal(x.real, y.real.byteswap())
+ assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
"""Ticket #1299"""
@@ -1332,12 +1406,9 @@ class TestRegression(TestCase):
min = np.array([np.iinfo(t).min])
min //= -1
- old_err = np.seterr(divide="ignore")
- try:
+ with np.errstate(divide="ignore"):
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
- finally:
- np.seterr(**old_err)
def test_buffer_hashlib(self):
try:
@@ -1363,8 +1434,8 @@ class TestRegression(TestCase):
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
- a = np.fromiter(range(10), dtype='b')
- b = np.fromiter(range(10), dtype='B')
+ a = np.fromiter(list(range(10)), dtype='b')
+ b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9])))
assert_(np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9])))
@@ -1378,14 +1449,17 @@ class TestRegression(TestCase):
and not issubclass(x, np.timedelta64))]
a = np.array([], dtypes[0])
failures = []
- for x in dtypes:
- b = a.astype(x)
- for y in dtypes:
- c = a.astype(y)
- try:
- np.dot(b, c)
- except TypeError, e:
- failures.append((x, y))
+ # ignore complex warnings
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore', np.ComplexWarning)
+ for x in dtypes:
+ b = a.astype(x)
+ for y in dtypes:
+ c = a.astype(y)
+ try:
+ np.dot(b, c)
+ except TypeError as e:
+ failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
@@ -1474,13 +1548,9 @@ class TestRegression(TestCase):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
- warn_ctx = WarningManager()
- warn_ctx.__enter__()
- try:
+ with warnings.catch_warnings():
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
- finally:
- warn_ctx.__exit__()
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
@@ -1500,7 +1570,7 @@ class TestRegression(TestCase):
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
- ra = np.fromiter(((i*3, i*2) for i in xrange(10)), dtype='i8,f8')
+ ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
rep = repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
@@ -1649,6 +1719,13 @@ class TestRegression(TestCase):
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
+ def test_alignment_update(self):
+ """Check that alignment flag is updated on stride setting"""
+ a = np.arange(10)
+ assert_(a.flags.aligned)
+ a.strides = 3
+ assert_(not a.flags.aligned)
+
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
@@ -1762,7 +1839,7 @@ class TestRegression(TestCase):
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
- a = np.array([u'abcd'])
+ a = np.array([sixu('abcd')])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
@@ -1799,5 +1876,34 @@ class TestRegression(TestCase):
a = np.recarray((2, ), dtype)
assert_raises(TypeError, np.searchsorted, a, 1)
+ def test_complex64_alignment(self):
+ # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
+ dtt = np.complex64
+ arr = np.arange(10, dtype=dtt)
+ # 2D array
+ arr2 = np.reshape(arr, (2, 5))
+ # Fortran write followed by (C or F) read caused bus error
+ data_str = arr2.tostring('F')
+ data_back = np.ndarray(arr2.shape,
+ arr2.dtype,
+ buffer=data_str,
+ order='F')
+ assert_array_equal(arr2, data_back)
+
+ def test_structured_count_nonzero(self):
+ arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
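+ # the single remaining element is all zeros, so the count must be 0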
+ count = np.count_nonzero(arr)
+ assert_equal(count, 0)
+
+ def test_copymodule_preserves_f_contiguity(self):
+ a = np.empty((2, 2), order='F')
+ b = copy.copy(a)
+ c = copy.deepcopy(a)
+ assert_(b.flags.fortran)
+ assert_(b.flags.f_contiguous)
+ assert_(c.flags.fortran)
+ assert_(c.flags.f_contiguous)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 24b5eae24..3e1aaef3b 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -1,5 +1,8 @@
+from __future__ import division, absolute_import, print_function
+
import sys
from numpy.testing import *
+from numpy.testing.utils import gen_alignment_data
import numpy as np
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
@@ -42,9 +45,40 @@ class TestTypes(TestCase):
assert_equal(a,b)
+class TestBaseMath(TestCase):
+ def test_blocked(self):
+ # Test alignment offsets for SIMD instructions.
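+ # gen_alignment_data (numpy.testing.utils) yields (out, inp1, inp2, msg)
+ # array combinations whose offsets exercise the aligned and unaligned
+ # paths of the blocked SIMD loops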
+ for dt in [np.float32, np.float64]:
+ for out, inp1, inp2, msg in gen_alignment_data(dtype=dt,
+ type='binary',
+ max_size=12):
+ exp1 = np.ones_like(inp1)
+ inp1[...] = np.ones_like(inp1)
+ inp2[...] = np.zeros_like(inp2)
+ assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
+ assert_almost_equal(np.add(inp1, 1), exp1 + 1, err_msg=msg)
+ assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
+
+ np.add(inp1, inp2, out=out)
+ assert_almost_equal(out, exp1, err_msg=msg)
+
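+ # refill inp2 with strictly positive values so that the reciprocal
+ # and division checks below stay finite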
+ inp2[...] += np.arange(inp2.size, dtype=dt) + 1
+ assert_almost_equal(np.square(inp2),
+ np.multiply(inp2, inp2), err_msg=msg)
+ assert_almost_equal(np.reciprocal(inp2),
+ np.divide(1, inp2), err_msg=msg)
+
+ inp1[...] = np.ones_like(inp1)
+ inp2[...] = np.zeros_like(inp2)
+ np.add(inp1, 1, out=out)
+ assert_almost_equal(out, exp1 + 1, err_msg=msg)
+ np.add(1, inp2, out=out)
+ assert_almost_equal(out, exp1, err_msg=msg)
+
+
class TestPower(TestCase):
def test_small_types(self):
- for t in [np.int8, np.int16]:
+ for t in [np.int8, np.int16, np.float16]:
a = t(3)
b = a ** 4
assert_(b == 81, "error with %r: got %r" % (t,b))
@@ -58,12 +92,25 @@ class TestPower(TestCase):
assert_(b == 6765201, msg)
else:
assert_almost_equal(b, 6765201, err_msg=msg)
-
+ def test_mixed_types(self):
+ typelist = [np.int8,np.int16,np.float16,
+ np.float32,np.float64,np.int8,
+ np.int16,np.int32,np.int64]
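+ # 3 ** 2 must equal 9 whatever the promoted type; integer results
+ # are compared exactly, floating point ones approximately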
+ for t1 in typelist:
+ for t2 in typelist:
+ a = t1(3)
+ b = t2(2)
+ result = a**b
+ msg = ("error with %r and %r:"
+ "got %r, expected %r") % (t1, t2, result, 9)
+ if np.issubdtype(np.dtype(result), np.integer):
+ assert_(result == 9, msg)
+ else:
+ assert_almost_equal(result, 9, err_msg=msg)
class TestComplexDivision(TestCase):
def test_zero_division(self):
- err = np.seterr(all="ignore")
- try:
+ with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
a = t(0.0)
b = t(1.0)
@@ -78,8 +125,6 @@ class TestComplexDivision(TestCase):
assert_(np.isnan(b/a))
b = t(0.)
assert_(np.isnan(b/a))
- finally:
- np.seterr(**err)
class TestConversion(TestCase):
@@ -88,10 +133,10 @@ class TestConversion(TestCase):
li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
for T in [None, np.float64, np.int64]:
a = np.array(l,dtype=T)
- assert_equal(map(int,a), li)
+ assert_equal([int(_m) for _m in a], li)
a = np.array(l[:3], dtype=np.uint64)
- assert_equal(map(int,a), li[:3])
+ assert_equal([int(_m) for _m in a], li[:3])
#class TestRepr(TestCase):
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
new file mode 100644
index 000000000..8d0f27182
--- /dev/null
+++ b/numpy/core/tests/test_scalarprint.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+""" Test printing of scalar types.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+from numpy.testing import TestCase, assert_, run_module_suite
+
+
+class TestRealScalars(TestCase):
+ def test_str(self):
+ svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
+ styps = [np.float16, np.float32, np.float64, np.longdouble]
+ actual = [str(f(c)) for c in svals for f in styps]
+ wanted = [
+ '0.0', '0.0', '0.0', '0.0',
+ '-0.0', '-0.0', '-0.0', '-0.0',
+ '1.0', '1.0', '1.0', '1.0',
+ '-1.0', '-1.0', '-1.0', '-1.0',
+ 'inf', 'inf', 'inf', 'inf',
+ '-inf', '-inf', '-inf', '-inf',
+ 'nan', 'nan', 'nan', 'nan']
+
+ for res, val in zip(actual, wanted):
+ assert_(res == val)
+
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index b02ee84a5..8cbcfede3 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -1,71 +1,84 @@
+from __future__ import division, absolute_import, print_function
+
import warnings
import numpy as np
from numpy.testing import (TestCase, assert_, assert_raises, assert_array_equal,
assert_equal, run_module_suite)
from numpy.core import (array, arange, atleast_1d, atleast_2d, atleast_3d,
vstack, hstack, newaxis, concatenate)
+from numpy.compat import long
class TestAtleast1d(TestCase):
def test_0D_array(self):
- a = array(1); b = array(2);
- res=map(atleast_1d,[a,b])
+ a = array(1)
+ b = array(2)
+ res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1]),array([2])]
- assert_array_equal(res,desired)
+ assert_array_equal(res, desired)
def test_1D_array(self):
- a = array([1,2]); b = array([2,3]);
- res=map(atleast_1d,[a,b])
- desired = [array([1,2]),array([2,3])]
- assert_array_equal(res,desired)
+ a = array([1, 2])
+ b = array([2, 3])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [array([1, 2]),array([2, 3])]
+ assert_array_equal(res, desired)
def test_2D_array(self):
- a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
- res=map(atleast_1d,[a,b])
- desired = [a,b]
- assert_array_equal(res,desired)
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
def test_3D_array(self):
- a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
- a = array([a,a]);b = array([b,b]);
- res=map(atleast_1d,[a,b])
- desired = [a,b]
- assert_array_equal(res,desired)
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ a = array([a, a])
+ b = array([b, b])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
def test_r1array(self):
""" Test to make sure equivalent Travis O's r1array function
"""
assert_(atleast_1d(3).shape == (1,))
assert_(atleast_1d(3j).shape == (1,))
- assert_(atleast_1d(3L).shape == (1,))
+ assert_(atleast_1d(long(3)).shape == (1,))
assert_(atleast_1d(3.0).shape == (1,))
assert_(atleast_1d([[2,3],[4,5]]).shape == (2,2))
class TestAtleast2d(TestCase):
def test_0D_array(self):
- a = array(1); b = array(2);
- res=map(atleast_2d,[a,b])
- desired = [array([[1]]),array([[2]])]
- assert_array_equal(res,desired)
+ a = array(1)
+ b = array(2)
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [array([[1]]), array([[2]])]
+ assert_array_equal(res, desired)
def test_1D_array(self):
- a = array([1,2]); b = array([2,3]);
- res=map(atleast_2d,[a,b])
- desired = [array([[1,2]]),array([[2,3]])]
- assert_array_equal(res,desired)
+ a = array([1, 2])
+ b = array([2, 3])
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [array([[1, 2]]), array([[2, 3]])]
+ assert_array_equal(res, desired)
def test_2D_array(self):
- a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
- res=map(atleast_2d,[a,b])
- desired = [a,b]
- assert_array_equal(res,desired)
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
def test_3D_array(self):
- a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
- a = array([a,a]);b = array([b,b]);
- res=map(atleast_2d,[a,b])
- desired = [a,b]
- assert_array_equal(res,desired)
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ a = array([a, a])
+ b = array([b, b])
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
def test_r2array(self):
""" Test to make sure equivalent Travis O's r2array function
@@ -77,46 +90,54 @@ class TestAtleast2d(TestCase):
class TestAtleast3d(TestCase):
def test_0D_array(self):
- a = array(1); b = array(2);
- res=map(atleast_3d,[a,b])
- desired = [array([[[1]]]),array([[[2]]])]
- assert_array_equal(res,desired)
+ a = array(1)
+ b = array(2)
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [array([[[1]]]), array([[[2]]])]
+ assert_array_equal(res, desired)
def test_1D_array(self):
- a = array([1,2]); b = array([2,3]);
- res=map(atleast_3d,[a,b])
- desired = [array([[[1],[2]]]),array([[[2],[3]]])]
- assert_array_equal(res,desired)
+ a = array([1, 2])
+ b = array([2, 3])
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [array([[[1], [2]]]), array([[[2], [3]]])]
+ assert_array_equal(res, desired)
def test_2D_array(self):
- a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
- res=map(atleast_3d,[a,b])
- desired = [a[:,:,newaxis],b[:,:,newaxis]]
- assert_array_equal(res,desired)
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [a[:,:,newaxis], b[:,:,newaxis]]
+ assert_array_equal(res, desired)
def test_3D_array(self):
- a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
- a = array([a,a]);b = array([b,b]);
- res=map(atleast_3d,[a,b])
- desired = [a,b]
- assert_array_equal(res,desired)
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ a = array([a, a])
+ b = array([b, b])
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
class TestHstack(TestCase):
def test_0D_array(self):
- a = array(1); b = array(2);
+ a = array(1)
+ b = array(2)
res=hstack([a,b])
desired = array([1,2])
assert_array_equal(res,desired)
def test_1D_array(self):
- a = array([1]); b = array([2]);
+ a = array([1])
+ b = array([2])
res=hstack([a,b])
desired = array([1,2])
assert_array_equal(res,desired)
def test_2D_array(self):
- a = array([[1],[2]]); b = array([[1],[2]]);
+ a = array([[1],[2]])
+ b = array([[1],[2]])
res=hstack([a,b])
desired = array([[1,1],[2,2]])
assert_array_equal(res,desired)
@@ -124,32 +145,36 @@ class TestHstack(TestCase):
class TestVstack(TestCase):
def test_0D_array(self):
- a = array(1); b = array(2);
+ a = array(1)
+ b = array(2)
res=vstack([a,b])
desired = array([[1],[2]])
assert_array_equal(res,desired)
def test_1D_array(self):
- a = array([1]); b = array([2]);
+ a = array([1])
+ b = array([2])
res=vstack([a,b])
desired = array([[1],[2]])
assert_array_equal(res,desired)
def test_2D_array(self):
- a = array([[1],[2]]); b = array([[1],[2]]);
+ a = array([[1],[2]])
+ b = array([[1],[2]])
res=vstack([a,b])
desired = array([[1],[2],[1],[2]])
assert_array_equal(res,desired)
def test_2D_array2(self):
- a = array([1,2]); b = array([1,2]);
+ a = array([1,2])
+ b = array([1,2])
res=vstack([a,b])
desired = array([[1,2],[1,2]])
assert_array_equal(res,desired)
def test_concatenate_axis_None():
a = np.arange(4, dtype=np.float64).reshape((2,2))
- b = range(3)
+ b = list(range(3))
c = ['x']
r = np.concatenate((a, a), axis=None)
assert_equal(r.dtype, a.dtype)
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 57fd66892..eadb767ed 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1,9 +1,13 @@
+from __future__ import division, absolute_import, print_function
+
import sys
import numpy as np
from numpy.testing import *
import numpy.core.umath_tests as umt
+import numpy.core.operand_flag_tests as opflag_tests
from numpy.compat import asbytes
+from numpy.core.test_rational import *
class TestUfunc(TestCase):
def test_pickle(self):
@@ -12,13 +16,14 @@ class TestUfunc(TestCase):
def test_pickle_withstring(self):
import pickle
- astring = asbytes("cnumpy.core\n_ufunc_reconstruct\np0\n(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
+ astring = asbytes("cnumpy.core\n_ufunc_reconstruct\np0\n"
+ "(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert pickle.loads(astring) is np.cos
def test_reduceat_shifting_sum(self) :
L = 6
x = np.arange(L)
- idx = np.array(zip(np.arange(L-2), np.arange(L-2)+2)).ravel()
+ idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
assert_array_equal(np.add.reduceat(x,idx)[::2], [1,3,5,7])
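The shifted-index trick above relies on reduceat semantics: output slot i is the sum over x[idx[i]:idx[i+1]] when the indices increase, and just x[idx[i]] otherwise, so taking every other slot of an interleaved index array yields overlapping pair sums. A smaller worked instance of the same pattern:

>>> x = np.arange(6)
>>> np.add.reduceat(x, [0, 2, 1, 3])[::2]   # x[0]+x[1], x[1]+x[2]
array([1, 3])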
def test_generic_loops(self) :
@@ -314,6 +319,8 @@ class TestUfunc(TestCase):
def test_inner1d(self):
a = np.arange(6).reshape((2,3))
assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1))
+ a = np.arange(6)
+ assert_array_equal(umt.inner1d(a,a), np.sum(a*a))
def test_broadcast(self):
msg = "broadcast"
@@ -423,6 +430,13 @@ class TestUfunc(TestCase):
w = np.arange(300,324).reshape((2,3,4))
assert_array_equal(umt.innerwt(a,b,w), np.sum(a*b*w,axis=-1))
+ def test_innerwt_empty(self):
+ """Test generalized ufunc with zero-sized operands"""
+ a = np.array([], dtype='f8')
+ b = np.array([], dtype='f8')
+ w = np.array([], dtype='f8')
+ assert_array_equal(umt.innerwt(a,b,w), np.sum(a*b*w,axis=-1))
+
def test_matrix_multiply(self):
self.compare_matrix_multiply_results(np.long)
self.compare_matrix_multiply_results(np.double)
@@ -438,7 +452,7 @@ class TestUfunc(TestCase):
ret = ()
base = permute_n(n-1)
for perm in base:
- for i in xrange(n):
+ for i in range(n):
new = perm + [n-1]
new[n-1] = new[i]
new[i] = n-1
@@ -552,12 +566,25 @@ class TestUfunc(TestCase):
assert_equal(np.max(3, axis=0), 3)
assert_equal(np.min(2.5, axis=0), 2.5)
+ # Check scalar behaviour for ufuncs without an identity
+ assert_equal(np.power.reduce(3), 3)
+
# Make sure that scalars are coming out from this operation
assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
+ # check if scalars/0-d arrays get cast
+ assert_(type(np.any(0, axis=0)) is np.bool_)
+
+ # assert that 0-d arrays get wrapped
+ class MyArray(np.ndarray):
+ pass
+ a = np.array(1).view(MyArray)
+ assert_(type(np.any(a)) is MyArray)
+
+
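Two behaviours are pinned down by the new assertions: reducing a scalar with a ufunc that has no identity (such as np.power) hands the single element back unchanged, and 0-d results go through the subclass wrapping machinery, so views of ndarray subclasses keep their type. For the scalar cases, roughly:

>>> np.power.reduce(3)   # no identity needed for a single element
3
>>> np.any(0, axis=0)    # plain scalar input is cast, result is np.bool_
False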
def test_casting_out_param(self):
# Test that it's possible to do casts on output
a = np.ones((200,100), np.int64)
@@ -760,5 +787,83 @@ class TestUfunc(TestCase):
assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
assert_array_equal(a, [4, 5, 6])
+ def test_ufunc_custom_out(self):
+ # Test ufunc with built in input types and custom output type
+
+ a = np.array([0, 1, 2], dtype='i8')
+ b = np.array([0, 1, 2], dtype='i8')
+ c = np.empty(3, dtype=rational)
+
+ # Output must be specified so numpy knows what
+ # ufunc signature to look for
+ result = test_add(a, b, c)
+ assert_equal(result, np.array([0, 2, 4], dtype=rational))
+
+ # no output type should raise TypeError
+ assert_raises(TypeError, test_add, a, b)
+
+ def test_operand_flags(self):
+ a = np.arange(16, dtype='l').reshape(4,4)
+ b = np.arange(9, dtype='l').reshape(3,3)
+ opflag_tests.inplace_add(a[:-1,:-1], b)
+ assert_equal(a, np.array([[0,2,4,3],[7,9,11,7],
+ [14,16,18,11],[12,13,14,15]], dtype='l'))
+
+ a = np.array(0)
+ opflag_tests.inplace_add(a, 3)
+ assert_equal(a, 3)
+ opflag_tests.inplace_add(a, [3, 4])
+ assert_equal(a, 10)
+
+ def test_struct_ufunc(self):
+ import numpy.core.struct_ufunc_test as struct_ufunc
+
+ a = np.array([(1,2,3)], dtype='u8,u8,u8')
+ b = np.array([(1,2,3)], dtype='u8,u8,u8')
+
+ result = struct_ufunc.add_triplet(a, b)
+ assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
+
+ def test_custom_ufunc(self):
+ a = np.array([rational(1,2), rational(1,3), rational(1,4)],
+ dtype=rational);
+ b = np.array([rational(1,2), rational(1,3), rational(1,4)],
+ dtype=rational);
+
+ result = test_add_rationals(a, b)
+ expected = np.array([rational(1), rational(2,3), rational(1,2)],
+ dtype=rational);
+ assert_equal(result, expected);
+
+ def test_custom_array_like(self):
+ class MyThing(object):
+ __array_priority__ = 1000
+
+ rmul_count = 0
+ getitem_count = 0
+
+ def __init__(self, shape):
+ self.shape = shape
+
+ def __len__(self):
+ return self.shape[0]
+
+ def __getitem__(self, i):
+ MyThing.getitem_count += 1
+ if not isinstance(i, tuple):
+ i = (i,)
+ if len(i) > len(self.shape):
+ raise IndexError("boo")
+
+ return MyThing(self.shape[len(i):])
+
+ def __rmul__(self, other):
+ MyThing.rmul_count += 1
+ return self
+
+ np.float64(5)*MyThing((3, 3))
+ assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
+ assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
+
if __name__ == "__main__":
run_module_suite()
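The MyThing test at the end of this file encodes the deferral contract for array-likes: when the right operand carries a higher __array_priority__, the scalar's __mul__ returns NotImplemented and Python dispatches to __rmul__, with at most a bounded number of __getitem__ probes along the way. A stripped-down sketch of the same deferral, using a hypothetical Deferring class:

>>> class Deferring(object):
...     __array_priority__ = 1000
...     def __rmul__(self, other):
...         return "deferred"
>>> np.float64(5) * Deferring()
'deferred'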
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 892b1fae6..b2d47d052 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1,7 +1,10 @@
+from __future__ import division, absolute_import, print_function
+
import sys
import platform
from numpy.testing import *
+from numpy.testing.utils import gen_alignment_data
import numpy.core.umath as ncu
import numpy as np
@@ -20,6 +23,19 @@ class _FilterInvalids(object):
np.seterr(**self.olderr)
+class TestConstants(TestCase):
+ def test_pi(self):
+ assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
+
+
+ def test_e(self):
+ assert_allclose(ncu.e, 2.718281828459045, 1e-15)
+
+
+ def test_euler_gamma(self):
+ assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
+
+
class TestDivision(TestCase):
def test_division_int(self):
# int division should follow Python
@@ -44,8 +60,7 @@ class TestDivision(TestCase):
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_zero_division_complex(self):
- err = np.seterr(invalid="ignore", divide="ignore")
- try:
+ with np.errstate(invalid="ignore", divide="ignore"):
x = np.array([0.0], dtype=np.complex128)
y = 1.0/x
assert_(np.isinf(y)[0])
@@ -57,8 +72,6 @@ class TestDivision(TestCase):
assert_(np.isinf(y)[0])
y = 0.0/x
assert_(np.isnan(y)[0])
- finally:
- np.seterr(**err)
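This is the first of many hunks below that replace the manual save/restore idiom around np.seterr with the np.errstate context manager, which reinstates the previous error state on exit even if the body raises. The two forms are equivalent; schematically:

# old idiom: save, then restore in a finally block
old = np.seterr(invalid="ignore")
try:
    y = np.array([0.0]) / np.array([0.0])
finally:
    np.seterr(**old)

# new idiom: restoration is automatic
with np.errstate(invalid="ignore"):
    y = np.array([0.0]) / np.array([0.0])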
def test_floor_division_complex(self):
# check that implementation is correct
@@ -85,6 +98,21 @@ class TestPower(TestCase):
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
+ for out, inp, msg in gen_alignment_data(dtype=np.float32,
+ type='unary'):
+ exp = [ncu.sqrt(i) for i in inp]
+ assert_almost_equal(inp**(0.5), exp, err_msg=msg)
+ np.sqrt(inp, out=out)
+ assert_equal(out, exp, err_msg=msg)
+
+ for out, inp, msg in gen_alignment_data(dtype=np.float64,
+ type='unary'):
+ exp = [ncu.sqrt(i) for i in inp]
+ assert_almost_equal(inp**(0.5), exp, err_msg=msg)
+ np.sqrt(inp, out=out)
+ assert_equal(out, exp, err_msg=msg)
+
+
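gen_alignment_data is a new helper in numpy.testing.utils; judging from its use here, it yields (out, inp, msg) triples whose buffers differ in offset and length, so the vectorized loops are exercised on both their aligned SIMD fast path and the unaligned fallback, with msg identifying the case on failure. The usage pattern, in sketch form:

for out, inp, msg in gen_alignment_data(dtype=np.float64, type='unary'):
    np.sqrt(inp, out=out)   # result must not depend on buffer alignment
    assert_equal(out, [ncu.sqrt(i) for i in inp], err_msg=msg)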
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
@@ -109,14 +137,11 @@ class TestPower(TestCase):
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
- err = np.seterr(invalid="ignore")
z = np.array([z], dtype=np.complex_)
- try:
+ with np.errstate(invalid="ignore"):
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
- finally:
- np.seterr(**err)
def test_power_zero(self):
# ticket #1271
@@ -143,9 +168,9 @@ class TestPower(TestCase):
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1+0.2j), cnan)
- def test_fast_power(self):
- x=np.array([1,2,3], np.int16)
- assert (x**2.00001).dtype is (x**2.0).dtype
+ def test_fast_power(self):
+ x = np.array([1,2,3], np.int16)
+ assert_((x**2.00001).dtype is (x**2.0).dtype)
class TestLog2(TestCase):
def test_log2_values(self) :
@@ -190,19 +215,16 @@ class TestLogAddExp2(_FilterInvalids):
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self) :
- err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
- try:
+ with np.errstate(invalid='ignore'):
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
- finally:
- np.seterr(**err)
def test_nan(self):
assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
@@ -256,19 +278,16 @@ class TestLogAddExp(_FilterInvalids):
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
def test_inf(self) :
- err = np.seterr(invalid='ignore')
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
- try:
+ with np.errstate(invalid='ignore'):
for dt in ['f','d','g'] :
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
- finally:
- np.seterr(**err)
def test_nan(self):
assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
@@ -297,19 +316,15 @@ class TestHypot(TestCase, object):
def assert_hypot_isnan(x, y):
- err = np.seterr(invalid='ignore')
- try:
- assert_(np.isnan(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
- finally:
- np.seterr(**err)
+ with np.errstate(invalid='ignore'):
+ assert_(np.isnan(ncu.hypot(x, y)),
+ "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
def assert_hypot_isinf(x, y):
- err = np.seterr(invalid='ignore')
- try:
- assert_(np.isinf(ncu.hypot(x, y)), "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
- finally:
- np.seterr(**err)
+ with np.errstate(invalid='ignore'):
+ assert_(np.isinf(ncu.hypot(x, y)),
+ "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
class TestHypotSpecialValues(TestCase):
@@ -317,7 +332,7 @@ class TestHypotSpecialValues(TestCase):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
- def test_nan_outputs(self):
+ def test_nan_outputs2(self):
assert_hypot_isinf(np.nan, np.inf)
assert_hypot_isinf(np.inf, np.nan)
assert_hypot_isinf(np.inf, 0)
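These assertions follow C99 (and IEEE 754), which define hypot(±inf, y) as +inf even when the other argument is NaN, so an infinite rather than NaN result is required here. A quick check:

>>> np.hypot(np.inf, np.nan)
inf
>>> np.hypot(np.nan, np.nan)
nan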
@@ -432,19 +447,13 @@ class TestLdexp(TestCase):
self._check_ldexp('i')
self._check_ldexp('l')
- @dec.knownfailureif(sys.platform == 'win32' and sys.version_info < (2, 6),
- "python.org < 2.6 binaries have broken ldexp in the "
- "C runtime")
def test_ldexp_overflow(self):
# silence warning emitted on overflow
- err = np.seterr(over="ignore")
- try:
+ with np.errstate(over="ignore"):
imax = np.iinfo(np.dtype('l')).max
imin = np.iinfo(np.dtype('l')).min
assert_equal(ncu.ldexp(2., imax), np.inf)
assert_equal(ncu.ldexp(2., imin), 0)
- finally:
- np.seterr(**err)
class TestMaximum(_FilterInvalids):
@@ -648,15 +657,57 @@ class TestSign(TestCase):
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
- olderr = np.seterr(invalid='ignore')
- try:
+ with np.errstate(invalid='ignore'):
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
- finally:
- np.seterr(**olderr)
+
+
+class TestMinMax(TestCase):
+ def test_minmax_blocked(self):
+ "simd tests on max/min"
+ for dt in [np.float32, np.float64]:
+ for out, inp, msg in gen_alignment_data(dtype=dt, type='unary',
+ max_size=17):
+ for i in range(inp.size):
+ inp[:] = np.arange(inp.size, dtype=dt)
+ inp[i] = np.nan
+ self.assertTrue(np.isnan(inp.max()),
+ msg=repr(inp) + '\n' + msg)
+ self.assertTrue(np.isnan(inp.min()),
+ msg=repr(inp) + '\n' + msg)
+
+ inp[i] = 1e10
+ assert_equal(inp.max(), 1e10, err_msg=msg)
+ inp[i] = -1e10
+ assert_equal(inp.min(), -1e10, err_msg=msg)
+
+
+
+class TestAbsolute(TestCase):
+ def test_abs_blocked(self):
+ "simd tests on abs"
+ for dt in [np.float32, np.float64]:
+ for out, inp, msg in gen_alignment_data(dtype=dt, type='unary',
+ max_size=17):
+ tgt = [ncu.absolute(i) for i in inp]
+ np.absolute(inp, out=out)
+ assert_equal(out, tgt, err_msg=msg)
+ self.assertTrue((out >= 0).all())
+
+ # will throw invalid flag depending on compiler optimizations
+ with np.errstate(invalid='ignore'):
+ for v in [np.nan, -np.inf, np.inf]:
+ for i in range(inp.size):
+ d = np.arange(inp.size, dtype=dt)
+ inp[:] = -d
+ inp[i] = v
+ d[i] = -v if v == -np.inf else v
+ assert_array_equal(np.abs(inp), d, err_msg=msg)
+ np.abs(inp, out=out)
+ assert_array_equal(out, d, err_msg=msg)
class TestSpecialMethods(TestCase):
@@ -901,12 +952,6 @@ class TestComplexFunctions(object):
def test_against_cmath(self):
import cmath, sys
- # cmath.asinh is broken in some versions of Python, see
- # http://bugs.python.org/issue1381
- broken_cmath_asinh = False
- if sys.version_info < (2,6):
- broken_cmath_asinh = True
-
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
@@ -921,10 +966,6 @@ class TestComplexFunctions(object):
for p in points:
a = complex(func(np.complex_(p)))
b = cfunc(p)
-
- if cname == 'asinh' and broken_cmath_asinh:
- continue
-
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname,p,a,b))
def check_loss_of_precision(self, dtype):
@@ -1099,12 +1140,9 @@ def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
def test_copysign():
assert_(np.copysign(1, -1) == -1)
- old_err = np.seterr(divide="ignore")
- try:
+ with np.errstate(divide="ignore"):
assert_(1 / np.copysign(0, -1) < 0)
assert_(1 / np.copysign(0, 1) > 0)
- finally:
- np.seterr(**old_err)
assert_(np.signbit(np.copysign(np.nan, -1)))
assert_(not np.signbit(np.copysign(np.nan, 1)))
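Dividing by a signed zero is the standard way to observe its sign, since 0.0 == -0.0 compares equal; copysign and signbit expose the sign bit directly, even on NaN:

>>> np.copysign(3, -0.0)
-3.0
>>> np.signbit(np.copysign(np.nan, -1))
True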
@@ -1131,19 +1169,16 @@ def test_nextafterl():
return _test_nextafter(np.longdouble)
def _test_spacing(t):
- err = np.seterr(invalid='ignore')
one = t(1)
eps = np.finfo(t).eps
nan = t(np.nan)
inf = t(np.inf)
- try:
+ with np.errstate(invalid='ignore'):
assert_(np.spacing(one) == eps)
assert_(np.isnan(np.spacing(nan)))
assert_(np.isnan(np.spacing(inf)))
assert_(np.isnan(np.spacing(-inf)))
assert_(np.spacing(t(1e30)) != 0)
- finally:
- np.seterr(**err)
def test_spacing():
return _test_spacing(np.float64)
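np.spacing(x) is the gap between x and the next representable float of the same type, so at 1.0 it coincides with the machine epsilon, and it grows with magnitude, which is what the nonzero check at 1e30 verifies:

>>> np.spacing(1.0) == np.finfo(np.float64).eps
True
>>> np.spacing(1e30) != 0.0
True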
@@ -1228,14 +1263,28 @@ def test_reduceat():
np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)
assert_array_almost_equal(h1, h2)
+def test_reduceat_empty():
+ """Reduceat should work with empty arrays"""
+ indices = np.array([], 'i4')
+ x = np.array([], 'f8')
+ result = np.add.reduceat(x, indices)
+ assert_equal(result.dtype, x.dtype)
+ assert_equal(result.shape, (0,))
+ # Another case with a slightly different zero-sized shape
+ x = np.ones((5,2))
+ result = np.add.reduceat(x, [], axis=0)
+ assert_equal(result.dtype, x.dtype)
+ assert_equal(result.shape, (0, 2))
+ result = np.add.reduceat(x, [], axis=1)
+ assert_equal(result.dtype, x.dtype)
+ assert_equal(result.shape, (5, 0))
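The shape rule being pinned down: reduceat produces one output slot per index, so an empty index array yields length zero along the reduction axis while the remaining axes are preserved:

>>> np.add.reduceat(np.ones((5, 2)), [], axis=0).shape
(0, 2)
>>> np.add.reduceat(np.ones((5, 2)), [], axis=1).shape
(5, 0)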
def test_complex_nan_comparisons():
nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
complex(1, 1), complex(-1, -1), complex(0, 0)]
- olderr = np.seterr(invalid='ignore')
- try:
+ with np.errstate(invalid='ignore'):
for x in nans + fins:
x = np.array([x])
for y in nans + fins:
@@ -1249,8 +1298,6 @@ def test_complex_nan_comparisons():
assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y))
assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y))
assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
- finally:
- np.seterr(**olderr)
if __name__ == "__main__":
diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py
index 76fa29be8..ebf805c6d 100644
--- a/numpy/core/tests/test_umath_complex.py
+++ b/numpy/core/tests/test_umath_complex.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
import platform
@@ -12,12 +14,9 @@ import numpy as np
# At least on Windows the results of many complex functions are not conforming
# to the C99 standard. See ticket 1574.
# Ditto for Solaris (ticket 1642) and OS X on PowerPC.
-olderr = np.seterr(all='ignore')
-try:
+with np.errstate(all='ignore'):
functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
or (np.log(complex(np.NZERO, 0)).imag != np.pi))
-finally:
- np.seterr(**olderr)
# TODO: replace with a check on whether platform-provided C99 funcs are used
skip_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
@@ -76,52 +75,40 @@ class TestCexp(object):
# cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
def _check_ninf_inf(dummy):
msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
- err = np.seterr(invalid='ignore')
- try:
+ with np.errstate(invalid='ignore'):
z = f(np.array(np.complex(-np.inf, np.inf)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform %(z.real, z.imag))
- finally:
- np.seterr(**err)
yield _check_ninf_inf, None
# cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex.
def _check_inf_inf(dummy):
msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
- err = np.seterr(invalid='ignore')
- try:
+ with np.errstate(invalid='ignore'):
z = f(np.array(np.complex(np.inf, np.inf)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
- finally:
- np.seterr(**err)
yield _check_inf_inf, None
# cexp(-inf + nan i) is +-0 +- 0i
def _check_ninf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
- err = np.seterr(invalid='ignore')
- try:
+ with np.errstate(invalid='ignore'):
z = f(np.array(np.complex(-np.inf, np.nan)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
- finally:
- np.seterr(**err)
yield _check_ninf_nan, None
# cexp(inf + nan i) is +-inf + nan
def _check_inf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
- err = np.seterr(invalid='ignore')
- try:
+ with np.errstate(invalid='ignore'):
z = f(np.array(np.complex(np.inf, np.nan)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
- finally:
- np.seterr(**err)
yield _check_inf_nan, None
@@ -151,6 +138,7 @@ class TestClog(TestCase):
assert_almost_equal(y[i], y_r[i])
@platform_skip
+ @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_special_values(self):
xl = []
yl = []
@@ -161,30 +149,24 @@ class TestClog(TestCase):
# clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
# floating-point exception.
- err = np.seterr(divide='raise')
- try:
+ with np.errstate(divide='raise'):
x = np.array([np.NZERO], dtype=np.complex)
y = np.complex(-np.inf, np.pi)
self.assertRaises(FloatingPointError, np.log, x)
- np.seterr(divide='ignore')
+ with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
- finally:
- np.seterr(**err)
xl.append(x)
yl.append(y)
# clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
# floating-point exception.
- err = np.seterr(divide='raise')
- try:
+ with np.errstate(divide='raise'):
x = np.array([0], dtype=np.complex)
y = np.complex(-np.inf, 0)
self.assertRaises(FloatingPointError, np.log, x)
- np.seterr(divide='ignore')
+ with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
- finally:
- np.seterr(**err)
xl.append(x)
yl.append(y)
@@ -203,27 +185,21 @@ class TestClog(TestCase):
# clog(x + iNaN) returns NaN + iNaN and optionally raises the
# 'invalid' floating-point exception, for finite x.
- err = np.seterr(invalid='raise')
- try:
+ with np.errstate(invalid='raise'):
x = np.array([complex(1., np.nan)], dtype=np.complex)
y = np.complex(np.nan, np.nan)
#self.assertRaises(FloatingPointError, np.log, x)
- np.seterr(invalid='ignore')
+ with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
- finally:
- np.seterr(**err)
xl.append(x)
yl.append(y)
- err = np.seterr(invalid='raise')
- try:
+ with np.errstate(invalid='raise'):
x = np.array([np.inf + 1j * np.nan], dtype=np.complex)
#self.assertRaises(FloatingPointError, np.log, x)
- np.seterr(invalid='ignore')
+ with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
- finally:
- np.seterr(**err)
xl.append(x)
yl.append(y)
@@ -293,12 +269,9 @@ class TestClog(TestCase):
# clog(conj(z)) = conj(clog(z)).
xa = np.array(xl, dtype=np.complex)
ya = np.array(yl, dtype=np.complex)
- err = np.seterr(divide='ignore')
- try:
+ with np.errstate(divide='ignore'):
for i in range(len(xa)):
assert_almost_equal(np.log(np.conj(xa[i])), np.conj(np.log(xa[i])))
- finally:
- np.seterr(**err)
class TestCsqrt(object):
@@ -359,12 +332,9 @@ class TestCsqrt(object):
msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)"
z = np.sqrt(np.array(np.complex(-np.inf, np.nan)))
#Fixme: ugly workaround for isinf bug.
- err = np.seterr(invalid='ignore')
- try:
+ with np.errstate(invalid='ignore'):
if not (np.isnan(z.real) and np.isinf(z.imag)):
raise AssertionError(msgform % (z.real, z.imag))
- finally:
- np.seterr(**err)
yield _check_ninf_nan, None
@@ -397,7 +367,7 @@ class TestCpow(TestCase):
def test_scalar(self):
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
- lx = range(len(x))
+ lx = list(range(len(x)))
# Compute the values for complex type in python
p_r = [complex(x[i]) ** complex(y[i]) for i in lx]
# Substitute a result allowed by C99 standard
@@ -410,7 +380,7 @@ class TestCpow(TestCase):
def test_array(self):
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
- lx = range(len(x))
+ lx = list(range(len(x)))
# Compute the values for complex type in python
p_r = [complex(x[i]) ** complex(y[i]) for i in lx]
# Substitute a result allowed by C99 standard
@@ -477,7 +447,6 @@ class TestCabs(object):
return np.abs(np.complex(a, b))
xa = np.array(x, dtype=np.complex)
- ya = np.array(x, dtype=np.complex)
for i in range(len(xa)):
ref = g(x[i], y[i])
yield check_real_value, f, x[i], y[i], ref
@@ -556,16 +525,13 @@ def check_real_value(f, x1, y1, x, exact=True):
assert_almost_equal(f(z1), x)
def check_complex_value(f, x1, y1, x2, y2, exact=True):
- err = np.seterr(invalid='ignore')
z1 = np.array([complex(x1, y1)])
z2 = np.complex(x2, y2)
- try:
+ with np.errstate(invalid='ignore'):
if exact:
assert_equal(f(z1), z2)
else:
assert_almost_equal(f(z1), z2)
- finally:
- np.seterr(**err)
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_unicode.py b/numpy/core/tests/test_unicode.py
index 7b27076e8..b29fe2010 100644
--- a/numpy/core/tests/test_unicode.py
+++ b/numpy/core/tests/test_unicode.py
@@ -1,11 +1,25 @@
+from __future__ import division, absolute_import, print_function
+
import sys
from numpy.testing import *
from numpy.core import *
-from numpy.compat import asbytes
+from numpy.compat import asbytes, sixu
# Guess the UCS length for this python interpreter
-if sys.version_info[0] >= 3:
+if sys.version_info[:2] >= (3, 3):
+ # Python 3.3 uses a flexible string representation
+ ucs4 = False
+ def buffer_length(arr):
+ if isinstance(arr, unicode):
+ arr = str(arr)
+ return (sys.getsizeof(arr+"a") - sys.getsizeof(arr)) * len(arr)
+ v = memoryview(arr)
+ if v.shape is None:
+ return len(v) * v.itemsize
+ else:
+ return prod(v.shape) * v.itemsize
+elif sys.version_info[0] >= 3:
import array as _array
ucs4 = (_array.array('u').itemsize == 4)
def buffer_length(arr):
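Under PEP 393 (Python 3.3+) a str has no fixed per-character width, so the UCS2/UCS4 distinction disappears and the scalar branch above measures the buffer indirectly: appending one ASCII character grows sys.getsizeof by the string's current per-character width, and multiplying by len recovers the payload size. A sketch of the trick (exact sizes are CPython implementation details):

>>> import sys
>>> s = '\u0900' * 8   # stored two bytes per character on CPython >= 3.3
>>> sys.getsizeof(s + 'a') - sys.getsizeof(s)
2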
@@ -17,7 +31,7 @@ if sys.version_info[0] >= 3:
else:
return prod(v.shape) * v.itemsize
else:
- if len(buffer(u'u')) == 4:
+ if len(buffer(sixu('u'))) == 4:
ucs4 = True
else:
ucs4 = False
@@ -29,9 +43,9 @@ else:
# In both cases below we need to make sure that the byte swapped value (as
# UCS4) is still a valid unicode:
# Value that can be represented in UCS2 interpreters
-ucs2_value = u'\u0900'
+ucs2_value = sixu('\u0900')
# Value that cannot be represented in UCS2 interpreters (but can in UCS4)
-ucs4_value = u'\U00100900'
+ucs4_value = sixu('\U00100900')
############################################################
@@ -48,7 +62,7 @@ class create_zeros(object):
# Check the length of the data buffer
self.assertTrue(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
- self.assertTrue(ua_scalar == u'')
+ self.assertTrue(ua_scalar == sixu(''))
# Encode to ascii and double check
self.assertTrue(ua_scalar.encode('ascii') == asbytes(''))
# Check buffer lengths for scalars
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 367b08f49..961fa6012 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -40,8 +40,8 @@ in-place. For example::
We wrap it using:
->>> lib.foo_func.restype = None #doctest: +SKIP
->>> lib.foo.argtypes = [array_1d_double, c_int] #doctest: +SKIP
+>>> _lib.foo_func.restype = None #doctest: +SKIP
+>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
Then, we're ready to call ``foo_func``:
@@ -49,6 +49,8 @@ Then, we're ready to call ``foo_func``:
>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
"""
+from __future__ import division, absolute_import, print_function
+
__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
'c_intp', 'as_ctypes', 'as_array']
@@ -100,17 +102,11 @@ else:
from numpy.distutils.misc_util import get_shared_lib_extension
so_ext = get_shared_lib_extension()
libname_ext = [libname + so_ext]
- if sys.version[:3] >= '3.2':
- # For Python >= 3.2 a tag may be added to lib extension
- # (platform dependent). If we find such a tag, try both with
- # and without it.
- so_ext2 = get_shared_lib_extension(is_python_ext=True)
- if not so_ext2 == so_ext:
- libname_ext.insert(0, libname + so_ext2)
- if sys.platform == 'win32':
- libname_ext.insert(0, '%s.dll' % libname)
- elif sys.platform == 'darwin':
- libname_ext.insert(0, '%s.dylib' % libname)
+ # mac, windows and linux >= py3.2 shared library and loadable
+ # module have different extensions so try both
+ so_ext2 = get_shared_lib_extension(is_python_ext=True)
+ if not so_ext2 == so_ext:
+ libname_ext.insert(0, libname + so_ext2)
else:
libname_ext = [libname]
@@ -120,15 +116,16 @@ else:
else:
libdir = loader_path
- # Need to save exception when using Python 3k, see PEP 3110.
- exc = None
for ln in libname_ext:
- try:
- libpath = os.path.join(libdir, ln)
- return ctypes.cdll[libpath]
- except OSError, e:
- exc = e
- raise exc
+ libpath = os.path.join(libdir, ln)
+ if os.path.exists(libpath):
+ try:
+ return ctypes.cdll[libpath]
+ except OSError:
+ ## defective lib file
+ raise
+ ## if no successful return in the libname_ext loop:
+ raise OSError("no file with expected extension")
ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
'load_library')
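The rewritten loop checks each candidate filename for existence before handing it to ctypes, so a genuinely missing library now fails with one uniform OSError, while a present-but-defective file still propagates the ctypes error. Typical use, with a hypothetical library name:

>>> from numpy.ctypeslib import load_library
>>> _lib = load_library('mylib', __file__)   #doctest: +SKIP

This tries 'mylib' with the platform's shared-library suffix and, where it differs, the Python-extension-module suffix, relative to the directory containing the loader path.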
@@ -352,7 +349,7 @@ if ctypes is not None:
shape = []
ob = array_type
- while type(ob) == _ARRAY_TYPE:
+ while type(ob) is _ARRAY_TYPE:
shape.append(ob._length_)
ob = ob._type_
shape = tuple(shape)
diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py
index cdc5d45b6..b43e08b05 100644
--- a/numpy/distutils/__init__.py
+++ b/numpy/distutils/__init__.py
@@ -1,14 +1,16 @@
+from __future__ import division, absolute_import, print_function
+
import sys
if sys.version_info[0] < 3:
- from __version__ import version as __version__
+ from .__version__ import version as __version__
# Must import local ccompiler ASAP in order to get
# customized CCompiler.spawn effective.
- import ccompiler
- import unixccompiler
+ from . import ccompiler
+ from . import unixccompiler
- from info import __doc__
- from npy_pkg_config import *
+ from .info import __doc__
+ from .npy_pkg_config import *
try:
import __config__
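With absolute_import in force, a bare `import ccompiler` no longer resolves against the package directory, so every intra-package import must be spelled as an explicit relative import; the same mechanical rewrite recurs throughout numpy.distutils below. Schematically:

# implicit relative import: Python 2 only
import ccompiler

# explicit relative import: valid on Python 2 (with the __future__
# import above) and on Python 3
from . import ccompiler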
diff --git a/numpy/distutils/__version__.py b/numpy/distutils/__version__.py
index 06077f79c..969decbba 100644
--- a/numpy/distutils/__version__.py
+++ b/numpy/distutils/__version__.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
major = 0
minor = 4
micro = 0
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index e3b88af08..48f6f09b6 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import re
import os
import sys
@@ -14,7 +16,7 @@ from distutils.version import LooseVersion
from numpy.distutils import log
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
- quote_args, msvc_on_amd64
+ quote_args
from numpy.distutils.compat import get_exception
@@ -163,7 +165,7 @@ def CCompiler_compile(self, sources, output_dir=None, macros=None,
return []
# FIXME:RELATIVE_IMPORT
if sys.version_info[0] < 3:
- from fcompiler import FCompiler
+ from .fcompiler import FCompiler
else:
from numpy.distutils.fcompiler import FCompiler
if isinstance(self, FCompiler):
@@ -190,7 +192,7 @@ def CCompiler_compile(self, sources, output_dir=None, macros=None,
# build any sources in same order as they were originally specified
# especially important for fortran .f90 files using modules
if isinstance(self, FCompiler):
- objects_to_build = build.keys()
+ objects_to_build = list(build.keys())
for obj in objects:
if obj in objects_to_build:
src, ext = build[obj]
@@ -253,7 +255,7 @@ replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
def _compiler_to_string(compiler):
props = []
mx = 0
- keys = compiler.executables.keys()
+ keys = list(compiler.executables.keys())
for key in ['version','libraries','library_dirs',
'object_switch','compile_switch',
'include_dirs','define','undef','rpath','link_objects']:
@@ -401,7 +403,7 @@ def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
if not m:
return None
pos = m.end()
- while 1:
+ while True:
m = re.search(pat, version_string[pos:])
if not m:
return None
@@ -652,6 +654,3 @@ def split_quoted(s):
return words
ccompiler.split_quoted = split_quoted
##Fix distutils.util.split_quoted:
-
-# define DISTUTILS_USE_SDK when necessary to workaround distutils/msvccompiler.py bug
-msvc_on_amd64()
diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py
index f8f0884da..94a7185bb 100644
--- a/numpy/distutils/command/__init__.py
+++ b/numpy/distutils/command/__init__.py
@@ -1,7 +1,17 @@
"""distutils.command
Package containing implementation of all the standard Distutils
-commands."""
+commands.
+
+"""
+from __future__ import division, absolute_import, print_function
+
__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py
index fe40119ef..8e70b4552 100644
--- a/numpy/distutils/command/autodist.py
+++ b/numpy/distutils/command/autodist.py
@@ -1,4 +1,8 @@
-"""This module implements additional tests ala autoconf which can be useful."""
+"""This module implements additional tests ala autoconf which can be useful.
+
+"""
+from __future__ import division, absolute_import, print_function
+
# We put them here since they could be easily reused outside numpy.distutils
diff --git a/numpy/distutils/command/bdist_rpm.py b/numpy/distutils/command/bdist_rpm.py
index 60e9b5752..e24072ff4 100644
--- a/numpy/distutils/command/bdist_rpm.py
+++ b/numpy/distutils/command/bdist_rpm.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import sys
if 'setuptools' in sys.modules:
diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py
index 5d986570c..0577738bf 100644
--- a/numpy/distutils/command/build.py
+++ b/numpy/distutils/command/build.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import sys
from distutils.command.build import build as old_build
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index d9cfca73e..8b150d800 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -1,5 +1,6 @@
""" Modified version of build_clib that handles fortran source files.
"""
+from __future__ import division, absolute_import, print_function
import os
from glob import glob
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index f63d5249c..97bfa1613 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -1,5 +1,7 @@
""" Modified version of build_ext that handles fortran source files.
+
"""
+from __future__ import division, absolute_import, print_function
import os
import sys
@@ -231,11 +233,6 @@ class build_ext (old_build_ext):
# Build extensions
self.build_extensions()
- # Make sure that scons based extensions are complete.
- if self.inplace:
- cmd = self.reinitialize_command('scons')
- cmd.inplace = 1
- self.run_command('scons')
def swig_sources(self, sources):
# Do nothing. Swig sources have beed handled in build_src command.
@@ -300,8 +297,8 @@ class build_ext (old_build_ext):
else: # in case ext.language is c++, for instance
fcompiler = self._f90_compiler or self._f77_compiler
if fcompiler is not None:
- fcompiler.extra_f77_compile_args = ext.extra_f77_compile_args or []
- fcompiler.extra_f90_compile_args = ext.extra_f90_compile_args or []
+ fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(ext,'extra_f77_compile_args') else []
+ fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(ext,'extra_f90_compile_args') else []
cxx_compiler = self._cxx_compiler
# check for the availability of required compilers
diff --git a/numpy/distutils/command/build_py.py b/numpy/distutils/command/build_py.py
index 4c02e4136..c25fa227d 100644
--- a/numpy/distutils/command/build_py.py
+++ b/numpy/distutils/command/build_py.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
from distutils.command.build_py import build_py as old_build_py
from numpy.distutils.misc_util import is_string
@@ -7,7 +8,7 @@ class build_py(old_build_py):
def run(self):
build_src = self.get_finalized_command('build_src')
if build_src.py_modules_dict and self.packages is None:
- self.packages = build_src.py_modules_dict.keys ()
+ self.packages = list(build_src.py_modules_dict.keys ())
old_build_py.run(self)
def find_package_modules(self, package, package_dir):
@@ -21,7 +22,7 @@ class build_py(old_build_py):
def find_modules(self):
old_py_modules = self.py_modules[:]
- new_py_modules = filter(is_string, self.py_modules)
+ new_py_modules = [_m for _m in self.py_modules if is_string(_m)]
self.py_modules[:] = new_py_modules
modules = old_build_py.find_modules(self)
self.py_modules[:] = old_py_modules
diff --git a/numpy/distutils/command/build_scripts.py b/numpy/distutils/command/build_scripts.py
index 99134f202..c8b25fc71 100644
--- a/numpy/distutils/command/build_scripts.py
+++ b/numpy/distutils/command/build_scripts.py
@@ -1,5 +1,7 @@
""" Modified version of build_scripts that handles building scripts from functions.
+
"""
+from __future__ import division, absolute_import, print_function
from distutils.command.build_scripts import build_scripts as old_build_scripts
from numpy.distutils import log
diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py
index ae29aec0e..2a33e5175 100644
--- a/numpy/distutils/command/build_src.py
+++ b/numpy/distutils/command/build_src.py
@@ -1,5 +1,6 @@
""" Build swig, f2py, pyrex sources.
"""
+from __future__ import division, absolute_import, print_function
import os
import re
@@ -36,7 +37,7 @@ def subst_vars(target, source, d):
try:
ft = open(target, 'w')
try:
- for l in fs.readlines():
+ for l in fs:
m = var.search(l)
if m:
ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))
@@ -186,10 +187,10 @@ class build_src(build_ext.build_ext):
build_dir = self.get_package_dir('.'.join(d.split(os.sep)))
else:
build_dir = os.path.join(self.build_src,d)
- funcs = filter(lambda f:hasattr(f, '__call__'), files)
- files = filter(lambda f:not hasattr(f, '__call__'), files)
+ funcs = [f for f in files if hasattr(f, '__call__')]
+ files = [f for f in files if not hasattr(f, '__call__')]
for f in funcs:
- if f.func_code.co_argcount==1:
+ if f.__code__.co_argcount==1:
s = f(build_dir)
else:
s = f()
@@ -673,24 +674,21 @@ class build_src(build_ext.build_ext):
if typ is None:
typ = get_swig_target(source)
is_cpp = typ=='c++'
- if is_cpp:
- target_ext = '.cpp'
else:
typ2 = get_swig_target(source)
if typ2 is None:
log.warn('source %r does not define swig target, assuming %s swig target' \
% (source, typ))
- if is_cpp:
- target_ext = '.cpp'
elif typ!=typ2:
log.warn('expected %r but source %r defines %r swig target' \
% (typ, source, typ2))
if typ2=='c++':
log.warn('resetting swig target to c++ (some targets may have .c extension)')
is_cpp = True
- target_ext = '.cpp'
else:
log.warn('assuming that %r has c++ swig target' % (source))
+ if is_cpp:
+ target_ext = '.cpp'
target_file = os.path.join(target_dir,'%s_wrap%s' \
% (name, target_ext))
else:
@@ -769,9 +767,8 @@ def get_swig_target(source):
def get_swig_modulename(source):
f = open(source,'r')
- f_readlines = getattr(f,'xreadlines',f.readlines)
name = None
- for line in f_readlines():
+ for line in f:
m = _swig_module_name_match(line)
if m:
name = m.group('name')
@@ -796,8 +793,7 @@ _f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*
def get_f2py_modulename(source):
name = None
f = open(source)
- f_readlines = getattr(f,'xreadlines',f.readlines)
- for line in f_readlines():
+ for line in f:
m = _f2py_module_name_match(line)
if m:
if _f2py_user_module_name_match(line): # skip *__user__* names
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index 85a86990f..fd8a135fb 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -2,6 +2,7 @@
# try_compile call. try_run works but is untested for most of Fortran
# compilers (they must define linker_exe first).
# Pearu Peterson
+from __future__ import division, absolute_import, print_function
import os, signal
import warnings
@@ -339,7 +340,7 @@ int main ()
Arguments
---------
- funcs: seq
+ funcs : seq
list of functions to test
include_dirs : seq
list of header paths
diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py
index e7fee94df..bf776dd02 100644
--- a/numpy/distutils/command/config_compiler.py
+++ b/numpy/distutils/command/config_compiler.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from distutils.core import Command
from numpy.distutils import log
diff --git a/numpy/distutils/command/develop.py b/numpy/distutils/command/develop.py
index 167706671..1410ab2a0 100644
--- a/numpy/distutils/command/develop.py
+++ b/numpy/distutils/command/develop.py
@@ -1,7 +1,9 @@
""" Override the develop command from setuptools so we can ensure that our
generated files (from build_src or build_scripts) are properly converted to real
files with filenames.
+
"""
+from __future__ import division, absolute_import, print_function
from setuptools.command.develop import develop as old_develop
diff --git a/numpy/distutils/command/egg_info.py b/numpy/distutils/command/egg_info.py
index 687faf080..b7104de5b 100644
--- a/numpy/distutils/command/egg_info.py
+++ b/numpy/distutils/command/egg_info.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from setuptools.command.egg_info import egg_info as _egg_info
class egg_info(_egg_info):
diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py
index ad3cc507d..9dd8dede9 100644
--- a/numpy/distutils/command/install.py
+++ b/numpy/distutils/command/install.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
if 'setuptools' in sys.modules:
import setuptools.command.install as old_install_mod
@@ -62,7 +64,7 @@ class install(old_install):
f = open(self.record,'r')
lines = []
need_rewrite = False
- for l in f.readlines():
+ for l in f:
l = l.rstrip()
if ' ' in l:
need_rewrite = True
diff --git a/numpy/distutils/command/install_clib.py b/numpy/distutils/command/install_clib.py
index 638d4beac..662aa00bd 100644
--- a/numpy/distutils/command/install_clib.py
+++ b/numpy/distutils/command/install_clib.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
from distutils.core import Command
from distutils.ccompiler import new_compiler
diff --git a/numpy/distutils/command/install_data.py b/numpy/distutils/command/install_data.py
index 0a2e68ae1..996cf7e40 100644
--- a/numpy/distutils/command/install_data.py
+++ b/numpy/distutils/command/install_data.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
have_setuptools = ('setuptools' in sys.modules)
diff --git a/numpy/distutils/command/install_headers.py b/numpy/distutils/command/install_headers.py
index 58ace1064..84eb5f6da 100644
--- a/numpy/distutils/command/install_headers.py
+++ b/numpy/distutils/command/install_headers.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
from distutils.command.install_headers import install_headers as old_install_headers
diff --git a/numpy/distutils/command/scons.py b/numpy/distutils/command/scons.py
deleted file mode 100644
index d7bbec35e..000000000
--- a/numpy/distutils/command/scons.py
+++ /dev/null
@@ -1,589 +0,0 @@
-import os
-import sys
-import os.path
-from os.path import join as pjoin, dirname as pdirname
-
-from distutils.errors import DistutilsPlatformError
-from distutils.errors import DistutilsExecError, DistutilsSetupError
-
-from numpy.distutils.command.build_ext import build_ext as old_build_ext
-from numpy.distutils.ccompiler import CCompiler, new_compiler
-from numpy.distutils.fcompiler import FCompiler, new_fcompiler
-from numpy.distutils.exec_command import find_executable
-from numpy.distutils import log
-from numpy.distutils.misc_util import is_bootstrapping, get_cmd
-from numpy.distutils.misc_util import get_numpy_include_dirs as _incdir
-from numpy.distutils.compat import get_exception
-
-# A few notes:
-# - numscons is not mandatory to build numpy, so we cannot import it here.
-# Any numscons import has to happen once we check numscons is available and
-# is required for the build (call through setupscons.py or native numscons
-# build).
-def get_scons_build_dir():
- """Return the top path where everything produced by scons will be put.
-
- The path is relative to the top setup.py"""
- from numscons import get_scons_build_dir
- return get_scons_build_dir()
-
-def get_scons_pkg_build_dir(pkg):
- """Return the build directory for the given package (foo.bar).
-
- The path is relative to the top setup.py"""
- from numscons.core.utils import pkg_to_path
- return pjoin(get_scons_build_dir(), pkg_to_path(pkg))
-
-def get_scons_configres_dir():
- """Return the top path where everything produced by scons will be put.
-
- The path is relative to the top setup.py"""
- from numscons import get_scons_configres_dir
- return get_scons_configres_dir()
-
-def get_scons_configres_filename():
- """Return the top path where everything produced by scons will be put.
-
- The path is relative to the top setup.py"""
- from numscons import get_scons_configres_filename
- return get_scons_configres_filename()
-
-def get_scons_local_path():
- """This returns the full path where scons.py for scons-local is located."""
- from numscons import get_scons_path
- return get_scons_path()
-
-def _get_top_dir(pkg):
- # XXX: this mess is necessary because scons is launched per package, and
- # has no knowledge outside its build dir, which is package dependent. If
- # one day numscons does not launch one process/package, this will be
- # unnecessary.
- from numscons import get_scons_build_dir
- from numscons.core.utils import pkg_to_path
- scdir = pjoin(get_scons_build_dir(), pkg_to_path(pkg))
- n = scdir.count(os.sep)
- return os.sep.join([os.pardir for i in range(n+1)])
-
-def get_distutils_libdir(cmd, pkg):
- """Returns the path where distutils install libraries, relatively to the
- scons build directory."""
- return pjoin(_get_top_dir(pkg), cmd.build_lib)
-
-def get_distutils_clibdir(cmd, pkg):
- """Returns the path where distutils put pure C libraries."""
- return pjoin(_get_top_dir(pkg), cmd.build_clib)
-
-def get_distutils_install_prefix(pkg, inplace):
- """Returns the installation path for the current package."""
- from numscons.core.utils import pkg_to_path
- if inplace == 1:
- return pkg_to_path(pkg)
- else:
- install_cmd = get_cmd('install').get_finalized_command('install')
- return pjoin(install_cmd.install_libbase, pkg_to_path(pkg))
-
-def get_python_exec_invoc():
- """This returns the python executable from which this file is invocated."""
- # Do we need to take into account the PYTHONPATH, in a cross platform way,
- # that is the string returned can be executed directly on supported
- # platforms, and the sys.path of the executed python should be the same
- # than the caller ? This may not be necessary, since os.system is said to
- # take into accound os.environ. This actually also works for my way of
- # using "local python", using the alias facility of bash.
- return sys.executable
-
-def get_numpy_include_dirs(sconscript_path):
- """Return include dirs for numpy.
-
- The paths are relatively to the setup.py script path."""
- from numscons import get_scons_build_dir
- scdir = pjoin(get_scons_build_dir(), pdirname(sconscript_path))
- n = scdir.count(os.sep)
-
- dirs = _incdir()
- rdirs = []
- for d in dirs:
- rdirs.append(pjoin(os.sep.join([os.pardir for i in range(n+1)]), d))
- return rdirs
-
-def dirl_to_str(dirlist):
- """Given a list of directories, returns a string where the paths are
- concatenated by the path separator.
-
- example: ['foo/bar', 'bar/foo'] will return 'foo/bar:bar/foo'."""
- return os.pathsep.join(dirlist)
-
-def dist2sconscc(compiler):
- """This converts the name passed to distutils to scons name convention (C
- compiler). compiler should be a CCompiler instance.
-
- Example:
- --compiler=intel -> intelc"""
- compiler_type = compiler.compiler_type
- if compiler_type == 'msvc':
- return 'msvc'
- elif compiler_type == 'intel':
- return 'intelc'
- else:
- return compiler.compiler[0]
-
-def dist2sconsfc(compiler):
- """This converts the name passed to distutils to scons name convention
- (Fortran compiler). The argument should be a FCompiler instance.
-
- Example:
- --fcompiler=intel -> ifort on linux, ifl on windows"""
- if compiler.compiler_type == 'intel':
- #raise NotImplementedError('FIXME: intel fortran compiler name ?')
- return 'ifort'
- elif compiler.compiler_type == 'gnu':
- return 'g77'
- elif compiler.compiler_type == 'gnu95':
- return 'gfortran'
- elif compiler.compiler_type == 'sun':
- return 'sunf77'
- else:
- # XXX: Just give up for now, and use generic fortran compiler
- return 'fortran'
-
-def dist2sconscxx(compiler):
- """This converts the name passed to distutils to scons name convention
- (C++ compiler). The argument should be a Compiler instance."""
- if compiler.compiler_type == 'msvc':
- return compiler.compiler_type
-
- return compiler.compiler_cxx[0]
-
-def get_compiler_executable(compiler):
- """For any give CCompiler instance, this gives us the name of C compiler
- (the actual executable).
-
- NOTE: does NOT work with FCompiler instances."""
- # Geez, why does distutils has no common way to get the compiler name...
- if compiler.compiler_type == 'msvc':
- # this is harcoded in distutils... A bit cleaner way would be to
- # initialize the compiler instance and then get compiler.cc, but this
- # may be costly: we really just want a string.
- # XXX: we need to initialize the compiler anyway, so do not use
- # hardcoded string
- #compiler.initialize()
- #print compiler.cc
- return 'cl.exe'
- else:
- return compiler.compiler[0]
-
-def get_f77_compiler_executable(compiler):
- """For any give FCompiler instance, this gives us the name of F77 compiler
- (the actual executable)."""
- return compiler.compiler_f77[0]
-
-def get_cxxcompiler_executable(compiler):
- """For any give CCompiler instance, this gives us the name of CXX compiler
- (the actual executable).
-
- NOTE: does NOT work with FCompiler instances."""
- # Geez, why does distutils has no common way to get the compiler name...
- if compiler.compiler_type == 'msvc':
- # this is harcoded in distutils... A bit cleaner way would be to
- # initialize the compiler instance and then get compiler.cc, but this
- # may be costly: we really just want a string.
- # XXX: we need to initialize the compiler anyway, so do not use
- # hardcoded string
- #compiler.initialize()
- #print compiler.cc
- return 'cl.exe'
- else:
- return compiler.compiler_cxx[0]
-
-def get_tool_path(compiler):
- """Given a distutils.ccompiler.CCompiler class, returns the path of the
- toolset related to C compilation."""
- fullpath_exec = find_executable(get_compiler_executable(compiler))
- if fullpath_exec:
- fullpath = pdirname(fullpath_exec)
- else:
- raise DistutilsSetupError("Could not find compiler executable info for scons")
- return fullpath
-
-def get_f77_tool_path(compiler):
- """Given a distutils.ccompiler.FCompiler class, returns the path of the
- toolset related to F77 compilation."""
- fullpath_exec = find_executable(get_f77_compiler_executable(compiler))
- if fullpath_exec:
- fullpath = pdirname(fullpath_exec)
- else:
- raise DistutilsSetupError("Could not find F77 compiler executable "\
- "info for scons")
- return fullpath
-
-def get_cxx_tool_path(compiler):
- """Given a distutils.ccompiler.CCompiler class, returns the path of the
- toolset related to C compilation."""
- fullpath_exec = find_executable(get_cxxcompiler_executable(compiler))
- if fullpath_exec:
- fullpath = pdirname(fullpath_exec)
- else:
- raise DistutilsSetupError("Could not find compiler executable info for scons")
- return fullpath
-
-def protect_path(path):
- """Convert path (given as a string) to something the shell will have no
- problem to understand (space, etc... problems)."""
- if path:
- # XXX: to this correctly, this is totally bogus for now (does not check for
- # already quoted path, for example).
- return '"' + path + '"'
- else:
- return '""'
-
-def parse_package_list(pkglist):
- return pkglist.split(",")
-
-def find_common(seq1, seq2):
- """Given two list, return the index of the common items.
-
- The index are relative to seq1.
-
- Note: do not handle duplicate items."""
- dict2 = dict([(i, None) for i in seq2])
-
- return [i for i in range(len(seq1)) if dict2.has_key(seq1[i])]
-
-def select_packages(sconspkg, pkglist):
- """Given a list of packages in pkglist, return the list of packages which
- match this list."""
- common = find_common(sconspkg, pkglist)
- if not len(common) == len(pkglist):
- msg = "the package list contains a package not found in "\
- "the current list. The current list is %s" % sconspkg
- raise ValueError(msg)
- return common
-
-def check_numscons(minver):
- """Check that we can use numscons.
-
- minver is a 3 integers tuple which defines the min version."""
- try:
- import numscons
- except ImportError:
- e = get_exception()
- raise RuntimeError("importing numscons failed (error was %s), using " \
- "scons within distutils is not possible without "
- "this package " % str(e))
-
- try:
- # version_info was added in 0.10.0
- from numscons import version_info
- # Stupid me used string instead of numbers in version_info in
- # dev versions of 0.10.0
- if isinstance(version_info[0], str):
- raise ValueError("Numscons %s or above expected " \
- "(detected 0.10.0)" % str(minver))
- # Stupid me used list instead of tuple in numscons
- version_info = tuple(version_info)
- if version_info[:3] < minver:
- raise ValueError("Numscons %s or above expected (got %s) "
- % (str(minver), str(version_info[:3])))
- except ImportError:
- raise RuntimeError("You need numscons >= %s to build numpy "\
- "with numscons (imported numscons path " \
- "is %s)." % (minver, numscons.__file__))
-
-# XXX: this is a giantic mess. Refactor this at some point.
-class scons(old_build_ext):
- # XXX: add an option to the scons command for configuration (auto/force/cache).
- description = "Scons builder"
-
- library_options = [
- ('with-perflib=', None,
- 'Specify which performance library to use for BLAS/LAPACK/etc...' \
- 'Examples: mkl/atlas/sunper/accelerate'),
- ('with-mkl-lib=', None, 'TODO'),
- ('with-mkl-include=', None, 'TODO'),
- ('with-mkl-libraries=', None, 'TODO'),
- ('with-atlas-lib=', None, 'TODO'),
- ('with-atlas-include=', None, 'TODO'),
- ('with-atlas-libraries=', None, 'TODO')
- ]
- user_options = [
- ('jobs=', 'j', "specify number of worker threads when executing" \
- "scons"),
- ('inplace', 'i', 'If specified, build in place.'),
- ('import-env', 'e', 'If specified, import user environment into scons env["ENV"].'),
- ('bypass', 'b', 'Bypass distutils compiler detection (experimental).'),
- ('scons-tool-path=', None, 'specify additional path '\
- '(absolute) to look for scons tools'),
- ('silent=', None, 'specify whether scons output should less verbose'\
- '(1), silent (2), super silent (3) or not (0, default)'),
- ('log-level=', None, 'specify log level for numscons. Any value ' \
- 'valid for the logging python module is valid'),
- ('package-list=', None,
- 'If specified, only run scons on the given '\
- 'packages (example: --package-list=scipy.cluster). If empty, '\
- 'no package is built'),
- ('fcompiler=', None, "specify the Fortran compiler type"),
- ('compiler=', None, "specify the C compiler type"),
- ('cxxcompiler=', None,
- "specify the C++ compiler type (same as C by default)"),
- ('debug', 'g',
- "compile/link with debugging information"),
- ] + library_options
-
- def initialize_options(self):
- old_build_ext.initialize_options(self)
- self.build_clib = None
-
- self.debug = 0
-
- self.compiler = None
- self.cxxcompiler = None
- self.fcompiler = None
-
- self.jobs = None
- self.silent = 0
- self.import_env = 0
- self.scons_tool_path = ''
- # If true, we bypass distutils to find the c compiler altogether. This
- # is to be used in desperate cases (like incompatible visual studio
- # version).
- self._bypass_distutils_cc = False
-
- # scons compilers
- self.scons_compiler = None
- self.scons_compiler_path = None
- self.scons_fcompiler = None
- self.scons_fcompiler_path = None
- self.scons_cxxcompiler = None
- self.scons_cxxcompiler_path = None
-
- self.package_list = None
- self.inplace = 0
- self.bypass = 0
-
- # Only critical things
- self.log_level = 50
-
- # library options
- self.with_perflib = []
- self.with_mkl_lib = []
- self.with_mkl_include = []
- self.with_mkl_libraries = []
- self.with_atlas_lib = []
- self.with_atlas_include = []
- self.with_atlas_libraries = []
-
- def _init_ccompiler(self, compiler_type):
- # XXX: The logic to bypass distutils is ... not so logic.
- if compiler_type == 'msvc':
- self._bypass_distutils_cc = True
- try:
- distutils_compiler = new_compiler(compiler=compiler_type,
- verbose=self.verbose,
- dry_run=self.dry_run,
- force=self.force)
- distutils_compiler.customize(self.distribution)
- # This initialization seems necessary, sometimes, for find_executable to work...
- if hasattr(distutils_compiler, 'initialize'):
- distutils_compiler.initialize()
- self.scons_compiler = dist2sconscc(distutils_compiler)
- self.scons_compiler_path = protect_path(get_tool_path(distutils_compiler))
- except DistutilsPlatformError:
- e = get_exception()
- if not self._bypass_distutils_cc:
- raise e
- else:
- self.scons_compiler = compiler_type
-
- def _init_fcompiler(self, compiler_type):
- self.fcompiler = new_fcompiler(compiler = compiler_type,
- verbose = self.verbose,
- dry_run = self.dry_run,
- force = self.force)
-
- if self.fcompiler is not None:
- self.fcompiler.customize(self.distribution)
- self.scons_fcompiler = dist2sconsfc(self.fcompiler)
- self.scons_fcompiler_path = protect_path(get_f77_tool_path(self.fcompiler))
-
- def _init_cxxcompiler(self, compiler_type):
- cxxcompiler = new_compiler(compiler = compiler_type,
- verbose = self.verbose,
- dry_run = self.dry_run,
- force = self.force)
- if cxxcompiler is not None:
- cxxcompiler.customize(self.distribution, need_cxx = 1)
- cxxcompiler.customize_cmd(self)
- self.cxxcompiler = cxxcompiler.cxx_compiler()
- try:
- get_cxx_tool_path(self.cxxcompiler)
- except DistutilsSetupError:
- self.cxxcompiler = None
-
- if self.cxxcompiler:
- self.scons_cxxcompiler = dist2sconscxx(self.cxxcompiler)
- self.scons_cxxcompiler_path = protect_path(get_cxx_tool_path(self.cxxcompiler))
-
- def finalize_options(self):
- old_build_ext.finalize_options(self)
-
- self.sconscripts = []
- self.pre_hooks = []
- self.post_hooks = []
- self.pkg_names = []
- self.pkg_paths = []
-
- if self.distribution.has_scons_scripts():
- for i in self.distribution.scons_data:
- self.sconscripts.append(i.scons_path)
- self.pre_hooks.append(i.pre_hook)
- self.post_hooks.append(i.post_hook)
- self.pkg_names.append(i.parent_name)
- self.pkg_paths.append(i.pkg_path)
- # This crap is needed to get the build_clib
- # directory
- build_clib_cmd = get_cmd("build_clib").get_finalized_command("build_clib")
- self.build_clib = build_clib_cmd.build_clib
-
- if not self.cxxcompiler:
- self.cxxcompiler = self.compiler
-
- # To avoid trouble, just don't do anything if no sconscripts are used.
- # This is useful when, for example, f2py uses numpy.distutils, because
- # f2py does not pass compiler information to the scons command, and the
- # compilation setup below can crash in some situations.
- if len(self.sconscripts) > 0:
- if self.bypass:
- self.scons_compiler = self.compiler
- self.scons_fcompiler = self.fcompiler
- self.scons_cxxcompiler = self.cxxcompiler
- else:
- # Try to get the same compilers as the ones used by distutils: this is
- # non-trivial because distutils and scons have totally different
- # conventions here (distutils uses PATH from the user's environment,
- # whereas scons uses standard locations). The way we do it: once we
- # know which C compiler is used, we use a numpy.distutils function to
- # get its full path, and add that path to the env['PATH'] variable of
- # the env instance (this is done in the numpy.distutils.scons module).
-
- self._init_ccompiler(self.compiler)
- self._init_fcompiler(self.fcompiler)
- self._init_cxxcompiler(self.cxxcompiler)
-
- if self.package_list:
- self.package_list = parse_package_list(self.package_list)
-
- def _call_scons(self, scons_exec, sconscript, pkg_name, pkg_path, bootstrapping):
- # XXX: when a scons script is missing, scons only prints warnings, and
- # does not return a failure (status is 0). We have to detect this from
- # distutils (this cannot work for recursive scons builds...)
-
- # XXX: passing everything at the command line may cause some trouble
- # where there is a size limitation? What is the standard solution in
- # this case?
-
- cmd = [scons_exec, "-f", sconscript, '-I.']
- if self.jobs:
- cmd.append(" --jobs=%d" % int(self.jobs))
- if self.inplace:
- cmd.append("inplace=1")
- cmd.append('scons_tool_path="%s"' % self.scons_tool_path)
- cmd.append('src_dir="%s"' % pdirname(sconscript))
- cmd.append('pkg_path="%s"' % pkg_path)
- cmd.append('pkg_name="%s"' % pkg_name)
- cmd.append('log_level=%s' % self.log_level)
- #cmd.append('distutils_libdir=%s' % protect_path(pjoin(self.build_lib,
- # pdirname(sconscript))))
- cmd.append('distutils_libdir=%s' %
- protect_path(get_distutils_libdir(self, pkg_name)))
- cmd.append('distutils_clibdir=%s' %
- protect_path(get_distutils_clibdir(self, pkg_name)))
- prefix = get_distutils_install_prefix(pkg_name, self.inplace)
- cmd.append('distutils_install_prefix=%s' % protect_path(prefix))
-
- if not self._bypass_distutils_cc:
- cmd.append('cc_opt=%s' % self.scons_compiler)
- if self.scons_compiler_path:
- cmd.append('cc_opt_path=%s' % self.scons_compiler_path)
- else:
- cmd.append('cc_opt=%s' % self.scons_compiler)
-
- cmd.append('debug=%s' % self.debug)
-
- if self.scons_fcompiler:
- cmd.append('f77_opt=%s' % self.scons_fcompiler)
- if self.scons_fcompiler_path:
- cmd.append('f77_opt_path=%s' % self.scons_fcompiler_path)
-
- if self.scons_cxxcompiler:
- cmd.append('cxx_opt=%s' % self.scons_cxxcompiler)
- if self.scons_cxxcompiler_path:
- cmd.append('cxx_opt_path=%s' % self.scons_cxxcompiler_path)
-
- cmd.append('include_bootstrap=%s' % dirl_to_str(get_numpy_include_dirs(sconscript)))
- cmd.append('bypass=%s' % self.bypass)
- cmd.append('import_env=%s' % self.import_env)
- if self.silent:
- if int(self.silent) == 2:
- cmd.append('-Q')
- elif int(self.silent) == 3:
- cmd.append('-s')
- cmd.append('silent=%d' % int(self.silent))
- cmd.append('bootstrapping=%d' % bootstrapping)
- cmdstr = ' '.join(cmd)
- if int(self.silent) < 1:
- log.info("Executing scons command (pkg is %s): %s ", pkg_name, cmdstr)
- else:
- log.info("======== Executing scons command for pkg %s =========", pkg_name)
- st = os.system(cmdstr)
- if st:
- #print "status is %d" % st
- msg = "Error while executing scons command."
- msg += " See above for more information.\n"
- msg += """\
-If you think it is a problem in numscons, you can also try executing the scons
-command with the --log-level option for more detailed output of what numscons
-is doing, for example --log-level=0; the lower the level, the more detailed
-the output."""
- raise DistutilsExecError(msg)
-
- def run(self):
- if len(self.sconscripts) < 1:
- # nothing to do, just leave it here.
- return
-
- check_numscons(minver=(0, 11, 0))
-
- if self.package_list is not None:
- id = select_packages(self.pkg_names, self.package_list)
- sconscripts = [self.sconscripts[i] for i in id]
- pre_hooks = [self.pre_hooks[i] for i in id]
- post_hooks = [self.post_hooks[i] for i in id]
- pkg_names = [self.pkg_names[i] for i in id]
- pkg_paths = [self.pkg_paths[i] for i in id]
- else:
- sconscripts = self.sconscripts
- pre_hooks = self.pre_hooks
- post_hooks = self.post_hooks
- pkg_names = self.pkg_names
- pkg_paths = self.pkg_paths
-
- if is_bootstrapping():
- bootstrapping = 1
- else:
- bootstrapping = 0
-
- scons_exec = get_python_exec_invoc()
- scons_exec += ' ' + protect_path(pjoin(get_scons_local_path(), 'scons.py'))
-
- for sconscript, pre_hook, post_hook, pkg_name, pkg_path in zip(sconscripts,
- pre_hooks, post_hooks,
- pkg_names, pkg_paths):
- if pre_hook:
- pre_hook()
-
- if sconscript:
- self._call_scons(scons_exec, sconscript, pkg_name, pkg_path, bootstrapping)
-
- if post_hook:
- post_hook(**{'pkg_name': pkg_name, 'scons_cmd' : self})
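The removed _call_scons above assembled one long shell string and handed it to
os.system, treating any non-zero status as failure. A minimal sketch of that
pattern, with purely hypothetical script name and arguments:

    import os
    # hypothetical sconscript and options, mirroring the removed command
    cmd = ['python', 'scons.py', '-f', 'SConscript', 'inplace=1']
    status = os.system(' '.join(cmd))   # run the whole string via the shell
    if status:
        raise RuntimeError('scons invocation failed; see output above')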
-
diff --git a/numpy/distutils/command/sdist.py b/numpy/distutils/command/sdist.py
index 62fce9574..7d8cc7826 100644
--- a/numpy/distutils/command/sdist.py
+++ b/numpy/distutils/command/sdist.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
if 'setuptools' in sys.modules:
from setuptools.command.sdist import sdist as old_sdist
diff --git a/numpy/distutils/compat.py b/numpy/distutils/compat.py
index 1c37dc2b9..9a81cd392 100644
--- a/numpy/distutils/compat.py
+++ b/numpy/distutils/compat.py
@@ -1,6 +1,9 @@
"""Small modules to cope with python 2 vs 3 incompatibilities inside
numpy.distutils
+
"""
+from __future__ import division, absolute_import, print_function
+
import sys
def get_exception():
diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py
index 368cdd457..cb03fc7c3 100644
--- a/numpy/distutils/conv_template.py
+++ b/numpy/distutils/conv_template.py
@@ -24,7 +24,7 @@ combinations of the variables using (inside the comment portion of the inner loo
 This will exclude the pattern where var1 is value1 and var2 is value2 when
the result is being generated.
-
+
In the main body each replace will use one entry from the list of named replacements
@@ -78,6 +78,8 @@ Example:
3, 3, jim
"""
+from __future__ import division, absolute_import, print_function
+
__all__ = ['process_str', 'process_file']
@@ -118,7 +120,7 @@ def parse_structure(astr, level):
ind = 0
line = 0
spanlist = []
- while 1:
+ while True:
start = astr.find(loopbeg, ind)
if start == -1:
break
@@ -269,14 +271,14 @@ def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
- for line in fid.readlines():
+ for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d,fn)
if os.path.isfile(fn):
- print ('Including file',fn)
+ print('Including file',fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
@@ -301,7 +303,7 @@ def unique_key(adict):
# currently it works by appending together n of the letters of the
# current keys and increasing n until a unique key is found
# -- not particularly quick
- allkeys = adict.keys()
+ allkeys = list(adict.keys())
done = False
n = 1
while not done:
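The list() wrapper above matters because on Python 3 dict.keys() returns a
view, which supports neither indexing nor concatenation. A quick illustration,
using a throwaway dictionary:

    adict = {'a': 1, 'b': 2}            # any dictionary
    allkeys = list(adict.keys())        # a plain list on both 2.x and 3.x
    extended = allkeys + ['fallback']   # concatenation works again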
diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py
index e617589a2..3b5ae5630 100644
--- a/numpy/distutils/core.py
+++ b/numpy/distutils/core.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
import sys
from distutils.core import *
@@ -24,7 +25,7 @@ from numpy.distutils.extension import Extension
from numpy.distutils.numpy_distribution import NumpyDistribution
from numpy.distutils.command import config, config_compiler, \
build, build_py, build_ext, build_clib, build_src, build_scripts, \
- sdist, install_data, install_headers, install, bdist_rpm, scons, \
+ sdist, install_data, install_headers, install, bdist_rpm, \
install_clib
from numpy.distutils.misc_util import get_data_files, is_sequence, is_string
@@ -38,7 +39,6 @@ numpy_cmdclass = {'build': build.build,
'build_py': build_py.build_py,
'build_clib': build_clib.build_clib,
'sdist': sdist.sdist,
- 'scons': scons.scons,
'install_data': install_data.install_data,
'install_headers': install_headers.install_headers,
'install_clib': install_clib.install_clib,
@@ -99,30 +99,13 @@ def get_distribution(always=False):
# class is local to a function in setuptools.command.easy_install
if dist is not None and \
'DistributionWithoutHelpCommands' in repr(dist):
- #raise NotImplementedError("setuptools not supported yet for numpy.scons branch")
dist = None
if always and dist is None:
dist = NumpyDistribution()
return dist
-def _exit_interactive_session(_cache=[]):
- if _cache:
- return # been here
- _cache.append(1)
- print('-'*72)
- raw_input('Press ENTER to close the interactive session..')
- print('='*72)
-
def setup(**attr):
- if len(sys.argv)<=1 and not attr.get('script_args',[]):
- from interactive import interactive_sys_argv
- import atexit
- atexit.register(_exit_interactive_session)
- sys.argv[:] = interactive_sys_argv(sys.argv)
- if len(sys.argv)>1:
- return setup(**attr)
-
cmdclass = numpy_cmdclass.copy()
new_attr = attr.copy()
diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py
index a9b2af108..64ad055d3 100644
--- a/numpy/distutils/cpuinfo.py
+++ b/numpy/distutils/cpuinfo.py
@@ -10,16 +10,20 @@ this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Pearu Peterson
+
"""
+from __future__ import division, absolute_import, print_function
__all__ = ['cpu']
import sys, re, types
import os
-if sys.version_info[0] < 3:
- from commands import getstatusoutput
-else:
+
+if sys.version_info[0] >= 3:
from subprocess import getstatusoutput
+else:
+ from commands import getstatusoutput
+
import warnings
import platform
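getstatusoutput moved from the commands module to subprocess in Python 3 but
kept its interface: it runs a shell command and returns an (exit_status,
output) pair. A hedged example, assuming a POSIX system with uname available:

    import sys
    if sys.version_info[0] >= 3:
        from subprocess import getstatusoutput
    else:
        from commands import getstatusoutput
    status, output = getstatusoutput('uname -m')   # assumes POSIX uname
    if status == 0:
        print('machine is', output)                # e.g. 'x86_64'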
@@ -78,7 +82,7 @@ class CPUInfoBase(object):
if not name.startswith('_'):
if hasattr(self,'_'+name):
attr = getattr(self,'_'+name)
- if type(attr) is types.MethodType:
+ if isinstance(attr, types.MethodType):
return lambda func=self._try_call,attr=attr : func(attr)
else:
return lambda : None
@@ -486,25 +490,29 @@ class Win32CPUInfo(CPUInfoBase):
info = []
try:
 #XXX: Bad style to use such a long `try:...except:...`. Fix it!
- import _winreg
+ if sys.version_info[0] >= 3:
+ import winreg
+ else:
+ import _winreg as winreg
+
prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"\
"\s+stepping\s+(?P<STP>\d+)",re.IGNORECASE)
- chnd=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, self.pkey)
+ chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
pnum=0
- while 1:
+ while True:
try:
- proc=_winreg.EnumKey(chnd,pnum)
- except _winreg.error:
+ proc=winreg.EnumKey(chnd,pnum)
+ except winreg.error:
break
else:
pnum+=1
info.append({"Processor":proc})
- phnd=_winreg.OpenKey(chnd,proc)
+ phnd=winreg.OpenKey(chnd,proc)
pidx=0
while True:
try:
- name,value,vtpe=_winreg.EnumValue(phnd,pidx)
- except _winreg.error:
+ name,value,vtpe=winreg.EnumValue(phnd,pidx)
+ except winreg.error:
break
else:
pidx=pidx+1
@@ -516,7 +524,7 @@ class Win32CPUInfo(CPUInfoBase):
info[-1]["Model"]=int(srch.group("MDL"))
info[-1]["Stepping"]=int(srch.group("STP"))
except:
- print(sys.exc_value,'(ignoring)')
+ print(sys.exc_info()[1],'(ignoring)')
self.__class__.info = info
def _not_impl(self): pass
diff --git a/numpy/distutils/environment.py b/numpy/distutils/environment.py
index 7d3ae56ae..3798e16f5 100644
--- a/numpy/distutils/environment.py
+++ b/numpy/distutils/environment.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
from distutils.dist import Distribution
diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py
index e0c3e1c97..0af60ec34 100644
--- a/numpy/distutils/exec_command.py
+++ b/numpy/distutils/exec_command.py
@@ -44,6 +44,7 @@ Known bugs:
- Tests, that send messages to stderr, fail when executed from MSYS prompt
because the messages are lost at some point.
"""
+from __future__ import division, absolute_import, print_function
__all__ = ['exec_command','find_executable']
@@ -140,6 +141,19 @@ def _update_environment( **env ):
for name,value in env.items():
os.environ[name] = value or ''
+def _supports_fileno(stream):
+ """
+ Returns True if 'stream' supports the file descriptor interface and allows fileno().
+ """
+ if hasattr(stream, 'fileno'):
+ try:
+ r = stream.fileno()
+ return True
+ except IOError:
+ return False
+ else:
+ return False
+
def exec_command( command,
execute_in='', use_shell=None, use_tee = None,
_with_python = 1,
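The new helper exists because environments such as IDLE replace sys.stdout
with a pseudo-file whose fileno() raises, so exec_command must probe before
touching file descriptors. A sketch of the intended use (assuming the helper
from the hunk above is in scope):

    import sys
    if _supports_fileno(sys.stdout):
        fd = sys.stdout.fileno()    # a real OS-level descriptor
    else:
        fd = None                   # GUI shell pseudo-file; skip fd tricks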
@@ -181,7 +195,7 @@ def exec_command( command,
else:
log.debug('Retaining cwd: %s' % oldcwd)
- oldenv = _preserve_environment( env.keys() )
+ oldenv = _preserve_environment( list(env.keys()) )
_update_environment( **env )
try:
@@ -194,7 +208,8 @@ def exec_command( command,
# _exec_command_posix uses os.system and is faster
# but not on all platforms os.system will return
# a correct status.
- if _with_python and (0 or sys.__stdout__.fileno()==-1):
+ if (_with_python and _supports_fileno(sys.stdout) and
+ sys.stdout.fileno() == -1):
st = _exec_command_python(command,
exec_command_dir = exec_dir,
**env)
@@ -348,14 +363,16 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ):
argv = [os.environ['COMSPEC'],'/C'] + argv
using_command = 1
- # sys.__std*__ is used instead of sys.std* because environments
- # like IDLE, PyCrust, etc overwrite sys.std* commands.
- so_fileno = sys.__stdout__.fileno()
- se_fileno = sys.__stderr__.fileno()
- so_flush = sys.__stdout__.flush
- se_flush = sys.__stderr__.flush
- so_dup = os.dup(so_fileno)
- se_dup = os.dup(se_fileno)
+ _so_has_fileno = _supports_fileno(sys.stdout)
+ _se_has_fileno = _supports_fileno(sys.stderr)
+ so_flush = sys.stdout.flush
+ se_flush = sys.stderr.flush
+ if _so_has_fileno:
+ so_fileno = sys.stdout.fileno()
+ so_dup = os.dup(so_fileno)
+ if _se_has_fileno:
+ se_fileno = sys.stderr.fileno()
+ se_dup = os.dup(se_fileno)
outfile = temp_file_name()
fout = open(outfile,'w')
@@ -372,13 +389,16 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ):
so_flush()
se_flush()
- os.dup2(fout.fileno(),so_fileno)
- if using_command:
- #XXX: disabled for now as it does not work from cmd under win32.
- # Tests fail on msys
- os.dup2(ferr.fileno(),se_fileno)
- else:
- os.dup2(fout.fileno(),se_fileno)
+ if _so_has_fileno:
+ os.dup2(fout.fileno(),so_fileno)
+
+ if _se_has_fileno:
+ if using_command:
+ #XXX: disabled for now as it does not work from cmd under win32.
+ # Tests fail on msys
+ os.dup2(ferr.fileno(),se_fileno)
+ else:
+ os.dup2(fout.fileno(),se_fileno)
try:
status = spawn_command(os.P_WAIT,argv0,argv,os.environ)
except OSError:
@@ -388,8 +408,10 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ):
so_flush()
se_flush()
- os.dup2(so_dup,so_fileno)
- os.dup2(se_dup,se_fileno)
+ if _so_has_fileno:
+ os.dup2(so_dup,so_fileno)
+ if _se_has_fileno:
+ os.dup2(se_dup,se_fileno)
fout.close()
fout = open_latin1(outfile,'r')
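The guarded block above is the classic save/redirect/restore dance on file
descriptors; stripped of the distutils context it looks roughly like this
(the log file name and echo command are illustrative):

    import os, sys
    saved = os.dup(sys.stdout.fileno())          # remember real stdout
    log = open('cmd.log', 'w')
    os.dup2(log.fileno(), sys.stdout.fileno())   # fd 1 now hits the file
    try:
        os.system('echo captured')               # child output is logged
    finally:
        os.dup2(saved, sys.stdout.fileno())      # restore original stdout
        os.close(saved)
        log.close()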
diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py
index 2fc29f6d5..7fc6e1ae7 100644
--- a/numpy/distutils/extension.py
+++ b/numpy/distutils/extension.py
@@ -4,13 +4,18 @@ Provides the Extension class, used to describe C/C++ extension
modules in setup scripts.
Overridden to support f2py.
-"""
-__revision__ = "$Id: extension.py,v 1.1 2005/04/09 19:29:34 pearu Exp $"
+"""
+from __future__ import division, absolute_import, print_function
+import sys
+import re
from distutils.extension import Extension as old_Extension
-import re
+if sys.version_info[0] >= 3:
+ basestring = str
+
+
cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match
fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z',re.I).match
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index 6f92af96c..37852b0fb 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -11,7 +11,9 @@ file, like 'gcc', that is executed, and should be a string. In contrast,
should be a list.
But note that FCompiler.executables is actually a dictionary of commands.
+
"""
+from __future__ import division, absolute_import, print_function
__all__ = ['FCompiler','new_fcompiler','show_fcompilers',
'dummy_fortran_file']
@@ -540,7 +542,7 @@ class FCompiler(CCompiler):
def dump_properties(self):
"""Print out the attributes of a compiler instance."""
props = []
- for key in self.executables.keys() + \
+ for key in list(self.executables.keys()) + \
['version','libraries','library_dirs',
'object_switch','compile_switch']:
if hasattr(self,key):
@@ -969,7 +971,7 @@ def get_f77flags(src):
flags = {}
f = open_latin1(src,'r')
i = 0
- for line in f.readlines():
+ for line in f:
i += 1
if i>20: break
m = _f77flags_re.match(line)
diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py
index e36f0ff78..03430fb0e 100644
--- a/numpy/distutils/fcompiler/absoft.py
+++ b/numpy/distutils/fcompiler/absoft.py
@@ -5,6 +5,7 @@
# Notes:
# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
# generated extension modules (works for f2py v2.45.241_1936 and up)
+from __future__ import division, absolute_import, print_function
import os
diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py
index a00d8bdb8..3831f88f7 100644
--- a/numpy/distutils/fcompiler/compaq.py
+++ b/numpy/distutils/fcompiler/compaq.py
@@ -1,5 +1,6 @@
#http://www.compaq.com/fortran/docs/
+from __future__ import division, absolute_import, print_function
import os
import sys
diff --git a/numpy/distutils/fcompiler/g95.py b/numpy/distutils/fcompiler/g95.py
index 9352a0b7b..20c2c0d03 100644
--- a/numpy/distutils/fcompiler/g95.py
+++ b/numpy/distutils/fcompiler/g95.py
@@ -1,4 +1,5 @@
# http://g95.sourceforge.net/
+from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index bb832991f..3402319e0 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import re
import os
import sys
@@ -39,7 +41,7 @@ class GnuFCompiler(FCompiler):
m = re.match(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
- m = re.match(r'GNU Fortran.*?([0-9-.]+)', version_string)
+ m = re.match(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
@@ -370,10 +372,11 @@ def _can_target(cmd, arch):
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
+
compiler = GnuFCompiler()
compiler.customize()
print(compiler.get_version())
- raw_input('Press ENTER to continue...')
+
try:
compiler = Gnu95FCompiler()
compiler.customize()
@@ -381,4 +384,3 @@ if __name__ == '__main__':
except Exception:
msg = get_exception()
print(msg)
- raw_input('Press ENTER to continue...')
diff --git a/numpy/distutils/fcompiler/hpux.py b/numpy/distutils/fcompiler/hpux.py
index 866920ee5..5560e199a 100644
--- a/numpy/distutils/fcompiler/hpux.py
+++ b/numpy/distutils/fcompiler/hpux.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.distutils.fcompiler import FCompiler
compilers = ['HPUXFCompiler']
diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py
index 113134bbd..e5061bd18 100644
--- a/numpy/distutils/fcompiler/ibm.py
+++ b/numpy/distutils/fcompiler/ibm.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import re
import sys
@@ -43,8 +45,7 @@ class IBMFCompiler(FCompiler):
# If the output of xlf does not contain version info
# (that's the case with xlf 8.1, for instance) then
# let's try another method:
- l = os.listdir(xlf_dir)
- l.sort()
+ l = sorted(os.listdir(xlf_dir))
l.reverse()
l = [d for d in l if os.path.isfile(os.path.join(xlf_dir,d,'xlf.cfg'))]
if l:
@@ -74,7 +75,7 @@ class IBMFCompiler(FCompiler):
log.info('Creating '+new_cfg)
fi = open(xlf_cfg,'r')
crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match
- for line in fi.readlines():
+ for line in fi:
m = crt1_match(line)
if m:
fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index 281bbe0cb..f6aa687a8 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -1,8 +1,8 @@
# http://developer.intel.com/software/products/compilers/flin/
+from __future__ import division, absolute_import, print_function
import sys
-from numpy.distutils.cpuinfo import cpu
from numpy.distutils.ccompiler import simple_version_match
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
@@ -162,21 +162,10 @@ class IntelVisualFCompiler(BaseIntelFCompiler):
return ['/4Yb','/d2']
def get_flags_opt(self):
- return ['/O1']
+ return ['/O2']
def get_flags_arch(self):
- opt = []
- if cpu.is_PentiumPro() or cpu.is_PentiumII():
- opt.extend(['/G6','/Qaxi'])
- elif cpu.is_PentiumIII():
- opt.extend(['/G6','/QaxK'])
- elif cpu.is_Pentium():
- opt.append('/G5')
- elif cpu.is_PentiumIV():
- opt.extend(['/G7','/QaxW'])
- if cpu.has_mmx():
- opt.append('/QaxM')
- return opt
+ return ["/arch:IA-32", "/QaxSSE3"]
class IntelItaniumVisualFCompiler(IntelVisualFCompiler):
compiler_type = 'intelev'
@@ -203,6 +192,9 @@ class IntelEM64VisualFCompiler(IntelVisualFCompiler):
version_match = simple_version_match(start='Intel\(R\).*?64,')
+ def get_flags_arch(self):
+ return ["/arch:SSE2"]
+
if __name__ == '__main__':
from distutils import log
diff --git a/numpy/distutils/fcompiler/lahey.py b/numpy/distutils/fcompiler/lahey.py
index cf2950624..afca8e422 100644
--- a/numpy/distutils/fcompiler/lahey.py
+++ b/numpy/distutils/fcompiler/lahey.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
from numpy.distutils.fcompiler import FCompiler
diff --git a/numpy/distutils/fcompiler/mips.py b/numpy/distutils/fcompiler/mips.py
index 3c2e9ac84..21fa00642 100644
--- a/numpy/distutils/fcompiler/mips.py
+++ b/numpy/distutils/fcompiler/mips.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler
diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py
index 4aca48450..b02cf43a6 100644
--- a/numpy/distutils/fcompiler/nag.py
+++ b/numpy/distutils/fcompiler/nag.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
from numpy.distutils.fcompiler import FCompiler
diff --git a/numpy/distutils/fcompiler/none.py b/numpy/distutils/fcompiler/none.py
index 526b42d49..039957233 100644
--- a/numpy/distutils/fcompiler/none.py
+++ b/numpy/distutils/fcompiler/none.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
diff --git a/numpy/distutils/fcompiler/pathf95.py b/numpy/distutils/fcompiler/pathf95.py
index 29881f4cf..1902bbc24 100644
--- a/numpy/distutils/fcompiler/pathf95.py
+++ b/numpy/distutils/fcompiler/pathf95.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.distutils.fcompiler import FCompiler
compilers = ['PathScaleFCompiler']
@@ -33,4 +35,4 @@ if __name__ == '__main__':
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='pathf95')
compiler.customize()
- print compiler.get_version()
+ print(compiler.get_version())
diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py
index 6ea3c03d6..1cc201f22 100644
--- a/numpy/distutils/fcompiler/pg.py
+++ b/numpy/distutils/fcompiler/pg.py
@@ -1,5 +1,5 @@
-
# http://www.pgroup.com
+from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
from sys import platform
@@ -35,7 +35,7 @@ class PGroupFCompiler(FCompiler):
}
pic_flags = ['-fpic']
-
+
module_dir_switch = '-module '
module_include_switch = '-I'
@@ -46,7 +46,7 @@ class PGroupFCompiler(FCompiler):
return ['-fast']
def get_flags_debug(self):
return ['-g']
-
+
if platform == 'darwin':
def get_flags_linker_so(self):
return ["-dynamic", '-undefined', 'dynamic_lookup']
diff --git a/numpy/distutils/fcompiler/sun.py b/numpy/distutils/fcompiler/sun.py
index 85e2c3377..07d4eeb8e 100644
--- a/numpy/distutils/fcompiler/sun.py
+++ b/numpy/distutils/fcompiler/sun.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.distutils.ccompiler import simple_version_match
from numpy.distutils.fcompiler import FCompiler
diff --git a/numpy/distutils/fcompiler/vast.py b/numpy/distutils/fcompiler/vast.py
index a7b99ce73..05bbc10ba 100644
--- a/numpy/distutils/fcompiler/vast.py
+++ b/numpy/distutils/fcompiler/vast.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
from numpy.distutils.fcompiler.gnu import GnuFCompiler
diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py
index 413f0721d..9052cf74e 100644
--- a/numpy/distutils/from_template.py
+++ b/numpy/distutils/from_template.py
@@ -45,6 +45,7 @@ process_file(filename)
<ctypereal=float,double,\\0,\\1>
"""
+from __future__ import division, absolute_import, print_function
__all__ = ['process_str','process_file']
@@ -64,13 +65,13 @@ def parse_structure(astr):
spanlist = []
ind = 0
- while 1:
+ while True:
m = routine_start_re.search(astr,ind)
if m is None:
break
start = m.start()
if function_start_re.match(astr,start,m.end()):
- while 1:
+ while True:
i = astr.rfind('\n',ind,start)
if i==-1:
break
@@ -110,7 +111,7 @@ def conv(astr):
def unique_key(adict):
""" Obtain a unique key given a dictionary."""
- allkeys = adict.keys()
+ allkeys = list(adict.keys())
done = False
n = 1
while not done:
@@ -165,10 +166,9 @@ def expand_sub(substr,names):
elif num == numsubs:
rules[r] = rule
else:
- print("Mismatch in number of replacements (base <%s=%s>)"\
- " for <%s=%s>. Ignoring." % (base_rule,
- ','.join(rules[base_rule]),
- r,thelist))
+ print("Mismatch in number of replacements (base <%s=%s>)"
+ " for <%s=%s>. Ignoring." %
+ (base_rule, ','.join(rules[base_rule]), r,thelist))
if not rules:
return substr
@@ -208,14 +208,14 @@ def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
- for line in fid.readlines():
+ for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d,fn)
if os.path.isfile(fn):
- print ('Including file',fn)
+ print('Including file', fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
diff --git a/numpy/distutils/info.py b/numpy/distutils/info.py
index 3d27a8092..2f5310665 100644
--- a/numpy/distutils/info.py
+++ b/numpy/distutils/info.py
@@ -1,5 +1,6 @@
"""
Enhanced distutils with Fortran compilers support and more.
"""
+from __future__ import division, absolute_import, print_function
postpone_import = True
diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py
index 9cff858ce..16fe3a5fb 100644
--- a/numpy/distutils/intelccompiler.py
+++ b/numpy/distutils/intelccompiler.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from distutils.unixccompiler import UnixCCompiler
from numpy.distutils.exec_command import find_executable
diff --git a/numpy/distutils/interactive.py b/numpy/distutils/interactive.py
deleted file mode 100644
index e3dba04eb..000000000
--- a/numpy/distutils/interactive.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import os
-import sys
-from pprint import pformat
-
-__all__ = ['interactive_sys_argv']
-
-def show_information(*args):
- print 'Python',sys.version
- for a in ['platform','prefix','byteorder','path']:
- print 'sys.%s = %s' % (a,pformat(getattr(sys,a)))
- for a in ['name']:
- print 'os.%s = %s' % (a,pformat(getattr(os,a)))
- if hasattr(os,'uname'):
- print 'system,node,release,version,machine = ',os.uname()
-
-def show_environ(*args):
- for k,i in os.environ.items():
- print ' %s = %s' % (k, i)
-
-def show_fortran_compilers(*args):
- from fcompiler import show_fcompilers
- show_fcompilers()
-
-def show_compilers(*args):
- from distutils.ccompiler import show_compilers
- show_compilers()
-
-def show_tasks(argv,ccompiler,fcompiler):
- print """\
-
-Tasks:
- i - Show python/platform/machine information
- ie - Show environment information
- c - Show C compilers information
- c<name> - Set C compiler (current:%s)
- f - Show Fortran compilers information
- f<name> - Set Fortran compiler (current:%s)
- e - Edit proposed sys.argv[1:].
-
-Task aliases:
- 0 - Configure
- 1 - Build
- 2 - Install
- 2<prefix> - Install with prefix.
- 3 - Inplace build
- 4 - Source distribution
- 5 - Binary distribution
-
-Proposed sys.argv = %s
- """ % (ccompiler, fcompiler, argv)
-
-
-import shlex
-
-def edit_argv(*args):
- argv = args[0]
- readline = args[1]
- if readline is not None:
- readline.add_history(' '.join(argv[1:]))
- try:
- s = raw_input('Edit argv [UpArrow to retrieve %r]: ' % (' '.join(argv[1:])))
- except EOFError:
- return
- if s:
- argv[1:] = shlex.split(s)
- return
-
-def interactive_sys_argv(argv):
- print '='*72
- print 'Starting interactive session'
- print '-'*72
-
- readline = None
- try:
- try:
- import readline
- except ImportError:
- pass
- else:
- import tempfile
- tdir = tempfile.gettempdir()
- username = os.environ.get('USER',os.environ.get('USERNAME','UNKNOWN'))
- histfile = os.path.join(tdir,".pyhist_interactive_setup-" + username)
- try:
- try: readline.read_history_file(histfile)
- except IOError: pass
- import atexit
- atexit.register(readline.write_history_file, histfile)
- except AttributeError: pass
- except Exception, msg:
- print msg
-
- task_dict = {'i':show_information,
- 'ie':show_environ,
- 'f':show_fortran_compilers,
- 'c':show_compilers,
- 'e':edit_argv,
- }
- c_compiler_name = None
- f_compiler_name = None
-
- while 1:
- show_tasks(argv,c_compiler_name, f_compiler_name)
- try:
- task = raw_input('Choose a task (^D to quit, Enter to continue with setup): ')
- except EOFError:
- print
- task = 'quit'
- ltask = task.lower()
- if task=='': break
- if ltask=='quit': sys.exit()
- task_func = task_dict.get(ltask,None)
- if task_func is None:
- if ltask[0]=='c':
- c_compiler_name = task[1:]
- if c_compiler_name=='none':
- c_compiler_name = None
- continue
- if ltask[0]=='f':
- f_compiler_name = task[1:]
- if f_compiler_name=='none':
- f_compiler_name = None
- continue
- if task[0]=='2' and len(task)>1:
- prefix = task[1:]
- task = task[0]
- else:
- prefix = None
- if task == '4':
- argv[1:] = ['sdist','-f']
- continue
- elif task in '01235':
- cmd_opts = {'config':[],'config_fc':[],
- 'build_ext':[],'build_src':[],
- 'build_clib':[]}
- if c_compiler_name is not None:
- c = '--compiler=%s' % (c_compiler_name)
- cmd_opts['config'].append(c)
- if task != '0':
- cmd_opts['build_ext'].append(c)
- cmd_opts['build_clib'].append(c)
- if f_compiler_name is not None:
- c = '--fcompiler=%s' % (f_compiler_name)
- cmd_opts['config_fc'].append(c)
- if task != '0':
- cmd_opts['build_ext'].append(c)
- cmd_opts['build_clib'].append(c)
- if task=='3':
- cmd_opts['build_ext'].append('--inplace')
- cmd_opts['build_src'].append('--inplace')
- conf = []
- sorted_keys = ['config','config_fc','build_src',
- 'build_clib','build_ext']
- for k in sorted_keys:
- opts = cmd_opts[k]
- if opts: conf.extend([k]+opts)
- if task=='0':
- if 'config' not in conf:
- conf.append('config')
- argv[1:] = conf
- elif task=='1':
- argv[1:] = conf+['build']
- elif task=='2':
- if prefix is not None:
- argv[1:] = conf+['install','--prefix=%s' % (prefix)]
- else:
- argv[1:] = conf+['install']
- elif task=='3':
- argv[1:] = conf+['build']
- elif task=='5':
- if sys.platform=='win32':
- argv[1:] = conf+['bdist_wininst']
- else:
- argv[1:] = conf+['bdist']
- else:
- print 'Skipping unknown task:',`task`
- else:
- print '-'*68
- try:
- task_func(argv,readline)
- except Exception,msg:
- print 'Failed running task %s: %s' % (task,msg)
- break
- print '-'*68
- print
-
- print '-'*72
- return argv
diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py
index a486b13bd..7316547a3 100644
--- a/numpy/distutils/lib2def.py
+++ b/numpy/distutils/lib2def.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import re
import sys
import os
@@ -46,8 +48,8 @@ libfile, deffile = parse_cmd()"""
elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':
deffile, libfile = sys.argv[1:]
else:
- print "I'm assuming that your first argument is the library"
- print "and the second is the DEF file."
+ print("I'm assuming that your first argument is the library")
+ print("and the second is the DEF file.")
elif len(sys.argv) == 2:
if sys.argv[1][-4:] == '.def':
deffile = sys.argv[1]
diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py
index 4e6c1f38e..fe1efdca6 100644
--- a/numpy/distutils/line_endings.py
+++ b/numpy/distutils/line_endings.py
@@ -1,28 +1,30 @@
""" Functions for converting from DOS to UNIX line endings
+
"""
+from __future__ import division, absolute_import, print_function
import sys, re, os
def dos2unix(file):
"Replace CRLF with LF in argument files. Print names of changed files."
if os.path.isdir(file):
- print file, "Directory!"
+ print(file, "Directory!")
return
data = open(file, "rb").read()
if '\0' in data:
- print file, "Binary!"
+ print(file, "Binary!")
return
newdata = re.sub("\r\n", "\n", data)
if newdata != data:
- print 'dos2unix:', file
+ print('dos2unix:', file)
f = open(file, "wb")
f.write(newdata)
f.close()
return file
else:
- print file, 'ok'
+ print(file, 'ok')
def dos2unix_one_dir(modified_files,dir_name,file_names):
for file in file_names:
@@ -40,23 +42,23 @@ def dos2unix_dir(dir_name):
def unix2dos(file):
"Replace LF with CRLF in argument files. Print names of changed files."
if os.path.isdir(file):
- print file, "Directory!"
+ print(file, "Directory!")
return
data = open(file, "rb").read()
if '\0' in data:
- print file, "Binary!"
+ print(file, "Binary!")
return
newdata = re.sub("\r\n", "\n", data)
newdata = re.sub("\n", "\r\n", newdata)
if newdata != data:
- print 'unix2dos:', file
+ print('unix2dos:', file)
f = open(file, "wb")
f.write(newdata)
f.close()
return file
else:
- print file, 'ok'
+ print(file, 'ok')
def unix2dos_one_dir(modified_files,dir_name,file_names):
for file in file_names:
diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py
index fe44bb443..d77f98c63 100644
--- a/numpy/distutils/log.py
+++ b/numpy/distutils/log.py
@@ -1,4 +1,5 @@
# Colored log, requires Python 2.3 or up.
+from __future__ import division, absolute_import, print_function
import sys
from distutils.log import *
@@ -6,9 +7,11 @@ from distutils.log import Log as old_Log
from distutils.log import _global_log
if sys.version_info[0] < 3:
- from misc_util import red_text, default_text, cyan_text, green_text, is_sequence, is_string
+ from .misc_util import (red_text, default_text, cyan_text, green_text,
+ is_sequence, is_string)
else:
- from numpy.distutils.misc_util import red_text, default_text, cyan_text, green_text, is_sequence, is_string
+ from numpy.distutils.misc_util import (red_text, default_text, cyan_text,
+ green_text, is_sequence, is_string)
def _fix_args(args,flag=1):
@@ -18,6 +21,7 @@ def _fix_args(args,flag=1):
return tuple([_fix_args(a,flag=0) for a in args])
return args
+
class Log(old_Log):
def _log(self, level, msg, args):
if level >= self.threshold:
@@ -32,8 +36,10 @@ class Log(old_Log):
sys.stdout.flush()
def good(self, msg, *args):
- """If we'd log WARN messages, log this message as a 'nice' anti-warn
+ """
+ If we log WARN messages, log this message as a 'nice' anti-warn
message.
+
"""
if WARN >= self.threshold:
if args:
@@ -41,6 +47,8 @@ class Log(old_Log):
else:
print(green_text(msg))
sys.stdout.flush()
+
+
_global_log.__class__ = Log
good = _global_log.good
@@ -52,11 +60,14 @@ def set_threshold(level, force=False):
# likely a good reason why we're running at this level.
_global_log.threshold = level
if level <= DEBUG:
- info('set_threshold: setting thershold to DEBUG level, it can be changed only with force argument')
+ info('set_threshold: setting threshold to DEBUG level,'
+ ' it can be changed only with force argument')
else:
- info('set_threshold: not changing thershold from DEBUG level %s to %s' % (prev_level,level))
+ info('set_threshold: not changing threshold from DEBUG level'
+ ' %s to %s' % (prev_level, level))
return prev_level
+
def set_verbosity(v, force=False):
prev_level = _global_log.threshold
if v < 0:
@@ -69,6 +80,7 @@ def set_verbosity(v, force=False):
set_threshold(DEBUG, force)
return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level,1)
+
_global_color_map = {
DEBUG:cyan_text,
INFO:default_text,
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index 5b9aa3356..75687be84 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -7,9 +7,9 @@ Support code for building Python extensions on Windows.
# 3. Force windows to use g77
"""
+from __future__ import division, absolute_import, print_function
import os
-import subprocess
import sys
import subprocess
import re
@@ -18,7 +18,7 @@ import re
import numpy.distutils.ccompiler
if sys.version_info[0] < 3:
- import log
+ from . import log
else:
from numpy.distutils import log
# NT stuff
@@ -199,10 +199,7 @@ class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
func = distutils.cygwinccompiler.CygwinCCompiler.link
else:
func = UnixCCompiler.link
- if sys.version_info[0] >= 3:
- func(*args[:func.__code__.co_argcount])
- else:
- func(*args[:func.im_func.func_code.co_argcount])
+ func(*args[:func.__code__.co_argcount])
return
def object_filenames (self,
@@ -243,7 +240,7 @@ class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
def find_python_dll():
maj, min, micro = [int(i) for i in sys.version_info[:3]]
dllname = 'python%d%d.dll' % (maj, min)
- print ("Looking for %s" % dllname)
+ print("Looking for %s" % dllname)
# We can't do much here:
# - find it in python main dir
@@ -461,13 +458,16 @@ _MSVCRVER_TO_FULLVER = {}
if sys.platform == 'win32':
try:
import msvcrt
- if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
- _MSVCRVER_TO_FULLVER['90'] = msvcrt.CRT_ASSEMBLY_VERSION
- else:
- _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
 # I took one version in my SxS directory: no idea if it is the right
 # one, and we can't retrieve it from python
_MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
+ _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
+ # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 on Windows XP:
+ _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
+ if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
+ major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".",2)
+ _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
+ del major, minor, rest
except ImportError:
 # If we are here, it means python was not built with MSVC. Not sure what to do
# in that case: manifest building will fail, but it should not be used in
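The split with maxsplit=2 peels off the major and minor components while
keeping the build/revision part intact, so the dictionary key follows the
msvcrXX naming. Checked against the version string quoted above:

    major, minor, rest = "10.0.30319.460".split(".", 2)
    # major == '10', minor == '0', rest == '30319.460'
    # so the key becomes '100', matching the msvcr100 runtime name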
@@ -512,10 +512,13 @@ def manifest_rc(name, type='dll'):
'exe').
Parameters
- ---------- name: str
+ ----------
+ name : str
name of the manifest file to embed
- type: str ('dll', 'exe')
- type of the binary which will embed the manifest"""
+ type : str {'dll', 'exe'}
+ type of the binary which will embed the manifest
+
+ """
if type == 'dll':
rctype = 2
elif type == 'exe':
@@ -533,7 +536,10 @@ def check_embedded_msvcr_match_linked(msver):
# embedding
msvcv = msvc_runtime_library()
if msvcv:
- maj = int(msvcv[5:6])
+ assert msvcv.startswith("msvcr"), msvcv
+ # Dealing with something like "msvcr90" or "msvcr100"; the last
+ # digit is the minor release, so we want int("9") or int("10"):
+ maj = int(msvcv[5:-1])
if not maj == int(msver):
raise ValueError(
"Discrepancy between linked msvcr " \
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index ea3d5f325..bc073aee4 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import re
import sys
@@ -165,7 +167,7 @@ def get_mathlibs(path=None):
fid = open(config_file)
mathlibs = []
s = '#define MATHLIB'
- for line in fid.readlines():
+ for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
@@ -218,8 +220,8 @@ def _fix_paths(paths,local_path,include_non_existing):
else:
if include_non_existing:
new_paths.append(n)
- print('could not resolve pattern in %r: %r' \
- % (local_path,n))
+ print('could not resolve pattern in %r: %r' %
+ (local_path,n))
else:
n2 = njoin(local_path,n)
if os.path.exists(n2):
@@ -230,8 +232,8 @@ def _fix_paths(paths,local_path,include_non_existing):
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
- print('non-existing path in %r: %r' \
- % (local_path,n))
+ print('non-existing path in %r: %r' %
+ (local_path,n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n,local_path,include_non_existing))
@@ -359,22 +361,12 @@ def msvc_runtime_library():
'1310' : 'msvcr71', # MSVC 7.1
'1400' : 'msvcr80', # MSVC 8
'1500' : 'msvcr90', # MSVC 9 (VS 2008)
+ '1600' : 'msvcr100', # MSVC 10 (aka 2010)
}.get(msc_ver, None)
else:
lib = None
return lib
-def msvc_on_amd64():
- if not (sys.platform=='win32' or os.name=='nt'):
- return
- if get_build_architecture() != 'AMD64':
- return
- if 'DISTUTILS_USE_SDK' in os.environ:
- return
- # try to avoid _MSVCCompiler__root attribute error
- print('Forcing DISTUTILS_USE_SDK=1')
- os.environ['DISTUTILS_USE_SDK']='1'
- return
#########################
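With the new table entry, an interpreter built by MSVC 10 now resolves to the
2010 runtime instead of falling through to None. A hedged illustration of the
lookup itself (excerpt of the mapping; keys are assumed to come from the
"MSC v.XXXX" token in sys.version):

    table = {'1400': 'msvcr80', '1500': 'msvcr90', '1600': 'msvcr100'}
    print(table.get('1600'))    # -> 'msvcr100'
    print(table.get('1700'))    # -> None: unknown compiler versions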
@@ -391,8 +383,7 @@ def _get_f90_modules(source):
return []
modules = []
f = open(source,'r')
- f_readlines = getattr(f,'xreadlines',f.readlines)
- for line in f_readlines():
+ for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
@@ -554,7 +545,7 @@ def general_source_directories_files(top_path):
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
- sources = filter(is_string, ext.sources)
+ sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
@@ -565,13 +556,13 @@ def get_ext_source_files(ext):
return filenames
def get_script_files(scripts):
- scripts = filter(is_string, scripts)
+ scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources',[])
- sources = filter(is_string, sources)
+ sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends',[])
@@ -603,11 +594,29 @@ def get_shared_lib_extension(is_python_ext=False):
Linux, but not on OS X.
"""
- so_ext = distutils.sysconfig.get_config_var('SO') or ''
- # fix long extension for Python >=3.2, see PEP 3149.
- if (not is_python_ext) and 'SOABI' in distutils.sysconfig.get_config_vars():
- # Does nothing unless SOABI config var exists
- so_ext = so_ext.replace('.' + distutils.sysconfig.get_config_var('SOABI'), '', 1)
+ confvars = distutils.sysconfig.get_config_vars()
+ # SO is deprecated in 3.3.1, use EXT_SUFFIX instead
+ so_ext = confvars.get('EXT_SUFFIX', None)
+ if so_ext is None:
+ so_ext = confvars.get('SO', '')
+
+ if not is_python_ext:
+ # hardcode known values, config vars (including SHLIB_SUFFIX) are
+ # unreliable (see #3182)
+ # darwin, windows and debug linux are wrong in 3.3.1 and older
+ if (sys.platform.startswith('linux') or
+ sys.platform.startswith('gnukfreebsd')):
+ so_ext = '.so'
+ elif sys.platform.startswith('darwin'):
+ so_ext = '.dylib'
+ elif sys.platform.startswith('win'):
+ so_ext = '.dll'
+ else:
+ # fall back to config vars for unknown platforms
+ # fix long extension for Python >=3.2, see PEP 3149.
+ if 'SOABI' in confvars:
+ # Does nothing unless SOABI config var exists
+ so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
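After this change the platform branch, not sysconfig, decides the non-Python
extension. Expected results, illustrative only (shown for CPython 3.3, where
EXT_SUFFIX carries the ABI tag):

    from numpy.distutils.misc_util import get_shared_lib_extension
    get_shared_lib_extension(is_python_ext=False)   # '.so' on Linux,
                                                    # '.dylib' on Darwin,
                                                    # '.dll' on Windows
    get_shared_lib_extension(is_python_ext=True)    # e.g. '.cpython-33m.so'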
@@ -625,7 +634,7 @@ def get_data_files(data):
if os.path.isfile(s):
filenames.append(s)
else:
- print('Not existing data file:',s)
+ print('Not existing data file:', s)
else:
raise TypeError(repr(s))
return filenames
@@ -644,57 +653,14 @@ def get_frame(level=0):
frame = frame.f_back
return frame
-class SconsInfo(object):
- """
- Container object holding build info for building a package with scons.
-
- Parameters
- ----------
- scons_path : str or None
- Path to scons script, relative to the directory of setup.py.
- If None, no scons script is specified. This can be useful to add only
- pre- and post-hooks to a configuration.
- parent_name : str or None
- Name of the parent package (for example "numpy").
- pre_hook : sequence of callables or None
- Callables that are executed before scons is invoked.
- Each callable should be defined as ``callable(*args, **kw)``.
- post_hook : sequence of callables or None
- Callables that are executed after scons is invoked.
- Each callable should be defined as ``callable(*args, **kw)``.
- source_files : list of str or None
- List of paths to source files, relative to the directory of setup.py.
- pkg_path : str or None
- Path to the package for which the `SconsInfo` instance holds the
- build info, relative to the directory of setup.py.
-
- Notes
- -----
- All parameters are available as attributes of a `SconsInfo` instance.
-
- """
- def __init__(self, scons_path, parent_name, pre_hook,
- post_hook, source_files, pkg_path):
- self.scons_path = scons_path
- self.parent_name = parent_name
- self.pre_hook = pre_hook
- self.post_hook = post_hook
- self.source_files = source_files
- if pkg_path:
- self.pkg_path = pkg_path
- else:
- if scons_path:
- self.pkg_path = os.path.dirname(scons_path)
- else:
- self.pkg_path = ''
######################
class Configuration(object):
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
- 'libraries', 'headers', 'scripts', 'py_modules', 'scons_data',
- 'installed_libraries']
+ 'libraries', 'headers', 'scripts', 'py_modules',
+ 'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
@@ -850,7 +816,7 @@ class Configuration(object):
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
- dirs = filter(os.path.isdir,glob.glob(subpackage_path))
+ dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d,'__init__.py')):
@@ -892,7 +858,7 @@ class Configuration(object):
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
def fix_args_py2(args):
- if setup_module.configuration.func_code.co_argcount > 1:
+ if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
return args
def fix_args_py3(args):
@@ -919,14 +885,14 @@ class Configuration(object):
Parameters
----------
- subpackage_name: str,None
+ subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
- subpackage_path: str
+ subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
- parent_name: str
+ parent_name : str
Parent name.
"""
if subpackage_name is None:
@@ -982,13 +948,13 @@ class Configuration(object):
Parameters
----------
- subpackage_name: str
+ subpackage_name : str
name of the subpackage
- subpackage_path: str
+ subpackage_path : str
if given, the subpackage path such as the subpackage is in
subpackage_path / subpackage_name. If None,the subpackage is
assumed to be located in the local path / subpackage_name.
- standalone: bool
+ standalone : bool
"""
if standalone:
@@ -1026,10 +992,10 @@ class Configuration(object):
Parameters
----------
- data_path: seq,str
+ data_path : seq or str
Argument can be either
- * 2-sequence (<datadir suffix>,<path to data directory>)
+ * 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
@@ -1088,14 +1054,14 @@ class Configuration(object):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
- rl = range(len(pattern_list)-1); rl.reverse()
+ rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
- print('Not a directory, skipping',path)
+ print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
@@ -1148,7 +1114,7 @@ class Configuration(object):
Parameters
----------
- files: sequence
+ files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
@@ -1296,6 +1262,22 @@ class Configuration(object):
### XXX Implement add_py_modules
+ def add_define_macros(self, macros):
+ """Add define macros to configuration
+
+ Add the given sequence of (macro name, value) pairs to the beginning
+ of the define_macros list. This list will be visible to all extension
+ modules of the current package.
+ """
+ dist = self.get_distribution()
+ if dist is not None:
+ if not hasattr(dist, 'define_macros'):
+ dist.define_macros = []
+ dist.define_macros.extend(macros)
+ else:
+ self.define_macros.extend(macros)
+
+
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
@@ -1327,7 +1309,7 @@ class Configuration(object):
Parameters
----------
- files: str, seq
+ files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
@@ -1382,9 +1364,9 @@ class Configuration(object):
Parameters
----------
- name: str
+ name : str
name of the extension
- sources: seq
+ sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
@@ -1392,28 +1374,28 @@ class Configuration(object):
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
- include_dirs:
- define_macros:
- undef_macros:
- library_dirs:
- libraries:
- runtime_library_dirs:
- extra_objects:
- extra_compile_args:
- extra_link_args:
- extra_f77_compile_args:
- extra_f90_compile_args:
- export_symbols:
- swig_opts:
- depends:
+ include_dirs :
+ define_macros :
+ undef_macros :
+ library_dirs :
+ libraries :
+ runtime_library_dirs :
+ extra_objects :
+ extra_compile_args :
+ extra_link_args :
+ extra_f77_compile_args :
+ extra_f90_compile_args :
+ export_symbols :
+ swig_opts :
+ depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
- language:
- f2py_options:
- module_dirs:
- extra_info: dict,list
+ language :
+ f2py_options :
+ module_dirs :
+ extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
@@ -1463,6 +1445,8 @@ class Configuration(object):
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
+ ext_args['define_macros'] = \
+ self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
@@ -1650,65 +1634,6 @@ class Configuration(object):
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
- def add_scons_installed_library(self, name, install_dir):
- """
- Add a scons-built installable library to distutils.
-
- Parameters
- ----------
- name : str
- The name of the library.
- install_dir : str
- Path to install the library, relative to the current sub-package.
-
- """
- install_dir = os.path.join(self.package_path, install_dir)
- self.installed_libraries.append(InstallableLib(name, {}, install_dir))
-
- def add_sconscript(self, sconscript, subpackage_path=None,
- standalone = False, pre_hook = None,
- post_hook = None, source_files = None, package_path=None):
- """Add a sconscript to configuration.
-
- pre_hook and post hook should be sequences of callable, which will be
- use before and after executing scons. The callable should be defined as
- callable(*args, **kw). It is ugly, but well, hooks are ugly anyway...
-
- sconscript can be None, which can be useful to add only post/pre
- hooks."""
- if standalone:
- parent_name = None
- else:
- parent_name = self.name
-
- dist = self.get_distribution()
- # Convert the sconscript name to a relative filename (relative from top
- # setup.py's directory)
- fullsconsname = self.paths(sconscript)[0]
-
- # XXX: Think about a way to automatically register source files from
- # scons...
- full_source_files = []
- if source_files:
- full_source_files.extend([self.paths(i)[0] for i in source_files])
-
- scons_info = SconsInfo(fullsconsname, parent_name,
- pre_hook, post_hook,
- full_source_files, package_path)
- if dist is not None:
- if dist.scons_data is None:
- dist.scons_data = []
- dist.scons_data.append(scons_info)
- self.warn('distutils distribution has been initialized,'\
- ' it may be too late to add a subpackage '+ subpackage_name)
- # XXX: we add a fake extension, to correctly initialize some
- # options in distutils command.
- dist.add_extension('', sources = [])
- else:
- self.scons_data.append(scons_info)
- # XXX: we add a fake extension, to correctly initialize some
- # options in distutils command.
- self.add_extension('', sources = [])
def add_scripts(self,*files):
"""Add scripts to configuration.
@@ -2083,11 +2008,6 @@ class Configuration(object):
"""
self.py_modules.append((self.name,name,generate_config_py))
- def scons_make_config_py(self, name = '__config__'):
- """Generate package __config__.py file containing system_info
- information used during building the package.
- """
- self.py_modules.append((self.name, name, scons_generate_config_py))
def get_info(self,*names):
"""Get resources information.
@@ -2095,7 +2015,7 @@ class Configuration(object):
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
- from system_info import get_info, dict_append
+ from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
@@ -2230,57 +2150,18 @@ def get_info(pkgname, dirs=None):
return info
def is_bootstrapping():
- import __builtin__
+ if sys.version_info[0] >= 3:
+ import builtins
+ else:
+ import __builtin__ as builtins
+
try:
- __builtin__.__NUMPY_SETUP__
+ builtins.__NUMPY_SETUP__
return True
except AttributeError:
return False
__NUMPY_SETUP__ = False
-def scons_generate_config_py(target):
- """generate config.py file containing system_info information
- used during building the package.
-
- usage:
- config['py_modules'].append((packagename, '__config__',generate_config_py))
- """
- from distutils.dir_util import mkpath
- from numscons import get_scons_configres_dir, get_scons_configres_filename
- d = {}
- mkpath(os.path.dirname(target))
- f = open(target, 'w')
- f.write('# this file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
- f.write('# it contains system_info results at the time of building this package.\n')
- f.write('__all__ = ["show"]\n\n')
- confdir = get_scons_configres_dir()
- confilename = get_scons_configres_filename()
- for root, dirs, files in os.walk(confdir):
- if files:
- file = os.path.join(root, confilename)
- assert root.startswith(confdir)
- pkg_name = '.'.join(root[len(confdir)+1:].split(os.sep))
- fid = open(file, 'r')
- try:
- cnt = fid.read()
- d[pkg_name] = eval(cnt)
- finally:
- fid.close()
- # d is a dictionary whose keys are package names, and values the
- # corresponding configuration. Each configuration is itself a dictionary
- # (lib : libinfo)
- f.write('_config = %s\n' % d)
- f.write(r'''
-def show():
- for pkg, config in _config.items():
- print("package %s configuration:" % pkg)
- for lib, libc in config.items():
- print(' %s' % lib)
- for line in libc.split('\n'):
- print('\t%s' % line)
- ''')
- f.close()
- return target
#########################
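
A minimal, self-contained sketch of the version-dispatched builtins lookup that ``is_bootstrapping`` now uses (``__NUMPY_SETUP__`` is the flag numpy's own setup.py injects; the module name is the only Python 2/3 difference)::

    import sys

    if sys.version_info[0] >= 3:
        import builtins
    else:
        import __builtin__ as builtins

    def is_bootstrapping():
        # setup.py sets builtins.__NUMPY_SETUP__ before building numpy;
        # a normal import never defines it, so AttributeError means False.
        try:
            builtins.__NUMPY_SETUP__
            return True
        except AttributeError:
            return False
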
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
index 4f64623ed..ceab906a4 100644
--- a/numpy/distutils/npy_pkg_config.py
+++ b/numpy/distutils/npy_pkg_config.py
@@ -1,11 +1,14 @@
+from __future__ import division, absolute_import, print_function
+
import sys
+import re
+import os
+import shlex
+
if sys.version_info[0] < 3:
from ConfigParser import SafeConfigParser, NoOptionError
else:
from configparser import ConfigParser, SafeConfigParser, NoOptionError
-import re
-import os
-import shlex
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
'read_config', 'parse_flags']
@@ -140,7 +143,7 @@ class LibraryInfo(object):
The list of section headers.
"""
- return self._sections.keys()
+ return list(self._sections.keys())
def cflags(self, section="default"):
val = self.vars.interpolate(self._sections[section]['cflags'])
@@ -219,7 +222,7 @@ class VariableSet(object):
The names of all variables in the `VariableSet` instance.
"""
- return self._raw_data.keys()
+ return list(self._raw_data.keys())
# Emulate a dict to set/get variables values
def __getitem__(self, name):
@@ -238,11 +241,11 @@ def parse_meta(config):
d[name] = value
for k in ['name', 'description', 'version']:
- if not d.has_key(k):
+ if k not in d:
raise FormatError("Option %s (section [meta]) is mandatory, "
"but not found" % k)
- if not d.has_key('requires'):
+ if 'requires' not in d:
d['requires'] = []
return d
@@ -313,7 +316,7 @@ def _read_config_imp(filenames, dirs=None):
# Update var dict for variables not in 'top' config file
for k, v in nvars.items():
- if not vars.has_key(k):
+ if k not in vars:
vars[k] = v
# Update sec dict
@@ -328,7 +331,7 @@ def _read_config_imp(filenames, dirs=None):
# FIXME: document this. If pkgname is defined in the variables section, and
# there is no pkgdir variable defined, pkgdir is automatically defined to
# the path of pkgname. This requires the package to be imported to work
- if not vars.has_key("pkgdir") and vars.has_key("pkgname"):
+ if 'pkgdir' not in vars and 'pkgname' in vars:
pkgname = vars["pkgname"]
if not pkgname in sys.modules:
raise ValueError("You should import %s to get information on %s" %
@@ -426,7 +429,7 @@ if __name__ == '__main__':
files = glob.glob("*.ini")
for f in files:
info = read_config(f)
- print ("%s\t%s - %s" % (info.name, info.name, info.description))
+ print("%s\t%s - %s" % (info.name, info.name, info.description))
pkg_name = args[1]
import os
@@ -452,10 +455,10 @@ if __name__ == '__main__':
info.vars[name] = value
if options.cflags:
- print (info.cflags(section))
+ print(info.cflags(section))
if options.libs:
- print (info.libs(section))
+ print(info.libs(section))
if options.version:
- print (info.version)
+ print(info.version)
if options.min_version:
- print (info.version >= options.min_version)
+ print(info.version >= options.min_version)
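
The ``list(...)`` wrappers above matter because Python 3 ``dict.keys()`` returns a view rather than a list; a short illustration with assumed section names (not part of the patch)::

    sections = {'default': {}, 'msvc': {}}

    keys = sections.keys()         # py3: a view -- no indexing, no sort()
    names = list(sections.keys())  # what LibraryInfo.sections() now returns

    names.sort()                   # a view has no sort(); a list does
    assert names[0] == 'default'
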
diff --git a/numpy/distutils/numpy_distribution.py b/numpy/distutils/numpy_distribution.py
index ea8182659..6ae19d16b 100644
--- a/numpy/distutils/numpy_distribution.py
+++ b/numpy/distutils/numpy_distribution.py
@@ -1,4 +1,6 @@
# XXX: Handle setuptools ?
+from __future__ import division, absolute_import, print_function
+
from distutils.core import Distribution
# This class is used because we add new files (sconscripts, and so on) with the
diff --git a/numpy/distutils/pathccompiler.py b/numpy/distutils/pathccompiler.py
index 48051810e..fc9872db3 100644
--- a/numpy/distutils/pathccompiler.py
+++ b/numpy/distutils/pathccompiler.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from distutils.unixccompiler import UnixCCompiler
class PathScaleCCompiler(UnixCCompiler):
diff --git a/numpy/distutils/setup.py b/numpy/distutils/setup.py
index afc1fadd2..e4dbe240b 100644
--- a/numpy/distutils/setup.py
+++ b/numpy/distutils/setup.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+from __future__ import division, print_function
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/distutils/setupscons.py b/numpy/distutils/setupscons.py
deleted file mode 100644
index 938f07ead..000000000
--- a/numpy/distutils/setupscons.py
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env python
-import os.path
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('distutils',parent_package,top_path)
- config.add_subpackage('command')
- config.add_subpackage('fcompiler')
- config.add_data_dir('tests')
- if os.path.exists("site.cfg"):
- config.add_data_files('site.cfg')
- config.make_config_py()
- return config
-
-if __name__ == '__main__':
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 9c3cee3cb..13f52801e 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -108,7 +108,9 @@ terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
"""
+from __future__ import division, absolute_import, print_function
import sys
import os
@@ -116,6 +118,8 @@ import re
import copy
import warnings
from glob import glob
+from functools import reduce
+
if sys.version_info[0] < 3:
from ConfigParser import NoOptionError, ConfigParser
else:
@@ -134,8 +138,6 @@ from numpy.distutils.misc_util import is_sequence, is_string, \
from numpy.distutils.command.config import config as cmd_config
from numpy.distutils.compat import get_exception
-if sys.version_info[0] >= 3:
- from functools import reduce
# Determine number of bits
import platform
@@ -214,14 +216,34 @@ else:
default_x11_include_dirs.extend(['/usr/lib/X11/include',
'/usr/include/X11'])
+    import subprocess as sp
+    # Explicitly open/close the file to avoid a ResourceWarning when
+    # tests are run in debug mode on Python 3.  The file is opened before
+    # the try block so the finally clause never sees an unbound name if
+    # open() itself fails.
+    tmp = open(os.devnull, 'w')
+    try:
+        p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE,
+                     stderr=tmp)
+    except (OSError, DistutilsError):
+        # OSError if gcc is not installed, or SandboxViolation (a
+        # DistutilsError subclass) if an old setuptools bug is triggered
+        # (see gh-3160).
+        pass
+    else:
+        triplet = str(p.communicate()[0].decode().strip())
+        if p.returncode == 0:
+            # gcc supports the "-print-multiarch" option
+            default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+            default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+    finally:
+        tmp.close()
+
if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
default_include_dirs.append(os.path.join(sys.prefix, 'include'))
default_src_dirs.append(os.path.join(sys.prefix, 'src'))
-default_lib_dirs = filter(os.path.isdir, default_lib_dirs)
-default_include_dirs = filter(os.path.isdir, default_include_dirs)
-default_src_dirs = filter(os.path.isdir, default_src_dirs)
+default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
+default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
+default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]
so_ext = get_shared_lib_extension()
@@ -884,7 +906,7 @@ class mkl_info(system_info):
paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
ld_so_conf = '/etc/ld.so.conf'
if os.path.isfile(ld_so_conf):
- for d in open(ld_so_conf, 'r').readlines():
+ for d in open(ld_so_conf, 'r'):
d = d.strip()
if d:
paths.append(d)
@@ -909,7 +931,7 @@ class mkl_info(system_info):
if mklroot is None:
system_info.__init__(self)
else:
- from cpuinfo import cpu
+ from .cpuinfo import cpu
l = 'mkl' # use shared library
if cpu.is_Itanium():
plt = '64'
@@ -1623,6 +1645,9 @@ class _numpy_info(system_info):
pass
py_incl_dir = distutils.sysconfig.get_python_inc()
include_dirs.append(py_incl_dir)
+ py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
+ if py_pincl_dir not in include_dirs:
+ include_dirs.append(py_pincl_dir)
for d in default_include_dirs:
d = os.path.join(d, os.path.basename(py_incl_dir))
if d not in include_dirs:
@@ -1651,7 +1676,7 @@ class _numpy_info(system_info):
## (self.modulename.upper()+'_VERSION_HEX',
## hex(vstr2hex(module.__version__))),
## )
-## except Exception,msg:
+## except Exception as msg:
## print msg
dict_append(info, define_macros=macros)
include_dirs = self.get_include_dirs()
@@ -1754,12 +1779,15 @@ class boost_python_info(system_info):
break
if not src_dir:
return
- py_incl_dir = distutils.sysconfig.get_python_inc()
+ py_incl_dirs = [distutils.sysconfig.get_python_inc()]
+ py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
+ if py_pincl_dir not in py_incl_dirs:
+ py_incl_dirs.append(py_pincl_dir)
srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))
bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))
info = {'libraries': [('boost_python_src',
- {'include_dirs': [src_dir, py_incl_dir],
+ {'include_dirs': [src_dir] + py_incl_dirs,
'sources':bpl_srcs}
)],
'include_dirs': [src_dir],
@@ -2113,7 +2141,7 @@ def show_all(argv=None):
show_only.append(n)
show_all = not show_only
_gdict_ = globals().copy()
- for name, c in _gdict_.iteritems():
+ for name, c in _gdict_.items():
if not inspect.isclass(c):
continue
if not issubclass(c, system_info) or c is system_info:
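
The multiarch probe added to system_info can be exercised on its own; a sketch assuming only that gcc may or may not be on PATH (the helper name ``multiarch_lib_dir`` is hypothetical, not in the patch)::

    import os
    import subprocess as sp

    def multiarch_lib_dir():
        # Open devnull explicitly so it can be closed deterministically.
        tmp = open(os.devnull, 'w')
        try:
            p = sp.Popen(["gcc", "-print-multiarch"],
                         stdout=sp.PIPE, stderr=tmp)
        except OSError:
            return None              # gcc is not installed
        else:
            triplet = p.communicate()[0].decode().strip()
            if p.returncode != 0:
                return None          # old gcc without -print-multiarch
            return os.path.join("/usr/lib/", triplet)
        finally:
            tmp.close()

On a Debian-style multiarch system this yields e.g. ``/usr/lib/x86_64-linux-gnu``.
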
diff --git a/numpy/distutils/tests/f2py_ext/__init__.py b/numpy/distutils/tests/f2py_ext/__init__.py
index e69de29bb..1d0f69b67 100644
--- a/numpy/distutils/tests/f2py_ext/__init__.py
+++ b/numpy/distutils/tests/f2py_ext/__init__.py
@@ -0,0 +1 @@
+from __future__ import division, absolute_import, print_function
diff --git a/numpy/distutils/tests/f2py_ext/setup.py b/numpy/distutils/tests/f2py_ext/setup.py
index e3dfddb74..658b01bb6 100644
--- a/numpy/distutils/tests/f2py_ext/setup.py
+++ b/numpy/distutils/tests/f2py_ext/setup.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, print_function
+
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('f2py_ext',parent_package,top_path)
diff --git a/numpy/distutils/tests/f2py_ext/tests/test_fib2.py b/numpy/distutils/tests/f2py_ext/tests/test_fib2.py
index 027f455de..bee1b9870 100644
--- a/numpy/distutils/tests/f2py_ext/tests/test_fib2.py
+++ b/numpy/distutils/tests/f2py_ext/tests/test_fib2.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
from numpy.testing import *
from f2py_ext import fib2
diff --git a/numpy/distutils/tests/f2py_f90_ext/__init__.py b/numpy/distutils/tests/f2py_f90_ext/__init__.py
index e69de29bb..1d0f69b67 100644
--- a/numpy/distutils/tests/f2py_f90_ext/__init__.py
+++ b/numpy/distutils/tests/f2py_f90_ext/__init__.py
@@ -0,0 +1 @@
+from __future__ import division, absolute_import, print_function
diff --git a/numpy/distutils/tests/f2py_f90_ext/setup.py b/numpy/distutils/tests/f2py_f90_ext/setup.py
index ee56cc3a6..4b4c2390c 100644
--- a/numpy/distutils/tests/f2py_f90_ext/setup.py
+++ b/numpy/distutils/tests/f2py_f90_ext/setup.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, print_function
+
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('f2py_f90_ext',parent_package,top_path)
diff --git a/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py b/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py
index 1543051dc..92e559291 100644
--- a/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py
+++ b/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
from numpy.testing import *
from f2py_f90_ext import foo
diff --git a/numpy/distutils/tests/gen_ext/__init__.py b/numpy/distutils/tests/gen_ext/__init__.py
index e69de29bb..1d0f69b67 100644
--- a/numpy/distutils/tests/gen_ext/__init__.py
+++ b/numpy/distutils/tests/gen_ext/__init__.py
@@ -0,0 +1 @@
+from __future__ import division, absolute_import, print_function
diff --git a/numpy/distutils/tests/gen_ext/setup.py b/numpy/distutils/tests/gen_ext/setup.py
index bf029062c..ccab2a45e 100644
--- a/numpy/distutils/tests/gen_ext/setup.py
+++ b/numpy/distutils/tests/gen_ext/setup.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+from __future__ import division, print_function
fib3_f = '''
C FILE: FIB3.F
diff --git a/numpy/distutils/tests/gen_ext/tests/test_fib3.py b/numpy/distutils/tests/gen_ext/tests/test_fib3.py
index 8a9a443a5..7bab5a6be 100644
--- a/numpy/distutils/tests/gen_ext/tests/test_fib3.py
+++ b/numpy/distutils/tests/gen_ext/tests/test_fib3.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
from numpy.testing import *
from gen_ext import fib3
diff --git a/numpy/distutils/tests/pyrex_ext/__init__.py b/numpy/distutils/tests/pyrex_ext/__init__.py
index e69de29bb..1d0f69b67 100644
--- a/numpy/distutils/tests/pyrex_ext/__init__.py
+++ b/numpy/distutils/tests/pyrex_ext/__init__.py
@@ -0,0 +1 @@
+from __future__ import division, absolute_import, print_function
diff --git a/numpy/distutils/tests/pyrex_ext/setup.py b/numpy/distutils/tests/pyrex_ext/setup.py
index 5b348b916..ad1f5207a 100644
--- a/numpy/distutils/tests/pyrex_ext/setup.py
+++ b/numpy/distutils/tests/pyrex_ext/setup.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, print_function
+
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('pyrex_ext',parent_package,top_path)
diff --git a/numpy/distutils/tests/pyrex_ext/tests/test_primes.py b/numpy/distutils/tests/pyrex_ext/tests/test_primes.py
index eb2c91da7..c9fdd6c6d 100644
--- a/numpy/distutils/tests/pyrex_ext/tests/test_primes.py
+++ b/numpy/distutils/tests/pyrex_ext/tests/test_primes.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
from numpy.testing import *
from pyrex_ext.primes import primes
diff --git a/numpy/distutils/tests/setup.py b/numpy/distutils/tests/setup.py
index 89d73800e..61f6ba751 100644
--- a/numpy/distutils/tests/setup.py
+++ b/numpy/distutils/tests/setup.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, print_function
+
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('testnumpydistutils',parent_package,top_path)
diff --git a/numpy/distutils/tests/swig_ext/__init__.py b/numpy/distutils/tests/swig_ext/__init__.py
index e69de29bb..1d0f69b67 100644
--- a/numpy/distutils/tests/swig_ext/__init__.py
+++ b/numpy/distutils/tests/swig_ext/__init__.py
@@ -0,0 +1 @@
+from __future__ import division, absolute_import, print_function
diff --git a/numpy/distutils/tests/swig_ext/setup.py b/numpy/distutils/tests/swig_ext/setup.py
index 7f0dbe627..7ce3061a1 100644
--- a/numpy/distutils/tests/swig_ext/setup.py
+++ b/numpy/distutils/tests/swig_ext/setup.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, print_function
+
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('swig_ext',parent_package,top_path)
diff --git a/numpy/distutils/tests/swig_ext/tests/test_example.py b/numpy/distutils/tests/swig_ext/tests/test_example.py
index 9afc01cb2..3164c0831 100644
--- a/numpy/distutils/tests/swig_ext/tests/test_example.py
+++ b/numpy/distutils/tests/swig_ext/tests/test_example.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
from numpy.testing import *
from swig_ext import example
diff --git a/numpy/distutils/tests/swig_ext/tests/test_example2.py b/numpy/distutils/tests/swig_ext/tests/test_example2.py
index 42d1fcbcd..82daed728 100644
--- a/numpy/distutils/tests/swig_ext/tests/test_example2.py
+++ b/numpy/distutils/tests/swig_ext/tests/test_example2.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
from numpy.testing import *
from swig_ext import example2
diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py
new file mode 100644
index 000000000..0931f749b
--- /dev/null
+++ b/numpy/distutils/tests/test_exec_command.py
@@ -0,0 +1,92 @@
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+from tempfile import TemporaryFile
+
+from numpy.distutils import exec_command
+
+# In Python 3, stdout and stderr are text (unicode-compliant) streams, so
+# emulate them with StringIO from the io module.
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+class redirect_stdout(object):
+ """Context manager to redirect stdout for exec_command test."""
+ def __init__(self, stdout=None):
+ self._stdout = stdout or sys.stdout
+
+ def __enter__(self):
+ self.old_stdout = sys.stdout
+ sys.stdout = self._stdout
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self._stdout.flush()
+ sys.stdout = self.old_stdout
+ # note: sys.stdout was restored above; this closes only the replacement.
+ self._stdout.close()
+
+class redirect_stderr(object):
+ """Context manager to redirect stderr for exec_command test."""
+ def __init__(self, stderr=None):
+ self._stderr = stderr or sys.stderr
+
+ def __enter__(self):
+ self.old_stderr = sys.stderr
+ sys.stderr = self._stderr
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self._stderr.flush()
+ sys.stderr = self.old_stderr
+ # note: sys.stderr was restored above; this closes only the replacement.
+ self._stderr.close()
+
+class emulate_nonposix(object):
+ """Context manager to emulate os.name != 'posix' """
+ def __init__(self, osname='non-posix'):
+ self._new_name = osname
+
+ def __enter__(self):
+ self._old_name = os.name
+ os.name = self._new_name
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ os.name = self._old_name
+
+
+def test_exec_command_stdout():
+ # Regression test for gh-2999 and gh-2915.
+ # There are several packages (nose, scipy.weave.inline, Sage inline
+ # Fortran) that replace stdout, in which case it doesn't have a fileno
+ # method. This is tested here, with a do-nothing command that fails if the
+ # presence of fileno() is assumed in exec_command.
+
+ # The code has a special case for posix systems, so if we are on posix test
+ # both that the special case works and that the generic code works.
+
+ # Test posix version:
+ with redirect_stdout(StringIO()):
+ with redirect_stderr(TemporaryFile()):
+ exec_command.exec_command("cd '.'")
+
+ if os.name == 'posix':
+ # Test general (non-posix) version:
+ with emulate_nonposix():
+ with redirect_stdout(StringIO()):
+ with redirect_stderr(TemporaryFile()):
+ exec_command.exec_command("cd '.'")
+
+def test_exec_command_stderr():
+ # Test posix version:
+ with redirect_stdout(TemporaryFile(mode='w+')):
+ with redirect_stderr(StringIO()):
+ exec_command.exec_command("cd '.'")
+
+ if os.name == 'posix':
+ # Test general (non-posix) version:
+ with emulate_nonposix():
+ with redirect_stdout(TemporaryFile()):
+ with redirect_stderr(StringIO()):
+ exec_command.exec_command("cd '.'")
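
The failure these tests pin down is easy to reproduce: a replaced stdout such as ``StringIO`` has no usable ``fileno()``, which is exactly what ``exec_command`` must not assume (a sketch, Python 3 spellings)::

    import io
    import sys

    sys.stdout = io.StringIO()   # what nose and friends effectively do
    try:
        sys.stdout.fileno()      # io.StringIO raises here
    except io.UnsupportedOperation:
        pass                     # the case exec_command has to survive
    finally:
        sys.stdout = sys.__stdout__
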
diff --git a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py
index 6a36fb160..a0d191819 100644
--- a/numpy/distutils/tests/test_fcompiler_gnu.py
+++ b/numpy/distutils/tests/test_fcompiler_gnu.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
import numpy.distutils.fcompiler
@@ -17,6 +19,7 @@ gfortran_version_strings = [
('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'),
('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'),
('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'),
+ ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'),
]
class TestG77Versions(TestCase):
diff --git a/numpy/distutils/tests/test_fcompiler_intel.py b/numpy/distutils/tests/test_fcompiler_intel.py
index ad03daeea..eda209ebe 100644
--- a/numpy/distutils/tests/test_fcompiler_intel.py
+++ b/numpy/distutils/tests/test_fcompiler_intel.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
import numpy.distutils.fcompiler
diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py
index 448800b68..33b6b1213 100644
--- a/numpy/distutils/tests/test_misc_util.py
+++ b/numpy/distutils/tests/test_misc_util.py
@@ -1,7 +1,9 @@
#!/usr/bin/env python
+from __future__ import division, absolute_import, print_function
from numpy.testing import *
-from numpy.distutils.misc_util import appendpath, minrelpath, gpaths, rel_path
+from numpy.distutils.misc_util import appendpath, minrelpath, \
+ gpaths, get_shared_lib_extension
from os.path import join, sep, dirname
ajoin = lambda *paths: join(*((sep,)+paths))
@@ -49,10 +51,25 @@ class TestGpaths(TestCase):
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__),'..'))
ls = gpaths('command/*.py', local_path)
- assert_(join(local_path,'command','build_src.py') in ls,`ls`)
+ assert_(join(local_path,'command','build_src.py') in ls,repr(ls))
f = gpaths('system_info.py', local_path)
- assert_(join(local_path,'system_info.py')==f[0],`f`)
+ assert_(join(local_path,'system_info.py')==f[0],repr(f))
+class TestSharedExtension(TestCase):
+
+ def test_get_shared_lib_extension(self):
+ import sys
+ ext = get_shared_lib_extension(is_python_ext=False)
+ if sys.platform.startswith('linux'):
+ assert_equal(ext, '.so')
+ elif sys.platform.startswith('gnukfreebsd'):
+ assert_equal(ext, '.so')
+ elif sys.platform.startswith('darwin'):
+ assert_equal(ext, '.dylib')
+ elif sys.platform.startswith('win'):
+ assert_equal(ext, '.dll')
+ # just check for no crash
+ assert_(get_shared_lib_extension(is_python_ext=True))
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py
index 6122e303b..5443ece48 100644
--- a/numpy/distutils/tests/test_npy_pkg_config.py
+++ b/numpy/distutils/tests/test_npy_pkg_config.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
from tempfile import mkstemp
diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py
index dfc5a676f..95b676ecf 100644
--- a/numpy/distutils/unixccompiler.py
+++ b/numpy/distutils/unixccompiler.py
@@ -1,6 +1,8 @@
"""
unixccompiler - can handle very long argument lists for ar.
+
"""
+from __future__ import division, absolute_import, print_function
import os
@@ -10,7 +12,7 @@ from numpy.distutils.ccompiler import replace_method
from numpy.distutils.compat import get_exception
if sys.version_info[0] < 3:
- import log
+ from . import log
else:
from numpy.distutils import log
diff --git a/numpy/doc/__init__.py b/numpy/doc/__init__.py
index 6589b5492..b6f1fa71c 100644
--- a/numpy/doc/__init__.py
+++ b/numpy/doc/__init__.py
@@ -1,10 +1,11 @@
+from __future__ import division, absolute_import, print_function
+
import os
ref_dir = os.path.join(os.path.dirname(__file__))
-__all__ = [f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and
- not f.startswith('__')]
-__all__.sort()
+__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and
+ not f.startswith('__'))
for f in __all__:
__import__(__name__ + '.' + f)
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
index 97e982204..75f0995d8 100644
--- a/numpy/doc/basics.py
+++ b/numpy/doc/basics.py
@@ -9,32 +9,39 @@ Array types and conversions between types
Numpy supports a much greater variety of numerical types than Python does.
This section shows which are available, and how to modify an array's data-type.
-========== =========================================================
+========== ==========================================================
Data type Description
-========== =========================================================
-bool Boolean (True or False) stored as a byte
-int Platform integer (normally either ``int32`` or ``int64``)
+========== ==========================================================
+bool_ Boolean (True or False) stored as a byte
+int_ Default integer type (same as C ``long``; normally either
+ ``int64`` or ``int32``)
+intc Identical to C ``int`` (normally ``int32`` or ``int64``)
+intp Integer used for indexing (same as C ``ssize_t``; normally
+ either ``int32`` or ``int64``)
int8 Byte (-128 to 127)
int16 Integer (-32768 to 32767)
int32 Integer (-2147483648 to 2147483647)
-int64 Integer (9223372036854775808 to 9223372036854775807)
+int64 Integer (-9223372036854775808 to 9223372036854775807)
uint8 Unsigned integer (0 to 255)
uint16 Unsigned integer (0 to 65535)
uint32 Unsigned integer (0 to 4294967295)
uint64 Unsigned integer (0 to 18446744073709551615)
-float Shorthand for ``float64``.
+float_ Shorthand for ``float64``.
float16 Half precision float: sign bit, 5 bits exponent,
10 bits mantissa
float32 Single precision float: sign bit, 8 bits exponent,
23 bits mantissa
float64 Double precision float: sign bit, 11 bits exponent,
52 bits mantissa
-complex Shorthand for ``complex128``.
+complex_ Shorthand for ``complex128``.
complex64 Complex number, represented by two 32-bit floats (real
and imaginary components)
complex128 Complex number, represented by two 64-bit floats (real
and imaginary components)
-========== =========================================================
+========== ==========================================================
+
+In addition to ``intc``, the platform-dependent C integer types ``short``,
+``long``, ``longlong`` and their unsigned versions are defined.
Numpy numerical types are instances of ``dtype`` (data-type) objects, each
having unique characteristics. Once you have imported NumPy using
@@ -43,7 +50,7 @@ having unique characteristics. Once you have imported NumPy using
>>> import numpy as np
-the dtypes are available as ``np.bool``, ``np.float32``, etc.
+the dtypes are available as ``np.bool_``, ``np.float32``, etc.
Advanced types, not listed in the table above, are explored in
section :ref:`structured_arrays`.
@@ -90,9 +97,9 @@ the type itself as a function. For example: ::
array([0, 1, 2], dtype=int8)
Note that, above, we use the *Python* float object as a dtype. NumPy knows
-that ``int`` refers to ``np.int``, ``bool`` means ``np.bool`` and
-that ``float`` is ``np.float``. The other data-types do not have Python
-equivalents.
+that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``,
+that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``.
+The other data-types do not have Python equivalents.
To determine the type of an array, look at the dtype attribute::
@@ -136,3 +143,5 @@ value is inside an array or not. NumPy scalars also have many of the same
methods arrays do.
"""
+from __future__ import division, absolute_import, print_function
+
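
The renamed aliases in the table can be checked interactively; the sizes shown below are for a typical 64-bit Linux build and are platform-dependent::

    >>> import numpy as np
    >>> np.dtype(np.int_)    # same as C long
    dtype('int64')
    >>> np.dtype(np.intc)    # same as C int
    dtype('int32')
    >>> np.dtype(np.intp)    # same as C ssize_t, used for indexing
    dtype('int64')
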
diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py
index 7b6179663..3826a70b3 100644
--- a/numpy/doc/broadcasting.py
+++ b/numpy/doc/broadcasting.py
@@ -175,3 +175,5 @@ See `this article <http://www.scipy.org/EricsBroadcastingDoc>`_
for illustrations of broadcasting concepts.
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/byteswapping.py b/numpy/doc/byteswapping.py
index 23e7d7f6e..7baa648a1 100644
--- a/numpy/doc/byteswapping.py
+++ b/numpy/doc/byteswapping.py
@@ -1,4 +1,4 @@
-'''
+"""
=============================
Byteswapping and byte order
@@ -134,4 +134,6 @@ the previous operations:
>>> swapped_end_arr.tostring() == big_end_str
False
-'''
+"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/constants.py b/numpy/doc/constants.py
index 722147dd8..36f94d307 100644
--- a/numpy/doc/constants.py
+++ b/numpy/doc/constants.py
@@ -10,6 +10,8 @@ Numpy includes several constants:
#
# Note: the docstring is autogenerated.
#
+from __future__ import division, absolute_import, print_function
+
import textwrap, re
# Maintain same format as in numpy.add_newdocs
diff --git a/numpy/doc/creation.py b/numpy/doc/creation.py
index 9a204e252..e00c26e54 100644
--- a/numpy/doc/creation.py
+++ b/numpy/doc/creation.py
@@ -106,9 +106,9 @@ check the last section as well)
HDF5: PyTables
FITS: PyFITS
-Examples of formats that cannot be read directly but for which it is not hard
-to convert are libraries like PIL (able to read and write many image formats
-such as jpg, png, etc).
+Examples of formats that cannot be read directly but which are not hard to
+convert are those formats supported by libraries like PIL (able to read and
+write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
@@ -141,3 +141,5 @@ random values, and some utility functions to generate special matrices (e.g.
diagonal).
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py
index 883100491..b9053cc07 100644
--- a/numpy/doc/glossary.py
+++ b/numpy/doc/glossary.py
@@ -415,3 +415,5 @@ Glossary
and f2py (which wraps Fortran).
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/howtofind.py b/numpy/doc/howtofind.py
index 29ad05318..89f8ac9b9 100644
--- a/numpy/doc/howtofind.py
+++ b/numpy/doc/howtofind.py
@@ -7,3 +7,5 @@ How to Find Stuff
How to find things in NumPy.
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/indexing.py b/numpy/doc/indexing.py
index 99def8889..997aee1b7 100644
--- a/numpy/doc/indexing.py
+++ b/numpy/doc/indexing.py
@@ -405,3 +405,5 @@ converted to an array as a list would be. As an example: ::
40
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/internals.py b/numpy/doc/internals.py
index a74429368..73ec51332 100644
--- a/numpy/doc/internals.py
+++ b/numpy/doc/internals.py
@@ -160,3 +160,5 @@ when accessing elements of an array. Granted, it goes against the grain, but
it is more in line with Python semantics and the natural order of the data.
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/io.py b/numpy/doc/io.py
index 3cde40bd0..aeffdaca0 100644
--- a/numpy/doc/io.py
+++ b/numpy/doc/io.py
@@ -7,3 +7,5 @@ Array I/O
Placeholder for array I/O documentation.
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/jargon.py b/numpy/doc/jargon.py
index e13ff5686..56b25ad7a 100644
--- a/numpy/doc/jargon.py
+++ b/numpy/doc/jargon.py
@@ -7,3 +7,5 @@ Jargon
Placeholder for computer science, engineering and other jargon.
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/methods_vs_functions.py b/numpy/doc/methods_vs_functions.py
index 22eadccf7..89c26b841 100644
--- a/numpy/doc/methods_vs_functions.py
+++ b/numpy/doc/methods_vs_functions.py
@@ -7,3 +7,5 @@ Methods vs. Functions
Placeholder for Methods vs. Functions documentation.
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py
index 8fa3f8a31..79c16a56e 100644
--- a/numpy/doc/misc.py
+++ b/numpy/doc/misc.py
@@ -226,3 +226,5 @@ Interfacing to C++:
5) SIP (used mainly in PyQT)
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/performance.py b/numpy/doc/performance.py
index 1429e232f..6b0b55042 100644
--- a/numpy/doc/performance.py
+++ b/numpy/doc/performance.py
@@ -7,3 +7,5 @@ Performance
Placeholder for Improving Performance documentation.
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
index af777efa4..304184d0d 100644
--- a/numpy/doc/structured_arrays.py
+++ b/numpy/doc/structured_arrays.py
@@ -191,12 +191,11 @@ Notice that `x` is created with a list of tuples. ::
(1.0, [[2.0, 6.0], [2.0, 6.0]])],
dtype=[('x', '<f4'), ('value', '<f4', (2, 2))])
-Notice that the fields are always returned in the same order regardless of
-the sequence they are asked for. ::
+The fields are returned in the order they are asked for. ::
>>> x[['y','x']]
- array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)],
- dtype=[('x', '<f4'), ('y', '<f4')])
+ array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)],
+ dtype=[('y', '<f4'), ('x', '<f4')])
Filling structured arrays
=========================
@@ -221,3 +220,5 @@ You can find some more information on recarrays and structured arrays
<http://www.scipy.org/Cookbook/Recarray>`_.
"""
+from __future__ import division, absolute_import, print_function
+
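
A self-contained version of the corrected multi-field doctest, with illustrative values::

    >>> import numpy as np
    >>> x = np.array([(1.5, 2.5), (3.0, 4.0)],
    ...              dtype=[('x', '<f4'), ('y', '<f4')])
    >>> x[['y', 'x']]    # fields come back in the requested order
    array([(2.5, 1.5), (4.0, 3.0)],
          dtype=[('y', '<f4'), ('x', '<f4')])
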
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
index de0338060..31bf6e211 100644
--- a/numpy/doc/subclassing.py
+++ b/numpy/doc/subclassing.py
@@ -557,3 +557,5 @@ how this can work, have a look at the ``memmap`` class in
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py
index e85b47763..0b94e0537 100644
--- a/numpy/doc/ufuncs.py
+++ b/numpy/doc/ufuncs.py
@@ -135,3 +135,5 @@ results in an error. There are two alternatives:
a convenient way to apply these operators.
"""
+from __future__ import division, absolute_import, print_function
+
diff --git a/numpy/dual.py b/numpy/dual.py
index 4aa01cd30..00bdcb395 100644
--- a/numpy/dual.py
+++ b/numpy/dual.py
@@ -10,6 +10,8 @@ Numpy.
.. _Scipy : http://www.scipy.org
"""
+from __future__ import division, absolute_import, print_function
+
# This module should be used for functions both in numpy and scipy if
# you want to use the numpy version if available but the scipy version
# otherwise.
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
index 220cb3d87..26a63daa8 100644
--- a/numpy/f2py/__init__.py
+++ b/numpy/f2py/__init__.py
@@ -1,16 +1,17 @@
#!/usr/bin/env python
+from __future__ import division, absolute_import, print_function
__all__ = ['run_main','compile','f2py_testing']
import os
import sys
-import commands
+import subprocess
-import f2py2e
-import f2py_testing
-import diagnose
+from . import f2py2e
+from . import f2py_testing
+from . import diagnose
-from info import __doc__
+from .info import __doc__
run_main = f2py2e.run_main
main = f2py2e.main
diff --git a/numpy/f2py/__version__.py b/numpy/f2py/__version__.py
index 104c2e1a8..49a2199bf 100644
--- a/numpy/f2py/__version__.py
+++ b/numpy/f2py/__version__.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
major = 2
try:
diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py
index a12d92b7e..e835090f7 100644
--- a/numpy/f2py/auxfuncs.py
+++ b/numpy/f2py/auxfuncs.py
@@ -12,16 +12,19 @@ terms of the NumPy (BSD style) LICENSE.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/24 19:01:55 $
Pearu Peterson
-"""
-__version__ = "$Revision: 1.65 $"[10:-1]
-import __version__
-f2py_version = __version__.version
+"""
+from __future__ import division, absolute_import, print_function
import pprint
import sys
import types
-import cfuncs
+from functools import reduce
+
+from . import __version__
+from . import cfuncs
+
+f2py_version = __version__.version
errmess=sys.stderr.write
@@ -32,8 +35,6 @@ options={}
debugoptions=[]
wrapfuncs = 1
-if sys.version_info[0] >= 3:
- from functools import reduce
def outmess(t):
if options.get('verbose',1):
@@ -447,7 +448,7 @@ class throw_error:
self.mess = mess
def __call__(self,var):
mess = '\n\n var = %s\n Message: %s\n' % (var,self.mess)
- raise F2PYError,mess
+ raise F2PYError(mess)
def l_and(*f):
l,l2='lambda v',[]
@@ -490,19 +491,19 @@ def getmultilineblock(rout,blockname,comment=1,counter=0):
except KeyError:
return
if not r: return
- if counter>0 and type(r) is type(''):
+ if counter > 0 and isinstance(r, str):
return
- if type(r) is type([]):
+ if isinstance(r, list):
if counter>=len(r): return
r = r[counter]
if r[:3]=="'''":
if comment:
- r = '\t/* start ' + blockname + ' multiline ('+`counter`+') */\n' + r[3:]
+ r = '\t/* start ' + blockname + ' multiline ('+repr(counter)+') */\n' + r[3:]
else:
r = r[3:]
if r[-3:]=="'''":
if comment:
- r = r[:-3] + '\n\t/* end multiline ('+`counter`+')*/'
+ r = r[:-3] + '\n\t/* end multiline ('+repr(counter)+')*/'
else:
r = r[:-3]
else:
@@ -519,7 +520,7 @@ def getcallprotoargument(rout,cb_map={}):
if hascallstatement(rout):
outmess('warning: callstatement is defined without callprotoargument\n')
return
- from capi_maps import getctype
+ from .capi_maps import getctype
arg_types,arg_types2 = [],[]
if l_and(isstringfunction,l_not(isfunction_wrap))(rout):
arg_types.extend(['char*','size_t'])
@@ -597,7 +598,7 @@ def gentitle(name):
return '/*%s %s %s*/'%(l*'*',name,l*'*')
def flatlist(l):
- if type(l)==types.ListType:
+ if isinstance(l, list):
return reduce(lambda x,y,f=flatlist:x+f(y),l,[])
return [l]
@@ -606,25 +607,25 @@ def stripcomma(s):
return s
def replace(str,d,defaultsep=''):
- if type(d)==types.ListType:
- return map(lambda d,f=replace,sep=defaultsep,s=str:f(s,d,sep),d)
- if type(str)==types.ListType:
- return map(lambda s,f=replace,sep=defaultsep,d=d:f(s,d,sep),str)
- for k in 2*d.keys():
+ if isinstance(d, list):
+ return [replace(str, _m, defaultsep) for _m in d]
+ if isinstance(str, list):
+ return [replace(_m, d, defaultsep) for _m in str]
+ for k in 2*list(d.keys()):
if k=='separatorsfor':
continue
if 'separatorsfor' in d and k in d['separatorsfor']:
sep=d['separatorsfor'][k]
else:
sep=defaultsep
- if type(d[k])==types.ListType:
+ if isinstance(d[k], list):
str=str.replace('#%s#'%(k),sep.join(flatlist(d[k])))
else:
str=str.replace('#%s#'%(k),d[k])
return str
def dictappend(rd,ar):
- if type(ar)==types.ListType:
+ if isinstance(ar, list):
for a in ar:
rd=dictappend(rd,a)
return rd
@@ -632,15 +633,15 @@ def dictappend(rd,ar):
if k[0]=='_':
continue
if k in rd:
- if type(rd[k])==str:
+ if isinstance(rd[k], str):
rd[k]=[rd[k]]
- if type(rd[k])==types.ListType:
- if type(ar[k])==types.ListType:
+ if isinstance(rd[k], list):
+ if isinstance(ar[k], list):
rd[k]=rd[k]+ar[k]
else:
rd[k].append(ar[k])
- elif type(rd[k])==types.DictType:
- if type(ar[k])==types.DictType:
+ elif isinstance(rd[k], dict):
+ if isinstance(ar[k], dict):
if k=='separatorsfor':
for k1 in ar[k].keys():
if k1 not in rd[k]:
@@ -653,7 +654,7 @@ def dictappend(rd,ar):
def applyrules(rules,d,var={}):
ret={}
- if type(rules)==types.ListType:
+ if isinstance(rules, list):
for r in rules:
rr=applyrules(r,d,var)
ret=dictappend(ret,rr)
@@ -670,9 +671,9 @@ def applyrules(rules,d,var={}):
for k in rules.keys():
if k=='separatorsfor':
ret[k]=rules[k]; continue
- if type(rules[k])==str:
+ if isinstance(rules[k], str):
ret[k]=replace(rules[k],d)
- elif type(rules[k])==types.ListType:
+ elif isinstance(rules[k], list):
ret[k]=[]
for i in rules[k]:
ar=applyrules({k:i},d,var)
@@ -680,13 +681,13 @@ def applyrules(rules,d,var={}):
ret[k].append(ar[k])
elif k[0]=='_':
continue
- elif type(rules[k])==types.DictType:
+ elif isinstance(rules[k], dict):
ret[k]=[]
for k1 in rules[k].keys():
- if type(k1)==types.FunctionType and k1(var):
- if type(rules[k][k1])==types.ListType:
+ if isinstance(k1, types.FunctionType) and k1(var):
+ if isinstance(rules[k][k1], list):
for i in rules[k][k1]:
- if type(i)==types.DictType:
+ if isinstance(i, dict):
res=applyrules({'supertext':i},d,var)
if 'supertext' in res:
i=res['supertext']
@@ -694,15 +695,15 @@ def applyrules(rules,d,var={}):
ret[k].append(replace(i,d))
else:
i=rules[k][k1]
- if type(i)==types.DictType:
+ if isinstance(i, dict):
res=applyrules({'supertext':i},d)
if 'supertext' in res:
i=res['supertext']
else: i=''
ret[k].append(replace(i,d))
else:
- errmess('applyrules: ignoring rule %s.\n'%`rules[k]`)
- if type(ret[k])==types.ListType:
+ errmess('applyrules: ignoring rule %s.\n'%repr(rules[k]))
+ if isinstance(ret[k], list):
if len(ret[k])==1:
ret[k]=ret[k][0]
if ret[k]==[]:
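
The recurring rewrite in this file, ``type(x)==types.ListType`` to ``isinstance(x, list)``, shown in isolation (Python 3 removes ``types.ListType`` altogether); this sketch mirrors the ``flatlist`` helper above::

    from functools import reduce

    def flatlist(l):
        # isinstance works on both Python 2 and 3 and accepts subclasses;
        # types.ListType exists only on Python 2.
        if isinstance(l, list):
            return reduce(lambda x, y: x + flatlist(y), l, [])
        return [l]

    assert flatlist([1, [2, [3]], 4]) == [1, 2, 3, 4]
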
diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py
index 7fd1179c3..41bca45d2 100644
--- a/numpy/f2py/capi_maps.py
+++ b/numpy/f2py/capi_maps.py
@@ -9,20 +9,22 @@ terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 10:57:33 $
Pearu Peterson
+
"""
+from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.60 $"[10:-1]
-import __version__
+from . import __version__
f2py_version = __version__.version
import copy
import re
import os
import sys
-from auxfuncs import *
-from crackfortran import markoutercomma
-import cb_rules
+from .auxfuncs import *
+from .crackfortran import markoutercomma
+from . import cb_rules
# Numarray and Numeric users should set this False
using_newcore = True
@@ -190,9 +192,9 @@ if os.path.isfile('.f2py_f2cmap'):
f2cmap_all[k][k1] = d[k][k1]
outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k,k1,d[k][k1]))
else:
- errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k,k1,d[k][k1],d[k][k1],c2py_map.keys()))
+ errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k,k1,d[k][k1],d[k][k1],list(c2py_map.keys())))
outmess('Succesfully applied user defined changes from .f2py_f2cmap\n')
- except Exception, msg:
+ except Exception as msg:
errmess('Failed to apply user defined changes from .f2py_f2cmap: %s. Skipping.\n' % (msg))
cformat_map={'double':'%g',
'float':'%g',
@@ -271,7 +273,7 @@ def getstrlength(var):
else:
errmess('getstrlength: function %s has no return value?!\n'%a)
if not isstring(var):
- errmess('getstrlength: expected a signature of a string but got: %s\n'%(`var`))
+ errmess('getstrlength: expected a signature of a string but got: %s\n'%(repr(var)))
len='1'
if 'charselector' in var:
a=var['charselector']
@@ -282,7 +284,7 @@ def getstrlength(var):
if re.match(r'\(\s*([*]|[:])\s*\)',len) or re.match(r'([*]|[:])',len):
#if len in ['(*)','*','(:)',':']:
if isintent_hide(var):
- errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n'%(`var`))
+ errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n'%(repr(var)))
len='-1'
return len
@@ -302,11 +304,11 @@ def getarrdims(a,var,verbose=0):
# var['dimension'].reverse()
dim=copy.copy(var['dimension'])
ret['size']='*'.join(dim)
- try: ret['size']=`eval(ret['size'])`
+ try: ret['size']=repr(eval(ret['size']))
except: pass
ret['dims']=','.join(dim)
- ret['rank']=`len(dim)`
- ret['rank*[-1]']=`len(dim)*[-1]`[1:-1]
+ ret['rank']=repr(len(dim))
+ ret['rank*[-1]']=repr(len(dim)*[-1])[1:-1]
for i in range(len(dim)): # solve dim for dependecies
v=[]
if dim[i] in depargs: v=[dim[i]]
@@ -334,7 +336,7 @@ def getarrdims(a,var,verbose=0):
% (d))
ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'],i,0)
elif verbose :
- errmess('getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n'%(`a`,`d`))
+ errmess('getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n'%(repr(a),repr(d)))
if ret['cbsetdims']: ret['cbsetdims']=ret['cbsetdims'][:-1]
# if not isintent_c(var):
# var['dimension'].reverse()
@@ -367,26 +369,26 @@ def getpydocsign(a,var):
if hasinitvalue(var):
init,showinit=getinit(a,var)
- init='= %s'%(showinit)
+ init = ', optional\\n Default: %s' % showinit
if isscalar(var):
if isintent_inout(var):
- sig='%s :%s %s rank-0 array(%s,\'%s\')'%(a,init,opt,c2py_map[ctype],
- c2pycode_map[ctype],)
+ sig='%s : %s rank-0 array(%s,\'%s\')%s'%(a,opt,c2py_map[ctype],
+ c2pycode_map[ctype], init)
else:
- sig='%s :%s %s %s'%(a,init,opt,c2py_map[ctype])
+ sig='%s : %s %s%s'%(a,opt,c2py_map[ctype],init)
sigout='%s : %s'%(out_a,c2py_map[ctype])
elif isstring(var):
if isintent_inout(var):
- sig='%s :%s %s rank-0 array(string(len=%s),\'c\')'%(a,init,opt,getstrlength(var))
+ sig='%s : %s rank-0 array(string(len=%s),\'c\')%s'%(a,opt,getstrlength(var),init)
else:
- sig='%s :%s %s string(len=%s)'%(a,init,opt,getstrlength(var))
+ sig='%s : %s string(len=%s)%s'%(a,opt,getstrlength(var),init)
sigout='%s : string(len=%s)'%(out_a,getstrlength(var))
elif isarray(var):
dim=var['dimension']
- rank=`len(dim)`
- sig='%s :%s %s rank-%s array(\'%s\') with bounds (%s)'%(a,init,opt,rank,
+ rank=repr(len(dim))
+ sig='%s : %s rank-%s array(\'%s\') with bounds (%s)%s'%(a,opt,rank,
c2pycode_map[ctype],
- ','.join(dim))
+ ','.join(dim), init)
if a==out_a:
sigout='%s : rank-%s array(\'%s\') with bounds (%s)'\
%(a,rank,c2pycode_map[ctype],','.join(dim))
@@ -414,7 +416,7 @@ def getarrdocsign(a,var):
c2pycode_map[ctype],)
elif isarray(var):
dim=var['dimension']
- rank=`len(dim)`
+ rank=repr(len(dim))
sig='%s : rank-%s array(\'%s\') with bounds (%s)'%(a,rank,
c2pycode_map[ctype],
','.join(dim))
@@ -489,7 +491,7 @@ def sign2map(a,var):
ret['cblatexdocstr']=lcb2_map[lcb_map[a]]['latexdocstr']
else:
ret['cbname']=a
- errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a,lcb_map.keys()))
+ errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a,list(lcb_map.keys())))
if isstring(var):
ret['length']=getstrlength(var)
if isarray(var):
@@ -588,7 +590,7 @@ def routsign2map(rout):
#else:
# errmess('routsign2map: cb_map does not contain module "%s" used in "use" statement.\n'%(u))
elif 'externals' in rout and rout['externals']:
- errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n'%(ret['name'],`rout['externals']`))
+ errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n'%(ret['name'],repr(rout['externals'])))
ret['callprotoargument'] = getcallprotoargument(rout,lcb_map) or ''
if isfunction(rout):
if 'result' in rout:
@@ -605,7 +607,7 @@ def routsign2map(rout):
ret['rformat']=c2buildvalue_map[ret['ctype']]
else:
ret['rformat']='O'
- errmess('routsign2map: no c2buildvalue key for type %s\n'%(`ret['ctype']`))
+ errmess('routsign2map: no c2buildvalue key for type %s\n'%(repr(ret['ctype'])))
if debugcapi(rout):
if ret['ctype'] in cformat_map:
ret['routdebugshowvalue']='debug-capi:%s=%s'%(a,cformat_map[ret['ctype']])
@@ -614,7 +616,7 @@ def routsign2map(rout):
if isstringfunction(rout):
ret['rlength']=getstrlength(rout['vars'][a])
if ret['rlength']=='-1':
- errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n'%(`rout['name']`))
+ errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n'%(repr(rout['name'])))
ret['rlength']='10'
if hasnote(rout):
ret['note']=rout['note']
@@ -742,8 +744,8 @@ void
nofargs=nofargs+1
if isoptional(var):
nofoptargs=nofoptargs+1
- ret['maxnofargs']=`nofargs`
- ret['nofoptargs']=`nofoptargs`
+ ret['maxnofargs']=repr(nofargs)
+ ret['nofoptargs']=repr(nofoptargs)
if hasnote(rout) and isfunction(rout) and 'result' in rout:
ret['routnote']=rout['note']
rout['note']=['See elsewhere.']
diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py
index 8e8320bfd..f80ab06f0 100644
--- a/numpy/f2py/cb_rules.py
+++ b/numpy/f2py/cb_rules.py
@@ -11,23 +11,23 @@ terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/20 11:27:58 $
Pearu Peterson
+
"""
+from __future__ import division, absolute_import, print_function
-__version__ = "$Revision: 1.53 $"[10:-1]
+import pprint
+import sys
-import __version__
-f2py_version = __version__.version
+from . import __version__
+from .auxfuncs import *
+from . import cfuncs
+f2py_version = __version__.version
-import pprint
-import sys
-import types
errmess=sys.stderr.write
outmess=sys.stdout.write
show=pprint.pprint
-from auxfuncs import *
-import cfuncs
################## Rules for callback function ##############
@@ -414,7 +414,7 @@ def buildcallbacks(m):
def buildcallback(rout,um):
global cb_map
- import capi_maps
+ from . import capi_maps
outmess('\tConstructing call-back function "cb_%s_in_%s"\n'%(rout['name'],um))
args,depargs=getargs(rout)
@@ -466,7 +466,7 @@ def buildcallback(rout,um):
if '_break' in r:
break
if 'args' in rd and 'optargs' in rd:
- if type(rd['optargs'])==type([]):
+ if isinstance(rd['optargs'], list):
rd['optargs']=rd['optargs']+["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
@@ -482,7 +482,7 @@ def buildcallback(rout,um):
,
#endif
"""]
- if type(rd['docreturn'])==types.ListType:
+ if isinstance(rd['docreturn'], list):
rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))
optargs=stripcomma(replace('#docsignopt#',
{'docsignopt':rd['docsignopt']}
@@ -499,10 +499,10 @@ def buildcallback(rout,um):
rd['docstrsigns']=[]
rd['latexdocstrsigns']=[]
for k in ['docstrreq','docstropt','docstrout','docstrcbs']:
- if k in rd and type(rd[k])==types.ListType:
+ if k in rd and isinstance(rd[k], list):
rd['docstrsigns']=rd['docstrsigns']+rd[k]
k='latex'+k
- if k in rd and type(rd[k])==types.ListType:
+ if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
['\\begin{description}']+rd[k][1:]+\
['\\end{description}']
@@ -515,7 +515,7 @@ def buildcallback(rout,um):
ar=applyrules(cb_routine_rules,rd)
cfuncs.callbacks[rd['name']]=ar['body']
- if type(ar['need'])==str:
+ if isinstance(ar['need'], str):
ar['need']=[ar['need']]
if 'need' in rd:
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index 72ee4f7a4..2229c3e24 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -12,17 +12,17 @@ terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 11:42:34 $
Pearu Peterson
-"""
-
-__version__ = "$Revision: 1.75 $"[10:-1]
-import __version__
-f2py_version = __version__.version
+"""
+from __future__ import division, absolute_import, print_function
-import types
import sys
import copy
-errmess=sys.stderr.write
+
+from . import __version__
+
+f2py_version = __version__.version
+errmess = sys.stderr.write
##################### Definitions ##################
@@ -1115,7 +1115,7 @@ capi_fail:
"""
def buildcfuncs():
- from capi_maps import c2capi_map
+ from .capi_maps import c2capi_map
for k in c2capi_map.keys():
m='pyarr_from_p_%s1'%k
cppmacros[m]='#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))'%(m,c2capi_map[k])
@@ -1128,10 +1128,10 @@ def buildcfuncs():
def append_needs(need,flag=1):
global outneeds,needs
- if type(need)==types.ListType:
+ if isinstance(need, list):
for n in need:
append_needs(n,flag)
- elif type(need)==str:
+ elif isinstance(need, str):
if not need: return
if need in includes0:
n = 'includes0'
@@ -1152,7 +1152,7 @@ def append_needs(need,flag=1):
elif need in commonhooks:
n = 'commonhooks'
else:
- errmess('append_needs: unknown need %s\n'%(`need`))
+ errmess('append_needs: unknown need %s\n'%(repr(need)))
return
if need in outneeds[n]: return
if flag:
@@ -1160,7 +1160,7 @@ def append_needs(need,flag=1):
if need in needs:
for nn in needs[need]:
t=append_needs(nn,0)
- if type(t)==types.DictType:
+ if isinstance(t, dict):
for nnn in t.keys():
if nnn in tmp:
tmp[nnn]=tmp[nnn]+t[nnn]
@@ -1176,7 +1176,7 @@ def append_needs(need,flag=1):
if need in needs:
for nn in needs[need]:
t=append_needs(nn,flag)
- if type(t)==types.DictType:
+ if isinstance(t, dict):
for nnn in t.keys():
if nnn in tmp:
tmp[nnn]=t[nnn]+tmp[nnn]
@@ -1187,7 +1187,7 @@ def append_needs(need,flag=1):
tmp[n].append(need)
return tmp
else:
- errmess('append_needs: expected list or string but got :%s\n'%(`need`))
+ errmess('append_needs: expected list or string but got :%s\n'%(repr(need)))
def get_needs():
global outneeds,needs
@@ -1210,9 +1210,9 @@ def get_needs():
else:
out.append(outneeds[n][0])
del outneeds[n][0]
- if saveout and (0 not in map(lambda x,y:x==y,saveout,outneeds[n])) \
+ if saveout and (0 not in map(lambda x,y:x==y, saveout, outneeds[n])) \
and outneeds[n] != []:
- print n,saveout
+ print(n,saveout)
errmess('get_needs: no progress in sorting needs, probably circular dependence, skipping.\n')
out=out+saveout
break
diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py
index 3295676ef..dd44e3326 100644
--- a/numpy/f2py/common_rules.py
+++ b/numpy/f2py/common_rules.py
@@ -11,11 +11,13 @@ terms of the NumPy License
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 10:57:33 $
Pearu Peterson
+
"""
+from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.19 $"[10:-1]
-import __version__
+from . import __version__
f2py_version = __version__.version
import pprint
@@ -24,10 +26,10 @@ errmess=sys.stderr.write
outmess=sys.stdout.write
show=pprint.pprint
-from auxfuncs import *
-import capi_maps
-import func2subr
-from crackfortran import rmbadname
+from .auxfuncs import *
+from . import capi_maps
+from . import func2subr
+from .crackfortran import rmbadname
##############
def findcommonblocks(block,top=1):
@@ -94,7 +96,7 @@ def buildhooks(m):
cadd('\t{\"%s\",%s,{{%s}},%s},'%(n,dm['rank'],dms,at))
cadd('\t{NULL}\n};')
inames1 = rmbadname(inames)
- inames1_tps = ','.join(map(lambda s:'char *'+s,inames1))
+ inames1_tps = ','.join(['char *'+s for s in inames1])
cadd('static void f2py_setup_%s(%s) {'%(name,inames1_tps))
cadd('\tint i_f2py=0;')
for n in inames1:
@@ -119,7 +121,7 @@ def buildhooks(m):
dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n,vars[n])))
if hasnote(vars[n]):
note = vars[n]['note']
- if type(note) is type([]): note='\n'.join(note)
+ if isinstance(note, list): note='\n'.join(note)
dadd('--- %s'%(note))
dadd('\\end{description}')
ret['docs'].append('"\t/%s/ %s\\n"'%(name,','.join(map(lambda v,d:v+d,inames,idims))))
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 5d664e399..2ee8eb9a1 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
- Usage is explained in the comment block below.
Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
@@ -11,139 +10,136 @@ terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson
-"""
-__version__ = "$Revision: 1.177 $"[10:-1]
-import platform
-import __version__
-f2py_version = __version__.version
-"""
- Usage of crackfortran:
- ======================
- Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
- -m <module name for f77 routines>,--ignore-contains
- Functions: crackfortran, crack2fortran
- The following Fortran statements/constructions are supported
- (or will be if needed):
- block data,byte,call,character,common,complex,contains,data,
- dimension,double complex,double precision,end,external,function,
- implicit,integer,intent,interface,intrinsic,
- logical,module,optional,parameter,private,public,
- program,real,(sequence?),subroutine,type,use,virtual,
- include,pythonmodule
- Note: 'virtual' is mapped to 'dimension'.
- Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug).
- Note: code after 'contains' will be ignored until its scope ends.
- Note: 'common' statement is extended: dimensions are moved to variable definitions
- Note: f2py directive: <commentchar>f2py<line> is read as <line>
- Note: pythonmodule is introduced to represent Python module
-
- Usage:
- `postlist=crackfortran(files,funcs)`
- `postlist` contains declaration information read from the list of files `files`.
- `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
-
- `postlist` has the following structure:
- *** it is a list of dictionaries containing `blocks':
- B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
- 'implicit','externals','interfaced','common','sortvars',
- 'commonvars','note']}
- B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
- 'program' | 'block data' | 'type' | 'pythonmodule'
- B['body'] --- list containing `subblocks' with the same structure as `blocks'
- B['parent_block'] --- dictionary of a parent block:
- C['body'][<index>]['parent_block'] is C
- B['vars'] --- dictionary of variable definitions
- B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
- B['name'] --- name of the block (not if B['block']=='interface')
- B['prefix'] --- prefix string (only if B['block']=='function')
- B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
- B['result'] --- name of the return value (only if B['block']=='function')
- B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
- B['externals'] --- list of variables being external
- B['interfaced'] --- list of variables being external and defined
- B['common'] --- dictionary of common blocks (list of objects)
- B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
- B['from'] --- string showing the 'parents' of the current block
- B['use'] --- dictionary of modules used in current block:
- {<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
- B['note'] --- list of LaTeX comments on the block
- B['f2pyenhancements'] --- optional dictionary
- {'threadsafe':'','fortranname':<name>,
- 'callstatement':<C-expr>|<multi-line block>,
- 'callprotoargument':<C-expr-list>,
- 'usercode':<multi-line block>|<list of multi-line blocks>,
- 'pymethoddef:<multi-line block>'
- }
- B['entry'] --- dictionary {entryname:argslist,..}
- B['varnames'] --- list of variable names given in the order of reading the
- Fortran code, useful for derived types.
- B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
- *** Variable definition is a dictionary
- D = B['vars'][<variable name>] =
- {'typespec'[,'attrspec','kindselector','charselector','=','typename']}
- D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
- 'double precision' | 'integer' | 'logical' | 'real' | 'type'
- D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
- 'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
- 'optional','required', etc)
- K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
- 'complex' | 'integer' | 'logical' | 'real' )
- C = D['charselector'] = {['*','len','kind']}
- (only if D['typespec']=='character')
- D['='] --- initialization expression string
- D['typename'] --- name of the type if D['typespec']=='type'
- D['dimension'] --- list of dimension bounds
- D['intent'] --- list of intent specifications
- D['depend'] --- list of variable names on which current variable depends on
- D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
- D['note'] --- list of LaTeX comments on the variable
- *** Meaning of kind/char selectors (few examples):
- D['typespec>']*K['*']
- D['typespec'](kind=K['kind'])
- character*C['*']
- character(len=C['len'],kind=C['kind'])
- (see also fortran type declaration statement formats below)
-
- Fortran 90 type declaration statement format (F77 is subset of F90)
+
+Usage of crackfortran:
+======================
+Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
+ -m <module name for f77 routines>,--ignore-contains
+Functions: crackfortran, crack2fortran
+The following Fortran statements/constructions are supported
+(or will be if needed):
+ block data,byte,call,character,common,complex,contains,data,
+ dimension,double complex,double precision,end,external,function,
+ implicit,integer,intent,interface,intrinsic,
+ logical,module,optional,parameter,private,public,
+ program,real,(sequence?),subroutine,type,use,virtual,
+ include,pythonmodule
+Note: 'virtual' is mapped to 'dimension'.
+Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
+Note: code after 'contains' will be ignored until its scope ends.
+Note: 'common' statement is extended: dimensions are moved to variable definitions
+Note: f2py directive: <commentchar>f2py<line> is read as <line>
+Note: pythonmodule is introduced to represent Python module
+
+Usage:
+ `postlist=crackfortran(files,funcs)`
+ `postlist` contains declaration information read from the list of files `files`.
+ `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
+
+ `postlist` has the following structure:
+ *** it is a list of dictionaries containing `blocks':
+ B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
+ 'implicit','externals','interfaced','common','sortvars',
+ 'commonvars','note']}
+ B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
+ 'program' | 'block data' | 'type' | 'pythonmodule'
+ B['body'] --- list containing `subblocks' with the same structure as `blocks'
+ B['parent_block'] --- dictionary of a parent block:
+ C['body'][<index>]['parent_block'] is C
+ B['vars'] --- dictionary of variable definitions
+ B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
+ B['name'] --- name of the block (not if B['block']=='interface')
+ B['prefix'] --- prefix string (only if B['block']=='function')
+ B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
+ B['result'] --- name of the return value (only if B['block']=='function')
+ B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
+ B['externals'] --- list of variables being external
+ B['interfaced'] --- list of variables being external and defined
+ B['common'] --- dictionary of common blocks (list of objects)
+ B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
+ B['from'] --- string showing the 'parents' of the current block
+ B['use'] --- dictionary of modules used in current block:
+ {<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
+ B['note'] --- list of LaTeX comments on the block
+ B['f2pyenhancements'] --- optional dictionary
+ {'threadsafe':'','fortranname':<name>,
+ 'callstatement':<C-expr>|<multi-line block>,
+ 'callprotoargument':<C-expr-list>,
+ 'usercode':<multi-line block>|<list of multi-line blocks>,
+                          'pymethoddef':<multi-line block>
+ }
+ B['entry'] --- dictionary {entryname:argslist,..}
+ B['varnames'] --- list of variable names given in the order of reading the
+ Fortran code, useful for derived types.
+ B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
+ *** Variable definition is a dictionary
+ D = B['vars'][<variable name>] =
+ {'typespec'[,'attrspec','kindselector','charselector','=','typename']}
+ D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
+ 'double precision' | 'integer' | 'logical' | 'real' | 'type'
+ D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
+ 'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
+ 'optional','required', etc)
+ K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
+ 'complex' | 'integer' | 'logical' | 'real' )
+ C = D['charselector'] = {['*','len','kind']}
+ (only if D['typespec']=='character')
+ D['='] --- initialization expression string
+ D['typename'] --- name of the type if D['typespec']=='type'
+ D['dimension'] --- list of dimension bounds
+ D['intent'] --- list of intent specifications
+    D['depend'] --- list of variable names on which the current variable depends
+ D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
+ D['note'] --- list of LaTeX comments on the variable
+ *** Meaning of kind/char selectors (few examples):
+    D['typespec']*K['*']
+ D['typespec'](kind=K['kind'])
+ character*C['*']
+ character(len=C['len'],kind=C['kind'])
+ (see also fortran type declaration statement formats below)
+
+Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
- (Main source: IBM XL Fortran 5.1 Language Reference Manual)
- type declaration = <typespec> [[<attrspec>]::] <entitydecl>
- <typespec> = byte |
- character[<charselector>] |
- complex[<kindselector>] |
- double complex |
- double precision |
- integer[<kindselector>] |
- logical[<kindselector>] |
- real[<kindselector>] |
- type(<typename>)
- <charselector> = * <charlen> |
- ([len=]<len>[,[kind=]<kind>]) |
- (kind=<kind>[,len=<len>])
- <kindselector> = * <intlen> |
- ([kind=]<kind>)
- <attrspec> = comma separated list of attributes.
- Only the following attributes are used in
- building up the interface:
- external
- (parameter --- affects '=' key)
- optional
- intent
- Other attributes are ignored.
- <intentspec> = in | out | inout
- <arrayspec> = comma separated list of dimension bounds.
- <entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
- [/<init_expr>/ | =<init_expr>] [,<entitydecl>]
-
- In addition, the following attributes are used: check,depend,note
-
- TODO:
- * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
- -> 'real x(2)')
- The above may be solved by creating appropriate preprocessor program, for example.
+(Main source: IBM XL Fortran 5.1 Language Reference Manual)
+type declaration = <typespec> [[<attrspec>]::] <entitydecl>
+<typespec> = byte |
+ character[<charselector>] |
+ complex[<kindselector>] |
+ double complex |
+ double precision |
+ integer[<kindselector>] |
+ logical[<kindselector>] |
+ real[<kindselector>] |
+ type(<typename>)
+<charselector> = * <charlen> |
+ ([len=]<len>[,[kind=]<kind>]) |
+ (kind=<kind>[,len=<len>])
+<kindselector> = * <intlen> |
+ ([kind=]<kind>)
+<attrspec> = comma separated list of attributes.
+ Only the following attributes are used in
+ building up the interface:
+ external
+ (parameter --- affects '=' key)
+ optional
+ intent
+ Other attributes are ignored.
+<intentspec> = in | out | inout
+<arrayspec> = comma separated list of dimension bounds.
+<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
+ [/<init_expr>/ | =<init_expr>] [,<entitydecl>]
+
+In addition, the following attributes are used: check,depend,note
+
+TODO:
+ * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
+ -> 'real x(2)')
+ The above may be solved by creating appropriate preprocessor program, for example.
+
"""
-#
+from __future__ import division, absolute_import, print_function
+
import sys
import string
import fileinput
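
The usage described in the docstring above can be exercised directly. A
minimal sketch, assuming a hypothetical Fortran source file example.f (the
funcs argument mentioned in the docstring is left at its default):

    from numpy.f2py.crackfortran import crackfortran, crack2fortran

    postlist = crackfortran(['example.f'])        # list of block dictionaries
    for block in postlist:
        print(block['block'], block.get('name'))  # e.g. subroutine foo
    pyf = crack2fortran(postlist)                 # text for a .pyf signature file
    with open('example.pyf', 'w') as f:
        f.write(pyf)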
@@ -151,7 +147,12 @@ import re
import pprint
import os
import copy
-from auxfuncs import *
+import platform
+
+from . import __version__
+from .auxfuncs import *
+
+f2py_version = __version__.version
# Global flags:
strictf77=1 # Ignore `!' comments unless line[0]=='!'
@@ -214,12 +215,14 @@ for n in ['int','double','float','char','short','long','void','case','while',
'type','default']:
badnames[n]=n+'_bn'
invbadnames[n+'_bn']=n
+
def rmbadname1(name):
if name in badnames:
errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name,badnames[name]))
return badnames[name]
return name
-def rmbadname(names): return map(rmbadname1,names)
+
+def rmbadname(names): return [rmbadname1(_m) for _m in names]
def undo_rmbadname1(name):
if name in invbadnames:
@@ -227,7 +230,8 @@ def undo_rmbadname1(name):
%(name,invbadnames[name]))
return invbadnames[name]
return name
-def undo_rmbadname(names): return map(undo_rmbadname1,names)
+
+def undo_rmbadname(names): return [undo_rmbadname1(_m) for _m in names]
def getextension(name):
i=name.rfind('.')
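
rmbadname and undo_rmbadname now return plain lists because Python 3's map()
is a lazy iterator that cannot be indexed or re-iterated. An illustrative toy
using a made-up name table, not the module's real badnames mapping:

    names = ['int', 'lambda', 'x']
    badnames = {'int': 'int_bn', 'lambda': 'lambda_bn'}
    renamed = [badnames.get(n, n) for n in names]
    print(renamed[0])    # int_bn; indexing works on a list
    print(len(renamed))  # 3; a map object has no len()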
@@ -290,10 +294,10 @@ def readfortrancode(ffile,dowithline=show,istop=1):
mline_mark = re.compile(r".*?'''")
if istop: dowithline('',-1)
ll,l1='',''
- spacedigits=[' ']+map(str,range(10))
+ spacedigits=[' '] + [str(_m) for _m in range(10)]
filepositiontext=''
fin=fileinput.FileInput(ffile)
- while 1:
+ while True:
l=fin.readline()
if not l: break
if fin.isfirstline():
@@ -312,7 +316,7 @@ def readfortrancode(ffile,dowithline=show,istop=1):
if strictf77: beginpattern=beginpattern77
else: beginpattern=beginpattern90
outmess('\tReading file %s (format:%s%s)\n'\
- %(`currentfilename`,sourcecodeform,
+ %(repr(currentfilename),sourcecodeform,
strictf77 and ',strict' or ''))
l=l.expandtabs().replace('\xa0',' ')
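
The backtick repr-quoting seen in the removed line was dropped from the
language in Python 3; repr() is the drop-in replacement used throughout this
diff and produces the identical string:

    filename = 'foo.f'
    msg = '\tReading file %s\n' % repr(filename)
    print(msg)   # prints: Reading file 'foo.f'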
@@ -344,7 +348,7 @@ def readfortrancode(ffile,dowithline=show,istop=1):
if not (l[0] in spacedigits):
raise Exception('readfortrancode: Found non-(space,digit) char '
'in the first column.\n\tAre you sure that '
- 'this code is in fix form?\n\tline=%s' % `l`)
+ 'this code is in fix form?\n\tline=%s' % repr(l))
if (not cont or strictf77) and (len(l)>5 and not l[5]==' '):
# Continuation of a previous line
@@ -377,7 +381,7 @@ def readfortrancode(ffile,dowithline=show,istop=1):
elif sourcecodeform=='free':
if not cont and ext=='.pyf' and mline_mark.match(l):
l = l + '\n'
- while 1:
+ while True:
lc = fin.readline()
if not lc:
errmess('Unexpected end of file when reading multiline\n')
@@ -399,7 +403,7 @@ def readfortrancode(ffile,dowithline=show,istop=1):
ll=l
cont=(r is not None)
else:
- raise ValueError("Flag sourcecodeform must be either 'fix' or 'free': %s"%`sourcecodeform`)
+ raise ValueError("Flag sourcecodeform must be either 'fix' or 'free': %s"%repr(sourcecodeform))
filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1)
m=includeline.match(origfinalline)
if m:
@@ -416,7 +420,7 @@ def readfortrancode(ffile,dowithline=show,istop=1):
readfortrancode(fn1,dowithline=dowithline,istop=0)
break
if not foundfile:
- outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(`fn`, os.pathsep.join(include_dirs)))
+ outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
l1=ll
@@ -440,7 +444,7 @@ def readfortrancode(ffile,dowithline=show,istop=1):
readfortrancode(fn1,dowithline=dowithline,istop=0)
break
if not foundfile:
- outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(`fn`, os.pathsep.join(include_dirs)))
+ outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
filepositiontext=''
@@ -519,7 +523,7 @@ def crackline(line,reset=0):
if ';' in line and not (f2pyenhancementspattern[0].match(line) or
multilinepattern[0].match(line)):
for l in line.split(';'):
- assert reset==0,`reset` # XXX: non-zero reset values need testing
+ assert reset==0,repr(reset) # XXX: non-zero reset values need testing
crackline(l,reset)
return
if reset<0:
@@ -538,7 +542,7 @@ def crackline(line,reset=0):
fl=0
if f77modulename and neededmodule==groupcounter: fl=2
while groupcounter>fl:
- outmess('crackline: groupcounter=%s groupname=%s\n'%(`groupcounter`,`groupname`))
+ outmess('crackline: groupcounter=%s groupname=%s\n'%(repr(groupcounter),repr(groupname)))
outmess('crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
grouplist[groupcounter-1].append(groupcache[groupcounter])
grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
@@ -592,7 +596,7 @@ def crackline(line,reset=0):
else: line='callfun %s(%s)'%(name,a)
m = callfunpattern[0].match(line)
if not m:
- outmess('crackline: could not resolve function call for line=%s.\n'%`line`)
+ outmess('crackline: could not resolve function call for line=%s.\n'%repr(line))
return
analyzeline(m,'callfun',line)
return
@@ -612,7 +616,7 @@ def crackline(line,reset=0):
if (m1) and (not m1.group('this')==groupname[groupcounter]):
raise Exception('crackline: End group %s does not match with '
'previous Begin group %s\n\t%s' % \
- (`m1.group('this')`, `groupname[groupcounter]`,
+ (repr(m1.group('this')), repr(groupname[groupcounter]),
filepositiontext)
)
if skipblocksuntil==groupcounter:
@@ -669,7 +673,7 @@ def markoutercomma(line,comma=','):
l=l+'@'+comma+'@'
continue
l=l+c
- assert not f,`f,line,l,cc`
+ assert not f,repr((f,line,l,cc))
return l
def unmarkouterparen(line):
r = line.replace('@(@','(').replace('@)@',')')
@@ -678,7 +682,7 @@ def appenddecl(decl,decl2,force=1):
if not decl: decl={}
if not decl2: return decl
if decl is decl2: return decl
- for k in decl2.keys():
+ for k in list(decl2.keys()):
if k=='typespec':
if force or k not in decl:
decl[k]=decl2[k]
@@ -795,7 +799,7 @@ def analyzeline(m,case,line):
grouplist[groupcounter]=[]
if needmodule:
if verbose>1:
- outmess('analyzeline: Creating module block %s\n'%`f77modulename`,0)
+ outmess('analyzeline: Creating module block %s\n'%repr(f77modulename),0)
groupname[groupcounter]='module'
groupcache[groupcounter]['block']='python module'
groupcache[groupcounter]['name']=f77modulename
@@ -834,7 +838,7 @@ def analyzeline(m,case,line):
groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],currentfilename)
else:
groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name'])
- for k in groupcache[groupcounter].keys():
+ for k in list(groupcache[groupcounter].keys()):
if not groupcache[groupcounter][k]:
del groupcache[groupcounter][k]
@@ -888,7 +892,7 @@ def analyzeline(m,case,line):
if args:
args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')])
else: args=[]
- assert result is None,`result`
+ assert result is None,repr(result)
groupcache[groupcounter]['entry'][name] = args
previous_context = ('entry',name,groupcounter)
elif case=='type':
@@ -920,8 +924,8 @@ def analyzeline(m,case,line):
if not m1:
if case in ['public','private']: k=''
else:
- print m.groupdict()
- outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case,`e`))
+ print(m.groupdict())
+ outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case,repr(e)))
continue
else:
k=rmbadname1(m1.group('name'))
@@ -993,7 +997,7 @@ def analyzeline(m,case,line):
replace(',','+1j*(')
try:
v = eval(initexpr,{},params)
- except (SyntaxError,NameError,TypeError),msg:
+ except (SyntaxError,NameError,TypeError) as msg:
errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'\
% (initexpr, msg))
continue
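
The "except (SyntaxError, NameError, TypeError), msg" spelling is a syntax
error under Python 3; the "as" form is accepted by both Python 2.6+ and 3.x.
A self-contained demonstration:

    try:
        eval('1 +')   # deliberately malformed expression
    except (SyntaxError, NameError, TypeError) as msg:
        print('Failed to evaluate: %s' % msg)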
@@ -1029,7 +1033,7 @@ def analyzeline(m,case,line):
decl['kindselector']=kindselect
decl['charselector']=charselect
decl['typename']=typename
- for k in decl.keys():
+ for k in list(decl.keys()):
if not decl[k]: del decl[k]
for r in markoutercomma(m1.group('after')).split('@,@'):
if '-' in r:
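
Wrapping .keys() in list() matters in loops like the one above because the
body deletes entries: in Python 3, dict.keys() is a live view, and mutating
the dict while iterating it raises RuntimeError. A toy reproduction:

    decl = {'kindselector': {}, 'charselector': None, 'typename': 'real'}
    for k in list(decl.keys()):   # snapshot; a bare .keys() would fail here
        if not decl[k]:
            del decl[k]
    print(decl)                   # {'typename': 'real'}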
@@ -1149,21 +1153,21 @@ def analyzeline(m,case,line):
m2=re.match(r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z',l,re.I)
if m2: rl[m2.group('local').strip()]=m2.group('use').strip()
else:
- outmess('analyzeline: Not local=>use pattern found in %s\n'%`l`)
+                            outmess('analyzeline: No local=>use pattern found in %s\n'%repr(l))
else:
rl[l]=l
groupcache[groupcounter]['use'][name]['map']=rl
else:
pass
else:
- print m.groupdict()
+ print(m.groupdict())
outmess('analyzeline: Could not crack the use statement.\n')
elif case in ['f2pyenhancements']:
if 'f2pyenhancements' not in groupcache[groupcounter]:
groupcache[groupcounter]['f2pyenhancements'] = {}
d = groupcache[groupcounter]['f2pyenhancements']
if m.group('this')=='usercode' and 'usercode' in d:
- if type(d['usercode']) is type(''):
+ if isinstance(d['usercode'], str):
d['usercode'] = [d['usercode']]
d['usercode'].append(m.group('after'))
else:
@@ -1180,7 +1184,7 @@ def analyzeline(m,case,line):
m.group('this'))
else:
if verbose>1:
- print m.groupdict()
+ print(m.groupdict())
outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name,ml):
@@ -1203,7 +1207,7 @@ def cracktypespec0(typespec,ll):
outmess('cracktypespec0: no kind/char_selector pattern found for line.\n')
return
d=m1.groupdict()
- for k in d.keys(): d[k]=unmarkouterparen(d[k])
+ for k in list(d.keys()): d[k]=unmarkouterparen(d[k])
if typespec in ['complex','integer','logical','real','character','type']:
selector=d['this']
ll=d['after']
@@ -1269,7 +1273,7 @@ def updatevars(typespec,selector,attrspec,entitydecl):
for e in el1:
m=namepattern.match(e)
if not m:
- outmess('updatevars: no name pattern found for entity=%s. Skipping.\n'%(`e`))
+ outmess('updatevars: no name pattern found for entity=%s. Skipping.\n'%(repr(e)))
continue
ename=rmbadname1(m.group('name'))
edecl={}
@@ -1283,7 +1287,7 @@ def updatevars(typespec,selector,attrspec,entitydecl):
if 'kindselector' not in edecl:
edecl['kindselector']=copy.copy(kindselect)
elif kindselect:
- for k in kindselect.keys():
+ for k in list(kindselect.keys()):
if k in edecl['kindselector'] and (not kindselect[k]==edecl['kindselector'][k]):
outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['kindselector'][k],kindselect[k]))
else: edecl['kindselector'][k]=copy.copy(kindselect[k])
@@ -1294,7 +1298,7 @@ def updatevars(typespec,selector,attrspec,entitydecl):
errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' \
%(ename,charselect))
elif charselect:
- for k in charselect.keys():
+ for k in list(charselect.keys()):
if k in edecl['charselector'] and (not charselect[k]==edecl['charselector'][k]):
outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['charselector'][k],charselect[k]))
else: edecl['charselector'][k]=copy.copy(charselect[k])
@@ -1320,7 +1324,7 @@ def updatevars(typespec,selector,attrspec,entitydecl):
d1=m1.groupdict()
for lk in ['len','array','init']:
if d1[lk+'2'] is not None: d1[lk]=d1[lk+'2']; del d1[lk+'2']
- for k in d1.keys():
+ for k in list(d1.keys()):
if d1[k] is not None: d1[k]=unmarkouterparen(d1[k])
else: del d1[k]
if 'len' in d1 and 'array' in d1:
@@ -1362,7 +1366,7 @@ def updatevars(typespec,selector,attrspec,entitydecl):
edecl['=']=d1['init']
else:
outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n'%(ename+m.group('after')))
- for k in edecl.keys():
+ for k in list(edecl.keys()):
if not edecl[k]:
del edecl[k]
groupcache[groupcounter]['vars'][ename]=edecl
@@ -1379,19 +1383,19 @@ def cracktypespec(typespec,selector):
if typespec in ['complex','integer','logical','real']:
kindselect=kindselector.match(selector)
if not kindselect:
- outmess('cracktypespec: no kindselector pattern found for %s\n'%(`selector`))
+ outmess('cracktypespec: no kindselector pattern found for %s\n'%(repr(selector)))
return
kindselect=kindselect.groupdict()
kindselect['*']=kindselect['kind2']
del kindselect['kind2']
- for k in kindselect.keys():
+ for k in list(kindselect.keys()):
if not kindselect[k]: del kindselect[k]
- for k,i in kindselect.items():
+ for k,i in list(kindselect.items()):
kindselect[k] = rmbadname1(i)
elif typespec=='character':
charselect=charselector.match(selector)
if not charselect:
- outmess('cracktypespec: no charselector pattern found for %s\n'%(`selector`))
+ outmess('cracktypespec: no charselector pattern found for %s\n'%(repr(selector)))
return
charselect=charselect.groupdict()
charselect['*']=charselect['charlen']
@@ -1405,16 +1409,16 @@ def cracktypespec(typespec,selector):
charselect[lk]=lenkind[lk]
del lenkind[lk+'2']
del charselect['lenkind']
- for k in charselect.keys():
+ for k in list(charselect.keys()):
if not charselect[k]: del charselect[k]
- for k,i in charselect.items():
+ for k,i in list(charselect.items()):
charselect[k] = rmbadname1(i)
elif typespec=='type':
typename=re.match(r'\s*\(\s*(?P<name>\w+)\s*\)',selector,re.I)
if typename: typename=typename.group('name')
- else: outmess('cracktypespec: no typename found in %s\n'%(`typespec+selector`))
+ else: outmess('cracktypespec: no typename found in %s\n'%(repr(typespec+selector)))
else:
- outmess('cracktypespec: no selector used for %s\n'%(`selector`))
+ outmess('cracktypespec: no selector used for %s\n'%(repr(selector)))
return kindselect,charselect,typename
######
def setattrspec(decl,attr,force=0):
@@ -1447,7 +1451,7 @@ def setkindselector(decl,sel,force=0):
if 'kindselector' not in decl:
decl['kindselector']=sel
return decl
- for k in sel.keys():
+ for k in list(sel.keys()):
if force or k not in decl['kindselector']:
decl['kindselector'][k]=sel[k]
return decl
@@ -1460,7 +1464,7 @@ def setcharselector(decl,sel,force=0):
if 'charselector' not in decl:
decl['charselector']=sel
return decl
- for k in sel.keys():
+ for k in list(sel.keys()):
if force or k not in decl['charselector']:
decl['charselector'][k]=sel[k]
return decl
@@ -1494,7 +1498,7 @@ def get_useparameters(block, param_map=None):
usedict = get_usedict(block)
if not usedict:
return param_map
- for usename,mapping in usedict.items():
+ for usename,mapping in list(usedict.items()):
usename = usename.lower()
if usename not in f90modulevars:
outmess('get_useparameters: no module %s info used by %s\n' % (usename, block.get('name')))
@@ -1506,10 +1510,10 @@ def get_useparameters(block, param_map=None):
# XXX: apply mapping
if mapping:
errmess('get_useparameters: mapping for %s not impl.' % (mapping))
- for k,v in params.items():
+ for k,v in list(params.items()):
if k in param_map:
outmess('get_useparameters: overriding parameter %s with'\
- ' value from module %s' % (`k`,`usename`))
+ ' value from module %s' % (repr(k),repr(usename)))
param_map[k] = v
return param_map
@@ -1518,7 +1522,7 @@ def postcrack2(block,tab='',param_map=None):
global f90modulevars
if not f90modulevars:
return block
- if type(block)==types.ListType:
+ if isinstance(block, list):
ret = []
for g in block:
g = postcrack2(g,tab=tab+'\t',param_map=param_map)
@@ -1532,7 +1536,7 @@ def postcrack2(block,tab='',param_map=None):
if param_map is not None and 'vars' in block:
vars = block['vars']
- for n in vars.keys():
+ for n in list(vars.keys()):
var = vars[n]
if 'kindselector' in var:
kind = var['kindselector']
@@ -1555,7 +1559,7 @@ def postcrack(block,args=None,tab=''):
determine expression types if in argument list
"""
global usermodules,onlyfunctions
- if type(block)==types.ListType:
+ if isinstance(block, list):
gret=[]
uret=[]
for g in block:
@@ -1567,7 +1571,7 @@ def postcrack(block,args=None,tab=''):
gret.append(g)
return uret+gret
setmesstext(block)
- if (not type(block)==types.DictType) and 'block' not in block:
+ if not isinstance(block, dict) and 'block' not in block:
raise Exception('postcrack: Expected block dictionary instead of ' + \
str(block))
if 'name' in block and not block['name']=='unknown_interface':
@@ -1585,11 +1589,11 @@ def postcrack(block,args=None,tab=''):
## fromuser = []
if 'use' in block:
useblock=block['use']
- for k in useblock.keys():
+ for k in list(useblock.keys()):
if '__user__' in k:
userisdefined.append(k)
## if 'map' in useblock[k]:
-## for n in useblock[k]['map'].values():
+##                    for n in list(useblock[k]['map'].values()):
## if n not in fromuser: fromuser.append(n)
else: useblock={}
name=''
@@ -1646,7 +1650,7 @@ def postcrack(block,args=None,tab=''):
def sortvarnames(vars):
indep = []
dep = []
- for v in vars.keys():
+ for v in list(vars.keys()):
if 'depend' in vars[v] and vars[v]['depend']:
dep.append(v)
#print '%s depends on %s'%(v,vars[v]['depend'])
@@ -1680,7 +1684,7 @@ def sortvarnames(vars):
def analyzecommon(block):
if not hascommon(block): return block
commonvars=[]
- for k in block['common'].keys():
+ for k in list(block['common'].keys()):
comvars=[]
for e in block['common'][k]:
m=re.match(r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z',e,re.I)
@@ -1748,9 +1752,9 @@ def buildimplicitrules(block):
if block['implicit'] is None:
implicitrules=None
if verbose>1:
- outmess('buildimplicitrules: no implicit rules for routine %s.\n'%`block['name']`)
+ outmess('buildimplicitrules: no implicit rules for routine %s.\n'%repr(block['name']))
else:
- for k in block['implicit'].keys():
+ for k in list(block['implicit'].keys()):
if block['implicit'][k].get('typespec') not in ['static','automatic']:
implicitrules[k]=block['implicit'][k]
else:
@@ -1815,22 +1819,22 @@ def getarrlen(dl,args,star='*'):
except: edl.append(dl[0])
try: edl.append(myeval(dl[1],{},{}))
except: edl.append(dl[1])
- if type(edl[0]) is type(0):
+ if isinstance(edl[0], int):
p1 = 1-edl[0]
if p1==0: d = str(dl[1])
elif p1<0: d = '%s-%s'%(dl[1],-p1)
else: d = '%s+%s'%(dl[1],p1)
- elif type(edl[1]) is type(0):
+ elif isinstance(edl[1], int):
p1 = 1+edl[1]
if p1==0: d='-(%s)' % (dl[0])
else: d='%s-(%s)' % (p1,dl[0])
else: d = '%s-(%s)+1'%(dl[1],dl[0])
- try: return `myeval(d,{},{})`,None,None
+ try: return repr(myeval(d,{},{})),None,None
except: pass
d1,d2=getlincoef(dl[0],args),getlincoef(dl[1],args)
if None not in [d1[0],d2[0]]:
if (d1[0],d2[0])==(0,0):
- return `d2[1]-d1[1]+1`,None,None
+ return repr(d2[1]-d1[1]+1),None,None
b = d2[1] - d1[1] + 1
d1 = (d1[0],0,d1[2])
d2 = (d2[0],b,d2[2])
@@ -1845,7 +1849,7 @@ def getarrlen(dl,args,star='*'):
else: return '%s * %s'%(-d1[0],d1[2]),d1[2],')/(%s)'%(-d1[0])
if d1[2]==d2[2] and d1[2] in args:
a = d2[0] - d1[0]
- if not a: return `b`,None,None
+ if not a: return repr(b),None,None
if b<0: return '%s * %s - %s'%(a,d1[2],-b),d2[2],'+%s)/(%s)'%(-b,a)
elif b: return '%s * %s + %s'%(a,d1[2],b),d2[2],'-%s)/(%s)'%(b,a)
else: return '%s * %s'%(a,d1[2]),d2[2],')/(%s)'%(a)
@@ -1926,13 +1930,13 @@ def _get_depend_dict(name, vars, deps):
if w not in words:
words.append(w)
else:
- outmess('_get_depend_dict: no dependence info for %s\n' % (`name`))
+ outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
words = []
deps[name] = words
return words
def _calc_depend_dict(vars):
- names = vars.keys()
+ names = list(vars.keys())
depend_dict = {}
for n in names:
_get_depend_dict(n, vars, depend_dict)
@@ -1943,12 +1947,12 @@ def get_sorted_names(vars):
"""
depend_dict = _calc_depend_dict(vars)
names = []
- for name in depend_dict.keys():
+ for name in list(depend_dict.keys()):
if not depend_dict[name]:
names.append(name)
del depend_dict[name]
while depend_dict:
- for name, lst in depend_dict.items():
+ for name, lst in list(depend_dict.items()):
new_lst = [n for n in lst if n in depend_dict]
if not new_lst:
names.append(name)
@@ -2034,18 +2038,18 @@ def get_parameters(vars, global_params={}):
l = markoutercomma(v[1:-1]).split('@,@')
try:
params[n] = eval(v,g_params,params)
- except Exception,msg:
+ except Exception as msg:
params[n] = v
#print params
- outmess('get_parameters: got "%s" on %s\n' % (msg,`v`))
- if isstring(vars[n]) and type(params[n]) is type(0):
+ outmess('get_parameters: got "%s" on %s\n' % (msg,repr(v)))
+ if isstring(vars[n]) and isinstance(params[n], int):
params[n] = chr(params[n])
nl = n.lower()
if nl!=n:
params[nl] = params[n]
else:
- print vars[n]
- outmess('get_parameters:parameter %s does not have value?!\n'%(`n`))
+ print(vars[n])
+ outmess('get_parameters:parameter %s does not have value?!\n'%(repr(n)))
return params
def _eval_length(length,params):
@@ -2062,10 +2066,10 @@ def _eval_scalar(value,params):
value = str(eval(value,{},params))
except (NameError, SyntaxError):
return value
- except Exception,msg:
+ except Exception as msg:
errmess('"%s" in evaluating %r '\
'(available names: %s)\n' \
- % (msg,value,params.keys()))
+ % (msg,value,list(params.keys())))
return value
def analyzevars(block):
@@ -2079,7 +2083,7 @@ def analyzevars(block):
del vars['']
if 'attrspec' in block['vars']['']:
gen=block['vars']['']['attrspec']
- for n in vars.keys():
+ for n in list(vars.keys()):
for k in ['public','private']:
if k in gen:
vars[n]=setattrspec(vars[n],k)
@@ -2091,14 +2095,14 @@ def analyzevars(block):
svars.append(a)
except KeyError:
pass
- for n in vars.keys():
+ for n in list(vars.keys()):
if n not in args: svars.append(n)
params = get_parameters(vars, get_useparameters(block))
dep_matches = {}
name_match = re.compile(r'\w[\w\d_$]*').match
- for v in vars.keys():
+ for v in list(vars.keys()):
m = name_match(v)
if m:
n = v[m.start():m.end()]
@@ -2107,13 +2111,13 @@ def analyzevars(block):
except KeyError:
dep_matches[n] = re.compile(r'.*\b%s\b'%(v),re.I).match
for n in svars:
- if n[0] in attrrules.keys():
+ if n[0] in list(attrrules.keys()):
vars[n]=setattrspec(vars[n],attrrules[n[0]])
if 'typespec' not in vars[n]:
if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
if implicitrules:
ln0 = n[0].lower()
- for k in implicitrules[ln0].keys():
+ for k in list(implicitrules[ln0].keys()):
if k=='typespec' and implicitrules[ln0][k]=='undefined':
continue
if k not in vars[n]:
@@ -2122,7 +2126,7 @@ def analyzevars(block):
for l in implicitrules[ln0][k]:
vars[n]=setattrspec(vars[n],l)
elif n in block['args']:
- outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(`n`,block['name']))
+ outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(repr(n),block['name']))
if 'charselector' in vars[n]:
if 'len' in vars[n]['charselector']:
@@ -2192,7 +2196,7 @@ def analyzevars(block):
star=':'
if d in params:
d = str(params[d])
- for p in params.keys():
+ for p in list(params.keys()):
m = re.match(r'(?P<before>.*?)\b'+p+r'\b(?P<after>.*)',d,re.I)
if m:
#outmess('analyzevars:replacing parameter %s in %s (dimension of %s) with %s\n'%(`p`,`d`,`n`,`params[p]`))
@@ -2206,7 +2210,7 @@ def analyzevars(block):
d = '*'
if len(dl)==1 and not dl[0]==star: dl = ['1',dl[0]]
if len(dl)==2:
- d,v,di = getarrlen(dl,block['vars'].keys())
+ d,v,di = getarrlen(dl,list(block['vars'].keys()))
if d[:4] == '1 * ': d = d[4:]
if di and di[-4:] == '/(1)': di = di[:-4]
if v: savelindims[d] = v,di
@@ -2253,7 +2257,7 @@ def analyzevars(block):
d = savelindims[d][0]
else:
for r in block['args']:
- #for r in block['vars'].keys():
+                        #for r in list(block['vars'].keys()):
if r not in vars:
continue
if re.match(r'.*?\b'+r+r'\b',d,re.I):
@@ -2321,13 +2325,13 @@ def analyzevars(block):
vars[n]['attrspec'].append('optional')
if 'depend' not in vars[n]:
vars[n]['depend']=[]
- for v,m in dep_matches.items():
+ for v,m in list(dep_matches.items()):
if m(vars[n]['=']): vars[n]['depend'].append(v)
if not vars[n]['depend']: del vars[n]['depend']
if isscalar(vars[n]):
vars[n]['='] = _eval_scalar(vars[n]['='],params)
- for n in vars.keys():
+ for n in list(vars.keys()):
if n==block['name']: # n is block name
if 'note' in vars[n]:
block['note']=vars[n]['note']
@@ -2357,18 +2361,18 @@ def analyzevars(block):
if ispure: vars[n]=setattrspec(vars[n],'pure')
if isrec: vars[n]=setattrspec(vars[n],'recursive')
else:
- outmess('analyzevars: prefix (%s) were not used\n'%`block['prefix']`)
+            outmess('analyzevars: prefix (%s) was not used\n'%repr(block['prefix']))
if not block['block'] in ['module','pythonmodule','python module','block data']:
if 'commonvars' in block:
neededvars=copy.copy(block['args']+block['commonvars'])
else:
neededvars=copy.copy(block['args'])
- for n in vars.keys():
+ for n in list(vars.keys()):
if l_or(isintent_callback,isintent_aux)(vars[n]):
neededvars.append(n)
if 'entry' in block:
- neededvars.extend(block['entry'].keys())
- for k in block['entry'].keys():
+ neededvars.extend(list(block['entry'].keys()))
+ for k in list(block['entry'].keys()):
for n in block['entry'][k]:
if n not in neededvars:
neededvars.append(n)
@@ -2382,8 +2386,8 @@ def analyzevars(block):
if name in vars and 'intent' in vars[name]:
block['intent'] = vars[name]['intent']
if block['block'] == 'type':
- neededvars.extend(vars.keys())
- for n in vars.keys():
+ neededvars.extend(list(vars.keys()))
+ for n in list(vars.keys()):
if n not in neededvars:
del vars[n]
return vars
@@ -2433,7 +2437,7 @@ def analyzeargs(block):
args.append(a)
block['args']=args
if 'entry' in block:
- for k,args1 in block['entry'].items():
+ for k,args1 in list(block['entry'].items()):
for a in args1:
if a not in block['vars']:
block['vars'][a]={}
@@ -2454,14 +2458,15 @@ determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+-de.]*(_(P<name>[\w]+)|)\
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z',re.I)
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z',re.I)
def _ensure_exprdict(r):
- if type(r) is type(0):
+ if isinstance(r, int):
return {'typespec':'integer'}
- if type(r) is type(0.0):
+ if isinstance(r, float):
return {'typespec':'real'}
- if type(r) is type(0j):
+ if isinstance(r, complex):
return {'typespec':'complex'}
- assert type(r) is type({}),`r`
- return r
+ if isinstance(r, dict):
+ return r
+ raise AssertionError(repr(r))
def determineexprtype(expr,vars,rules={}):
if expr in vars:
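
The rewritten _ensure_exprdict trades the old assert-with-backticks for an
explicit raise, so the check also survives python -O, which strips assert
statements. A stand-in mirroring the mapping in the hunk above:

    def ensure_exprdict(r):
        if isinstance(r, int):
            return {'typespec': 'integer'}
        if isinstance(r, float):
            return {'typespec': 'real'}
        if isinstance(r, complex):
            return {'typespec': 'complex'}
        if isinstance(r, dict):
            return r
        raise AssertionError(repr(r))

    print(ensure_exprdict(3))     # {'typespec': 'integer'}
    print(ensure_exprdict(3.0))   # {'typespec': 'real'}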
@@ -2472,12 +2477,12 @@ def determineexprtype(expr,vars,rules={}):
m=determineexprtype_re_2.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
- outmess('determineexprtype: selected kind types not supported (%s)\n'%`expr`)
+ outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr))
return {'typespec':'integer'}
m = determineexprtype_re_3.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
- outmess('determineexprtype: selected kind types not supported (%s)\n'%`expr`)
+ outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr))
return {'typespec':'real'}
for op in ['+','-','*','/']:
for e in [x.strip() for x in markoutercomma(expr,comma=op).split('@'+op+'@')]:
@@ -2499,7 +2504,7 @@ def determineexprtype(expr,vars,rules={}):
if expr[0] in '\'"':
return {'typespec':'character','charselector':{'*':'*'}}
if not t:
- outmess('determineexprtype: could not determine expressions (%s) type.\n'%(`expr`))
+ outmess('determineexprtype: could not determine expressions (%s) type.\n'%(repr(expr)))
return t
######
@@ -2534,7 +2539,7 @@ def crack2fortrangen(block,tab='\n', as_interface=False):
args='(%s)'%','.join(argsl)
f2pyenhancements = ''
if 'f2pyenhancements' in block:
- for k in block['f2pyenhancements'].keys():
+ for k in list(block['f2pyenhancements'].keys()):
f2pyenhancements = '%s%s%s %s'%(f2pyenhancements,tab+tabchar,k,block['f2pyenhancements'][k])
intent_lst = block.get('intent',[])[:]
if blocktype=='function' and 'callback' in intent_lst:
@@ -2564,7 +2569,7 @@ def crack2fortrangen(block,tab='\n', as_interface=False):
mess='! in %s'%block['from']
if 'entry' in block:
entry_stmts = ''
- for k,i in block['entry'].items():
+ for k,i in list(block['entry'].items()):
entry_stmts = '%s%sentry %s(%s)' \
% (entry_stmts,tab+tabchar,k,','.join(i))
body = body + entry_stmts
@@ -2575,7 +2580,7 @@ def crack2fortrangen(block,tab='\n', as_interface=False):
def common2fortran(common,tab=''):
ret=''
- for k in common.keys():
+ for k in list(common.keys()):
if k=='_BLNK_':
ret='%s%scommon %s'%(ret,tab,','.join(common[k]))
else:
@@ -2584,7 +2589,7 @@ def common2fortran(common,tab=''):
def use2fortran(use,tab=''):
ret=''
- for m in use.keys():
+ for m in list(use.keys()):
ret='%s%suse %s,'%(ret,tab,m)
if use[m]=={}:
if ret and ret[-1]==',': ret=ret[:-1]
@@ -2593,7 +2598,7 @@ def use2fortran(use,tab=''):
ret='%s only:'%(ret)
if 'map' in use[m] and use[m]['map']:
c=' '
- for k in use[m]['map'].keys():
+ for k in list(use[m]['map'].keys()):
if k==use[m]['map'][k]:
ret='%s%s%s'%(ret,c,k); c=','
else:
@@ -2606,7 +2611,7 @@ def true_intent_list(var):
ret = []
for intent in lst:
try:
- exec('c = isintent_%s(var)' % intent)
+ c = eval('isintent_%s(var)' % intent)
except NameError:
c = 0
if c:
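
The switch from exec to eval in true_intent_list is forced by Python 3
semantics: exec of an assignment inside a function no longer rebinds that
function's locals, so c would have stayed undefined. A sketch of the failure
mode and the fix:

    def broken():
        exec('c = 1')   # runs, but creates no local 'c' on Python 3
        return c        # NameError

    def fixed():
        c = eval('1')   # eval returns the value; the assignment is ordinary
        return c

    print(fixed())      # 1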
@@ -2635,7 +2640,7 @@ def vars2fortran(block,vars,args,tab='', as_interface=False):
if 'varnames' in block:
nout.extend(block['varnames'])
if not as_interface:
- for a in vars.keys():
+ for a in list(vars.keys()):
if a not in nout:
nout.append(a)
for a in nout:
@@ -2794,7 +2799,7 @@ if __name__ == "__main__":
elif l=='-m':
f3=1
elif l[0]=='-':
- errmess('Unknown option %s\n'%`l`)
+ errmess('Unknown option %s\n'%repr(l))
elif f2:
f2=0
pyffilename=l
@@ -2805,7 +2810,7 @@ if __name__ == "__main__":
try:
open(l).close()
files.append(l)
- except IOError,detail:
+ except IOError as detail:
errmess('IOError: %s\n'%str(detail))
else:
funcs.append(l)
@@ -2819,7 +2824,7 @@ if __name__ == "__main__":
postlist=crackfortran(files,funcs)
if pyffilename:
- outmess('Writing fortran code to file %s\n'%`pyffilename`,0)
+ outmess('Writing fortran code to file %s\n'%repr(pyffilename),0)
pyf=crack2fortran(postlist)
f=open(pyffilename,'w')
f.write(pyf)
diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py
index 3b517a5c9..f00c3f0ac 100644
--- a/numpy/f2py/diagnose.py
+++ b/numpy/f2py/diagnose.py
@@ -1,42 +1,43 @@
#!/usr/bin/env python
+from __future__ import division, absolute_import, print_function
import os
import sys
import tempfile
def run_command(cmd):
- print 'Running %r:' % (cmd)
+ print('Running %r:' % (cmd))
s = os.system(cmd)
- print '------'
+ print('------')
def run():
_path = os.getcwd()
os.chdir(tempfile.gettempdir())
- print '------'
- print 'os.name=%r' % (os.name)
- print '------'
- print 'sys.platform=%r' % (sys.platform)
- print '------'
- print 'sys.version:'
- print sys.version
- print '------'
- print 'sys.prefix:'
- print sys.prefix
- print '------'
- print 'sys.path=%r' % (':'.join(sys.path))
- print '------'
+ print('------')
+ print('os.name=%r' % (os.name))
+ print('------')
+ print('sys.platform=%r' % (sys.platform))
+ print('------')
+ print('sys.version:')
+ print(sys.version)
+ print('------')
+ print('sys.prefix:')
+ print(sys.prefix)
+ print('------')
+ print('sys.path=%r' % (':'.join(sys.path)))
+ print('------')
try:
import numpy
has_newnumpy = 1
except ImportError:
- print 'Failed to import new numpy:', sys.exc_value
+ print('Failed to import new numpy:', sys.exc_info()[1])
has_newnumpy = 0
try:
from numpy.f2py import f2py2e
has_f2py2e = 1
except ImportError:
- print 'Failed to import f2py2e:',sys.exc_value
+ print('Failed to import f2py2e:',sys.exc_info()[1])
has_f2py2e = 0
try:
@@ -47,102 +48,102 @@ def run():
import numpy_distutils
has_numpy_distutils = 1
except ImportError:
- print 'Failed to import numpy_distutils:',sys.exc_value
+ print('Failed to import numpy_distutils:',sys.exc_info()[1])
has_numpy_distutils = 0
if has_newnumpy:
try:
- print 'Found new numpy version %r in %s' % \
- (numpy.__version__, numpy.__file__)
- except Exception,msg:
- print 'error:', msg
- print '------'
+ print('Found new numpy version %r in %s' % \
+ (numpy.__version__, numpy.__file__))
+ except Exception as msg:
+ print('error:', msg)
+ print('------')
if has_f2py2e:
try:
- print 'Found f2py2e version %r in %s' % \
- (f2py2e.__version__.version,f2py2e.__file__)
- except Exception,msg:
- print 'error:',msg
- print '------'
+ print('Found f2py2e version %r in %s' % \
+ (f2py2e.__version__.version,f2py2e.__file__))
+ except Exception as msg:
+ print('error:',msg)
+ print('------')
if has_numpy_distutils:
try:
if has_numpy_distutils == 2:
- print 'Found numpy.distutils version %r in %r' % (\
+ print('Found numpy.distutils version %r in %r' % (\
numpy.distutils.__version__,
- numpy.distutils.__file__)
+ numpy.distutils.__file__))
else:
- print 'Found numpy_distutils version %r in %r' % (\
+ print('Found numpy_distutils version %r in %r' % (\
numpy_distutils.numpy_distutils_version.numpy_distutils_version,
- numpy_distutils.__file__)
- print '------'
- except Exception,msg:
- print 'error:',msg
- print '------'
+ numpy_distutils.__file__))
+ print('------')
+ except Exception as msg:
+ print('error:',msg)
+ print('------')
try:
if has_numpy_distutils == 1:
- print 'Importing numpy_distutils.command.build_flib ...',
+ print('Importing numpy_distutils.command.build_flib ...', end=' ')
import numpy_distutils.command.build_flib as build_flib
- print 'ok'
- print '------'
+ print('ok')
+ print('------')
try:
- print 'Checking availability of supported Fortran compilers:'
+ print('Checking availability of supported Fortran compilers:')
for compiler_class in build_flib.all_compilers:
compiler_class(verbose=1).is_available()
- print '------'
- except Exception,msg:
- print 'error:',msg
- print '------'
- except Exception,msg:
- print 'error:',msg,'(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)'
- print '------'
+ print('------')
+ except Exception as msg:
+ print('error:',msg)
+ print('------')
+ except Exception as msg:
+        print('error:',msg,'(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)')
+ print('------')
try:
if has_numpy_distutils == 2:
- print 'Importing numpy.distutils.fcompiler ...',
+ print('Importing numpy.distutils.fcompiler ...', end=' ')
import numpy.distutils.fcompiler as fcompiler
else:
- print 'Importing numpy_distutils.fcompiler ...',
+ print('Importing numpy_distutils.fcompiler ...', end=' ')
import numpy_distutils.fcompiler as fcompiler
- print 'ok'
- print '------'
+ print('ok')
+ print('------')
try:
- print 'Checking availability of supported Fortran compilers:'
+ print('Checking availability of supported Fortran compilers:')
fcompiler.show_fcompilers()
- print '------'
- except Exception,msg:
- print 'error:',msg
- print '------'
- except Exception,msg:
- print 'error:',msg
- print '------'
+ print('------')
+ except Exception as msg:
+ print('error:',msg)
+ print('------')
+ except Exception as msg:
+ print('error:',msg)
+ print('------')
try:
if has_numpy_distutils == 2:
- print 'Importing numpy.distutils.cpuinfo ...',
+ print('Importing numpy.distutils.cpuinfo ...', end=' ')
from numpy.distutils.cpuinfo import cpuinfo
- print 'ok'
- print '------'
+ print('ok')
+ print('------')
else:
try:
- print 'Importing numpy_distutils.command.cpuinfo ...',
+ print('Importing numpy_distutils.command.cpuinfo ...', end=' ')
from numpy_distutils.command.cpuinfo import cpuinfo
- print 'ok'
- print '------'
- except Exception,msg:
- print 'error:',msg,'(ignore it)'
- print 'Importing numpy_distutils.cpuinfo ...',
+ print('ok')
+ print('------')
+ except Exception as msg:
+ print('error:',msg,'(ignore it)')
+ print('Importing numpy_distutils.cpuinfo ...', end=' ')
from numpy_distutils.cpuinfo import cpuinfo
- print 'ok'
- print '------'
+ print('ok')
+ print('------')
cpu = cpuinfo()
- print 'CPU information:',
+ print('CPU information:', end=' ')
for name in dir(cpuinfo):
if name[0]=='_' and name[1]!='_' and getattr(cpu,name[1:])():
- print name[1:],
- print '------'
- except Exception,msg:
- print 'error:',msg
- print '------'
+ print(name[1:], end=' ')
+ print('------')
+ except Exception as msg:
+ print('error:',msg)
+ print('------')
os.chdir(_path)
if __name__ == "__main__":
run()
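
diagnose.py leaned heavily on the Python 2 trailing-comma form (print x,) to
suppress the newline; print(..., end=' ') is the equivalent call form, made
available on Python 2.6+ by the __future__ import added at the top of the
file:

    from __future__ import print_function   # no-op on Python 3

    print('CPU information:', end=' ')      # stays on the same line
    print('sse sse2')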
diff --git a/numpy/f2py/doc/collectinput.py b/numpy/f2py/doc/collectinput.py
index 2e6c6c96b..317fde5b2 100755
--- a/numpy/f2py/doc/collectinput.py
+++ b/numpy/f2py/doc/collectinput.py
@@ -17,7 +17,9 @@ Usage:
collectinput <infile> <outfile>
collectinput <infile> # <outfile>=inputless_<infile>
collectinput # in and out are stdin and stdout
+
"""
+from __future__ import division, absolute_import, print_function
__version__ = "0.0"
@@ -25,7 +27,11 @@ stdoutflag=0
import sys
import fileinput
import re
-import commands
+
+if sys.version_info[0] >= 3:
+ from subprocess import getoutput
+else:
+ from commands import getoutput
try: fn=sys.argv[2]
except:
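
The commands module is gone in Python 3; its getoutput moved to subprocess,
and the guard above picks whichever exists. Usage is unchanged; a trivial
check, assuming a POSIX shell:

    import sys
    if sys.version_info[0] >= 3:
        from subprocess import getoutput
    else:
        from commands import getoutput

    print(getoutput('echo hello'))   # hello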
@@ -62,16 +68,16 @@ for l in fileinput.input(fi):
except: flag=0
if flag==0:
sys.stderr.write('Could not open a file: '+fn+'\n')
- print l+l1
+ print(l+l1)
continue
elif flag==1:
sys.stderr.write(fn+'\n')
- print '%%%%% Begin of '+fn
- print commands.getoutput(sys.argv[0]+' < '+fn)
- print '%%%%% End of '+fn
+ print('%%%%% Begin of '+fn)
+ print(getoutput(sys.argv[0]+' < '+fn))
+ print('%%%%% End of '+fn)
else:
sys.stderr.write('Could not extract a file name from: '+l)
- print l+l1
+ print(l+l1)
else:
- print l+l1
+ print(l+l1)
sys.stdout.close()
diff --git a/numpy/f2py/docs/pytest.py b/numpy/f2py/docs/pytest.py
index abd3487df..b61a607fe 100644
--- a/numpy/f2py/docs/pytest.py
+++ b/numpy/f2py/docs/pytest.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
#File: pytest.py
import Numeric
def foo(a):
diff --git a/numpy/f2py/docs/usersguide/setup_example.py b/numpy/f2py/docs/usersguide/setup_example.py
index e5f5e8441..8dfaa7a52 100644
--- a/numpy/f2py/docs/usersguide/setup_example.py
+++ b/numpy/f2py/docs/usersguide/setup_example.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, absolute_import, print_function
+
# File: setup_example.py
from numpy_distutils.core import Extension
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index 4e6d2587f..64c13fff0 100755
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -12,27 +12,27 @@ terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 08:31:19 $
Pearu Peterson
-"""
-import __version__
-f2py_version = __version__.version
+"""
+from __future__ import division, absolute_import, print_function
import sys
import os
import pprint
-import types
import re
-errmess=sys.stderr.write
-#outmess=sys.stdout.write
-show=pprint.pprint
-import crackfortran
-import rules
-import cb_rules
-import auxfuncs
-import cfuncs
-import f90mod_rules
+from . import crackfortran
+from . import rules
+from . import cb_rules
+from . import auxfuncs
+from . import cfuncs
+from . import f90mod_rules
+from . import __version__
+f2py_version = __version__.version
+errmess = sys.stderr.write
+#outmess=sys.stdout.write
+show = pprint.pprint
outmess = auxfuncs.outmess
try:
@@ -210,7 +210,7 @@ def scaninputline(inputline):
elif l=='-h': f2=1
elif l=='-m': f3=1
elif l[:2]=='-v':
- print f2py_version
+ print(f2py_version)
sys.exit()
elif l=='--show-compilers':
f5=1
@@ -223,7 +223,7 @@ def scaninputline(inputline):
elif l[:15] in '--include-paths':
f7=1
elif l[0]=='-':
- errmess('Unknown option %s\n'%`l`)
+ errmess('Unknown option %s\n'%repr(l))
sys.exit()
elif f2: f2=0;signsfile=l
elif f3: f3=0;modulename=l
@@ -235,12 +235,12 @@ def scaninputline(inputline):
try:
open(l).close()
files.append(l)
- except IOError,detail:
+ except IOError as detail:
errmess('IOError: %s. Skipping file "%s".\n'%(str(detail),l))
elif f==-1: skipfuncs.append(l)
elif f==0: onlyfuncs.append(l)
if not f5 and not files and not modulename:
- print __usage__
+ print(__usage__)
sys.exit()
if not os.path.isdir(buildpath):
if not verbose:
@@ -322,7 +322,7 @@ def buildmodules(lst):
ret = {}
for i in range(len(mnames)):
if mnames[i] in isusedby:
- outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i],','.join(map(lambda s:'"%s"'%s,isusedby[mnames[i]]))))
+ outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i],','.join(['"%s"'%s for s in isusedby[mnames[i]]])))
else:
um=[]
if 'use' in modules[i]:
@@ -339,7 +339,7 @@ def dict_append(d_out,d_in):
for (k,v) in d_in.items():
if k not in d_out:
d_out[k] = []
- if type(v) is types.ListType:
+ if isinstance(v, list):
d_out[k] = d_out[k] + v
else:
d_out[k].append(v)
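
dict_append merges extension-building options: list values extend the stored
list, scalar values are appended, and isinstance(v, list) replaces the
removed types.ListType check. A self-contained sketch of that behaviour with
toy keys:

    def dict_append(d_out, d_in):
        for k, v in d_in.items():
            d_out.setdefault(k, [])
            if isinstance(v, list):
                d_out[k] = d_out[k] + v
            else:
                d_out[k].append(v)

    d = {}
    dict_append(d, {'libraries': ['lapack'], 'sources': 'foo.f'})
    dict_append(d, {'libraries': ['blas']})
    print(d)   # {'libraries': ['lapack', 'blas'], 'sources': ['foo.f']}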
@@ -370,7 +370,7 @@ def run_main(comline_list):
if postlist[i]['block']=='python module' and '__user__' in postlist[i]['name']:
if postlist[i]['name'] in isusedby:
#if not quiet:
- outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'],','.join(map(lambda s:'"%s"'%s,isusedby[postlist[i]['name']]))))
+ outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'],','.join(['"%s"'%s for s in isusedby[postlist[i]['name']]])))
if 'signsfile' in options:
if options['verbose']>1:
outmess('Stopping. Edit the signature file and then run f2py on the signature file: ')
@@ -380,7 +380,7 @@ def run_main(comline_list):
if postlist[i]['block']!='python module':
if 'python module' not in options:
errmess('Tip: If your original code is Fortran source then you must use -m option.\n')
- raise TypeError('All blocks must be python module blocks but got %s'%(`postlist[i]['block']`))
+ raise TypeError('All blocks must be python module blocks but got %s'%(repr(postlist[i]['block'])))
auxfuncs.debugoptions=options['debug']
f90mod_rules.options=options
auxfuncs.wrapfuncs=options['wrapfuncs']
@@ -430,13 +430,15 @@ def run_compile():
remove_build_dir = 1
build_dir = os.path.join(tempfile.mktemp())
- sysinfo_flags = filter(re.compile(r'[-][-]link[-]').match,sys.argv[1:])
- sys.argv = filter(lambda a,flags=sysinfo_flags:a not in flags,sys.argv)
+ _reg1 = re.compile(r'[-][-]link[-]')
+ sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)]
+ sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags]
if sysinfo_flags:
sysinfo_flags = [f[7:] for f in sysinfo_flags]
- f2py_flags = filter(re.compile(r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include').match,sys.argv[1:])
- sys.argv = filter(lambda a,flags=f2py_flags:a not in flags,sys.argv)
+ _reg2 = re.compile(r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include')
+ f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)]
+ sys.argv = [_m for _m in sys.argv if _m not in f2py_flags]
f2py_flags2 = []
fl = 0
for a in sys.argv[1:]:
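
Each filter() call above became a two-step list comprehension because Python
3's filter() returns a one-shot iterator: the second pass, which strips the
matched flags out of sys.argv, would otherwise see an already-exhausted
result. A toy run of the same pattern:

    import re

    argv = ['f2py', '--link-lapack', 'foo.f', '--quiet']
    link = re.compile(r'[-][-]link[-]')
    sysinfo_flags = [a for a in argv[1:] if link.match(a)]
    argv = [a for a in argv if a not in sysinfo_flags]
    print(sysinfo_flags, argv)   # ['--link-lapack'] ['f2py', 'foo.f', '--quiet']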
@@ -450,12 +452,13 @@ def run_compile():
f2py_flags2.append(':')
f2py_flags.extend(f2py_flags2)
- sys.argv = filter(lambda a,flags=f2py_flags2:a not in flags,sys.argv)
-
- flib_flags = filter(re.compile(r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)').match,sys.argv[1:])
- sys.argv = filter(lambda a,flags=flib_flags:a not in flags,sys.argv)
- fc_flags = filter(re.compile(r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))').match,sys.argv[1:])
- sys.argv = filter(lambda a,flags=fc_flags:a not in flags,sys.argv)
+ sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2]
+ _reg3 = re.compile(r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)')
+ flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)]
+ sys.argv = [_m for _m in sys.argv if _m not in flib_flags]
+ _reg4 = re.compile(r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))')
+ fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)]
+ sys.argv = [_m for _m in sys.argv if _m not in fc_flags]
if 1:
del_list = []
@@ -464,7 +467,7 @@ def run_compile():
if s[:len(v)]==v:
from numpy.distutils import fcompiler
fcompiler.load_all_fcompiler_classes()
- allowed_keys = fcompiler.fcompiler_class.keys()
+ allowed_keys = list(fcompiler.fcompiler_class.keys())
nv = ov = s[len(v):].lower()
if ov not in allowed_keys:
vmap = {} # XXX
@@ -472,7 +475,7 @@ def run_compile():
nv = vmap[ov]
except KeyError:
if ov not in vmap.values():
- print 'Unknown vendor: "%s"' % (s[len(v):])
+ print('Unknown vendor: "%s"' % (s[len(v):]))
nv = ov
i = flib_flags.index(s)
flib_flags[i] = '--fcompiler=' + nv
@@ -480,9 +483,12 @@ def run_compile():
for s in del_list:
i = flib_flags.index(s)
del flib_flags[i]
- assert len(flib_flags)<=2,`flib_flags`
- setup_flags = filter(re.compile(r'[-][-](verbose)').match,sys.argv[1:])
- sys.argv = filter(lambda a,flags=setup_flags:a not in flags,sys.argv)
+ assert len(flib_flags)<=2,repr(flib_flags)
+
+ _reg5 = re.compile(r'[-][-](verbose)')
+ setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)]
+ sys.argv = [_m for _m in sys.argv if _m not in setup_flags]
+
if '--quiet' in f2py_flags:
setup_flags.append('--quiet')
@@ -525,7 +531,7 @@ def run_compile():
if len(name_value)==2:
define_macros[i] = tuple(name_value)
else:
- print 'Invalid use of -D:',name_value
+ print('Invalid use of -D:',name_value)
from numpy.distutils.system_info import get_info
@@ -557,7 +563,7 @@ def run_compile():
i = get_info(n)
if not i:
outmess('No %s resources found in system'\
- ' (try `f2py --help-link`)\n' % (`n`))
+ ' (try `f2py --help-link`)\n' % (repr(n)))
dict_append(ext_args,**i)
ext = Extension(**ext_args)
diff --git a/numpy/f2py/f2py_testing.py b/numpy/f2py/f2py_testing.py
index 0c78f3594..e5ba8a9d5 100644
--- a/numpy/f2py/f2py_testing.py
+++ b/numpy/f2py/f2py_testing.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
import re
@@ -32,13 +34,13 @@ def run(runtest,test_functions,repeat=1):
else:
diff_memusage2 = memusage() - start_memusage
if diff_memusage2!=diff_memusage:
- print 'memory usage change at step %i:' % i,\
+ print('memory usage change at step %i:' % i,\
diff_memusage2-diff_memusage,\
- fname
+ fname)
diff_memusage = diff_memusage2
current_memusage = memusage()
- print 'run',repeat*len(test_functions),'tests',\
- 'in %.2f seconds' % ((jiffies()-start_jiffies)/100.0)
+ print('run',repeat*len(test_functions),'tests',\
+ 'in %.2f seconds' % ((jiffies()-start_jiffies)/100.0))
if start_memusage:
- print 'initial virtual memory size:',start_memusage,'bytes'
- print 'current virtual memory size:',current_memusage,'bytes'
+ print('initial virtual memory size:',start_memusage,'bytes')
+ print('current virtual memory size:',current_memusage,'bytes')
diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py
index e4a4b0e96..7e25a4930 100644
--- a/numpy/f2py/f90mod_rules.py
+++ b/numpy/f2py/f90mod_rules.py
@@ -11,7 +11,9 @@ terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/02/03 19:30:23 $
Pearu Peterson
+
"""
+from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.27 $"[10:-1]
@@ -23,11 +25,11 @@ errmess=sys.stderr.write
outmess=sys.stdout.write
show=pprint.pprint
-from auxfuncs import *
+from .auxfuncs import *
import numpy as np
-import capi_maps
-import func2subr
-from crackfortran import undo_rmbadname, undo_rmbadname1
+from . import capi_maps
+from . import func2subr
+from .crackfortran import undo_rmbadname, undo_rmbadname1
options={}
@@ -82,7 +84,7 @@ fgetdims2_sa="""\
def buildhooks(pymod):
global fgetdims1,fgetdims2
- import rules
+ from . import rules
ret = {'f90modhooks':[],'initf90modhooks':[],'body':[],
'need':['F_FUNC','arrayobject.h'],
'separatorsfor':{'includes0':'\n','includes':'\n'},
@@ -117,7 +119,7 @@ def buildhooks(pymod):
dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n'%(m['name']))
if hasnote(m):
note = m['note']
- if type(note) is type([]): note='\n'.join(note)
+ if isinstance(note, list): note='\n'.join(note)
dadd(note)
if onlyvars:
dadd('\\begin{description}')
@@ -143,7 +145,7 @@ def buildhooks(pymod):
dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n,var)))
if hasnote(var):
note = var['note']
- if type(note) is type([]): note='\n'.join(note)
+ if isinstance(note, list): note='\n'.join(note)
dadd('--- %s'%(note))
if isallocatable(var):
fargs.append('f2py_%s_getdims_%s'%(m['name'],n))
@@ -156,7 +158,7 @@ def buildhooks(pymod):
fadd('integer flag\n')
fhooks[0]=fhooks[0]+fgetdims1
dms = eval('range(1,%s+1)'%(dm['rank']))
- fadd(' allocate(d(%s))\n'%(','.join(map(lambda i:'s(%s)'%i,dms))))
+ fadd(' allocate(d(%s))\n'%(','.join(['s(%s)'%i for i in dms])))
fhooks[0]=fhooks[0]+use_fgetdims2
fadd('end subroutine %s'%(fargs[-1]))
else:
@@ -169,7 +171,7 @@ def buildhooks(pymod):
if hasbody(m):
for b in m['body']:
if not isroutine(b):
- print 'Skipping',b['block'],b['name']
+ print('Skipping',b['block'],b['name'])
continue
modobjs.append('%s()'%(b['name']))
b['modulename'] = m['name']
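Note: the import rewrites above are the standard fix for Python 3, which dropped implicit relative imports. A sketch of the pattern for a hypothetical package mypkg with sibling modules auxfuncs, capi_maps, crackfortran, and rules:

    # mypkg/consumer.py -- hypothetical package layout
    from __future__ import absolute_import

    from .auxfuncs import *                   # was: from auxfuncs import *
    from . import capi_maps                   # was: import capi_maps
    from .crackfortran import undo_rmbadname  # explicit names still work

    def buildhooks(pymod):
        from . import rules   # deferred import, same relative spelling
        return rules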
diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py
index f746108ad..f69750dd0 100644
--- a/numpy/f2py/func2subr.py
+++ b/numpy/f2py/func2subr.py
@@ -11,7 +11,9 @@ terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2004/11/26 11:13:06 $
Pearu Peterson
+
"""
+from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.16 $"[10:-1]
@@ -24,7 +26,7 @@ errmess=sys.stderr.write
outmess=sys.stdout.write
show=pprint.pprint
-from auxfuncs import *
+from .auxfuncs import *
def var2fixfortran(vars,a,fa=None,f90mode=None):
if fa is None:
fa = a
diff --git a/numpy/f2py/info.py b/numpy/f2py/info.py
index 8beaba228..c895c5de2 100644
--- a/numpy/f2py/info.py
+++ b/numpy/f2py/info.py
@@ -1,5 +1,6 @@
"""Fortran to Python Interface Generator.
"""
+from __future__ import division, absolute_import, print_function
postpone_import = True
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 9d943e884..f7f82fc99 100644
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -39,8 +39,7 @@ wrapper_function(args)
cleanup_a
return buildvalue
-"""
-"""
+
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
@@ -49,32 +48,34 @@ terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/08/30 08:58:42 $
Pearu Peterson
+
"""
+from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.129 $"[10:-1]
-import __version__
+from . import __version__
f2py_version = __version__.version
import pprint
import sys
import time
-import types
import copy
-errmess=sys.stderr.write
-outmess=sys.stdout.write
-show=pprint.pprint
-
-from auxfuncs import *
-import capi_maps
-from capi_maps import *
-import cfuncs
-import common_rules
-import use_rules
-import f90mod_rules
-import func2subr
-options={}
+from .auxfuncs import *
+from . import capi_maps
+from .capi_maps import *
+from . import cfuncs
+from . import common_rules
+from . import use_rules
+from . import f90mod_rules
+from . import func2subr
+
+errmess = sys.stderr.write
+outmess = sys.stdout.write
+show = pprint.pprint
+
+options={}
sepdict={}
#for k in ['need_cfuncs']: sepdict[k]=','
for k in ['decl',
@@ -259,7 +260,7 @@ routine_rules={
'separatorsfor':sepdict,
'body':"""
#begintitle#
-static char doc_#apiname#[] = \"\\\nFunction signature:\\n\\\n\t#docreturn##name#(#docsignatureshort#)\\n\\\n#docstrsigns#\";
+static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
PyObject *capi_args,
@@ -351,10 +352,10 @@ rout_rules=[
'freemem':'/*freemem*/',
'docsignshort':'','docsignoptshort':'',
'docstrsigns':'','latexdocstrsigns':'',
- 'docstrreq':'Required arguments:',
- 'docstropt':'Optional arguments:',
- 'docstrout':'Return objects:',
- 'docstrcbs':'Call-back functions:',
+ 'docstrreq':'\\nParameters\\n----------',
+ 'docstropt':'\\nOther Parameters\\n----------------',
+ 'docstrout':'\\nReturns\\n-------',
+ 'docstrcbs':'\\nNotes\\n-----\\nCall-back functions::\\n',
'latexdocstrreq':'\\noindent Required arguments:',
'latexdocstropt':'\\noindent Optional arguments:',
'latexdocstrout':'\\noindent Return objects:',
@@ -482,7 +483,7 @@ rout_rules=[
},{ # Function
'functype':'#ctype#',
'docreturn':{l_not(isintent_hide):'#rname#,'},
- 'docstrout':'\t#pydocsignout#',
+ 'docstrout':'#pydocsignout#',
'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasresultnote:'--- #resultnote#'}],
'callfortranroutine':[{l_and(debugcapi,isstringfunction):"""\
@@ -618,7 +619,7 @@ aux_rules=[
},
{
'return':',#varname#',
- 'docstrout':'\t#pydocsignout#',
+ 'docstrout':'#pydocsignout#',
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':l_and(isscalar,l_not(iscomplex),isintent_out),
@@ -698,9 +699,9 @@ arg_rules=[
},
# Doc signatures
{
- 'docstropt':{l_and(isoptional,isintent_nothide):'\t#pydocsign#'},
- 'docstrreq':{l_and(isrequired,isintent_nothide):'\t#pydocsign#'},
- 'docstrout':{isintent_out:'\t#pydocsignout#'},
+ 'docstropt':{l_and(isoptional,isintent_nothide):'#pydocsign#'},
+ 'docstrreq':{l_and(isrequired,isintent_nothide):'#pydocsign#'},
+ 'docstrout':{isintent_out:'#pydocsignout#'},
'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
@@ -732,7 +733,7 @@ arg_rules=[
{ # Common
'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'},
'docsignxashort':{isintent_nothide:'#varname#_extra_args,'},
- 'docstropt':{isintent_nothide:'\t#varname#_extra_args := () input tuple'},
+ 'docstropt':{isintent_nothide:'#varname#_extra_args : input tuple, optional\\n Default: ()'},
'docstrcbs':'#cbdocstr#',
'latexdocstrcbs':'\\item[] #cblatexdocstr#',
'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
@@ -990,7 +991,7 @@ if (#varname#_capi==Py_None) {
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=1,',
'docsignxashort':'overwrite_#varname#,',
- 'docstropt':'\toverwrite_#varname# := 1 input int',
+ 'docstropt':'overwrite_#varname# : input int, optional\\n Default: 1',
'_check':l_and(isarray,isintent_overwrite),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
@@ -1004,7 +1005,7 @@ if (#varname#_capi==Py_None) {
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=0,',
'docsignxashort':'overwrite_#varname#,',
- 'docstropt':'\toverwrite_#varname# := 0 input int',
+ 'docstropt':'overwrite_#varname# : input int, optional\\n Default: 0',
'_check':l_and(isarray,isintent_copy),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
@@ -1242,7 +1243,7 @@ def buildmodule(m,um):
elif k in cfuncs.commonhooks:
c=cfuncs.commonhooks[k]
else:
- errmess('buildmodule: unknown need %s.\n'%(`k`));continue
+ errmess('buildmodule: unknown need %s.\n'%(repr(k)));continue
code[n].append(c)
mod_rules.append(code)
for r in mod_rules:
@@ -1322,6 +1323,7 @@ def buildmodule(m,um):
################## Build C/API function #############
stnd={1:'st',2:'nd',3:'rd',4:'th',5:'th',6:'th',7:'th',8:'th',9:'th',0:'th'}
+
def buildapi(rout):
rout,wrap = func2subr.assubr(rout)
args,depargs=getargs2(rout)
@@ -1353,10 +1355,10 @@ def buildapi(rout):
if not isintent_hide(var[a]):
if not isoptional(var[a]):
nth=nth+1
- vrd['nth']=`nth`+stnd[nth%10]+' argument'
+ vrd['nth']=repr(nth)+stnd[nth%10]+' argument'
else:
nthk=nthk+1
- vrd['nth']=`nthk`+stnd[nthk%10]+' keyword'
+ vrd['nth']=repr(nthk)+stnd[nthk%10]+' keyword'
else: vrd['nth']='hidden'
savevrd[a]=vrd
for r in _rules:
@@ -1386,9 +1388,9 @@ def buildapi(rout):
vrd['check']=c
ar=applyrules(check_rules,vrd,var[a])
rd=dictappend(rd,ar)
- if type(rd['cleanupfrompyobj']) is types.ListType:
+ if isinstance(rd['cleanupfrompyobj'], list):
rd['cleanupfrompyobj'].reverse()
- if type(rd['closepyobjfrom']) is types.ListType:
+ if isinstance(rd['closepyobjfrom'], list):
rd['closepyobjfrom'].reverse()
rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#',
{'docsign':rd['docsign'],
@@ -1413,15 +1415,15 @@ def buildapi(rout):
else:
rd['callcompaqfortran']=cfs
rd['callfortran']=cfs
- if type(rd['docreturn'])==types.ListType:
+ if isinstance(rd['docreturn'], list):
rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))+' = '
rd['docstrsigns']=[]
rd['latexdocstrsigns']=[]
for k in ['docstrreq','docstropt','docstrout','docstrcbs']:
- if k in rd and type(rd[k])==types.ListType:
+ if k in rd and isinstance(rd[k], list):
rd['docstrsigns']=rd['docstrsigns']+rd[k]
k='latex'+k
- if k in rd and type(rd[k])==types.ListType:
+ if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
['\\begin{description}']+rd[k][1:]+\
['\\end{description}']
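Note: rules.py also drops the types module: types.ListType no longer exists on Python 3, and isinstance() against the builtin type is the portable check. A tiny self-contained illustration with a made-up value:

    note = ['first line', 'second line']
    # was: if type(note) is types.ListType: ...
    if isinstance(note, list):
        note = '\n'.join(note)
    assert note == 'first line\nsecond line'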
diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py
index 37aab191c..96a047ec7 100644
--- a/numpy/f2py/setup.py
+++ b/numpy/f2py/setup.py
@@ -14,7 +14,9 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.32 $
$Date: 2005/01/30 17:22:14 $
Pearu Peterson
+
"""
+from __future__ import division, print_function
__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $"
@@ -51,7 +53,7 @@ def configuration(parent_package='',top_path=None):
log.info('Creating %s', target)
f = open(target,'w')
f.write('''\
-#!/usr/bin/env %s
+#!%s
# See http://cens.ioc.ee/projects/f2py2e/
import os, sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
@@ -75,7 +77,7 @@ else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
sys.exit(1)
main()
-'''%(os.path.basename(sys.executable)))
+'''%(sys.executable))
f.close()
return target
diff --git a/numpy/f2py/setupscons.py b/numpy/f2py/setupscons.py
deleted file mode 100755
index e30fd8743..000000000
--- a/numpy/f2py/setupscons.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/usr/bin/env python
-"""
-setup.py for installing F2PY
-
-Usage:
- python setup.py install
-
-Copyright 2001-2005 Pearu Peterson all rights reserved,
-Pearu Peterson <pearu@cens.ioc.ee>
-Permission to use, modify, and distribute this software is given under the
-terms of the NumPy License.
-
-NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
-$Revision: 1.32 $
-$Date: 2005/01/30 17:22:14 $
-Pearu Peterson
-"""
-
-__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $"
-
-import os
-import sys
-from distutils.dep_util import newer
-from numpy.distutils import log
-from numpy.distutils.core import setup
-from numpy.distutils.misc_util import Configuration
-
-from __version__ import version
-
-def configuration(parent_package='',top_path=None):
- config = Configuration('f2py', parent_package, top_path)
-
- config.add_data_dir('docs')
-
- config.add_data_files('src/fortranobject.c',
- 'src/fortranobject.h',
- 'f2py.1'
- )
-
- config.make_svn_version_py()
-
- def generate_f2py_py(build_dir):
- f2py_exe = 'f2py'+os.path.basename(sys.executable)[6:]
- if f2py_exe[-4:]=='.exe':
- f2py_exe = f2py_exe[:-4] + '.py'
- if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py':
- f2py_exe = f2py_exe + '.py'
- target = os.path.join(build_dir,f2py_exe)
- if newer(__file__,target):
- log.info('Creating %s', target)
- f = open(target,'w')
- f.write('''\
-#!/usr/bin/env %s
-# See http://cens.ioc.ee/projects/f2py2e/
-import os, sys
-for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
- try:
- i=sys.argv.index("--"+mode)
- del sys.argv[i]
- break
- except ValueError: pass
-os.environ["NO_SCIPY_IMPORT"]="f2py"
-if mode=="g3-numpy":
- print >> sys.stderr, "G3 f2py support is not implemented, yet."
- sys.exit(1)
-elif mode=="2e-numeric":
- from f2py2e import main
-elif mode=="2e-numarray":
- sys.argv.append("-DNUMARRAY")
- from f2py2e import main
-elif mode=="2e-numpy":
- from numpy.f2py import main
-else:
- print >> sys.stderr, "Unknown mode:",`mode`
- sys.exit(1)
-main()
-'''%(os.path.basename(sys.executable)))
- f.close()
- return target
-
- config.add_scripts(generate_f2py_py)
-
- return config
-
-if __name__ == "__main__":
-
- config = configuration(top_path='')
- version = config.get_version()
- print 'F2PY Version',version
- config = config.todict()
-
- if sys.version[:3]>='2.3':
- config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\
- "/F2PY-2-latest.tar.gz"
- config['classifiers'] = [
- 'Development Status :: 5 - Production/Stable',
- 'Intended Audience :: Developers',
- 'Intended Audience :: Science/Research',
- 'License :: OSI Approved :: NumPy License',
- 'Natural Language :: English',
- 'Operating System :: OS Independent',
- 'Programming Language :: C',
- 'Programming Language :: Fortran',
- 'Programming Language :: Python',
- 'Topic :: Scientific/Engineering',
- 'Topic :: Software Development :: Code Generators',
- ]
- setup(version=version,
- description = "F2PY - Fortran to Python Interface Generation",
- author = "Pearu Peterson",
- author_email = "pearu@cens.ioc.ee",
- maintainer = "Pearu Peterson",
- maintainer_email = "pearu@cens.ioc.ee",
- license = "BSD",
- platforms = "Unix, Windows (mingw|cygwin), Mac OSX",
- long_description = """\
-The Fortran to Python Interface Generator, or F2PY for short, is a
-command line tool (f2py) for generating Python C/API modules for
-wrapping Fortran 77/90/95 subroutines, accessing common blocks from
-Python, and calling Python functions from Fortran (call-backs).
-Interfacing subroutines/data from Fortran 90/95 modules is supported.""",
- url = "http://cens.ioc.ee/projects/f2py2e/",
- keywords = ['Fortran','f2py'],
- **config)
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index f5e2c6244..c07de99c0 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -126,9 +126,9 @@ fortran_doc (FortranDataDef def) {
size += strlen(def.doc);
p = (char*)malloc (size);
p[0] = '\0'; /* make sure that the buffer has zero length */
- if (sprintf(p,"%s - ",def.name)==0) goto fail;
if (def.rank==-1) {
if (def.doc==NULL) {
+ if (sprintf(p,"%s - ",def.name)==0) goto fail;
if (sprintf(p+strlen(p),"no docs available")==0)
goto fail;
} else {
diff --git a/numpy/f2py/src/fortranobject.h b/numpy/f2py/src/fortranobject.h
index 283021aa1..76a357b5e 100644
--- a/numpy/f2py/src/fortranobject.h
+++ b/numpy/f2py/src/fortranobject.h
@@ -9,7 +9,7 @@ extern "C" {
#ifdef FORTRANOBJECT_C
#define NO_IMPORT_ARRAY
#endif
-#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
+#define PY_ARRAY_UNIQUE_SYMBOL _npy_f2py_ARRAY_API
#include "numpy/arrayobject.h"
/*
@@ -31,27 +31,6 @@ extern "C" {
#define PyNumber_Int PyNumber_Long
#endif
-#if (PY_VERSION_HEX < 0x02060000)
-#define Py_TYPE(o) (((PyObject*)(o))->ob_type)
-#define Py_REFCNT(o) (((PyObject*)(o))->ob_refcnt)
-#define Py_SIZE(o) (((PyVarObject*)(o))->ob_size)
-#endif
-
- /*
-#ifdef F2PY_REPORT_ATEXIT_DISABLE
-#undef F2PY_REPORT_ATEXIT
-#else
-
-#ifndef __FreeBSD__
-#ifndef __WIN32__
-#ifndef __APPLE__
-#define F2PY_REPORT_ATEXIT
-#endif
-#endif
-#endif
-
-#endif
- */
#ifdef F2PY_REPORT_ATEXIT
#include <sys/timeb.h>
diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py
index be85a308a..6707e8d8b 100644
--- a/numpy/f2py/tests/test_array_from_pyobj.py
+++ b/numpy/f2py/tests/test_array_from_pyobj.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import unittest
import os
import sys
@@ -136,10 +138,10 @@ class Type(object):
self.dtypechar = typeinfo[self.NAME][0]
def cast_types(self):
- return map(self.__class__,self._cast_dict[self.NAME])
+ return [self.__class__(_m) for _m in self._cast_dict[self.NAME]]
def all_types(self):
- return map(self.__class__,self._type_names)
+ return [self.__class__(_m) for _m in self._type_names]
def smaller_types(self):
bits = typeinfo[self.NAME][3]
@@ -177,14 +179,14 @@ class Array(object):
# arr.dtypechar may be different from typ.dtypechar
self.arr = wrap.call(typ.type_num,dims,intent.flags,obj)
- assert_(isinstance(self.arr, ndarray),`type(self.arr)`)
+ assert_(isinstance(self.arr, ndarray),repr(type(self.arr)))
self.arr_attr = wrap.array_attrs(self.arr)
if len(dims)>1:
if self.intent.is_intent('c'):
assert_(intent.flags & wrap.F2PY_INTENT_C)
- assert_(not self.arr.flags['FORTRAN'],`self.arr.flags,getattr(obj,'flags',None)`)
+ assert_(not self.arr.flags['FORTRAN'],repr((self.arr.flags,getattr(obj,'flags',None))))
assert_(self.arr.flags['CONTIGUOUS'])
assert_(not self.arr_attr[6] & wrap.FORTRAN)
else:
@@ -199,14 +201,14 @@ class Array(object):
return
if intent.is_intent('cache'):
- assert_(isinstance(obj,ndarray),`type(obj)`)
+ assert_(isinstance(obj,ndarray),repr(type(obj)))
self.pyarr = array(obj).reshape(*dims).copy()
else:
self.pyarr = array(array(obj,
dtype = typ.dtypechar).reshape(*dims),
order=self.intent.is_intent('c') and 'C' or 'F')
assert_(self.pyarr.dtype == typ, \
- `self.pyarr.dtype,typ`)
+ repr((self.pyarr.dtype,typ)))
assert_(self.pyarr.flags['OWNDATA'], (obj, intent))
self.pyarr_attr = wrap.array_attrs(self.pyarr)
@@ -225,18 +227,18 @@ class Array(object):
assert_(self.arr_attr[2]==self.pyarr_attr[2]) # dimensions
if self.arr_attr[1]<=1:
assert_(self.arr_attr[3]==self.pyarr_attr[3],\
- `self.arr_attr[3],self.pyarr_attr[3],self.arr.tostring(),self.pyarr.tostring()`) # strides
+ repr((self.arr_attr[3],self.pyarr_attr[3],self.arr.tostring(),self.pyarr.tostring()))) # strides
assert_(self.arr_attr[5][-2:]==self.pyarr_attr[5][-2:],\
- `self.arr_attr[5],self.pyarr_attr[5]`) # descr
+ repr((self.arr_attr[5],self.pyarr_attr[5]))) # descr
assert_(self.arr_attr[6]==self.pyarr_attr[6],\
- `self.arr_attr[6],self.pyarr_attr[6],flags2names(0*self.arr_attr[6]-self.pyarr_attr[6]),flags2names(self.arr_attr[6]),intent`) # flags
+ repr((self.arr_attr[6],self.pyarr_attr[6],flags2names(0*self.arr_attr[6]-self.pyarr_attr[6]),flags2names(self.arr_attr[6]),intent))) # flags
if intent.is_intent('cache'):
assert_(self.arr_attr[5][3]>=self.type.elsize,\
- `self.arr_attr[5][3],self.type.elsize`)
+ repr((self.arr_attr[5][3],self.type.elsize)))
else:
assert_(self.arr_attr[5][3]==self.type.elsize,\
- `self.arr_attr[5][3],self.type.elsize`)
+ repr((self.arr_attr[5][3],self.type.elsize)))
assert_(self.arr_equal(self.pyarr,self.arr))
if isinstance(self.obj,ndarray):
@@ -286,9 +288,9 @@ class _test_shared_memory:
obj = array(self.num2seq,dtype=t.dtype)
a = self.array([len(self.num2seq)],intent.in_,obj)
if t.elsize==self.type.elsize:
- assert_(a.has_shared_memory(),`self.type.dtype,t.dtype`)
+ assert_(a.has_shared_memory(),repr((self.type.dtype,t.dtype)))
else:
- assert_(not a.has_shared_memory(),`t.dtype`)
+ assert_(not a.has_shared_memory(),repr(t.dtype))
def test_inout_2seq(self):
obj = array(self.num2seq,dtype=self.type.dtype)
@@ -297,7 +299,7 @@ class _test_shared_memory:
try:
a = self.array([2],intent.in_.inout,self.num2seq)
- except TypeError,msg:
+ except TypeError as msg:
if not str(msg).startswith('failed to initialize intent(inout|inplace|cache) array'):
raise
else:
@@ -313,7 +315,7 @@ class _test_shared_memory:
shape = (len(self.num23seq),len(self.num23seq[0]))
try:
a = self.array(shape,intent.in_.inout,obj)
- except ValueError,msg:
+ except ValueError as msg:
if not str(msg).startswith('failed to initialize intent(inout) array'):
raise
else:
@@ -329,7 +331,7 @@ class _test_shared_memory:
for t in self.type.cast_types():
obj = array(self.num2seq,dtype=t.dtype)
a = self.array([len(self.num2seq)],intent.in_.copy,obj)
- assert_(not a.has_shared_memory(),`t.dtype`)
+ assert_(not a.has_shared_memory(),repr(t.dtype))
def test_c_in_from_23seq(self):
a = self.array([len(self.num23seq),len(self.num23seq[0])],
@@ -341,7 +343,7 @@ class _test_shared_memory:
obj = array(self.num23seq,dtype=t.dtype)
a = self.array([len(self.num23seq),len(self.num23seq[0])],
intent.in_,obj)
- assert_(not a.has_shared_memory(),`t.dtype`)
+ assert_(not a.has_shared_memory(),repr(t.dtype))
def test_f_in_from_23casttype(self):
for t in self.type.cast_types():
@@ -349,9 +351,9 @@ class _test_shared_memory:
a = self.array([len(self.num23seq),len(self.num23seq[0])],
intent.in_,obj)
if t.elsize==self.type.elsize:
- assert_(a.has_shared_memory(),`t.dtype`)
+ assert_(a.has_shared_memory(),repr(t.dtype))
else:
- assert_(not a.has_shared_memory(),`t.dtype`)
+ assert_(not a.has_shared_memory(),repr(t.dtype))
def test_c_in_from_23casttype(self):
for t in self.type.cast_types():
@@ -359,23 +361,23 @@ class _test_shared_memory:
a = self.array([len(self.num23seq),len(self.num23seq[0])],
intent.in_.c,obj)
if t.elsize==self.type.elsize:
- assert_(a.has_shared_memory(),`t.dtype`)
+ assert_(a.has_shared_memory(),repr(t.dtype))
else:
- assert_(not a.has_shared_memory(),`t.dtype`)
+ assert_(not a.has_shared_memory(),repr(t.dtype))
def test_f_copy_in_from_23casttype(self):
for t in self.type.cast_types():
obj = array(self.num23seq,dtype=t.dtype,order='F')
a = self.array([len(self.num23seq),len(self.num23seq[0])],
intent.in_.copy,obj)
- assert_(not a.has_shared_memory(),`t.dtype`)
+ assert_(not a.has_shared_memory(),repr(t.dtype))
def test_c_copy_in_from_23casttype(self):
for t in self.type.cast_types():
obj = array(self.num23seq,dtype=t.dtype)
a = self.array([len(self.num23seq),len(self.num23seq[0])],
intent.in_.c.copy,obj)
- assert_(not a.has_shared_memory(),`t.dtype`)
+ assert_(not a.has_shared_memory(),repr(t.dtype))
def test_in_cache_from_2casttype(self):
for t in self.type.all_types():
@@ -384,21 +386,21 @@ class _test_shared_memory:
obj = array(self.num2seq,dtype=t.dtype)
shape = (len(self.num2seq),)
a = self.array(shape,intent.in_.c.cache,obj)
- assert_(a.has_shared_memory(),`t.dtype`)
+ assert_(a.has_shared_memory(),repr(t.dtype))
a = self.array(shape,intent.in_.cache,obj)
- assert_(a.has_shared_memory(),`t.dtype`)
+ assert_(a.has_shared_memory(),repr(t.dtype))
obj = array(self.num2seq,dtype=t.dtype,order='F')
a = self.array(shape,intent.in_.c.cache,obj)
- assert_(a.has_shared_memory(),`t.dtype`)
+ assert_(a.has_shared_memory(),repr(t.dtype))
a = self.array(shape,intent.in_.cache,obj)
- assert_(a.has_shared_memory(),`t.dtype`)
+ assert_(a.has_shared_memory(),repr(t.dtype))
try:
a = self.array(shape,intent.in_.cache,obj[::-1])
- except ValueError,msg:
+ except ValueError as msg:
if not str(msg).startswith('failed to initialize intent(cache) array'):
raise
else:
@@ -411,7 +413,7 @@ class _test_shared_memory:
shape = (len(self.num2seq),)
try:
a = self.array(shape,intent.in_.cache,obj)
- except ValueError,msg:
+ except ValueError as msg:
if not str(msg).startswith('failed to initialize intent(cache) array'):
raise
else:
@@ -429,7 +431,7 @@ class _test_shared_memory:
shape = (-1,3)
try:
a = self.array(shape,intent.cache.hide,None)
- except ValueError,msg:
+ except ValueError as msg:
if not str(msg).startswith('failed to create intent(cache|hide)|optional array'):
raise
else:
@@ -456,7 +458,7 @@ class _test_shared_memory:
shape = (-1,3)
try:
a = self.array(shape,intent.hide,None)
- except ValueError,msg:
+ except ValueError as msg:
if not str(msg).startswith('failed to create intent(cache|hide)|optional array'):
raise
else:
@@ -503,9 +505,9 @@ class _test_shared_memory:
assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS'])
shape = obj.shape
a = self.array(shape,intent.inplace,obj)
- assert_(obj[1][2]==a.arr[1][2],`obj,a.arr`)
+ assert_(obj[1][2]==a.arr[1][2],repr((obj,a.arr)))
a.arr[1][2]=54
- assert_(obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),`obj,a.arr`)
+ assert_(obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),repr((obj,a.arr)))
assert_(a.arr is obj)
assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace!
assert_(not obj.flags['CONTIGUOUS'])
@@ -520,9 +522,9 @@ class _test_shared_memory:
assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS'])
shape = obj.shape
a = self.array(shape,intent.inplace,obj)
- assert_(obj[1][2]==a.arr[1][2],`obj,a.arr`)
+ assert_(obj[1][2]==a.arr[1][2],repr((obj,a.arr)))
a.arr[1][2]=54
- assert_(obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),`obj,a.arr`)
+ assert_(obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),repr((obj,a.arr)))
assert_(a.arr is obj)
assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace!
assert_(not obj.flags['CONTIGUOUS'])
@@ -530,14 +532,14 @@ class _test_shared_memory:
for t in Type._type_names:
- exec '''\
+ exec('''\
class test_%s_gen(unittest.TestCase,
_test_shared_memory
):
def setUp(self):
self.type = Type(%r)
array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj)
-''' % (t,t,t)
+''' % (t,t,t))
if __name__ == "__main__":
setup()
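Note: two more syntax-level conversions recur through the test suite: the exec statement becomes the exec() function, and `except E, msg` becomes `except E as msg`. A self-contained sketch with a made-up class body and error message:

    src = 'class Demo(object):\n    answer = 42\n'
    exec(src)                    # exec('...') parses on both 2 and 3
    assert Demo.answer == 42

    try:
        raise ValueError('failed to initialize example array')
    except ValueError as msg:    # "except ValueError, msg:" is Py2-only
        assert str(msg).startswith('failed to initialize')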
diff --git a/numpy/f2py/tests/test_assumed_shape.py b/numpy/f2py/tests/test_assumed_shape.py
index e501b13c3..6c0ea9ebd 100644
--- a/numpy/f2py/tests/test_assumed_shape.py
+++ b/numpy/f2py/tests/test_assumed_shape.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import math
@@ -19,16 +21,16 @@ class TestAssumedShapeSumExample(util.F2PyTest):
@dec.slow
def test_all(self):
r = self.module.fsum([1,2])
- assert_(r==3,`r`)
+ assert_(r==3,repr(r))
r = self.module.sum([1,2])
- assert_(r==3,`r`)
+ assert_(r==3,repr(r))
r = self.module.sum_with_use([1,2])
- assert_(r==3,`r`)
+ assert_(r==3,repr(r))
r = self.module.mod.sum([1,2])
- assert_(r==3,`r`)
+ assert_(r==3,repr(r))
r = self.module.mod.fsum([1,2])
- assert_(r==3,`r`)
+ assert_(r==3,repr(r))
if __name__ == "__main__":
import nose
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 6a201a951..7a7e8bc11 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -1,7 +1,10 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
from numpy import array
import math
import util
+import textwrap
class TestF77Callback(util.F2PyTest):
code = """
@@ -38,27 +41,57 @@ cf2py intent(out) a
for name in "t,t2".split(","):
self.check_function(name)
+ @dec.slow
+ def test_docstring(self):
+ expected = """
+ a = t(fun,[fun_extra_args])
+
+ Wrapper for ``t``.
+
+ Parameters
+ ----------
+ fun : call-back function
+
+ Other Parameters
+ ----------------
+ fun_extra_args : input tuple, optional
+ Default: ()
+
+ Returns
+ -------
+ a : int
+
+ Notes
+ -----
+ Call-back functions::
+
+ def fun(): return a
+ Return objects:
+ a : int
+ """
+ assert_equal(self.module.t.__doc__, textwrap.dedent(expected).lstrip())
+
def check_function(self, name):
t = getattr(self.module, name)
r = t(lambda : 4)
- assert_( r==4,`r`)
+ assert_( r==4,repr(r))
r = t(lambda a:5,fun_extra_args=(6,))
- assert_( r==5,`r`)
+ assert_( r==5,repr(r))
r = t(lambda a:a,fun_extra_args=(6,))
- assert_( r==6,`r`)
+ assert_( r==6,repr(r))
r = t(lambda a:5+a,fun_extra_args=(7,))
- assert_( r==12,`r`)
+ assert_( r==12,repr(r))
r = t(lambda a:math.degrees(a),fun_extra_args=(math.pi,))
- assert_( r==180,`r`)
+ assert_( r==180,repr(r))
r = t(math.degrees,fun_extra_args=(math.pi,))
- assert_( r==180,`r`)
+ assert_( r==180,repr(r))
r = t(self.module.func, fun_extra_args=(6,))
- assert_( r==17,`r`)
+ assert_( r==17,repr(r))
r = t(self.module.func0)
- assert_( r==11,`r`)
+ assert_( r==11,repr(r))
r = t(self.module.func0._cpointer)
- assert_( r==11,`r`)
+ assert_( r==11,repr(r))
class A(object):
def __call__(self):
return 7
@@ -66,9 +99,9 @@ cf2py intent(out) a
return 9
a = A()
r = t(a)
- assert_( r==7,`r`)
+ assert_( r==7,repr(r))
r = t(a.mth)
- assert_( r==9,`r`)
+ assert_( r==9,repr(r))
if __name__ == "__main__":
import nose
diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py
index a6d485a88..493c06942 100644
--- a/numpy/f2py/tests/test_kind.py
+++ b/numpy/f2py/tests/test_kind.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import math
diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py
index a8a14ca4b..c4cb4889b 100644
--- a/numpy/f2py/tests/test_mixed.py
+++ b/numpy/f2py/tests/test_mixed.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import math
@@ -5,6 +7,7 @@ from numpy.testing import *
from numpy import array
import util
+import textwrap
def _path(*a):
return os.path.join(*((os.path.dirname(__file__),) + a))
@@ -20,6 +23,19 @@ class TestMixed(util.F2PyTest):
assert_( self.module.foo_fixed.bar12() == 12)
assert_( self.module.foo_free.bar13() == 13)
+ @dec.slow
+ def test_docstring(self):
+ expected = """
+ a = bar11()
+
+ Wrapper for ``bar11``.
+
+ Returns
+ -------
+ a : int
+ """
+ assert_equal(self.module.bar11.__doc__, textwrap.dedent(expected).lstrip())
+
if __name__ == "__main__":
import nose
nose.runmodule()
diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py
index 67c542688..213730008 100644
--- a/numpy/f2py/tests/test_return_character.py
+++ b/numpy/f2py/tests/test_return_character.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
from numpy import array
from numpy.compat import asbytes
@@ -8,17 +10,17 @@ class TestReturnCharacter(util.F2PyTest):
tname = t.__doc__.split()[0]
if tname in ['t0','t1','s0','s1']:
assert_( t(23)==asbytes('2'))
- r = t('ab');assert_( r==asbytes('a'),`r`)
- r = t(array('ab'));assert_( r==asbytes('a'),`r`)
- r = t(array(77,'u1'));assert_( r==asbytes('M'),`r`)
+ r = t('ab');assert_( r==asbytes('a'),repr(r))
+ r = t(array('ab'));assert_( r==asbytes('a'),repr(r))
+ r = t(array(77,'u1'));assert_( r==asbytes('M'),repr(r))
#assert_(_raises(ValueError, t, array([77,87])))
#assert_(_raises(ValueError, t, array(77)))
elif tname in ['ts','ss']:
- assert_( t(23)==asbytes('23 '),`t(23)`)
+ assert_( t(23)==asbytes('23 '),repr(t(23)))
assert_( t('123456789abcdef')==asbytes('123456789a'))
elif tname in ['t5','s5']:
- assert_( t(23)==asbytes('23 '),`t(23)`)
- assert_( t('ab')==asbytes('ab '),`t('ab')`)
+ assert_( t(23)==asbytes('23 '),repr(t(23)))
+ assert_( t('ab')==asbytes('ab '),repr(t('ab')))
assert_( t('123456789abcdef')==asbytes('12345'))
else:
raise NotImplementedError
diff --git a/numpy/f2py/tests/test_return_complex.py b/numpy/f2py/tests/test_return_complex.py
index f8c6d226a..f03416648 100644
--- a/numpy/f2py/tests/test_return_complex.py
+++ b/numpy/f2py/tests/test_return_complex.py
@@ -1,5 +1,8 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
from numpy import array
+from numpy.compat import long
import util
class TestReturnComplex(util.F2PyTest):
@@ -11,7 +14,7 @@ class TestReturnComplex(util.F2PyTest):
err = 0.0
assert_( abs(t(234j)-234.0j)<=err)
assert_( abs(t(234.6)-234.6)<=err)
- assert_( abs(t(234l)-234.0)<=err)
+ assert_( abs(t(long(234))-234.0)<=err)
assert_( abs(t(234.6+3j)-(234.6+3j))<=err)
#assert_( abs(t('234')-234.)<=err)
#assert_( abs(t('234.6')-234.6)<=err)
@@ -42,8 +45,8 @@ class TestReturnComplex(util.F2PyTest):
assert_raises(TypeError, t, {})
try:
- r = t(10l**400)
- assert_( `r` in ['(inf+0j)','(Infinity+0j)'],`r`)
+ r = t(10**400)
+ assert_( repr(r) in ['(inf+0j)','(Infinity+0j)'],repr(r))
except OverflowError:
pass
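Note: long literals such as 234L are a SyntaxError on Python 3. numpy.compat exports a `long` name (the builtin long on Python 2, int on Python 3) so the tests can still construct the same values explicitly:

    from numpy.compat import long

    assert long(234) == 234            # was: 234l
    assert isinstance(10**400, long)   # big ints promote automatically on 2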
diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py
index e1b3a37aa..d19653f4d 100644
--- a/numpy/f2py/tests/test_return_integer.py
+++ b/numpy/f2py/tests/test_return_integer.py
@@ -1,12 +1,15 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
from numpy import array
+from numpy.compat import long
import util
class TestReturnInteger(util.F2PyTest):
def check_function(self, t):
- assert_( t(123)==123,`t(123)`)
+ assert_( t(123)==123,repr(t(123)))
assert_( t(123.6)==123)
- assert_( t(123l)==123)
+ assert_( t(long(123))==123)
assert_( t('123')==123)
assert_( t(-123)==-123)
assert_( t([123])==123)
@@ -32,7 +35,7 @@ class TestReturnInteger(util.F2PyTest):
assert_raises(Exception, t, {})
if t.__doc__.split()[0] in ['t8','s8']:
- assert_raises(OverflowError, t, 100000000000000000000000l)
+ assert_raises(OverflowError, t, 100000000000000000000000)
assert_raises(OverflowError, t, 10000000011111111111111.23)
class TestF77ReturnInteger(TestReturnInteger):
diff --git a/numpy/f2py/tests/test_return_logical.py b/numpy/f2py/tests/test_return_logical.py
index 059b843dc..3823e5642 100644
--- a/numpy/f2py/tests/test_return_logical.py
+++ b/numpy/f2py/tests/test_return_logical.py
@@ -1,11 +1,14 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
from numpy import array
+from numpy.compat import long
import util
class TestReturnLogical(util.F2PyTest):
def check_function(self, t):
- assert_( t(True)==1,`t(True)`)
- assert_( t(False)==0,`t(False)`)
+ assert_( t(True)==1,repr(t(True)))
+ assert_( t(False)==0,repr(t(False)))
assert_( t(0)==0)
assert_( t(None)==0)
assert_( t(0.0)==0)
@@ -13,7 +16,7 @@ class TestReturnLogical(util.F2PyTest):
assert_( t(1j)==1)
assert_( t(234)==1)
assert_( t(234.6)==1)
- assert_( t(234l)==1)
+ assert_( t(long(234))==1)
assert_( t(234.6+3j)==1)
assert_( t('234')==1)
assert_( t('aaa')==1)
@@ -23,7 +26,7 @@ class TestReturnLogical(util.F2PyTest):
assert_( t({})==0)
assert_( t(t)==1)
assert_( t(-234)==1)
- assert_( t(10l**100)==1)
+ assert_( t(10**100)==1)
assert_( t([234])==1)
assert_( t((234,))==1)
assert_( t(array(234))==1)
diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py
index 5dc12708e..3286e11f2 100644
--- a/numpy/f2py/tests/test_return_real.py
+++ b/numpy/f2py/tests/test_return_real.py
@@ -1,5 +1,8 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
from numpy import array
+from numpy.compat import long
import math
import util
@@ -11,7 +14,7 @@ class TestReturnReal(util.F2PyTest):
err = 0.0
assert_( abs(t(234)-234.0)<=err)
assert_( abs(t(234.6)-234.6)<=err)
- assert_( abs(t(234l)-234.0)<=err)
+ assert_( abs(t(long(234))-234.0)<=err)
assert_( abs(t('234')-234)<=err)
assert_( abs(t('234.6')-234.6)<=err)
assert_( abs(t(-234)+234)<=err)
@@ -40,8 +43,8 @@ class TestReturnReal(util.F2PyTest):
assert_raises(Exception, t, {})
try:
- r = t(10l**400)
- assert_( `r` in ['inf','Infinity'],`r`)
+ r = t(10**400)
+ assert_( repr(r) in ['inf','Infinity'],repr(r))
except OverflowError:
pass
diff --git a/numpy/f2py/tests/test_size.py b/numpy/f2py/tests/test_size.py
index a548e9885..6cc508a19 100644
--- a/numpy/f2py/tests/test_size.py
+++ b/numpy/f2py/tests/test_size.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import math
@@ -16,29 +18,29 @@ class TestSizeSumExample(util.F2PyTest):
@dec.slow
def test_all(self):
r = self.module.foo([[1,2]])
- assert_equal(r, [3],`r`)
+ assert_equal(r, [3],repr(r))
r = self.module.foo([[1,2],[3,4]])
- assert_equal(r, [3,7],`r`)
+ assert_equal(r, [3,7],repr(r))
r = self.module.foo([[1,2],[3,4],[5,6]])
- assert_equal(r, [3,7,11],`r`)
+ assert_equal(r, [3,7,11],repr(r))
@dec.slow
def test_transpose(self):
r = self.module.trans([[1,2]])
- assert_equal(r, [[1],[2]],`r`)
+ assert_equal(r, [[1],[2]],repr(r))
r = self.module.trans([[1,2,3],[4,5,6]])
- assert_equal(r, [[1,4],[2,5],[3,6]],`r`)
+ assert_equal(r, [[1,4],[2,5],[3,6]],repr(r))
@dec.slow
def test_flatten(self):
r = self.module.flatten([[1,2]])
- assert_equal(r, [1,2],`r`)
+ assert_equal(r, [1,2],repr(r))
r = self.module.flatten([[1,2,3],[4,5,6]])
- assert_equal(r, [1,2,3,4,5,6],`r`)
+ assert_equal(r, [1,2,3,4,5,6],repr(r))
if __name__ == "__main__":
import nose
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index a5816b96f..56aff2b66 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -5,6 +5,7 @@ Utility functions for
- detecting if compilers are present
"""
+from __future__ import division, absolute_import, print_function
import os
import sys
@@ -57,7 +58,7 @@ def get_module_dir():
def get_temp_module_name():
# Assume single-threaded, and the module dir usable only by this thread
d = get_module_dir()
- for j in xrange(5403, 9999999):
+ for j in range(5403, 9999999):
name = "_test_ext_module_%d" % j
fn = os.path.join(d, name)
if name not in sys.modules and not os.path.isfile(fn+'.py'):
@@ -71,7 +72,7 @@ def _memoize(func):
if key not in memo:
try:
memo[key] = func(*a, **kw)
- except Exception, e:
+ except Exception as e:
memo[key] = e
raise
ret = memo[key]
diff --git a/numpy/f2py/use_rules.py b/numpy/f2py/use_rules.py
index 021d08601..9a8054617 100644
--- a/numpy/f2py/use_rules.py
+++ b/numpy/f2py/use_rules.py
@@ -13,7 +13,9 @@ terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2000/09/10 12:35:43 $
Pearu Peterson
+
"""
+from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.3 $"[10:-1]
@@ -25,7 +27,7 @@ errmess=sys.stderr.write
outmess=sys.stdout.write
show=pprint.pprint
-from auxfuncs import *
+from .auxfuncs import *
##############
usemodule_rules={
@@ -97,11 +99,11 @@ def buildusevar(name,realname,vars,usemodulename):
nummap={0:'Ro',1:'Ri',2:'Rii',3:'Riii',4:'Riv',5:'Rv',6:'Rvi',7:'Rvii',8:'Rviii',9:'Rix'}
vrd['texnamename']=name
for i in nummap.keys():
- vrd['texnamename']=vrd['texnamename'].replace(`i`,nummap[i])
+ vrd['texnamename']=vrd['texnamename'].replace(repr(i),nummap[i])
if hasnote(vars[realname]): vrd['note']=vars[realname]['note']
rd=dictappend({},vrd)
var=vars[realname]
- print name,realname,vars[realname]
+ print(name,realname,vars[realname])
ret=applyrules(usemodule_rules,rd)
return ret
diff --git a/numpy/fft/SConscript b/numpy/fft/SConscript
deleted file mode 100644
index adceea011..000000000
--- a/numpy/fft/SConscript
+++ /dev/null
@@ -1,8 +0,0 @@
-# Last Change: Thu Jun 12 06:00 PM 2008 J
-# vim:syntax=python
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-env.NumpyPythonExtension('fftpack_lite',
- source = ['fftpack_litemodule.c', 'fftpack.c'])
diff --git a/numpy/fft/SConstruct b/numpy/fft/SConstruct
deleted file mode 100644
index a377d8391..000000000
--- a/numpy/fft/SConstruct
+++ /dev/null
@@ -1,2 +0,0 @@
-from numscons import GetInitEnvironment
-GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript')
diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py
index 324e39f4d..96809a94f 100644
--- a/numpy/fft/__init__.py
+++ b/numpy/fft/__init__.py
@@ -1,8 +1,10 @@
+from __future__ import division, absolute_import, print_function
+
# To get sub-modules
-from info import __doc__
+from .info import __doc__
-from fftpack import *
-from helper import *
+from .fftpack import *
+from .helper import *
from numpy.testing import Tester
test = Tester().test
diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py
index 80d21ec1d..4961b2989 100644
--- a/numpy/fft/fftpack.py
+++ b/numpy/fft/fftpack.py
@@ -30,12 +30,14 @@ The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
+from __future__ import division, absolute_import, print_function
+
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
-import fftpack_lite as fftpack
+from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
@@ -271,7 +273,8 @@ def rfft(a, n=None, axis=-1):
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
- The length of the transformed axis is ``n/2+1``.
+ If `n` is even, the length of the transformed axis is ``(n/2)+1``.
+ If `n` is odd, the length is ``(n+1)/2``.
Raises
------
@@ -293,14 +296,15 @@ def rfft(a, n=None, axis=-1):
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
- axis of the output is therefore ``n/2+1``.
+ axis of the output is therefore ``n//2+1``.
- When ``A = rfft(a)``, ``A[0]`` contains the zero-frequency term, which
- must be purely real due to the Hermite symmetry.
+ When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
+ the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
- If `n` is even, ``A[-1]`` contains the term for frequencies ``n/2`` and
- ``-n/2``, and must also be purely real. If `n` is odd, ``A[-1]``
- contains the term for frequency ``A[(n-1)/2]``, and is complex in the
+ If `n` is even, ``A[-1]`` contains the term representing both positive
+ and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
+ real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
+ the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
@@ -343,7 +347,7 @@ def irfft(a, n=None, axis=-1):
The input array.
n : int, optional
Length of the transformed axis of the output.
- For `n` output points, ``n/2+1`` input points are necessary. If the
+ For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input (along the axis specified by `axis`).
@@ -507,7 +511,7 @@ def _cook_nd_args(a, s=None, axes=None, invreal=0):
shapeless = 0
s = list(s)
if axes is None:
- axes = range(-len(s), 0)
+ axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
@@ -518,7 +522,7 @@ def _cook_nd_args(a, s=None, axes=None, invreal=0):
def _raw_fftnd(a, s=None, axes=None, function=fft):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
- itl = range(len(axes))
+ itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
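Note: range() returns a lazy sequence object on Python 3; it can be indexed but not reversed in place or mutated, hence the list(range(...)) wrappers above:

    itl = list(range(3))    # was: itl = range(3)
    itl.reverse()           # AttributeError on a bare Python 3 range
    assert itl == [2, 1, 0]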
diff --git a/numpy/fft/fftpack_litemodule.c b/numpy/fft/fftpack_litemodule.c
index 499c72828..6f6a6c9f3 100644
--- a/numpy/fft/fftpack_litemodule.c
+++ b/numpy/fft/fftpack_litemodule.c
@@ -45,10 +45,12 @@ fftpack_cfftf(PyObject *NPY_UNUSED(self), PyObject *args)
nrepeats = PyArray_SIZE(data)/npts;
dptr = (double *)PyArray_DATA(data);
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
for (i = 0; i < nrepeats; i++) {
cfftf(npts, dptr, wsave);
dptr += npts*2;
}
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
PyArray_Free(op2, (char *)wsave);
return (PyObject *)data;
@@ -96,10 +98,12 @@ fftpack_cfftb(PyObject *NPY_UNUSED(self), PyObject *args)
nrepeats = PyArray_SIZE(data)/npts;
dptr = (double *)PyArray_DATA(data);
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
for (i = 0; i < nrepeats; i++) {
cfftb(npts, dptr, wsave);
dptr += npts*2;
}
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
PyArray_Free(op2, (char *)wsave);
return (PyObject *)data;
@@ -131,7 +135,9 @@ fftpack_cffti(PyObject *NPY_UNUSED(self), PyObject *args)
}
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
cffti(n, (double *)PyArray_DATA((PyArrayObject*)op));
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
return (PyObject *)op;
@@ -183,6 +189,7 @@ fftpack_rfftf(PyObject *NPY_UNUSED(self), PyObject *args)
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
for (i = 0; i < nrepeats; i++) {
memcpy((char *)(rptr+1), dptr, npts*sizeof(double));
rfftf(npts, rptr+1, wsave);
@@ -191,6 +198,7 @@ fftpack_rfftf(PyObject *NPY_UNUSED(self), PyObject *args)
rptr += rstep;
dptr += npts;
}
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
PyArray_Free(op2, (char *)wsave);
Py_DECREF(data);
@@ -245,6 +253,7 @@ fftpack_rfftb(PyObject *NPY_UNUSED(self), PyObject *args)
dptr = (double *)PyArray_DATA(data);
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
for (i = 0; i < nrepeats; i++) {
memcpy((char *)(rptr + 1), (dptr + 2), (npts - 1)*sizeof(double));
rptr[0] = dptr[0];
@@ -252,6 +261,7 @@ fftpack_rfftb(PyObject *NPY_UNUSED(self), PyObject *args)
rptr += npts;
dptr += npts*2;
}
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
PyArray_Free(op2, (char *)wsave);
Py_DECREF(data);
@@ -285,7 +295,9 @@ fftpack_rffti(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
rffti(n, (double *)PyArray_DATA((PyArrayObject*)op));
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
return (PyObject *)op;
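Note: the Py_BEGIN_ALLOW_THREADS/Py_END_ALLOW_THREADS pairs release the GIL while the FFTPACK loops run, so transforms on separate arrays can overlap across Python threads. A rough way to observe this (array size, iteration count, and thread count are arbitrary, and any speedup depends on the machine):

    import threading
    import numpy as np

    chunks = [np.random.rand(2**18) for _ in range(4)]

    def work(x):
        # Most of the time is spent inside the C FFT loop, which now
        # runs with the GIL released.
        for _ in range(20):
            np.fft.fft(x)

    threads = [threading.Thread(target=work, args=(c,)) for c in chunks]
    for t in threads:
        t.start()
    for t in threads:
        t.join()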
diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py
index f6c570445..0a475153f 100644
--- a/numpy/fft/helper.py
+++ b/numpy/fft/helper.py
@@ -1,16 +1,20 @@
"""
Discrete Fourier Transforms - helper.py
+
"""
+from __future__ import division, absolute_import, print_function
+
+import numpy.core.numerictypes as nt
+from numpy.core import (
+ asarray, concatenate, arange, take, integer, empty
+ )
+
# Created by Pearu Peterson, September 2002
-__all__ = ['fftshift','ifftshift','fftfreq']
+__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
-from numpy.core import asarray, concatenate, arange, take, \
- integer, empty
-import numpy.core.numerictypes as nt
-import types
-def fftshift(x,axes=None):
+def fftshift(x, axes=None):
"""
Shift the zero-frequency component to the center of the spectrum.
@@ -57,7 +61,7 @@ def fftshift(x,axes=None):
tmp = asarray(x)
ndim = len(tmp.shape)
if axes is None:
- axes = range(ndim)
+ axes = list(range(ndim))
elif isinstance(axes, (int, nt.integer)):
axes = (axes,)
y = tmp
@@ -69,7 +73,7 @@ def fftshift(x,axes=None):
return y
-def ifftshift(x,axes=None):
+def ifftshift(x, axes=None):
"""
The inverse of fftshift.
@@ -105,7 +109,7 @@ def ifftshift(x,axes=None):
tmp = asarray(x)
ndim = len(tmp.shape)
if axes is None:
- axes = range(ndim)
+ axes = list(range(ndim))
elif isinstance(axes, (int, nt.integer)):
axes = (axes,)
y = tmp
@@ -116,28 +120,31 @@ def ifftshift(x,axes=None):
y = take(y,mylist,k)
return y
-def fftfreq(n,d=1.0):
+
+def fftfreq(n, d=1.0):
"""
Return the Discrete Fourier Transform sample frequencies.
- The returned float array contains the frequency bins in
- cycles/unit (with zero at the start) given a window length `n` and a
- sample spacing `d`::
+ The returned float array `f` contains the frequency bin centers in cycles
+ per unit of the sample spacing (with zero at the start). For instance, if
+ the sample spacing is in seconds, then the frequency unit is cycles/second.
+
+ Given a window length `n` and a sample spacing `d`::
- f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even
+ f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even
f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd
Parameters
----------
n : int
Window length.
- d : scalar
- Sample spacing.
-
+ d : scalar, optional
+ Sample spacing (inverse of the sampling rate). Defaults to 1.
+
Returns
-------
- out : ndarray
- The array of length `n`, containing the sample frequencies.
+ f : ndarray
+ Array of length `n` containing the sample frequencies.
Examples
--------
@@ -150,13 +157,65 @@ def fftfreq(n,d=1.0):
array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25])
"""
- assert isinstance(n,types.IntType) or isinstance(n, integer)
- val = 1.0/(n*d)
+ if not (isinstance(n,int) or isinstance(n, integer)):
+ raise ValueError("n should be an integer")
+ val = 1.0 / (n * d)
results = empty(n, int)
N = (n-1)//2 + 1
- p1 = arange(0,N,dtype=int)
+ p1 = arange(0, N, dtype=int)
results[:N] = p1
- p2 = arange(-(n//2),0,dtype=int)
+ p2 = arange(-(n//2), 0, dtype=int)
results[N:] = p2
return results * val
#return hstack((arange(0,(n-1)/2 + 1), arange(-(n/2),0))) / (n*d)
+
+
+def rfftfreq(n, d=1.0):
+ """
+ Return the Discrete Fourier Transform sample frequencies
+ (for usage with rfft, irfft).
+
+ The returned float array `f` contains the frequency bin centers in cycles
+ per unit of the sample spacing (with zero at the start). For instance, if
+ the sample spacing is in seconds, then the frequency unit is cycles/second.
+
+ Given a window length `n` and a sample spacing `d`::
+
+ f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even
+ f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd
+
+ Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)
+ the Nyquist frequency component is considered to be positive.
+
+ Parameters
+ ----------
+ n : int
+ Window length.
+ d : scalar, optional
+ Sample spacing (inverse of the sampling rate). Defaults to 1.
+
+ Returns
+ -------
+ f : ndarray
+ Array of length ``n//2 + 1`` containing the sample frequencies.
+
+ Examples
+ --------
+ >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)
+ >>> fourier = np.fft.rfft(signal)
+ >>> n = signal.size
+ >>> sample_rate = 100
+ >>> freq = np.fft.fftfreq(n, d=1./sample_rate)
+ >>> freq
+ array([ 0., 10., 20., 30., 40., -50., -40., -30., -20., -10.])
+ >>> freq = np.fft.rfftfreq(n, d=1./sample_rate)
+ >>> freq
+ array([ 0., 10., 20., 30., 40., 50.])
+
+ """
+ if not (isinstance(n,int) or isinstance(n, integer)):
+ raise ValueError("n should be an integer")
+ val = 1.0/(n*d)
+ N = n//2 + 1
+ results = arange(0, N, dtype=int)
+ return results * val
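Note: assuming a NumPy build that includes the new helper, rfftfreq relates to fftfreq as follows: for even n the first n//2+1 entries agree, except that the Nyquist bin is labeled positive rather than negative, matching the layout rfft actually returns:

    import numpy as np

    n = 8
    full = np.fft.fftfreq(n)   # [0., .125, .25, .375, -.5, -.375, -.25, -.125]
    half = np.fft.rfftfreq(n)  # [0., .125, .25, .375, .5]
    assert np.allclose(half[:-1], full[:n//2])
    assert half[-1] == -full[n//2] == 0.5   # Nyquist sign is the only difference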
diff --git a/numpy/fft/info.py b/numpy/fft/info.py
index f36a07ebf..916d452f2 100644
--- a/numpy/fft/info.py
+++ b/numpy/fft/info.py
@@ -46,6 +46,7 @@ Helper routines
:toctree: generated/
fftfreq Discrete Fourier Transform sample frequencies.
+ rfftfreq DFT sample frequencies (for usage with rfft, irfft).
fftshift Shift zero-frequency component to center of spectrum.
ifftshift Inverse of fftshift.
@@ -112,7 +113,7 @@ The inverse DFT is defined as
.. math::
a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
- \\qquad n = 0,\\ldots,n-1.
+ \\qquad m = 0,\\ldots,n-1.
It differs from the forward transform by the sign of the exponential
argument and the normalization by :math:`1/n`.
@@ -173,5 +174,6 @@ Examples
For examples, see the various functions.
"""
+from __future__ import division, absolute_import, print_function
depends = ['core']
diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py
index 6acad7c9a..23a945fce 100644
--- a/numpy/fft/setup.py
+++ b/numpy/fft/setup.py
@@ -1,3 +1,4 @@
+from __future__ import division, print_function
def configuration(parent_package='',top_path=None):
diff --git a/numpy/fft/setupscons.py b/numpy/fft/setupscons.py
deleted file mode 100644
index 54551b0a3..000000000
--- a/numpy/fft/setupscons.py
+++ /dev/null
@@ -1,15 +0,0 @@
-def configuration(parent_package = '', top_path = None):
- from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
- config = Configuration('fft', parent_package, top_path)
-
- config.add_data_dir('tests')
-
- config.add_sconscript('SConstruct',
- source_files = ['fftpack_litemodule.c', 'fftpack.c',
- 'fftpack.h'])
-
- return config
-
-if __name__ == '__main__':
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/fft/tests/test_fftpack.py b/numpy/fft/tests/test_fftpack.py
index dceb3bd89..50702ab3c 100644
--- a/numpy/fft/tests/test_fftpack.py
+++ b/numpy/fft/tests/test_fftpack.py
@@ -1,4 +1,4 @@
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py
index c5578d390..f30d9fff6 100644
--- a/numpy/fft/tests/test_helper.py
+++ b/numpy/fft/tests/test_helper.py
@@ -1,8 +1,10 @@
#!/usr/bin/env python
-# Copied from fftpack.helper by Pearu Peterson, October 2005
-""" Test functions for fftpack.helper module
+"""Test functions for fftpack.helper module
+
+Copied from fftpack.helper by Pearu Peterson, October 2005
+
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
@@ -49,6 +51,17 @@ class TestFFTFreq(TestCase):
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
+class TestRFFTFreq(TestCase):
+
+ def test_definition(self):
+ x = [0, 1, 2, 3, 4]
+ assert_array_almost_equal(9*fft.rfftfreq(9), x)
+ assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)
+ x = [0, 1, 2, 3, 4, 5]
+ assert_array_almost_equal(10*fft.rfftfreq(10), x)
+ assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
+
+
class TestIRFFTN(TestCase):
def test_not_last_axis_success(self):
diff --git a/numpy/lib/SConscript b/numpy/lib/SConscript
deleted file mode 100644
index 2d1ed5576..000000000
--- a/numpy/lib/SConscript
+++ /dev/null
@@ -1,7 +0,0 @@
-# Last Change: Thu Jun 12 06:00 PM 2008 J
-# vim:syntax=python
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Prepend(CPPPATH=["#$build_prefix/numpy/core/src/private"])
-env.NumpyPythonExtension('_compiled_base', source = ['src/_compiled_base.c'])
diff --git a/numpy/lib/SConstruct b/numpy/lib/SConstruct
deleted file mode 100644
index a377d8391..000000000
--- a/numpy/lib/SConstruct
+++ /dev/null
@@ -1,2 +0,0 @@
-from numscons import GetInitEnvironment
-GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript')
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index 17557605d..64a8550c6 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -1,24 +1,28 @@
-from info import __doc__
+from __future__ import division, absolute_import, print_function
+
+import math
+
+from .info import __doc__
from numpy.version import version as __version__
-from type_check import *
-from index_tricks import *
-from function_base import *
-from shape_base import *
-from stride_tricks import *
-from twodim_base import *
-from ufunclike import *
+from .type_check import *
+from .index_tricks import *
+from .function_base import *
+from .nanfunctions import *
+from .shape_base import *
+from .stride_tricks import *
+from .twodim_base import *
+from .ufunclike import *
-import scimath as emath
-from polynomial import *
+from . import scimath as emath
+from .polynomial import *
#import convertcode
-from utils import *
-from arraysetops import *
-from npyio import *
-from financial import *
-import math
-from arrayterator import *
-from arraypad import *
+from .utils import *
+from .arraysetops import *
+from .npyio import *
+from .financial import *
+from .arrayterator import *
+from .arraypad import *
__all__ = ['emath','math']
__all__ += type_check.__all__
@@ -34,6 +38,7 @@ __all__ += utils.__all__
__all__ += arraysetops.__all__
__all__ += npyio.__all__
__all__ += financial.__all__
+__all__ += nanfunctions.__all__
from numpy.testing import Tester
test = Tester().test
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index 390c56d36..617acdac1 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -31,10 +31,12 @@ Example::
>>> fp.close()
"""
+from __future__ import division, absolute_import, print_function
__docformat__ = "restructuredtext en"
import os
+import sys
from shutil import rmtree, copyfile, copyfileobj
_open = open
@@ -102,7 +104,7 @@ class _FileOpeners(object):
"""
self._load()
- return self._file_openers.keys()
+ return list(self._file_openers.keys())
def __getitem__(self, key):
self._load()
return self._file_openers[key]
@@ -251,7 +253,10 @@ class DataSource (object):
"""Test if path is a net location. Tests the scheme and netloc."""
# We do this here to reduce the 'import numpy' initial import time.
- from urlparse import urlparse
+ if sys.version_info[0] >= 3:
+ from urllib.parse import urlparse
+ else:
+ from urlparse import urlparse
# BUG : URLs require a scheme string ('http://') to be used.
# www.google.com will fail.
@@ -270,8 +275,12 @@ class DataSource (object):
"""
# We import these here because importing urllib2 is slow and
# a significant fraction of numpy's total import time.
- from urllib2 import urlopen
- from urllib2 import URLError
+ if sys.version_info[0] >= 3:
+ from urllib.request import urlopen
+ from urllib.error import URLError
+ else:
+ from urllib2 import urlopen
+ from urllib2 import URLError
upath = self.abspath(path)
@@ -350,8 +359,10 @@ class DataSource (object):
"""
# We do this here to reduce the 'import numpy' initial import time.
- from urlparse import urlparse
-
+ if sys.version_info[0] >= 3:
+ from urllib.parse import urlparse
+ else:
+ from urlparse import urlparse
# TODO: This should be more robust. Handles case where path includes
# the destpath, but not other sub-paths. Failing case:
@@ -414,8 +425,12 @@ class DataSource (object):
"""
# We import this here because importing urllib2 is slow and
# a significant fraction of numpy's total import time.
- from urllib2 import urlopen
- from urllib2 import URLError
+ if sys.version_info[0] >= 3:
+ from urllib.request import urlopen
+ from urllib.error import URLError
+ else:
+ from urllib2 import urlopen
+ from urllib2 import URLError
# Test local path
if os.path.exists(path):
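
The version-gated imports above keep the slow urllib machinery out of
'import numpy' time while supporting both major Python versions; a
minimal standalone sketch of the same gate:

    import sys

    # urlparse moved to urllib.parse in Python 3.
    if sys.version_info[0] >= 3:
        from urllib.parse import urlparse
    else:
        from urlparse import urlparse

    # As the BUG note above says, a scheme string is required:
    print(urlparse("http://www.google.com/index.html").netloc)  # www.google.com
    print(urlparse("www.google.com").netloc)                    # '' -- no scheme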
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index 2f2a4bc57..aa39e25a1 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -1,12 +1,21 @@
-"""A collection of functions designed to help I/O with ascii files."""
+"""A collection of functions designed to help I/O with ascii files.
+
+"""
+from __future__ import division, absolute_import, print_function
+
__docformat__ = "restructuredtext en"
import sys
import numpy as np
import numpy.core.numeric as nx
-from __builtin__ import bool, int, long, float, complex, object, unicode, str
+from numpy.compat import asbytes, bytes, asbytes_nested, long, basestring
+
+if sys.version_info[0] >= 3:
+ from builtins import bool, int, float, complex, object, str
+ unicode = str
+else:
+ from __builtin__ import bool, int, float, complex, object, unicode, str
-from numpy.compat import asbytes, bytes, asbytes_nested
if sys.version_info[0] >= 3:
def _bytes_to_complex(s):
@@ -83,7 +92,8 @@ def has_nested_fields(ndtype):
Raises
------
- AttributeError : If `ndtype` does not have a `names` attribute.
+ AttributeError
+ If `ndtype` does not have a `names` attribute.
Examples
--------
@@ -263,7 +273,7 @@ class NameValidator(object):
* If 'lower', field names are converted to lower case.
The default value is True.
- replace_space: '_', optional
+ replace_space : '_', optional
Character(s) used in replacement of white spaces.
Notes
@@ -712,7 +722,8 @@ class StringConverter(object):
value = (value,)
_strict_call = self._strict_call
try:
- map(_strict_call, value)
+ for _m in value:
+ _strict_call(_m)
except ValueError:
# Raise an exception if we locked the converter...
if self._locked:
@@ -845,7 +856,7 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
if nbtypes == 0:
formats = tuple([ndtype.type] * len(names))
names = validate(names, defaultfmt=defaultfmt)
- ndtype = np.dtype(zip(names, formats))
+ ndtype = np.dtype(list(zip(names, formats)))
# Structured dtype: just validate the names as needed
else:
ndtype.names = validate(names, nbfields=nbtypes,
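
The `list(zip(...))` wrapper matters because `zip` returns an iterator
on Python 3, and `np.dtype` expects a concrete sequence of
(name, format) pairs; a quick sketch:

    import numpy as np

    names = ['a', 'b']
    formats = [np.float64, np.int32]

    # A bare zip object would not be understood by np.dtype on
    # Python 3, so it is materialized into a list first.
    dt = np.dtype(list(zip(names, formats)))
    print(dt)  # dtype([('a', '<f8'), ('b', '<i4')]) on little-endian builds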
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index 6212ac7f3..90b5fcfcd 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -1,47 +1,994 @@
"""
The arraypad module contains a group of functions to pad values onto the edges
of an n-dimensional array.
+
"""
+from __future__ import division, absolute_import, print_function
import numpy as np
+from numpy.compat import long
__all__ = ['pad']
-################################################################################
+###############################################################################
# Private utility functions.
-def _create_vector(vector, pad_tuple, before_val, after_val):
- '''
- Private function which creates the padded vector.
+def _arange_ndarray(arr, shape, axis, reverse=False):
+ """
+ Create an ndarray of `shape` with increments along specified `axis`
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ shape : tuple of ints
+ Shape of desired array. Should be equivalent to `arr.shape` except
+ `shape[axis]` which may have any positive value.
+ axis : int
+ Axis to increment along.
+ reverse : bool
+ If False, increment in a positive fashion from 1 to `shape[axis]`,
+ inclusive. If True, the bounds are the same but the order reversed.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array sized to pad `arr` along `axis`, with linear range from
+ 1 to `shape[axis]` along specified `axis`.
+
+ Notes
+ -----
+ The range is deliberately 1-indexed for this specific use case. Think of
+ this algorithm as broadcasting `np.arange` to a single `axis` of an
+ arbitrarily shaped ndarray.
+
+ """
+ initshape = tuple(1 if i != axis else shape[axis]
+ for (i, x) in enumerate(arr.shape))
+ if not reverse:
+ padarr = np.arange(1, shape[axis] + 1)
+ else:
+ padarr = np.arange(shape[axis], 0, -1)
+ padarr = padarr.reshape(initshape)
+ for i, dim in enumerate(shape):
+ if padarr.shape[i] != dim:
+ padarr = padarr.repeat(dim, axis=i)
+ return padarr
+
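
A standalone sketch of what `_arange_ndarray` computes, using a small
2-D target shape: a 1-indexed `arange` is reshaped to be a singleton on
every axis except `axis`, then repeated out to the full shape:

    import numpy as np

    shape, axis = (2, 3), 1
    padarr = np.arange(1, shape[axis] + 1).reshape((1, shape[axis]))
    padarr = padarr.repeat(shape[0], axis=0)
    print(padarr)
    # [[1 2 3]
    #  [1 2 3]]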
+
+def _round_ifneeded(arr, dtype):
+ """
+ Rounds arr inplace if destination dtype is integer.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array.
+ dtype : dtype
+ The dtype of the destination array.
+
+ """
+ if np.issubdtype(dtype, np.integer):
+ arr.round(out=arr)
+
+
+def _prepend_const(arr, pad_amt, val, axis=-1):
+ """
+ Prepend constant `val` along `axis` of `arr`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to prepend.
+ val : scalar
+        Constant value to use. For best results it should be of type
+        `arr.dtype`; if not, it will be cast to `arr.dtype`.
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt` constant `val` prepended along `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+ padshape = tuple(x if i != axis else pad_amt
+ for (i, x) in enumerate(arr.shape))
+ if val == 0:
+ return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr),
+ axis=axis)
+ else:
+ return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype),
+ arr), axis=axis)
+
+
+def _append_const(arr, pad_amt, val, axis=-1):
+ """
+ Append constant `val` along `axis` of `arr`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to append.
+ val : scalar
+        Constant value to use. For best results it should be of type
+        `arr.dtype`; if not, it will be cast to `arr.dtype`.
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt` constant `val` appended along `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+ padshape = tuple(x if i != axis else pad_amt
+ for (i, x) in enumerate(arr.shape))
+ if val == 0:
+ return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)),
+ axis=axis)
+ else:
+ return np.concatenate(
+ (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis)
+
+
+def _prepend_edge(arr, pad_amt, axis=-1):
+ """
+ Prepend `pad_amt` to `arr` along `axis` by extending edge values.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to prepend.
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+        Output array, extended by `pad_amt` edge values prepended along `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ edge_slice = tuple(slice(None) if i != axis else 0
+ for (i, x) in enumerate(arr.shape))
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+ edge_arr = arr[edge_slice].reshape(pad_singleton)
+ return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),
+ axis=axis)
+
+
+def _append_edge(arr, pad_amt, axis=-1):
+ """
+ Append `pad_amt` to `arr` along `axis` by extending edge values.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to append.
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+        Output array, extended by `pad_amt` edge values appended along
+        `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1
+ for (i, x) in enumerate(arr.shape))
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+ edge_arr = arr[edge_slice].reshape(pad_singleton)
+ return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),
+ axis=axis)
+
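
Both edge helpers boil down to slice, reshape, repeat, concatenate; a
two-axis sketch of the `_prepend_edge` recipe:

    import numpy as np

    arr = np.array([[1, 2, 3],
                    [4, 5, 6]])
    pad_amt, axis = 2, 1
    edge = arr[:, 0].reshape((2, 1))   # keep a singleton dim on `axis`
    print(np.concatenate((edge.repeat(pad_amt, axis=axis), arr), axis=axis))
    # [[1 1 1 2 3]
    #  [4 4 4 5 6]]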
+
+def _prepend_ramp(arr, pad_amt, end, axis=-1):
+ """
+ Prepend linear ramp along `axis`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to prepend.
+ end : scalar
+        Constant value to use. For best results it should be of type
+        `arr.dtype`; if not, it will be cast to `arr.dtype`.
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt` values prepended along `axis`. The
+ prepended region ramps linearly from the edge value to `end`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ # Generate shape for final concatenated array
+ padshape = tuple(x if i != axis else pad_amt
+ for (i, x) in enumerate(arr.shape))
+
+ # Generate an n-dimensional array incrementing along `axis`
+ ramp_arr = _arange_ndarray(arr, padshape, axis,
+ reverse=True).astype(np.float64)
+
+ # Appropriate slicing to extract n-dimensional edge along `axis`
+ edge_slice = tuple(slice(None) if i != axis else 0
+ for (i, x) in enumerate(arr.shape))
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+
+ # Extract edge, reshape to original rank, and extend along `axis`
+ edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+
+ # Linear ramp
+ slope = (end - edge_pad) / float(pad_amt)
+ ramp_arr = ramp_arr * slope
+ ramp_arr += edge_pad
+ _round_ifneeded(ramp_arr, arr.dtype)
+
+ # Ramp values will most likely be float, cast them to the same type as arr
+ return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)
+
+
+def _append_ramp(arr, pad_amt, end, axis=-1):
+ """
+ Append linear ramp along `axis`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to append.
+ end : scalar
+        Constant value to use. For best results it should be of type
+        `arr.dtype`; if not, it will be cast to `arr.dtype`.
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt` values appended along `axis`. The
+ appended region ramps linearly from the edge value to `end`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ # Generate shape for final concatenated array
+ padshape = tuple(x if i != axis else pad_amt
+ for (i, x) in enumerate(arr.shape))
+
+ # Generate an n-dimensional array incrementing along `axis`
+ ramp_arr = _arange_ndarray(arr, padshape, axis,
+ reverse=False).astype(np.float64)
+
+ # Slice a chunk from the edge to calculate stats on
+ edge_slice = tuple(slice(None) if i != axis else -1
+ for (i, x) in enumerate(arr.shape))
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+
+ # Extract edge, reshape to original rank, and extend along `axis`
+ edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
+
+ # Linear ramp
+ slope = (end - edge_pad) / float(pad_amt)
+ ramp_arr = ramp_arr * slope
+ ramp_arr += edge_pad
+ _round_ifneeded(ramp_arr, arr.dtype)
+
+ # Ramp values will most likely be float, cast them to the same type as arr
+ return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis)
+
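
The ramp helpers interpolate linearly from the edge sample toward `end`
over `pad_amt` steps; a 1-D sketch mirroring `_append_ramp`:

    import numpy as np

    arr = np.array([0., 0., 10.])
    pad_amt, end = 4, 0.0
    ramp = np.arange(1, pad_amt + 1).astype(np.float64)
    slope = (end - arr[-1]) / float(pad_amt)
    print(np.concatenate((arr, arr[-1] + slope * ramp)))
    # [  0.    0.   10.    7.5   5.    2.5   0. ]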
+
+def _prepend_max(arr, pad_amt, num, axis=-1):
+ """
+ Prepend `pad_amt` maximum values along `axis`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to prepend.
+ num : int
+ Depth into `arr` along `axis` to calculate maximum.
+ Range: [1, `arr.shape[axis]`] or None (entire axis)
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+        Output array, with `pad_amt` values prepended along `axis`. The
+ prepended region is the maximum of the first `num` values along
+ `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ # Equivalent to edge padding for single value, so do that instead
+ if num == 1:
+ return _prepend_edge(arr, pad_amt, axis)
+
+ # Use entire array if `num` is too large
+ if num is not None:
+ if num >= arr.shape[axis]:
+ num = None
+
+ # Slice a chunk from the edge to calculate stats on
+ max_slice = tuple(slice(None) if i != axis else slice(num)
+ for (i, x) in enumerate(arr.shape))
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+
+ # Extract slice, calculate max, reshape to add singleton dimension back
+ max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+
+ # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
+ return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),
+ axis=axis)
+
+
+def _append_max(arr, pad_amt, num, axis=-1):
+ """
+ Pad one `axis` of `arr` with the maximum of the last `num` elements.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to append.
+ num : int
+ Depth into `arr` along `axis` to calculate maximum.
+ Range: [1, `arr.shape[axis]`] or None (entire axis)
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt` values appended along `axis`. The
+ appended region is the maximum of the final `num` values along `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ # Equivalent to edge padding for single value, so do that instead
+ if num == 1:
+ return _append_edge(arr, pad_amt, axis)
+
+ # Use entire array if `num` is too large
+ if num is not None:
+ if num >= arr.shape[axis]:
+ num = None
+
+ # Slice a chunk from the edge to calculate stats on
+ end = arr.shape[axis] - 1
+ if num is not None:
+ max_slice = tuple(
+ slice(None) if i != axis else slice(end, end - num, -1)
+ for (i, x) in enumerate(arr.shape))
+ else:
+ max_slice = tuple(slice(None) for x in arr.shape)
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+
+ # Extract slice, calculate max, reshape to add singleton dimension back
+ max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
+
+ # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
+ return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),
+ axis=axis)
+
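
All of the statistic helpers (max, mean, median, min) share one recipe:
slice a chunk of depth `num` from the edge, reduce it along `axis`, and
repeat the result `pad_amt` times. In one dimension, for `_append_max`:

    import numpy as np

    arr = np.array([1, 7, 2, 5])
    pad_amt, num = 3, 2
    stat = arr[-num:].max()   # reduce the trailing chunk of depth `num`
    print(np.concatenate((arr, np.repeat(stat, pad_amt))))
    # [1 7 2 5 5 5 5]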
+
+def _prepend_mean(arr, pad_amt, num, axis=-1):
+ """
+ Prepend `pad_amt` mean values along `axis`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to prepend.
+ num : int
+ Depth into `arr` along `axis` to calculate mean.
+ Range: [1, `arr.shape[axis]`] or None (entire axis)
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt` values prepended along `axis`. The
+ prepended region is the mean of the first `num` values along `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ # Equivalent to edge padding for single value, so do that instead
+ if num == 1:
+ return _prepend_edge(arr, pad_amt, axis)
+
+ # Use entire array if `num` is too large
+ if num is not None:
+ if num >= arr.shape[axis]:
+ num = None
+
+ # Slice a chunk from the edge to calculate stats on
+ mean_slice = tuple(slice(None) if i != axis else slice(num)
+ for (i, x) in enumerate(arr.shape))
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+
+ # Extract slice, calculate mean, reshape to add singleton dimension back
+ mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton)
+ _round_ifneeded(mean_chunk, arr.dtype)
+
+ # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
+ return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype),
+ arr), axis=axis)
+
+
+def _append_mean(arr, pad_amt, num, axis=-1):
+ """
+ Append `pad_amt` mean values along `axis`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to append.
+ num : int
+ Depth into `arr` along `axis` to calculate mean.
+ Range: [1, `arr.shape[axis]`] or None (entire axis)
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt` values appended along `axis`. The
+        appended region is the mean of the final `num` values along `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ # Equivalent to edge padding for single value, so do that instead
+ if num == 1:
+ return _append_edge(arr, pad_amt, axis)
+
+ # Use entire array if `num` is too large
+ if num is not None:
+ if num >= arr.shape[axis]:
+ num = None
+
+ # Slice a chunk from the edge to calculate stats on
+ end = arr.shape[axis] - 1
+ if num is not None:
+ mean_slice = tuple(
+ slice(None) if i != axis else slice(end, end - num, -1)
+ for (i, x) in enumerate(arr.shape))
+ else:
+ mean_slice = tuple(slice(None) for x in arr.shape)
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+
+ # Extract slice, calculate mean, reshape to add singleton dimension back
+ mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton)
+ _round_ifneeded(mean_chunk, arr.dtype)
+
+ # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
+ return np.concatenate(
+ (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+
+
+def _prepend_med(arr, pad_amt, num, axis=-1):
+ """
+ Prepend `pad_amt` median values along `axis`.
Parameters
----------
- vector : ndarray of rank 1, length N + pad_tuple[0] + pad_tuple[1]
- Input vector including blank padded values. `N` is the lenth of the
- original vector.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding along
- this particular iaxis.
- before_val : scalar or ndarray of rank 1, length pad_tuple[0]
- This is the value(s) that will pad the beginning of `vector`.
- after_val : scalar or ndarray of rank 1, length pad_tuple[1]
- This is the value(s) that will pad the end of the `vector`.
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to prepend.
+ num : int
+ Depth into `arr` along `axis` to calculate median.
+ Range: [1, `arr.shape[axis]`] or None (entire axis)
+ axis : int
+ Axis along which to pad `arr`.
Returns
-------
- _create_vector : ndarray
- Vector with before_val and after_val replacing the blank pad values.
- '''
- vector[:pad_tuple[0]] = before_val
- if pad_tuple[1] > 0:
- vector[-pad_tuple[1]:] = after_val
- return vector
+ padarr : ndarray
+ Output array, with `pad_amt` values prepended along `axis`. The
+ prepended region is the median of the first `num` values along `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ # Equivalent to edge padding for single value, so do that instead
+ if num == 1:
+ return _prepend_edge(arr, pad_amt, axis)
+
+ # Use entire array if `num` is too large
+ if num is not None:
+ if num >= arr.shape[axis]:
+ num = None
+
+ # Slice a chunk from the edge to calculate stats on
+ med_slice = tuple(slice(None) if i != axis else slice(num)
+ for (i, x) in enumerate(arr.shape))
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+
+ # Extract slice, calculate median, reshape to add singleton dimension back
+ med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ _round_ifneeded(med_chunk, arr.dtype)
+
+ # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
+ return np.concatenate(
+ (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis)
+
+
+def _append_med(arr, pad_amt, num, axis=-1):
+ """
+ Append `pad_amt` median values along `axis`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to append.
+ num : int
+ Depth into `arr` along `axis` to calculate median.
+ Range: [1, `arr.shape[axis]`] or None (entire axis)
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt` values appended along `axis`. The
+ appended region is the median of the final `num` values along `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ # Equivalent to edge padding for single value, so do that instead
+ if num == 1:
+ return _append_edge(arr, pad_amt, axis)
+
+ # Use entire array if `num` is too large
+ if num is not None:
+ if num >= arr.shape[axis]:
+ num = None
+
+ # Slice a chunk from the edge to calculate stats on
+ end = arr.shape[axis] - 1
+ if num is not None:
+ med_slice = tuple(
+ slice(None) if i != axis else slice(end, end - num, -1)
+ for (i, x) in enumerate(arr.shape))
+ else:
+ med_slice = tuple(slice(None) for x in arr.shape)
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+
+ # Extract slice, calculate median, reshape to add singleton dimension back
+ med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
+ _round_ifneeded(med_chunk, arr.dtype)
+
+ # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
+ return np.concatenate(
+ (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
+
+
+def _prepend_min(arr, pad_amt, num, axis=-1):
+ """
+ Prepend `pad_amt` minimum values along `axis`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to prepend.
+ num : int
+ Depth into `arr` along `axis` to calculate minimum.
+ Range: [1, `arr.shape[axis]`] or None (entire axis)
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt` values prepended along `axis`. The
+ prepended region is the minimum of the first `num` values along
+ `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ # Equivalent to edge padding for single value, so do that instead
+ if num == 1:
+ return _prepend_edge(arr, pad_amt, axis)
+
+ # Use entire array if `num` is too large
+ if num is not None:
+ if num >= arr.shape[axis]:
+ num = None
+
+ # Slice a chunk from the edge to calculate stats on
+ min_slice = tuple(slice(None) if i != axis else slice(num)
+ for (i, x) in enumerate(arr.shape))
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+
+ # Extract slice, calculate min, reshape to add singleton dimension back
+ min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+
+ # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
+ return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
+ axis=axis)
+
+
+def _append_min(arr, pad_amt, num, axis=-1):
+ """
+    Append `pad_amt` minimum values along `axis`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : int
+ Amount of padding to append.
+ num : int
+ Depth into `arr` along `axis` to calculate minimum.
+ Range: [1, `arr.shape[axis]`] or None (entire axis)
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt` values appended along `axis`. The
+ appended region is the minimum of the final `num` values along `axis`.
+
+ """
+ if pad_amt == 0:
+ return arr
+
+ # Equivalent to edge padding for single value, so do that instead
+ if num == 1:
+ return _append_edge(arr, pad_amt, axis)
+
+ # Use entire array if `num` is too large
+ if num is not None:
+ if num >= arr.shape[axis]:
+ num = None
+
+ # Slice a chunk from the edge to calculate stats on
+ end = arr.shape[axis] - 1
+ if num is not None:
+ min_slice = tuple(
+ slice(None) if i != axis else slice(end, end - num, -1)
+ for (i, x) in enumerate(arr.shape))
+ else:
+ min_slice = tuple(slice(None) for x in arr.shape)
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+
+ # Extract slice, calculate min, reshape to add singleton dimension back
+ min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
+
+ # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
+ return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
+ axis=axis)
+
+
+def _pad_ref(arr, pad_amt, method, axis=-1):
+ """
+ Pad `axis` of `arr` by reflection.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : tuple of ints, length 2
+ Padding to (prepend, append) along `axis`.
+ method : str
+ Controls method of reflection; options are 'even' or 'odd'.
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
+ values appended along `axis`. Both regions are padded with reflected
+ values from the original array.
+
+ Notes
+ -----
+ This algorithm does not pad with repetition, i.e. the edges are not
+    repeated in the reflection. For that behavior, use `mode='symmetric'`.
+
+    The modes 'reflect', 'symmetric', and 'wrap' must each be handled by a
+    single function; otherwise the indexing tricks used for pad widths that
+    are not integer multiples of the original shape would break the
+    repetition in the final iteration.
+
+ """
+    # Nothing to pad along this axis; return the input unchanged
+ if pad_amt[0] == 0 and pad_amt[1] == 0:
+ return arr
+
+ ##########################################################################
+ # Prepended region
+
+ # Slice off a reverse indexed chunk from near edge to pad `arr` before
+ ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)
+ for (i, x) in enumerate(arr.shape))
+
+ ref_chunk1 = arr[ref_slice]
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+ if pad_amt[0] == 1:
+ ref_chunk1 = ref_chunk1.reshape(pad_singleton)
+
+ # Memory/computationally more expensive, only do this if `method='odd'`
+ if 'odd' in method and pad_amt[0] > 0:
+ edge_slice1 = tuple(slice(None) if i != axis else 0
+ for (i, x) in enumerate(arr.shape))
+ edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ ref_chunk1 = 2 * edge_chunk - ref_chunk1
+ del edge_chunk
+
+ ##########################################################################
+ # Appended region
+
+ # Slice off a reverse indexed chunk from far edge to pad `arr` after
+ start = arr.shape[axis] - pad_amt[1] - 1
+ end = arr.shape[axis] - 1
+ ref_slice = tuple(slice(None) if i != axis else slice(start, end)
+ for (i, x) in enumerate(arr.shape))
+ rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
+ for (i, x) in enumerate(arr.shape))
+ ref_chunk2 = arr[ref_slice][rev_idx]
+
+ if pad_amt[1] == 1:
+ ref_chunk2 = ref_chunk2.reshape(pad_singleton)
+
+ if 'odd' in method:
+ edge_slice2 = tuple(slice(None) if i != axis else -1
+ for (i, x) in enumerate(arr.shape))
+ edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ ref_chunk2 = 2 * edge_chunk - ref_chunk2
+ del edge_chunk
+
+ # Concatenate `arr` with both chunks, extending along `axis`
+ return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis)
+
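
Reflection in one dimension, for the default even `method`: both
mirrored chunks exclude the edge sample itself (a sketch of what
`_pad_ref` builds):

    import numpy as np

    arr = np.array([1, 2, 3, 4])
    pad_before, pad_after = 2, 2
    before = arr[pad_before:0:-1]        # [3 2] -- edge value excluded
    after = arr[-2:-2 - pad_after:-1]    # [3 2]
    print(np.concatenate((before, arr, after)))
    # [3 2 1 2 3 4 3 2]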
+
+def _pad_sym(arr, pad_amt, method, axis=-1):
+ """
+ Pad `axis` of `arr` by symmetry.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : tuple of ints, length 2
+ Padding to (prepend, append) along `axis`.
+ method : str
+ Controls method of symmetry; options are 'even' or 'odd'.
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
+ values appended along `axis`. Both regions are padded with symmetric
+ values from the original array.
+
+ Notes
+ -----
+ This algorithm DOES pad with repetition, i.e. the edges are repeated.
+    For a mode that does not repeat edges, use `mode='reflect'`.
+
+    The modes 'reflect', 'symmetric', and 'wrap' must each be handled by a
+    single function; otherwise the indexing tricks used for pad widths that
+    are not integer multiples of the original shape would break the
+    repetition in the final iteration.
+
+ """
+    # Nothing to pad along this axis; return the input unchanged
+ if pad_amt[0] == 0 and pad_amt[1] == 0:
+ return arr
+
+ ##########################################################################
+ # Prepended region
+
+ # Slice off a reverse indexed chunk from near edge to pad `arr` before
+ sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])
+ for (i, x) in enumerate(arr.shape))
+ rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
+ for (i, x) in enumerate(arr.shape))
+ sym_chunk1 = arr[sym_slice][rev_idx]
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+ if pad_amt[0] == 1:
+ sym_chunk1 = sym_chunk1.reshape(pad_singleton)
+
+ # Memory/computationally more expensive, only do this if `method='odd'`
+ if 'odd' in method and pad_amt[0] > 0:
+ edge_slice1 = tuple(slice(None) if i != axis else 0
+ for (i, x) in enumerate(arr.shape))
+ edge_chunk = arr[edge_slice1].reshape(pad_singleton)
+ sym_chunk1 = 2 * edge_chunk - sym_chunk1
+ del edge_chunk
+
+ ##########################################################################
+ # Appended region
+
+ # Slice off a reverse indexed chunk from far edge to pad `arr` after
+ start = arr.shape[axis] - pad_amt[1]
+ end = arr.shape[axis]
+ sym_slice = tuple(slice(None) if i != axis else slice(start, end)
+ for (i, x) in enumerate(arr.shape))
+ sym_chunk2 = arr[sym_slice][rev_idx]
+
+ if pad_amt[1] == 1:
+ sym_chunk2 = sym_chunk2.reshape(pad_singleton)
+
+ if 'odd' in method:
+ edge_slice2 = tuple(slice(None) if i != axis else -1
+ for (i, x) in enumerate(arr.shape))
+ edge_chunk = arr[edge_slice2].reshape(pad_singleton)
+ sym_chunk2 = 2 * edge_chunk - sym_chunk2
+ del edge_chunk
+
+ # Concatenate `arr` with both chunks, extending along `axis`
+ return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis)
+
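
Symmetric padding differs from reflection only in that the edge sample
is included in the mirror; the 1-D equivalent of `_pad_sym` with even
`method`:

    import numpy as np

    arr = np.array([1, 2, 3, 4])
    pad_before, pad_after = 2, 2
    before = arr[:pad_before][::-1]   # [2 1] -- edge value included
    after = arr[-pad_after:][::-1]    # [4 3]
    print(np.concatenate((before, arr, after)))
    # [2 1 1 2 3 4 4 3]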
+
+def _pad_wrap(arr, pad_amt, axis=-1):
+ """
+ Pad `axis` of `arr` via wrapping.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array of arbitrary shape.
+ pad_amt : tuple of ints, length 2
+ Padding to (prepend, append) along `axis`.
+ axis : int
+ Axis along which to pad `arr`.
+
+ Returns
+ -------
+ padarr : ndarray
+ Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
+        values appended along `axis`. Both regions are padded with wrapped
+        values from the opposite end of `axis`.
+
+ Notes
+ -----
+ This method of padding is also known as 'tile' or 'tiling'.
+
+    The modes 'reflect', 'symmetric', and 'wrap' must each be handled by a
+    single function; otherwise the indexing tricks used for pad widths that
+    are not integer multiples of the original shape would break the
+    repetition in the final iteration.
+
+ """
+    # Nothing to pad along this axis; return the input unchanged
+ if pad_amt[0] == 0 and pad_amt[1] == 0:
+ return arr
+
+ ##########################################################################
+ # Prepended region
+
+ # Slice off a reverse indexed chunk from near edge to pad `arr` before
+ start = arr.shape[axis] - pad_amt[0]
+ end = arr.shape[axis]
+ wrap_slice = tuple(slice(None) if i != axis else slice(start, end)
+ for (i, x) in enumerate(arr.shape))
+ wrap_chunk1 = arr[wrap_slice]
+
+ # Shape to restore singleton dimension after slicing
+ pad_singleton = tuple(x if i != axis else 1
+ for (i, x) in enumerate(arr.shape))
+ if pad_amt[0] == 1:
+ wrap_chunk1 = wrap_chunk1.reshape(pad_singleton)
+
+ ##########################################################################
+ # Appended region
+
+ # Slice off a reverse indexed chunk from far edge to pad `arr` after
+ wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1])
+ for (i, x) in enumerate(arr.shape))
+ wrap_chunk2 = arr[wrap_slice]
+
+ if pad_amt[1] == 1:
+ wrap_chunk2 = wrap_chunk2.reshape(pad_singleton)
+
+ # Concatenate `arr` with both chunks, extending along `axis`
+ return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)
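
Wrapping simply copies values from the opposite end of the axis, i.e.
tiling; the 1-D equivalent:

    import numpy as np

    arr = np.array([1, 2, 3, 4])
    pad_before, pad_after = 2, 1
    print(np.concatenate((arr[-pad_before:], arr, arr[:pad_after])))
    # [3 4 1 2 3 4 1]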
def _normalize_shape(narray, shape):
- '''
+ """
Private function which does some checks and normalizes the possibly
much simpler representations of 'pad_width', 'stat_length',
'constant_values', 'end_values'.
@@ -66,14 +1013,15 @@ def _normalize_shape(narray, shape):
int => ((int, int), (int, int), ...)
[[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...)
((int1, int2), (int3, int4), ...) => no change
- [[int1, int2], ] => ((int1, int2), (int1, int2), ...]
+ [[int1, int2], ] => ((int1, int2), (int1, int2), ...)
((int1, int2), ) => ((int1, int2), (int1, int2), ...)
- [[int , ), ) => ((int, int), (int, int), ...)
+ [[int , ], ] => ((int, int), (int, int), ...)
((int , ), ) => ((int, int), (int, int), ...)
- '''
+
+ """
normshp = None
shapelen = len(np.shape(narray))
- if (isinstance(shape, int)):
+ if (isinstance(shape, int)) or shape is None:
normshp = ((shape, shape), ) * shapelen
elif (isinstance(shape, (tuple, list))
and isinstance(shape[0], (tuple, list))
@@ -91,14 +1039,14 @@ def _normalize_shape(narray, shape):
and isinstance(shape[0], (int, float, long))
and len(shape) == 2):
normshp = (shape, ) * shapelen
- if normshp == None:
+ if normshp is None:
fmt = "Unable to create correctly shaped tuple from %s"
raise ValueError(fmt % (shape,))
return normshp
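
By example, the normalization broadcasts every shorthand form to one
(before, after) pair per axis; a reduced sketch with a hypothetical
`normalize` helper standing in for `_normalize_shape`:

    import numpy as np

    def normalize(arr, shape):
        # Ints and single (before, after) pairs broadcast to every axis.
        n = arr.ndim
        if isinstance(shape, int) or shape is None:
            return ((shape, shape),) * n
        if len(shape) == 2 and np.isscalar(shape[0]):
            return (tuple(shape),) * n
        return tuple(tuple(s) for s in shape)

    a = np.zeros((2, 3))
    print(normalize(a, 4))       # ((4, 4), (4, 4))
    print(normalize(a, [1, 2]))  # ((1, 2), (1, 2))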
def _validate_lengths(narray, number_elements):
- '''
+ """
Private function which does some checks and reformats pad_width and
stat_length using _normalize_shape.
@@ -122,388 +1070,23 @@ def _validate_lengths(narray, number_elements):
int => ((int, int), (int, int), ...)
[[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...)
((int1, int2), (int3, int4), ...) => no change
- [[int1, int2], ] => ((int1, int2), (int1, int2), ...]
+ [[int1, int2], ] => ((int1, int2), (int1, int2), ...)
((int1, int2), ) => ((int1, int2), (int1, int2), ...)
- [[int , ), ) => ((int, int), (int, int), ...)
+ [[int , ], ] => ((int, int), (int, int), ...)
((int , ), ) => ((int, int), (int, int), ...)
- '''
- shapelen = len(np.shape(narray))
+
+ """
normshp = _normalize_shape(narray, number_elements)
for i in normshp:
- if i[0] < 0 or i[1] < 0:
- fmt ="%s cannot contain negative values."
+ chk = [1 if x is None else x for x in i]
+ chk = [1 if x >= 0 else -1 for x in chk]
+ if (chk[0] < 0) or (chk[1] < 0):
+ fmt = "%s cannot contain negative values."
raise ValueError(fmt % (number_elements,))
return normshp
-def _create_stat_vectors(vector, pad_tuple, iaxis, kwargs):
- '''
- Returns the portion of the vector required for any statistic.
-
- Parameters
- ----------
- vector : ndarray
- Input vector that already includes empty padded values.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding
- along this particular iaxis.
- iaxis : int
- The axis currently being looped across.
- kwargs : keyword arguments
- Keyword arguments. Only 'stat_length' is used. 'stat_length'
- defaults to the entire vector if not supplied.
-
- Return
- ------
- _create_stat_vectors : ndarray
- The values from the original vector that will be used to calculate
- the statistic.
- '''
-
- # Can't have 0 represent the end if a slice... a[1:0] doesnt' work
- pt1 = -pad_tuple[1]
- if pt1 == 0:
- pt1 = None
-
- # Default is the entire vector from the original array.
- sbvec = vector[pad_tuple[0]:pt1]
- savec = vector[pad_tuple[0]:pt1]
-
- if kwargs['stat_length']:
- stat_length = kwargs['stat_length'][iaxis]
- sl0 = min(stat_length[0], len(sbvec))
- sl1 = min(stat_length[1], len(savec))
- sbvec = np.arange(0)
- savec = np.arange(0)
- if pad_tuple[0] > 0:
- sbvec = vector[pad_tuple[0]:pad_tuple[0] + sl0]
- if pad_tuple[1] > 0:
- savec = vector[-pad_tuple[1] - sl1:pt1]
- return (sbvec, savec)
-
-
-def _maximum(vector, pad_tuple, iaxis, kwargs):
- '''
- Private function to calculate the before/after vectors for pad_maximum.
-
- Parameters
- ----------
- vector : ndarray
- Input vector that already includes empty padded values.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding
- along this particular iaxis.
- iaxis : int
- The axis currently being looped across.
- kwargs : keyword arguments
- Keyword arguments. Only 'stat_length' is used. 'stat_length'
- defaults to the entire vector if not supplied.
-
- Return
- ------
- _maximum : ndarray
- Padded vector
- '''
- sbvec, savec = _create_stat_vectors(vector, pad_tuple, iaxis, kwargs)
- return _create_vector(vector, pad_tuple, max(sbvec), max(savec))
-
-
-def _minimum(vector, pad_tuple, iaxis, kwargs):
- '''
- Private function to calculate the before/after vectors for pad_minimum.
-
- Parameters
- ----------
- vector : ndarray
- Input vector that already includes empty padded values.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding
- along this particular iaxis.
- iaxis : int
- The axis currently being looped across.
- kwargs : keyword arguments
- Keyword arguments. Only 'stat_length' is used. 'stat_length'
- defaults to the entire vector if not supplied.
-
- Return
- ------
- _minimum : ndarray
- Padded vector
- '''
- sbvec, savec = _create_stat_vectors(vector, pad_tuple, iaxis, kwargs)
- return _create_vector(vector, pad_tuple, min(sbvec), min(savec))
-
-
-def _median(vector, pad_tuple, iaxis, kwargs):
- '''
- Private function to calculate the before/after vectors for pad_median.
-
- Parameters
- ----------
- vector : ndarray
- Input vector that already includes empty padded values.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding
- along this particular iaxis.
- iaxis : int
- The axis currently being looped across.
- kwargs : keyword arguments
- Keyword arguments. Only 'stat_length' is used. 'stat_length'
- defaults to the entire vector if not supplied.
-
- Return
- ------
- _median : ndarray
- Padded vector
- '''
- sbvec, savec = _create_stat_vectors(vector, pad_tuple, iaxis, kwargs)
- return _create_vector(vector, pad_tuple, np.median(sbvec),
- np.median(savec))
-
-
-def _mean(vector, pad_tuple, iaxis, kwargs):
- '''
- Private function to calculate the before/after vectors for pad_mean.
-
- Parameters
- ----------
- vector : ndarray
- Input vector that already includes empty padded values.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding
- along this particular iaxis.
- iaxis : int
- The axis currently being looped across.
- kwargs : keyword arguments
- Keyword arguments. Only 'stat_length' is used. 'stat_length'
- defaults to the entire vector if not supplied.
-
- Return
- ------
- _mean : ndarray
- Padded vector
- '''
- sbvec, savec = _create_stat_vectors(vector, pad_tuple, iaxis, kwargs)
- return _create_vector(vector, pad_tuple, np.average(sbvec),
- np.average(savec))
-
-
-def _constant(vector, pad_tuple, iaxis, kwargs):
- '''
- Private function to calculate the before/after vectors for
- pad_constant.
-
- Parameters
- ----------
- vector : ndarray
- Input vector that already includes empty padded values.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding
- along this particular iaxis.
- iaxis : int
- The axis currently being looped across.
- kwargs : keyword arguments
- Keyword arguments. Need 'constant_values' keyword argument.
-
- Return
- ------
- _constant : ndarray
- Padded vector
- '''
- nconstant = kwargs['constant_values'][iaxis]
- return _create_vector(vector, pad_tuple, nconstant[0], nconstant[1])
-
-
-def _linear_ramp(vector, pad_tuple, iaxis, kwargs):
- '''
- Private function to calculate the before/after vectors for
- pad_linear_ramp.
-
- Parameters
- ----------
- vector : ndarray
- Input vector that already includes empty padded values.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding
- along this particular iaxis.
- iaxis : int
- The axis currently being looped across. Not used in _linear_ramp.
- kwargs : keyword arguments
- Keyword arguments. Not used in _linear_ramp.
-
- Return
- ------
- _linear_ramp : ndarray
- Padded vector
- '''
- end_values = kwargs['end_values'][iaxis]
- before_delta = ((vector[pad_tuple[0]] - end_values[0])
- / float(pad_tuple[0]))
- after_delta = ((vector[-pad_tuple[1] - 1] - end_values[1])
- / float(pad_tuple[1]))
-
- before_vector = np.ones((pad_tuple[0], )) * end_values[0]
- before_vector = before_vector.astype(vector.dtype)
- for i in range(len(before_vector)):
- before_vector[i] = before_vector[i] + i * before_delta
-
- after_vector = np.ones((pad_tuple[1], )) * end_values[1]
- after_vector = after_vector.astype(vector.dtype)
- for i in range(len(after_vector)):
- after_vector[i] = after_vector[i] + i * after_delta
- after_vector = after_vector[::-1]
-
- return _create_vector(vector, pad_tuple, before_vector, after_vector)
-
-
-def _reflect(vector, pad_tuple, iaxis, kwargs):
- '''
- Private function to calculate the before/after vectors for pad_reflect.
-
- Parameters
- ----------
- vector : ndarray
- Input vector that already includes empty padded values.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding
- along this particular iaxis.
- iaxis : int
- The axis currently being looped across. Not used in _reflect.
- kwargs : keyword arguments
- Keyword arguments. Not used in _reflect.
-
- Return
- ------
- _reflect : ndarray
- Padded vector
- '''
- # Can't have pad_tuple[1] be used in the slice if == to 0.
- if pad_tuple[1] == 0:
- after_vector = vector[pad_tuple[0]:None]
- else:
- after_vector = vector[pad_tuple[0]:-pad_tuple[1]]
-
- reverse = after_vector[::-1]
-
- before_vector = np.resize(
- np.concatenate(
- (after_vector[1:-1], reverse)), pad_tuple[0])[::-1]
- after_vector = np.resize(
- np.concatenate(
- (reverse[1:-1], after_vector)), pad_tuple[1])
-
- if kwargs['reflect_type'] == 'even':
- pass
- elif kwargs['reflect_type'] == 'odd':
- before_vector = 2 * vector[pad_tuple[0]] - before_vector
- after_vector = 2 * vector[-pad_tuple[-1] - 1] - after_vector
- else:
- raise ValueError("The keyword '%s' cannot have the value '%s'."
- % ('reflect_type', kwargs['reflect_type']))
- return _create_vector(vector, pad_tuple, before_vector, after_vector)
-
-
-def _symmetric(vector, pad_tuple, iaxis, kwargs):
- '''
- Private function to calculate the before/after vectors for
- pad_symmetric.
-
- Parameters
- ----------
- vector : ndarray
- Input vector that already includes empty padded values.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding
- along this particular iaxis.
- iaxis : int
- The axis currently being looped across. Not used in _symmetric.
- kwargs : keyword arguments
- Keyword arguments. Not used in _symmetric.
-
- Return
- ------
- _symmetric : ndarray
- Padded vector
- '''
- if pad_tuple[1] == 0:
- after_vector = vector[pad_tuple[0]:None]
- else:
- after_vector = vector[pad_tuple[0]:-pad_tuple[1]]
-
- before_vector = np.resize( np.concatenate( (after_vector,
- after_vector[::-1])), pad_tuple[0])[::-1]
- after_vector = np.resize( np.concatenate( (after_vector[::-1],
- after_vector)), pad_tuple[1])
-
- if kwargs['reflect_type'] == 'even':
- pass
- elif kwargs['reflect_type'] == 'odd':
- before_vector = 2 * vector[pad_tuple[0]] - before_vector
- after_vector = 2 * vector[-pad_tuple[1] - 1] - after_vector
- else:
- raise ValueError("The keyword '%s' cannot have the value '%s'."
- % ('reflect_type', kwargs['reflect_type']))
- return _create_vector(vector, pad_tuple, before_vector, after_vector)
-
-
-def _wrap(vector, pad_tuple, iaxis, kwargs):
- '''
- Private function to calculate the before/after vectors for pad_wrap.
-
- Parameters
- ----------
- vector : ndarray
- Input vector that already includes empty padded values.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding
- along this particular iaxis.
- iaxis : int
- The axis currently being looped across. Not used in _wrap.
- kwargs : keyword arguments
- Keyword arguments. Not used in _wrap.
-
- Return
- ------
- _wrap : ndarray
- Padded vector
- '''
- if pad_tuple[1] == 0:
- after_vector = vector[pad_tuple[0]:None]
- else:
- after_vector = vector[pad_tuple[0]:-pad_tuple[1]]
-
- before_vector = np.resize(after_vector[::-1], pad_tuple[0])[::-1]
- after_vector = np.resize(after_vector, pad_tuple[1])
-
- return _create_vector(vector, pad_tuple, before_vector, after_vector)
-
-
-def _edge(vector, pad_tuple, iaxis, kwargs):
- '''
- Private function to calculate the before/after vectors for pad_edge.
-
- Parameters
- ----------
- vector : ndarray
- Input vector that already includes empty padded values.
- pad_tuple : tuple
- This tuple represents the (before, after) width of the padding
- along this particular iaxis.
- iaxis : int
- The axis currently being looped across. Not used in _edge.
- kwargs : keyword arguments
- Keyword arguments. Not used in _edge.
-
- Return
- ------
- _edge : ndarray
- Padded vector
- '''
- return _create_vector(vector, pad_tuple, vector[pad_tuple[0]],
- vector[-pad_tuple[1] - 1])
-
-
-################################################################################
+###############################################################################
# Public functions
@@ -617,10 +1200,10 @@ def pad(array, pad_width, mode=None, **kwargs):
where
- vector: ndarray
+ vector : ndarray
A rank 1 array already padded with zeros. Padded values are
vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].
- iaxis_pad_width: tuple
+ iaxis_pad_width : tuple
A 2-tuple of ints, iaxis_pad_width[0] represents the number of
values padded at the beginning of vector where
iaxis_pad_width[1] represents the number of values padded at
@@ -692,53 +1275,38 @@ def pad(array, pad_width, mode=None, **kwargs):
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
- """
+ """
narray = np.array(array)
pad_width = _validate_lengths(narray, pad_width)
- modefunc = {
- 'constant': _constant,
- 'edge': _edge,
- 'linear_ramp': _linear_ramp,
- 'maximum': _maximum,
- 'mean': _mean,
- 'median': _median,
- 'minimum': _minimum,
- 'reflect': _reflect,
- 'symmetric': _symmetric,
- 'wrap': _wrap,
- }
-
allowedkwargs = {
- 'constant': ['constant_values'],
- 'edge': [],
- 'linear_ramp': ['end_values'],
- 'maximum': ['stat_length'],
- 'mean': ['stat_length'],
- 'median': ['stat_length'],
- 'minimum': ['stat_length'],
- 'reflect': ['reflect_type'],
- 'symmetric': ['reflect_type'],
- 'wrap': [],
- }
+ 'constant': ['constant_values'],
+ 'edge': [],
+ 'linear_ramp': ['end_values'],
+ 'maximum': ['stat_length'],
+ 'mean': ['stat_length'],
+ 'median': ['stat_length'],
+ 'minimum': ['stat_length'],
+ 'reflect': ['reflect_type'],
+ 'symmetric': ['reflect_type'],
+ 'wrap': [],
+ }
kwdefaults = {
- 'stat_length': None,
- 'constant_values': 0,
- 'end_values': 0,
- 'reflect_type': 'even',
- }
+ 'stat_length': None,
+ 'constant_values': 0,
+ 'end_values': 0,
+ 'reflect_type': 'even',
+ }
if isinstance(mode, str):
- function = modefunc[mode]
-
         # Make sure the supplied kwargs are allowed for this mode
for key in kwargs:
if key not in allowedkwargs[mode]:
raise ValueError('%s keyword not in allowed keywords %s' %
- (key, allowedkwargs[mode]))
+ (key, allowedkwargs[mode]))
# Set kwarg defaults
for kw in allowedkwargs[mode]:
@@ -746,36 +1314,156 @@ def pad(array, pad_width, mode=None, **kwargs):
# Need to only normalize particular keywords.
for i in kwargs:
- if i == 'stat_length' and kwargs[i]:
+ if i == 'stat_length':
kwargs[i] = _validate_lengths(narray, kwargs[i])
if i in ['end_values', 'constant_values']:
kwargs[i] = _normalize_shape(narray, kwargs[i])
- elif mode == None:
+ elif mode is None:
raise ValueError('Keyword "mode" must be a function or one of %s.' %
- (modefunc.keys(),))
+ (list(allowedkwargs.keys()),))
else:
- # User supplied function, I hope
+ # Drop back to old, slower np.apply_along_axis mode for user-supplied
+ # vector function
function = mode
- # Create a new padded array
- rank = range(len(narray.shape))
- total_dim_increase = [np.sum(pad_width[i]) for i in rank]
- offset_slices = [slice(pad_width[i][0],
- pad_width[i][0] + narray.shape[i])
- for i in rank]
- new_shape = np.array(narray.shape) + total_dim_increase
- newmat = np.zeros(new_shape).astype(narray.dtype)
-
- # Insert the original array into the padded array
- newmat[offset_slices] = narray
-
- # This is the core of pad ...
- for iaxis in rank:
- np.apply_along_axis(function,
- iaxis,
- newmat,
- pad_width[iaxis],
- iaxis,
- kwargs)
- return newmat
+ # Create a new padded array
+ rank = list(range(len(narray.shape)))
+ total_dim_increase = [np.sum(pad_width[i]) for i in rank]
+ offset_slices = [slice(pad_width[i][0],
+ pad_width[i][0] + narray.shape[i])
+ for i in rank]
+ new_shape = np.array(narray.shape) + total_dim_increase
+ newmat = np.zeros(new_shape).astype(narray.dtype)
+
+ # Insert the original array into the padded array
+ newmat[offset_slices] = narray
+
+ # This is the core of pad ...
+ for iaxis in rank:
+ np.apply_along_axis(function,
+ iaxis,
+ newmat,
+ pad_width[iaxis],
+ iaxis,
+ kwargs)
+ return newmat
+
+ # If we get here, use new padding method
+ newmat = narray.copy()
+
+    # API preserved, but completely new algorithm which pads by building the
+    # entire block to prepend/append to `arr` in one step, for each axis.
+ if mode == 'constant':
+ for axis, ((pad_before, pad_after), (before_val, after_val)) \
+ in enumerate(zip(pad_width, kwargs['constant_values'])):
+ newmat = _prepend_const(newmat, pad_before, before_val, axis)
+ newmat = _append_const(newmat, pad_after, after_val, axis)
+
+ elif mode == 'edge':
+ for axis, (pad_before, pad_after) in enumerate(pad_width):
+ newmat = _prepend_edge(newmat, pad_before, axis)
+ newmat = _append_edge(newmat, pad_after, axis)
+
+ elif mode == 'linear_ramp':
+ for axis, ((pad_before, pad_after), (before_val, after_val)) \
+ in enumerate(zip(pad_width, kwargs['end_values'])):
+ newmat = _prepend_ramp(newmat, pad_before, before_val, axis)
+ newmat = _append_ramp(newmat, pad_after, after_val, axis)
+
+ elif mode == 'maximum':
+ for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
+ in enumerate(zip(pad_width, kwargs['stat_length'])):
+ newmat = _prepend_max(newmat, pad_before, chunk_before, axis)
+ newmat = _append_max(newmat, pad_after, chunk_after, axis)
+
+ elif mode == 'mean':
+ for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
+ in enumerate(zip(pad_width, kwargs['stat_length'])):
+ newmat = _prepend_mean(newmat, pad_before, chunk_before, axis)
+ newmat = _append_mean(newmat, pad_after, chunk_after, axis)
+
+ elif mode == 'median':
+ for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
+ in enumerate(zip(pad_width, kwargs['stat_length'])):
+ newmat = _prepend_med(newmat, pad_before, chunk_before, axis)
+ newmat = _append_med(newmat, pad_after, chunk_after, axis)
+
+ elif mode == 'minimum':
+ for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
+ in enumerate(zip(pad_width, kwargs['stat_length'])):
+ newmat = _prepend_min(newmat, pad_before, chunk_before, axis)
+ newmat = _append_min(newmat, pad_after, chunk_after, axis)
+
+ elif mode == 'reflect':
+ for axis, (pad_before, pad_after) in enumerate(pad_width):
+            # Chunked padding along any axis where `pad_amt` is too large
+            # for indexing tricks. We can only safely pad the original axis
+            # length, to keep the period of the reflections consistent.
+ if ((pad_before > 0) or
+ (pad_after > 0)) and newmat.shape[axis] == 1:
+ # Extending singleton dimension for 'reflect' is legacy
+ # behavior; it really should raise an error.
+ newmat = _prepend_edge(newmat, pad_before, axis)
+ newmat = _append_edge(newmat, pad_after, axis)
+ continue
+
+ method = kwargs['reflect_type']
+ safe_pad = newmat.shape[axis] - 1
+ repeat = safe_pad
+ while ((pad_before > safe_pad) or (pad_after > safe_pad)):
+ offset = 0
+ pad_iter_b = min(safe_pad,
+ safe_pad * (pad_before // safe_pad))
+ pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
+ newmat = _pad_ref(newmat, (pad_iter_b,
+ pad_iter_a), method, axis)
+ pad_before -= pad_iter_b
+ pad_after -= pad_iter_a
+ if pad_iter_b > 0:
+ offset += 1
+ if pad_iter_a > 0:
+ offset += 1
+ safe_pad += pad_iter_b + pad_iter_a
+ newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis)
+
+ elif mode == 'symmetric':
+ for axis, (pad_before, pad_after) in enumerate(pad_width):
+            # Chunked padding along any axis where `pad_amt` is too large
+            # for indexing tricks. We can only safely pad the original axis
+            # length, to keep the period of the reflections consistent.
+ method = kwargs['reflect_type']
+ safe_pad = newmat.shape[axis]
+ repeat = safe_pad
+ while ((pad_before > safe_pad) or
+ (pad_after > safe_pad)):
+ pad_iter_b = min(safe_pad,
+ safe_pad * (pad_before // safe_pad))
+ pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
+ newmat = _pad_sym(newmat, (pad_iter_b,
+ pad_iter_a), method, axis)
+ pad_before -= pad_iter_b
+ pad_after -= pad_iter_a
+ safe_pad += pad_iter_b + pad_iter_a
+ newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis)
+
+ elif mode == 'wrap':
+ for axis, (pad_before, pad_after) in enumerate(pad_width):
+            # Chunked padding along any axis where `pad_amt` is too large
+            # for indexing tricks. We can only safely pad the original axis
+            # length, to keep the period of the reflections consistent.
+ safe_pad = newmat.shape[axis]
+ repeat = safe_pad
+ while ((pad_before > safe_pad) or
+ (pad_after > safe_pad)):
+ pad_iter_b = min(safe_pad,
+ safe_pad * (pad_before // safe_pad))
+ pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
+ newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis)
+
+ pad_before -= pad_iter_b
+ pad_after -= pad_iter_a
+ safe_pad += pad_iter_b + pad_iter_a
+
+ newmat = _pad_wrap(newmat, (pad_before, pad_after), axis)
+ return newmat
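
End to end, the rewritten `pad` keeps the documented behavior while
routing the string modes through the new block-based paths; a few
runnable calls:

    import numpy as np

    a = np.array([1, 2, 3])
    print(np.pad(a, (2, 2), mode='edge'))   # [1 1 1 2 3 3 3]
    print(np.pad(a, (2, 2), mode='wrap'))   # [2 3 1 2 3 1 2]
    print(np.pad(a, (1, 2), mode='constant',
                 constant_values=(9, 0)))   # [9 1 2 3 0 0]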
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 20a0e7151..5cd535703 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -22,7 +22,10 @@ thus calls to argsort().
To do: Optionally return indices analogously to unique for all functions.
:Author: Robert Cimrman
+
"""
+from __future__ import division, absolute_import, print_function
+
__all__ = ['ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d',
'unique', 'in1d']
@@ -278,7 +281,7 @@ def setxor1d(ar1, ar2, assume_unique=False):
flag2 = flag[1:] == flag[:-1]
return aux[flag2]
-def in1d(ar1, ar2, assume_unique=False):
+def in1d(ar1, ar2, assume_unique=False, invert=False):
"""
Test whether each element of a 1-D array is also present in a second array.
@@ -294,6 +297,13 @@ def in1d(ar1, ar2, assume_unique=False):
assume_unique : bool, optional
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
+ invert : bool, optional
+ If True, the values in the returned array are inverted (that is,
+ False where an element of `ar1` is in `ar2` and True otherwise).
+ Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
+ to (but is faster than) ``np.invert(in1d(a, b))``.
+
+ .. versionadded:: 1.8.0
Returns
-------
@@ -322,13 +332,26 @@ def in1d(ar1, ar2, assume_unique=False):
array([ True, False, True, False, True], dtype=bool)
>>> test[mask]
array([0, 2, 0])
-
+ >>> mask = np.in1d(test, states, invert=True)
+ >>> mask
+ array([False, True, False, True, False], dtype=bool)
+ >>> test[mask]
+ array([1, 5])
"""
+ # Ravel both arrays, behavior for the first array could be different
+ ar1 = np.asarray(ar1).ravel()
+ ar2 = np.asarray(ar2).ravel()
+
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
- mask = np.zeros(len(ar1), dtype=np.bool)
- for a in ar2:
- mask |= (ar1 == a)
+ if invert:
+ mask = np.ones(len(ar1), dtype=np.bool)
+ for a in ar2:
+ mask &= (ar1 != a)
+ else:
+ mask = np.zeros(len(ar1), dtype=np.bool)
+ for a in ar2:
+ mask |= (ar1 == a)
return mask
# Otherwise use sorting
@@ -342,8 +365,11 @@ def in1d(ar1, ar2, assume_unique=False):
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
- equal_adj = (sar[1:] == sar[:-1])
- flag = np.concatenate( (equal_adj, [False] ) )
+ if invert:
+ bool_ar = (sar[1:] != sar[:-1])
+ else:
+ bool_ar = (sar[1:] == sar[:-1])
+ flag = np.concatenate( (bool_ar, [invert]) )
indx = order.argsort(kind='mergesort')[:len( ar1 )]
if assume_unique:
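
For reference, the mergesort trick above can be reproduced standalone; a sketch without the `invert` and `assume_unique` bookkeeping:

    import numpy as np

    ar1 = np.array([0, 1, 2, 5, 0])
    ar2 = np.array([0, 2])

    # Deduplicate ar1 but remember where each original element went.
    ar1u, rev_idx = np.unique(ar1, return_inverse=True)
    ar2u = np.unique(ar2)

    ar = np.concatenate((ar1u, ar2u))
    order = ar.argsort(kind='mergesort')    # stable: ar1 entries sort first
    sar = ar[order]
    # After the stable sort, an ar1 value that also occurs in ar2 is
    # immediately followed by its ar2 duplicate.
    flag = np.concatenate((sar[1:] == sar[:-1], [False]))
    in_ar2 = flag[order.argsort(kind='mergesort')][:len(ar1u)]
    print(in_ar2[rev_idx])                  # [ True False  True False  True]
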
diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py
index 2df05e514..c2cde574e 100644
--- a/numpy/lib/arrayterator.py
+++ b/numpy/lib/arrayterator.py
@@ -7,16 +7,16 @@ an array object, and when iterated it will return sub-arrays with at most
a user-specified number of elements.
"""
+from __future__ import division, absolute_import, print_function
-from __future__ import division
-
+import sys
from operator import mul
+from functools import reduce
+
+from numpy.compat import long
__all__ = ['Arrayterator']
-import sys
-if sys.version_info[0] >= 3:
- from functools import reduce
class Arrayterator(object):
"""
@@ -188,7 +188,7 @@ class Arrayterator(object):
step = self.step[:]
ndims = len(self.var.shape)
- while 1:
+ while True:
count = self.buf_size or reduce(mul, self.shape)
# iterate over each dimension, looking for the
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index 599a36198..8cac117c9 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -1,10 +1,15 @@
-# Some simple financial calculations
-# patterned after spreadsheet computations.
+"""Some simple financial calculations
+
+patterned after spreadsheet computations.
+
+There is some complexity in each function
+so that the functions behave like ufuncs with
+broadcasting and being able to be called with scalars
+or arrays (or other sequences).
+
+"""
+from __future__ import division, absolute_import, print_function
-# There is some complexity in each function
-# so that the functions behave like ufuncs with
-# broadcasting and being able to be called with scalars
-# or arrays (or other sequences).
import numpy as np
__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate',
@@ -110,7 +115,7 @@ def fv(rate, nper, pmt, pv, when='end'):
"""
when = _convert_when(when)
- rate, nper, pmt, pv, when = map(np.asarray, [rate, nper, pmt, pv, when])
+ (rate, nper, pmt, pv, when) = map(np.asarray, [rate, nper, pmt, pv, when])
temp = (1+rate)**nper
miter = np.broadcast(rate, nper, pmt, pv, when)
zer = np.zeros(miter.shape)
@@ -201,7 +206,7 @@ def pmt(rate, nper, pv, fv=0, when='end'):
"""
when = _convert_when(when)
- rate, nper, pv, fv, when = map(np.asarray, [rate, nper, pv, fv, when])
+ (rate, nper, pv, fv, when) = map(np.asarray, [rate, nper, pv, fv, when])
temp = (1+rate)**nper
miter = np.broadcast(rate, nper, pv, fv, when)
zer = np.zeros(miter.shape)
@@ -258,17 +263,14 @@ def nper(rate, pmt, pv, fv=0, when='end'):
"""
when = _convert_when(when)
- rate, pmt, pv, fv, when = map(np.asarray, [rate, pmt, pv, fv, when])
+ (rate, pmt, pv, fv, when) = map(np.asarray, [rate, pmt, pv, fv, when])
use_zero_rate = False
- old_err = np.seterr(divide="raise")
- try:
+ with np.errstate(divide="raise"):
try:
z = pmt*(1.0+rate*when)/rate
except FloatingPointError:
use_zero_rate = True
- finally:
- np.seterr(**old_err)
if use_zero_rate:
return (-fv + pv) / (pmt + 0.0)
@@ -497,7 +499,7 @@ def pv(rate, nper, pmt, fv=0.0, when='end'):
"""
when = _convert_when(when)
- rate, nper, pmt, fv, when = map(np.asarray, [rate, nper, pmt, fv, when])
+ (rate, nper, pmt, fv, when) = map(np.asarray, [rate, nper, pmt, fv, when])
temp = (1+rate)**nper
miter = np.broadcast(rate, nper, pmt, fv, when)
zer = np.zeros(miter.shape)
@@ -563,7 +565,7 @@ def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100):
"""
when = _convert_when(when)
- nper, pmt, pv, fv, when = map(np.asarray, [nper, pmt, pv, fv, when])
+ (nper, pmt, pv, fv, when) = map(np.asarray, [nper, pmt, pv, fv, when])
rn = guess
iter = 0
close = False
@@ -670,7 +672,7 @@ def npv(rate, values):
-----
Returns the result of: [G]_
- .. math :: \\sum_{t=0}^M{\\frac{values_t}{(1+rate)^{t}}}
+ .. math :: \\sum_{t=0}^{M-1}{\\frac{values_t}{(1+rate)^{t}}}
References
----------
@@ -680,13 +682,13 @@ def npv(rate, values):
Examples
--------
>>> np.npv(0.281,[-100, 39, 59, 55, 20])
- -0.0066187288356340801
+ -0.0084785916384548798
(Compare with the Example given for numpy.lib.financial.irr)
"""
values = np.asarray(values)
- return (values / (1+rate)**np.arange(1,len(values)+1)).sum(axis=0)
+ return (values / (1+rate)**np.arange(0,len(values))).sum(axis=0)
def mirr(values, finance_rate, reinvest_rate):
"""
@@ -715,8 +717,8 @@ def mirr(values, finance_rate, reinvest_rate):
neg = values < 0
if not (pos.any() and neg.any()):
return np.nan
- numer = np.abs(npv(reinvest_rate, values*pos))*(1 + reinvest_rate)
- denom = np.abs(npv(finance_rate, values*neg))*(1 + finance_rate)
+ numer = np.abs(npv(reinvest_rate, values*pos))
+ denom = np.abs(npv(finance_rate, values*neg))
return (numer/denom)**(1.0/(n - 1))*(1 + reinvest_rate) - 1
if __name__ == '__main__':
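
The corrected formula treats ``values[0]`` as an immediate cash flow at t = 0 rather than discounting it by one period, so the two conventions differ by exactly a factor of ``1 + rate``. A quick check (a sketch; ``npv_old`` is hypothetical and only replays the removed behavior):

    import numpy as np

    def npv_new(rate, values):
        values = np.asarray(values)
        return (values / (1 + rate) ** np.arange(0, len(values))).sum(axis=0)

    def npv_old(rate, values):
        values = np.asarray(values)
        return (values / (1 + rate) ** np.arange(1, len(values) + 1)).sum(axis=0)

    cashflows = [-100, 39, 59, 55, 20]
    print(npv_new(0.281, cashflows))            # -0.00847859...
    print(np.isclose(npv_old(0.281, cashflows) * (1 + 0.281),
                     npv_new(0.281, cashflows)))  # True
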
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 1e508f3e5..fd459e84e 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -35,7 +35,7 @@ Capabilities
- Is straightforward to reverse engineer. Datasets often live longer than
the programs that created them. A competent developer should be
- able create a solution in his preferred programming language to
+ able to create a solution in his preferred programming language to
read most ``.npy`` files that he has been given without much
documentation.
@@ -134,16 +134,21 @@ The ``.npy`` format, including reasons for creating it and a comparison of
alternatives, is described fully in the "npy-format" NEP.
"""
-
-import cPickle
+from __future__ import division, absolute_import, print_function
import numpy
import sys
from numpy.lib.utils import safe_eval
-from numpy.compat import asbytes, isfileobj
+from numpy.compat import asbytes, isfileobj, long, basestring
+
+if sys.version_info[0] >= 3:
+ import pickle
+else:
+ import cPickle as pickle
MAGIC_PREFIX = asbytes('\x93NUMPY')
MAGIC_LEN = len(MAGIC_PREFIX) + 2
+BUFFER_SIZE = 2 ** 18  # size of buffer for reading npz files in bytes
def magic(major, minor):
""" Return the magic string for the given file format version.
@@ -310,7 +315,7 @@ def read_array_header_1_0(fp):
Raises
------
- ValueError :
+ ValueError
If the data is invalid.
"""
@@ -334,14 +339,13 @@ def read_array_header_1_0(fp):
# "descr" : dtype.descr
try:
d = safe_eval(header)
- except SyntaxError, e:
+ except SyntaxError as e:
msg = "Cannot parse header: %r\nException: %r"
raise ValueError(msg % (header, e))
if not isinstance(d, dict):
msg = "Header is not a dictionary: %r"
raise ValueError(msg % d)
- keys = d.keys()
- keys.sort()
+ keys = sorted(d.keys())
if keys != ['descr', 'fortran_order', 'shape']:
msg = "Header does not contain the correct keys: %r"
raise ValueError(msg % (keys,))
@@ -356,7 +360,7 @@ def read_array_header_1_0(fp):
raise ValueError(msg % (d['fortran_order'],))
try:
dtype = numpy.dtype(d['descr'])
- except TypeError, e:
+ except TypeError as e:
msg = "descr is not a valid dtype descriptor: %r"
raise ValueError(msg % (d['descr'],))
@@ -398,7 +402,7 @@ def write_array(fp, array, version=(1,0)):
if array.dtype.hasobject:
# We contain Python objects so we cannot write out the data directly.
# Instead, we will pickle it out with version 2 of the pickle protocol.
- cPickle.dump(array, fp, protocol=2)
+ pickle.dump(array, fp, protocol=2)
elif array.flags.f_contiguous and not array.flags.c_contiguous:
if isfileobj(fp):
array.T.tofile(fp)
@@ -446,7 +450,7 @@ def read_array(fp):
# Now read the actual data.
if dtype.hasobject:
# The array contained Python objects. We need to unpickle the data.
- array = cPickle.load(fp)
+ array = pickle.load(fp)
else:
if isfileobj(fp):
# We can use the fast fromfile() function.
@@ -454,9 +458,22 @@ def read_array(fp):
else:
# This is not a real file. We have to read it the memory-intensive
# way.
- # XXX: we can probably chunk this to avoid the memory hit.
- data = fp.read(int(count * dtype.itemsize))
- array = numpy.fromstring(data, dtype=dtype, count=count)
+ # The crc32 module fails on reads greater than 2 ** 32 bytes, breaking
+ # large reads from gzip streams. Chunk reads to BUFFER_SIZE bytes to
+ # avoid the issue and reduce the memory overhead of the read. In the
+ # non-chunked case count < max_read_count, so only one read is
+ # performed.
+
+ max_read_count = BUFFER_SIZE // dtype.itemsize
+
+ array = numpy.empty(count, dtype=dtype)
+
+ for i in range(0, count, max_read_count):
+ read_count = min(max_read_count, count - i)
+
+ data = fp.read(int(read_count * dtype.itemsize))
+ array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
+ count=read_count)
if fortran_order:
array.shape = shape[::-1]
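
The chunked-read pattern above can be exercised against any file-like object; a sketch with ``io.BytesIO`` standing in for a gzip stream, and the buffer shrunk so several reads actually happen:

    import io
    import numpy as np

    BUFFER_SIZE = 16                       # tiny, to force chunking here
    expected = np.arange(100, dtype=np.float64)
    fp = io.BytesIO(expected.tobytes())    # file-like, but not a real file

    count, dtype = expected.size, expected.dtype
    max_read_count = max(1, BUFFER_SIZE // dtype.itemsize)

    array = np.empty(count, dtype=dtype)
    for i in range(0, count, max_read_count):
        read_count = min(max_read_count, count - i)
        data = fp.read(int(read_count * dtype.itemsize))
        # frombuffer avoids copying the chunk a second time
        array[i:i + read_count] = np.frombuffer(data, dtype=dtype,
                                                count=read_count)

    assert (array == expected).all()
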
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 65f4ecb05..9336579af 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1,13 +1,14 @@
+from __future__ import division, absolute_import, print_function
+
__docformat__ = "restructuredtext en"
-__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
- 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
- 'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
- 'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
- 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov',
- 'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning',
- 'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc',
- 'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp',
- 'add_newdoc_ufunc']
+__all__ = [
+ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
+ 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
+ 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
+ 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
+ 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
+ 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
+ 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc']
import warnings
import types
@@ -20,17 +21,21 @@ from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
-from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
+from numpy.core.fromnumeric import ravel, nonzero, choose, sort, partition, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
-from _compiled_base import _insert, add_docstring
-from _compiled_base import digitize, bincount, interp as compiled_interp
-from arraysetops import setdiff1d
-from utils import deprecate
-from _compiled_base import add_newdoc_ufunc
+from ._compiled_base import _insert, add_docstring
+from ._compiled_base import digitize, bincount, interp as compiled_interp
+from .utils import deprecate
+from ._compiled_base import add_newdoc_ufunc
import numpy as np
+import collections
+from numpy.compat import long
+# Force range to be a generator, for np.delete's usage.
+if sys.version_info[0] < 3:
+ range = xrange
def iterable(y):
"""
@@ -707,7 +712,7 @@ def piecewise(x, condlist, funclist, *args, **kw):
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
- if not callable(item):
+ if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
@@ -956,7 +961,7 @@ def diff(a, n=1, axis=-1):
See Also
--------
- gradient, ediff1d
+ gradient, ediff1d, cumsum
Examples
--------
@@ -1267,8 +1272,7 @@ def unique(x):
idx = concatenate(([True],tmp[1:]!=tmp[:-1]))
return tmp[idx]
except AttributeError:
- items = list(set(x))
- items.sort()
+ items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
@@ -1356,309 +1360,6 @@ def place(arr, mask, vals):
"""
return _insert(arr, mask, vals)
-def _nanop(op, fill, a, axis=None):
- """
- General operation on arrays with not-a-number values.
-
- Parameters
- ----------
- op : callable
- Operation to perform.
- fill : float
- NaN values are set to fill before doing the operation.
- a : array-like
- Input array.
- axis : {int, None}, optional
- Axis along which the operation is computed.
- By default the input is flattened.
-
- Returns
- -------
- y : {ndarray, scalar}
- Processed data.
-
- """
- y = array(a, subok=True)
-
- # We only need to take care of NaN's in floating point arrays
- if np.issubdtype(y.dtype, np.integer):
- return op(y, axis=axis)
- mask = isnan(a)
- # y[mask] = fill
- # We can't use fancy indexing here as it'll mess w/ MaskedArrays
- # Instead, let's fill the array directly...
- np.copyto(y, fill, where=mask)
- res = op(y, axis=axis)
- mask_all_along_axis = mask.all(axis=axis)
-
- # Along some axes, only nan's were encountered. As such, any values
- # calculated along that axis should be set to nan.
- if mask_all_along_axis.any():
- if np.isscalar(res):
- res = np.nan
- else:
- res[mask_all_along_axis] = np.nan
-
- return res
-
-def nansum(a, axis=None):
- """
- Return the sum of array elements over a given axis treating
- Not a Numbers (NaNs) as zero.
-
- Parameters
- ----------
- a : array_like
- Array containing numbers whose sum is desired. If `a` is not an
- array, a conversion is attempted.
- axis : int, optional
- Axis along which the sum is computed. The default is to compute
- the sum of the flattened array.
-
- Returns
- -------
- y : ndarray
- An array with the same shape as a, with the specified axis removed.
- If a is a 0-d array, or if axis is None, a scalar is returned with
- the same dtype as `a`.
-
- See Also
- --------
- numpy.sum : Sum across array including Not a Numbers.
- isnan : Shows which elements are Not a Number (NaN).
- isfinite: Shows which elements are not: Not a Number, positive and
- negative infinity
-
- Notes
- -----
- Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
- (IEEE 754). This means that Not a Number is not equivalent to infinity.
- If positive or negative infinity are present the result is positive or
- negative infinity. But if both positive and negative infinity are present,
- the result is Not A Number (NaN).
-
- Arithmetic is modular when using integer types (all elements of `a` must
- be finite i.e. no elements that are NaNs, positive infinity and negative
- infinity because NaNs are floating point types), and no error is raised
- on overflow.
-
-
- Examples
- --------
- >>> np.nansum(1)
- 1
- >>> np.nansum([1])
- 1
- >>> np.nansum([1, np.nan])
- 1.0
- >>> a = np.array([[1, 1], [1, np.nan]])
- >>> np.nansum(a)
- 3.0
- >>> np.nansum(a, axis=0)
- array([ 2., 1.])
-
- When positive infinity and negative infinity are present
-
- >>> np.nansum([1, np.nan, np.inf])
- inf
- >>> np.nansum([1, np.nan, np.NINF])
- -inf
- >>> np.nansum([1, np.nan, np.inf, np.NINF])
- nan
-
- """
- return _nanop(np.sum, 0, a, axis)
-
-def nanmin(a, axis=None):
- """
- Return the minimum of an array or minimum along an axis ignoring any NaNs.
-
- Parameters
- ----------
- a : array_like
- Array containing numbers whose minimum is desired.
- axis : int, optional
- Axis along which the minimum is computed.The default is to compute
- the minimum of the flattened array.
-
- Returns
- -------
- nanmin : ndarray
- A new array or a scalar array with the result.
-
- See Also
- --------
- numpy.amin : Minimum across array including any Not a Numbers.
- numpy.nanmax : Maximum across array ignoring any Not a Numbers.
- isnan : Shows which elements are Not a Number (NaN).
- isfinite: Shows which elements are not: Not a Number, positive and
- negative infinity
-
- Notes
- -----
- Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
- (IEEE 754). This means that Not a Number is not equivalent to infinity.
- Positive infinity is treated as a very large number and negative infinity
- is treated as a very small (i.e. negative) number.
-
- If the input has a integer type the function is equivalent to np.min.
-
-
- Examples
- --------
- >>> a = np.array([[1, 2], [3, np.nan]])
- >>> np.nanmin(a)
- 1.0
- >>> np.nanmin(a, axis=0)
- array([ 1., 2.])
- >>> np.nanmin(a, axis=1)
- array([ 1., 3.])
-
- When positive infinity and negative infinity are present:
-
- >>> np.nanmin([1, 2, np.nan, np.inf])
- 1.0
- >>> np.nanmin([1, 2, np.nan, np.NINF])
- -inf
-
- """
- a = np.asanyarray(a)
- if axis is not None:
- return np.fmin.reduce(a, axis)
- else:
- return np.fmin.reduce(a.flat)
-
-def nanargmin(a, axis=None):
- """
- Return indices of the minimum values over an axis, ignoring NaNs.
-
- Parameters
- ----------
- a : array_like
- Input data.
- axis : int, optional
- Axis along which to operate. By default flattened input is used.
-
- Returns
- -------
- index_array : ndarray
- An array of indices or a single index value.
-
- See Also
- --------
- argmin, nanargmax
-
- Examples
- --------
- >>> a = np.array([[np.nan, 4], [2, 3]])
- >>> np.argmin(a)
- 0
- >>> np.nanargmin(a)
- 2
- >>> np.nanargmin(a, axis=0)
- array([1, 1])
- >>> np.nanargmin(a, axis=1)
- array([1, 0])
-
- """
- return _nanop(np.argmin, np.inf, a, axis)
-
-def nanmax(a, axis=None):
- """
- Return the maximum of an array or maximum along an axis ignoring any NaNs.
-
- Parameters
- ----------
- a : array_like
- Array containing numbers whose maximum is desired. If `a` is not
- an array, a conversion is attempted.
- axis : int, optional
- Axis along which the maximum is computed. The default is to compute
- the maximum of the flattened array.
-
- Returns
- -------
- nanmax : ndarray
- An array with the same shape as `a`, with the specified axis removed.
- If `a` is a 0-d array, or if axis is None, a ndarray scalar is
- returned. The the same dtype as `a` is returned.
-
- See Also
- --------
- numpy.amax : Maximum across array including any Not a Numbers.
- numpy.nanmin : Minimum across array ignoring any Not a Numbers.
- isnan : Shows which elements are Not a Number (NaN).
- isfinite: Shows which elements are not: Not a Number, positive and
- negative infinity
-
- Notes
- -----
- Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
- (IEEE 754). This means that Not a Number is not equivalent to infinity.
- Positive infinity is treated as a very large number and negative infinity
- is treated as a very small (i.e. negative) number.
-
- If the input has a integer type the function is equivalent to np.max.
-
- Examples
- --------
- >>> a = np.array([[1, 2], [3, np.nan]])
- >>> np.nanmax(a)
- 3.0
- >>> np.nanmax(a, axis=0)
- array([ 3., 2.])
- >>> np.nanmax(a, axis=1)
- array([ 2., 3.])
-
- When positive infinity and negative infinity are present:
-
- >>> np.nanmax([1, 2, np.nan, np.NINF])
- 2.0
- >>> np.nanmax([1, 2, np.nan, np.inf])
- inf
-
- """
- a = np.asanyarray(a)
- if axis is not None:
- return np.fmax.reduce(a, axis)
- else:
- return np.fmax.reduce(a.flat)
-
-def nanargmax(a, axis=None):
- """
- Return indices of the maximum values over an axis, ignoring NaNs.
-
- Parameters
- ----------
- a : array_like
- Input data.
- axis : int, optional
- Axis along which to operate. By default flattened input is used.
-
- Returns
- -------
- index_array : ndarray
- An array of indices or a single index value.
-
- See Also
- --------
- argmax, nanargmin
-
- Examples
- --------
- >>> a = np.array([[np.nan, 4], [2, 3]])
- >>> np.argmax(a)
- 0
- >>> np.nanargmax(a)
- 1
- >>> np.nanargmax(a, axis=0)
- array([1, 0])
- >>> np.nanargmax(a, axis=1)
- array([1, 1])
-
- """
- return _nanop(np.argmax, -np.inf, a, axis)
-
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
@@ -1732,9 +1433,9 @@ class vectorize(object):
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
-
+
.. versionadded:: 1.7.0
-
+
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
@@ -1861,7 +1562,7 @@ class vectorize(object):
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
- the_args[_i] = vargs[_n]
+ the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
@@ -1922,7 +1623,7 @@ class vectorize(object):
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
-
+
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
@@ -1931,8 +1632,8 @@ class vectorize(object):
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
- inputs = [array(_a, copy=False, subok=True, dtype=object)
- for _a in args]
+ inputs = [array(_a, copy=False, subok=True, dtype=object)
+ for _a in args]
outputs = ufunc(*inputs)
@@ -2584,7 +2285,7 @@ def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
- for i in xrange(1,len(vals)):
+ for i in range(1,len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
@@ -2912,7 +2613,7 @@ def median(a, axis=None, out=None, overwrite_input=False):
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
- overwrite_input : bool optional
+ overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
@@ -2969,30 +2670,50 @@ def median(a, axis=None, out=None, overwrite_input=False):
>>> assert not np.all(a==b)
"""
+ if axis is not None and axis >= a.ndim:
+ raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
+
if overwrite_input:
if axis is None:
- sorted = a.ravel()
- sorted.sort()
+ part = a.ravel()
+ sz = part.size
+ if sz % 2 == 0:
+ szh = sz // 2
+ part.partition((szh - 1, szh))
+ else:
+ part.partition((sz - 1) // 2)
else:
- a.sort(axis=axis)
- sorted = a
+ sz = a.shape[axis]
+ if sz % 2 == 0:
+ szh = sz // 2
+ a.partition((szh - 1, szh), axis=axis)
+ else:
+ a.partition((sz - 1) // 2, axis=axis)
+ part = a
else:
- sorted = sort(a, axis=axis)
- if sorted.shape == ():
+ if axis is None:
+ sz = a.size
+ else:
+ sz = a.shape[axis]
+ if sz % 2 == 0:
+ part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis)
+ else:
+ part = partition(a, (sz - 1) // 2, axis=axis)
+ if part.shape == ():
# make 0-D arrays work
- return sorted.item()
+ return part.item()
if axis is None:
axis = 0
- indexer = [slice(None)] * sorted.ndim
- index = int(sorted.shape[axis]/2)
- if sorted.shape[axis] % 2 == 1:
+ indexer = [slice(None)] * part.ndim
+ index = part.shape[axis] // 2
+ if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
- return mean(sorted[indexer], axis=axis, out=out)
+ return mean(part[indexer], axis=axis, out=out)
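
The rewrite selects the middle order statistics with a partial sort: ``partition`` only guarantees that the requested indices hold their sorted values, which is all median needs. A minimal sketch of the even-length case:

    import numpy as np

    a = np.array([7, 1, 9, 3, 5, 8])        # even length
    szh = a.size // 2
    # Put the two middle order statistics (indices szh-1 and szh) in their
    # sorted positions; the rest of the array is only partially ordered.
    part = np.partition(a, (szh - 1, szh))
    print(part[szh - 1:szh + 1].mean())     # 6.0, same as np.median(a)
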
def percentile(a, q, axis=None, out=None, overwrite_input=False):
"""
@@ -3051,7 +2772,7 @@ def percentile(a, q, axis=None, out=None, overwrite_input=False):
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
- >>> np.percentile(a, 0.5, axis=0)
+ >>> np.percentile(a, 50, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
@@ -3237,7 +2958,7 @@ def add_newdoc(place, obj, doc):
"""
try:
new = {}
- exec 'from %s import %s' % (place, obj) in new
+ exec('from %s import %s' % (place, obj), new)
if isinstance(doc, str):
add_docstring(new[obj], doc.strip())
elif isinstance(doc, tuple):
@@ -3381,7 +3102,8 @@ def meshgrid(*xi, **kwargs):
def delete(arr, obj, axis=None):
"""
- Return a new array with sub-arrays along an axis deleted.
+ Return a new array with sub-arrays along an axis deleted. For a
+ one-dimensional array, this returns those entries not returned by
+ `arr[obj]`.
Parameters
----------
@@ -3405,6 +3127,15 @@ def delete(arr, obj, axis=None):
insert : Insert elements into an array.
append : Append elements at the end of an array.
+ Notes
+ -----
+ Often it is preferable to use a boolean mask. For example:
+ >>> mask = np.ones(len(arr), dtype=bool)
+ >>> mask[[0,2,4]] = False
+ >>> result = arr[mask,...]
+ This is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows
+ further use of `mask`.
+
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
@@ -3431,7 +3162,6 @@ def delete(arr, obj, axis=None):
except AttributeError:
pass
-
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
@@ -3440,34 +3170,35 @@ def delete(arr, obj, axis=None):
ndim = arr.ndim;
axis = ndim-1;
if ndim == 0:
+ warnings.warn("in the future the special handling of scalars "
+ "will be removed from delete and raise an error",
+ DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
+
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
- if isinstance(obj, (int, long, integer)):
- if (obj < 0): obj += N
- if (obj < 0 or obj >=N):
- raise ValueError(
- "invalid entry")
- newshape[axis]-=1;
- new = empty(newshape, arr.dtype, arr.flags.fnc)
- slobj[axis] = slice(None, obj)
- new[slobj] = arr[slobj]
- slobj[axis] = slice(obj,None)
- slobj2 = [slice(None)]*ndim
- slobj2[axis] = slice(obj+1,None)
- new[slobj] = arr[slobj2]
- elif isinstance(obj, slice):
+
+ if isinstance(obj, slice):
start, stop, step = obj.indices(N)
- numtodel = len(xrange(start, stop, step))
+ xr = range(start, stop, step)
+ numtodel = len(xr)
+
if numtodel <= 0:
if wrap:
- return wrap(new)
+ return wrap(arr.copy())
else:
return arr.copy()
+
+ # Invert if step is negative:
+ if step < 0:
+ step = -step
+ start = xr[-1]
+ stop = xr[0] + 1
+
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
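
The inversion above works because a reversed slice deletes the same index set as its normalized positive-step counterpart; a small standalone check:

    import numpy as np

    arr = np.arange(10)
    xr = range(*slice(8, 1, -2).indices(len(arr)))     # 8, 6, 4, 2
    # Normalized to a positive step over the same indices:
    start, stop, step = xr[-1], xr[0] + 1, 2
    assert list(range(start, stop, step)) == sorted(xr)
    print(np.delete(arr, slice(8, 1, -2)))             # [0 1 3 5 7 9]
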
@@ -3488,24 +3219,77 @@ def delete(arr, obj, axis=None):
if step == 1:
pass
else: # use array indexing.
- obj = arange(start, stop, step, dtype=intp)
- all = arange(start, stop, dtype=intp)
- obj = setdiff1d(all, obj)
+ keep = ones(stop-start, dtype=bool)
+ keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
- slobj2[axis] = obj
+ slobj2[axis] = slice(start, stop)
+ arr = arr[slobj2]
+ slobj2[axis] = keep
new[slobj] = arr[slobj2]
- else: # default behavior
- obj = array(obj, dtype=intp, copy=0, ndmin=1)
- all = arange(N, dtype=intp)
- obj = setdiff1d(all, obj)
- slobj[axis] = obj
+ if wrap:
+ return wrap(new)
+ else:
+ return new
+
+ _obj = obj
+ obj = np.asarray(obj)
+ # After removing the special handling of booleans and out of
+ # bounds values, the conversion to the array can be removed.
+ if obj.dtype == bool:
+ warnings.warn("in the future insert will treat boolean arrays "
+ "and array-likes as boolean index instead "
+ "of casting it to integer", FutureWarning)
+ obj = obj.astype(intp)
+ if isinstance(_obj, (int, long, integer)):
+ # optimization for a single value
+ obj = obj.item()
+ if (obj < -N or obj >=N):
+ raise IndexError("index %i is out of bounds for axis "
+ "%i with size %i" % (obj, axis, N))
+ if (obj < 0): obj += N
+ newshape[axis] -= 1
+ new = empty(newshape, arr.dtype, arr.flags.fnc)
+ slobj[axis] = slice(None, obj)
+ new[slobj] = arr[slobj]
+ slobj[axis] = slice(obj,None)
+ slobj2 = [slice(None)]*ndim
+ slobj2[axis] = slice(obj+1,None)
+ new[slobj] = arr[slobj2]
+ else:
+ if obj.size == 0 and not isinstance(_obj, np.ndarray):
+ obj = obj.astype(intp)
+ if not np.can_cast(obj, intp, 'same_kind'):
+ # obj.size = 1 special case always failed and would just
+ # give superfluous warnings.
+ warnings.warn("using a non-integer array as obj in delete "
+ "will result in an error in the future", DeprecationWarning)
+ obj = obj.astype(intp)
+ keep = ones(N, dtype=bool)
+
+ # Test if there are out of bound indices, this is deprecated
+ inside_bounds = (obj < N) & (obj >= -N)
+ if not inside_bounds.all():
+ warnings.warn("in the future out of bounds indices will raise an "
+ "error instead of being ignored by `numpy.delete`.",
+ DeprecationWarning)
+ obj = obj[inside_bounds]
+ positive_indices = obj >= 0
+ if not positive_indices.all():
+ warnings.warn("in the future negative indices will not be ignored "
+ "by `numpy.delete`.", FutureWarning)
+ obj = obj[positive_indices]
+
+ keep[obj,] = False
+ slobj[axis] = keep
new = arr[slobj]
+
if wrap:
return wrap(new)
else:
return new
+
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
@@ -3517,9 +3301,16 @@ def insert(arr, obj, values, axis=None):
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
+
+ .. versionadded:: 1.8.0
+
+ Support for multiple insertions when `obj` is a single scalar or a
+ sequence with one element (similar to calling insert multiple times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
+ `values` should be shaped so that ``arr[...,obj,...] = values``
+ is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
@@ -3534,8 +3325,15 @@ def insert(arr, obj, values, axis=None):
See Also
--------
append : Append elements at the end of an array.
+ concatenate : Join a sequence of arrays together.
delete : Delete elements from an array.
+ Notes
+ -----
+ Note that for higher dimensional inserts `obj=0` behaves very differently
+ from `obj=[0]`, just as `arr[:,0,:] = values` is different from
+ `arr[:,[0],:] = values`.
+
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
@@ -3550,6 +3348,15 @@ def insert(arr, obj, values, axis=None):
[2, 5, 2],
[3, 5, 3]])
+ Difference between sequence and scalars:
+ >>> np.insert(a, [1], [[1],[2],[3]], axis=1)
+ array([[1, 1, 1],
+ [2, 2, 2],
+ [3, 3, 3]])
+ >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
+ ... np.insert(a, [1], [[1],[2],[3]], axis=1))
+ True
+
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
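
A usage sketch of the 1.8.0 behavior documented above: a scalar ``obj`` with several ``values`` inserts them all at one position, while a sequence ``obj`` inserts element-wise:

    import numpy as np

    b = np.array([1, 1, 2, 2, 3, 3])
    print(np.insert(b, 2, [10, 11]))        # [ 1  1 10 11  2  2  3  3]
    print(np.insert(b, [2, 2], [10, 11]))   # [ 1  1 10 11  2  2  3  3]
    print(np.insert(b, [2, 4], [10, 11]))   # [ 1  1 10  2  2 11  3  3]
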
@@ -3583,7 +3390,15 @@ def insert(arr, obj, values, axis=None):
arr = arr.ravel()
ndim = arr.ndim
axis = ndim-1
+ else:
+ if ndim > 0 and (axis < -ndim or axis >= ndim):
+ raise IndexError("axis %i is out of bounds for an array "
+ "of dimension %i" % (axis, ndim))
+ if (axis < 0): axis += ndim
if (ndim == 0):
+ warnings.warn("in the future the special handling of scalars "
+ "will be removed from insert and raise an error",
+ DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
@@ -3594,38 +3409,77 @@ def insert(arr, obj, values, axis=None):
N = arr.shape[axis]
newshape = list(arr.shape)
- if isinstance(obj, (int, long, integer)):
- if (obj < 0): obj += N
- if obj < 0 or obj > N:
- raise ValueError(
- "index (%d) out of range (0<=index<=%d) "\
- "in dimension %d" % (obj, N, axis))
- if isscalar(values):
- obj = [obj]
- else:
- values = asarray(values)
- if ndim > values.ndim:
- obj = [obj]
- else:
- obj = [obj] * len(values)
-
- elif isinstance(obj, slice):
+ if isinstance(obj, slice):
# turn it into a range object
- obj = arange(*obj.indices(N),**{'dtype':intp})
+ indices = arange(*obj.indices(N),**{'dtype':intp})
+ else:
+ # need to copy obj, because indices will be changed in-place
+ indices = np.array(obj)
+ if indices.dtype == bool:
+ # See also delete
+ warnings.warn("in the future insert will treat boolean arrays "
+ "and array-likes as a boolean index instead "
+ "of casting it to integer", FutureWarning)
+ indices = indices.astype(intp)
+ # Code after warning period:
+ #if obj.ndim != 1:
+ # raise ValueError('boolean array argument obj to insert '
+ # 'must be one dimensional')
+ #indices = np.flatnonzero(obj)
+ elif indices.ndim > 1:
+ raise ValueError("index array argument obj to insert must "
+ "be one dimensional or scalar")
+ if indices.size == 1:
+ index = indices.item()
+ if index < -N or index > N:
+ raise IndexError("index %i is out of bounds for axis "
+ "%i with size %i" % (obj, axis, N))
+ if (index < 0): index += N
+
+ values = array(values, copy=False, ndmin=arr.ndim)
+ if indices.ndim == 0:
+ # broadcasting is very different here, since a[:,0,:] = ... behaves
+ # very differently from a[:,[0],:] = ...! This changes values so that
+ # it works like the second case. (here a[:,0:1,:])
+ values = np.rollaxis(values, 0, axis + 1)
+ numnew = values.shape[axis]
+ newshape[axis] += numnew
+ new = empty(newshape, arr.dtype, arr.flags.fnc)
+ slobj[axis] = slice(None, index)
+ new[slobj] = arr[slobj]
+ slobj[axis] = slice(index, index+numnew)
+ new[slobj] = values
+ slobj[axis] = slice(index+numnew, None)
+ slobj2 = [slice(None)] * ndim
+ slobj2[axis] = slice(index, None)
+ new[slobj] = arr[slobj2]
+ if wrap:
+ return wrap(new)
+ return new
+ elif indices.size == 0 and not isinstance(obj, np.ndarray):
+ # Can safely cast the empty list to intp
+ indices = indices.astype(intp)
- # get two sets of indices
- # one is the indices which will hold the new stuff
- # two is the indices where arr will be copied over
+ if not np.can_cast(indices, intp, 'same_kind'):
+ warnings.warn("using a non-integer array as obj in insert "
+ "will result in an error in the future",
+ DeprecationWarning)
+ indices = indices.astype(intp)
+
+ indices[indices < 0] += N
+
+ numnew = len(indices)
+ order = indices.argsort(kind='mergesort') # stable sort
+ indices[order] += np.arange(numnew)
- obj = asarray(obj, dtype=intp)
- numnew = len(obj)
- index1 = obj + arange(numnew)
- index2 = setdiff1d(arange(numnew+N),index1)
newshape[axis] += numnew
+ old_mask = ones(newshape[axis], dtype=bool)
+ old_mask[indices] = False
+
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
- slobj[axis] = index1
- slobj2[axis] = index2
+ slobj[axis] = indices
+ slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
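
The index bookkeeping above is the heart of the multi-insert: a stable argsort followed by ``indices[order] += np.arange(numnew)`` shifts each insertion point by the number of insertions landing before it, so the new slots never collide. Isolated, the step looks like this (a sketch):

    import numpy as np

    N = 6                                   # length of the axis
    indices = np.array([4, 2, 2])           # requested insertion points
    numnew = len(indices)

    order = indices.argsort(kind='mergesort')   # stable: ties keep order
    indices[order] += np.arange(numnew)         # 2, 2, 4 -> 2, 3, 6
    print(indices)                              # [6 2 3]

    old_mask = np.ones(N + numnew, dtype=bool)
    old_mask[indices] = False               # False marks slots for new data
    print(old_mask.nonzero()[0])            # [0 1 4 5 7 8], old-data slots
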
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index b07fde27d..ca3d60869 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
__all__ = ['ravel_multi_index',
'unravel_index',
'mgrid',
@@ -14,9 +16,9 @@ from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod,
from numpy.core.numerictypes import find_common_type
import math
-import function_base
+from . import function_base
import numpy.matrixlib as matrix
-from function_base import diff
+from .function_base import diff
from numpy.lib._compiled_base import ravel_multi_index, unravel_index
from numpy.lib.stride_tricks import as_strided
makemat = matrix.matrix
@@ -155,14 +157,14 @@ class nd_grid(object):
size.append(int(abs(step)))
typ = float
else:
- size.append(math.ceil((key[k].stop - start)/(step*1.0)))
+ size.append(int(math.ceil((key[k].stop - start)/(step*1.0))))
if isinstance(step, float) or \
isinstance(start, float) or \
isinstance(key[k].stop, float):
typ = float
if self.sparse:
- nn = map(lambda x,t: _nx.arange(x, dtype=t), size, \
- (typ,)*len(size))
+ nn = [_nx.arange(_x, dtype=_t)
+ for _x, _t in zip(size, (typ,)*len(size))]
else:
nn = _nx.indices(size, typ)
for k in range(len(size)):
@@ -242,7 +244,7 @@ class AxisConcatenator(object):
frame = sys._getframe().f_back
mymat = matrix.bmat(key,frame.f_globals,frame.f_locals)
return mymat
- if type(key) is not tuple:
+ if not isinstance(key, tuple):
key = (key,)
objs = []
scalars = []
@@ -250,7 +252,7 @@ class AxisConcatenator(object):
scalartypes = []
for k in range(len(key)):
scalar = False
- if type(key[k]) is slice:
+ if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
@@ -305,7 +307,7 @@ class AxisConcatenator(object):
k2 = ndmin-tempobj.ndim
if (trans1d < 0):
trans1d += k2 + 1
- defaxes = range(ndmin)
+ defaxes = list(range(ndmin))
k1 = trans1d
axes = defaxes[:k1] + defaxes[k2:] + \
defaxes[k1:k2]
@@ -485,7 +487,7 @@ class ndenumerate(object):
def __init__(self, arr):
self.iter = asarray(arr).flat
- def next(self):
+ def __next__(self):
"""
Standard iterator method, returns the index tuple and array value.
@@ -497,11 +499,13 @@ class ndenumerate(object):
The array element of the current iteration.
"""
- return self.iter.coords, self.iter.next()
+ return self.iter.coords, next(self.iter)
def __iter__(self):
return self
+ next = __next__
+
class ndindex(object):
"""
@@ -533,6 +537,8 @@ class ndindex(object):
"""
def __init__(self, *shape):
+ if len(shape) == 1 and isinstance(shape[0], tuple):
+ shape = shape[0]
x = as_strided(_nx.zeros(1), shape=shape, strides=_nx.zeros_like(shape))
self._it = _nx.nditer(x, flags=['multi_index'], order='C')
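
A short usage sketch of the tuple support added above; both spellings now iterate in C order over the same shape:

    import numpy as np

    print(list(np.ndindex(2, 2)))      # [(0, 0), (0, 1), (1, 0), (1, 1)]
    print(list(np.ndindex((2, 2))))    # same result with a single tuple
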
@@ -545,9 +551,9 @@ class ndindex(object):
This method is for backward compatibility only: do not use.
"""
- self.next()
+ next(self)
- def next(self):
+ def __next__(self):
"""
Standard iterator method, updates the index and returns the index tuple.
@@ -557,9 +563,11 @@ class ndindex(object):
Returns a tuple containing the indices of the current iteration.
"""
- self._it.next()
+ next(self._it)
return self._it.multi_index
+ next = __next__
+
# You can do all this with slice() plus a few special objects,
# but there's a lot to remember. This version is simpler because
@@ -619,7 +627,7 @@ class IndexExpression(object):
self.maketuple = maketuple
def __getitem__(self, item):
- if self.maketuple and type(item) != tuple:
+ if self.maketuple and not isinstance(item, tuple):
return (item,)
else:
return item
@@ -649,7 +657,8 @@ def fill_diagonal(a, val, wrap=False):
Value to be written on the diagonal, its type must be compatible with
that of the array a.
- wrap: bool For tall matrices in NumPy version up to 1.6.2, the
+ wrap : bool
+ For tall matrices in NumPy version up to 1.6.2, the
diagonal "wrapped" after N columns. You can have this behavior
with this option. This affect only tall matrices.
diff --git a/numpy/lib/info.py b/numpy/lib/info.py
index 4a781a2ca..94eeae503 100644
--- a/numpy/lib/info.py
+++ b/numpy/lib/info.py
@@ -145,6 +145,7 @@ setdiff1d Set difference of 1D arrays with unique elements.
================ ===================
"""
+from __future__ import division, absolute_import, print_function
depends = ['core','testing']
global_symbols = ['*']
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
new file mode 100644
index 000000000..81f5aee2e
--- /dev/null
+++ b/numpy/lib/nanfunctions.py
@@ -0,0 +1,812 @@
+"""
+Functions that ignore NaN.
+
+Functions
+---------
+
+- `nanmin` -- minimum non-NaN value
+- `nanmax` -- maximum non-NaN value
+- `nanargmin` -- index of minimum non-NaN value
+- `nanargmax` -- index of maximum non-NaN value
+- `nansum` -- sum of non-NaN values
+- `nanmean` -- mean of non-NaN values
+- `nanvar` -- variance of non-NaN values
+- `nanstd` -- standard deviation of non-NaN values
+
+Classes
+-------
+- `NanWarning` -- Warning raised by nanfunctions
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import warnings
+import numpy as np
+
+__all__ = [
+ 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
+ 'nanvar', 'nanstd', 'NanWarning'
+ ]
+
+class NanWarning(RuntimeWarning): pass
+
+
+def _replace_nan(a, val):
+ """
+ If `a` is of inexact type, make a copy of `a`, replace NaNs with
+ the `val` value, and return the copy together with a boolean mask
+ marking the locations where NaNs were present. If `a` is not of
+ inexact type, do nothing and return `a` together with a mask of None.
+
+ Parameters
+ ----------
+ a : array-like
+ Input array.
+ val : float
+ NaN values are set to val before doing the operation.
+
+ Returns
+ -------
+ y : ndarray
+ If `a` is of inexact type, return a copy of `a` with the NaNs
+ replaced by the fill value, otherwise return `a`.
+ mask : {bool, None}
+ If `a` is of inexact type, return a boolean mask marking locations of
+ NaNs, otherwise return None.
+
+ """
+ is_new = not isinstance(a, np.ndarray)
+ if is_new:
+ a = np.array(a)
+ if not issubclass(a.dtype.type, np.inexact):
+ return a, None
+ if not is_new:
+ # need copy
+ a = np.array(a, subok=True)
+
+ mask = np.isnan(a)
+ np.copyto(a, val, where=mask)
+ return a, mask
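
A sketch of the helper's contract; since ``_replace_nan`` is private, the code below mirrors its logic instead of importing it:

    import numpy as np

    def replace_nan(a, val):
        # Integer input passes through untouched, with no mask.
        a = np.asanyarray(a)
        if not issubclass(a.dtype.type, np.inexact):
            return a, None
        a = np.array(a, subok=True)      # copy before filling in place
        mask = np.isnan(a)
        np.copyto(a, val, where=mask)
        return a, mask

    arr, mask = replace_nan([1.0, np.nan, 3.0], 0)
    print(arr)     # [ 1.  0.  3.]
    print(mask)    # [False  True False]
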
+
+
+def _copyto(a, val, mask):
+ """
+ Replace values in `a` with NaN where `mask` is True. This differs from
+ copyto in that it will deal with the case where `a` is a numpy scalar.
+
+ Parameters
+ ----------
+ a : ndarray or numpy scalar
+ Array or numpy scalar some of whose values are to be replaced
+ by val.
+ val : numpy scalar
+ Value used as a replacement.
+ mask : ndarray, scalar
+ Boolean array. Where True the corresponding element of `a` is
+ replaced by `val`. Broadcasts.
+
+ Returns
+ -------
+ res : ndarray, scalar
+ Array with elements replaced or scalar `val`.
+
+ """
+ if isinstance(a, np.ndarray):
+ np.copyto(a, val, where=mask, casting='unsafe')
+ else:
+ a = a.dtype.type(val)
+ return a
+
+
+def _divide_by_count(a, b, out=None):
+ """
+ Compute a/b ignoring invalid results. If `a` is an array the division
+ is done in place. If `a` is a scalar, then its type is preserved in the
+ output. If `out` is None, then `a` is used instead so that the
+ division is done in place.
+
+ Parameters
+ ----------
+ a : {ndarray, numpy scalar}
+ Numerator. Expected to be of inexact type but not checked.
+ b : {ndarray, numpy scalar}
+ Denominator.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary.
+
+ Returns
+ -------
+ ret : {ndarray, numpy scalar}
+ The return value is a/b. If `a` was an ndarray the division is done
+ in place. If `a` is a numpy scalar, the division preserves its type.
+
+ """
+ with np.errstate(invalid='ignore'):
+ if isinstance(a, np.ndarray):
+ if out is None:
+ return np.divide(a, b, out=a, casting='unsafe')
+ else:
+ return np.divide(a, b, out=out, casting='unsafe')
+ else:
+ if out is None:
+ return a.dtype.type(a / b)
+ else:
+ # This is questionable, but currently a numpy scalar can
+ # be output to a zero dimensional array.
+ return np.divide(a, b, out=out, casting='unsafe')
+
+
+def nanmin(a, axis=None, out=None, keepdims=False):
+ """
+ Return the minimum of an array or minimum along an axis, ignoring any
+ NaNs.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose minimum is desired. If `a` is not
+ an array, a conversion is attempted.
+ axis : int, optional
+ Axis along which the minimum is computed. The default is to compute
+ the minimum of the flattened array.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary.
+ See `doc.ufuncs` for details.
+
+ .. versionadded:: 1.8.0
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ .. versionadded:: 1.8.0
+
+ Returns
+ -------
+ nanmin : ndarray
+ An array with the same shape as `a`, with the specified axis removed.
+ If `a` is a 0-d array, or if axis is None, an ndarray scalar is
+ returned. The same dtype as `a` is returned.
+
+ See Also
+ --------
+ nanmax :
+ The maximum value of an array along a given axis, ignoring any NaNs.
+ amin :
+ The minimum value of an array along a given axis, propagating any NaNs.
+ fmin :
+ Element-wise minimum of two arrays, ignoring any NaNs.
+ minimum :
+ Element-wise minimum of two arrays, propagating any NaNs.
+ isnan :
+ Shows which elements are Not a Number (NaN).
+ isfinite:
+ Shows which elements are neither NaN nor infinity.
+
+ amax, fmax, maximum
+
+ Notes
+ -----
+ Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
+ Positive infinity is treated as a very large number and negative infinity
+ is treated as a very small (i.e. negative) number.
+
+ If the input has an integer type the function is equivalent to np.min.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nanmin(a)
+ 1.0
+ >>> np.nanmin(a, axis=0)
+ array([ 1., 2.])
+ >>> np.nanmin(a, axis=1)
+ array([ 1., 3.])
+
+ When positive infinity and negative infinity are present:
+
+ >>> np.nanmin([1, 2, np.nan, np.inf])
+ 1.0
+ >>> np.nanmin([1, 2, np.nan, np.NINF])
+ -inf
+
+ """
+ return np.fmin.reduce(a, axis=axis, out=out, keepdims=keepdims)
+
+
+def nanmax(a, axis=None, out=None, keepdims=False):
+ """
+ Return the maximum of an array or maximum along an axis, ignoring any NaNs.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose maximum is desired. If `a` is not
+ an array, a conversion is attempted.
+ axis : int, optional
+ Axis along which the maximum is computed. The default is to compute
+ the maximum of the flattened array.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary.
+ See `doc.ufuncs` for details.
+
+ .. versionadded:: 1.8.0
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ .. versionadded:: 1.8.0
+
+ Returns
+ -------
+ nanmax : ndarray
+ An array with the same shape as `a`, with the specified axis removed.
+ If `a` is a 0-d array, or if axis is None, an ndarray scalar is
+ returned. The same dtype as `a` is returned.
+
+ See Also
+ --------
+ nanmin :
+ The minimum value of an array along a given axis, ignoring any NaNs.
+ amax :
+ The maximum value of an array along a given axis, propagating any NaNs.
+ fmax :
+ Element-wise maximum of two arrays, ignoring any NaNs.
+ maximum :
+ Element-wise maximum of two arrays, propagating any NaNs.
+ isnan :
+ Shows which elements are Not a Number (NaN).
+ isfinite:
+ Shows which elements are neither NaN nor infinity.
+
+ amin, fmin, minimum
+
+ Notes
+ -----
+ Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
+ Positive infinity is treated as a very large number and negative infinity
+ is treated as a very small (i.e. negative) number.
+
+ If the input has an integer type the function is equivalent to np.max.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nanmax(a)
+ 3.0
+ >>> np.nanmax(a, axis=0)
+ array([ 3., 2.])
+ >>> np.nanmax(a, axis=1)
+ array([ 2., 3.])
+
+ When positive infinity and negative infinity are present:
+
+ >>> np.nanmax([1, 2, np.nan, np.NINF])
+ 2.0
+ >>> np.nanmax([1, 2, np.nan, np.inf])
+ inf
+
+ """
+ return np.fmax.reduce(a, axis=axis, out=out, keepdims=keepdims)
+
+
+def nanargmin(a, axis=None):
+ """
+ Return the indices of the minimum values in the specified axis ignoring
+ NaNs. For all-NaN slices the negative number ``np.iinfo('intp').min`` is
+ returned; its exact value is platform dependent. Warning: the results
+ cannot be trusted if a slice contains only NaNs and Infs.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : int, optional
+ Axis along which to operate. By default flattened input is used.
+
+ Returns
+ -------
+ index_array : ndarray
+ An array of indices or a single index value.
+
+ See Also
+ --------
+ argmin, nanargmax
+
+ Examples
+ --------
+ >>> a = np.array([[np.nan, 4], [2, 3]])
+ >>> np.argmin(a)
+ 0
+ >>> np.nanargmin(a)
+ 2
+ >>> np.nanargmin(a, axis=0)
+ array([1, 1])
+ >>> np.nanargmin(a, axis=1)
+ array([1, 0])
+
+ """
+ a, mask = _replace_nan(a, np.inf)
+ if mask is None:
+ return np.argmin(a, axis)
+ # May later want to do something special for all nan slices.
+ mask = mask.all(axis=axis)
+ ind = np.argmin(a, axis)
+ if mask.any():
+ warnings.warn("All NaN axis detected.", NanWarning)
+ ind = _copyto(ind, np.iinfo(np.intp).min, mask)
+ return ind
+
+
+def nanargmax(a, axis=None):
+ """
+ Return the indices of the maximum values in the specified axis ignoring
+ NaNs. For all-NaN slices the negative number ``np.iinfo('intp').min`` is
+ returned; its exact value is platform dependent. Warning: the results
+ cannot be trusted if a slice contains only NaNs and -Infs.
+
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : int, optional
+ Axis along which to operate. By default flattened input is used.
+
+ Returns
+ -------
+ index_array : ndarray
+ An array of indices or a single index value.
+
+ See Also
+ --------
+ argmax, nanargmin
+
+ Examples
+ --------
+ >>> a = np.array([[np.nan, 4], [2, 3]])
+ >>> np.argmax(a)
+ 0
+ >>> np.nanargmax(a)
+ 1
+ >>> np.nanargmax(a, axis=0)
+ array([1, 0])
+ >>> np.nanargmax(a, axis=1)
+ array([1, 1])
+
+ """
+ a, mask = _replace_nan(a, -np.inf)
+ if mask is None:
+ return np.argmax(a, axis)
+ # May later want to do something special for all nan slices.
+ mask = mask.all(axis=axis)
+ ind = np.argmax(a, axis)
+ if mask.any():
+ warnings.warn("All NaN axis detected.", NanWarning)
+ ind = _copyto(ind, np.iinfo(np.intp).min, mask)
+ return ind
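
Under this scheme an all-NaN slice produces the platform-dependent sentinel rather than an exception. A standalone sketch mirroring the nanargmin logic above:

    import numpy as np

    a = np.array([[np.nan, np.nan], [2.0, np.nan]])
    mask = np.isnan(a)
    filled = np.where(mask, np.inf, a)       # NaN -> +inf for argmin
    ind = np.argmin(filled, axis=0)
    ind[mask.all(axis=0)] = np.iinfo(np.intp).min   # stamp all-NaN columns
    print(ind)     # e.g. [1 -9223372036854775808] on a 64-bit platform
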
+
+
+def nansum(a, axis=None, dtype=None, out=None, keepdims=0):
+ """
+ Return the sum of array elements over a given axis treating
+ Not a Numbers (NaNs) as zero.
+
+ FutureWarning: In Numpy versions <= 1.8 NaN is returned for slices that
+ are all-NaN or empty. In later versions zero will be returned.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose sum is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : int, optional
+ Axis along which the sum is computed. The default is to compute
+ the sum of the flattened array.
+ dtype : data-type, optional
+ Type to use in computing the sum. For integer inputs, the default
+ is the same as `int64`. For inexact inputs, it must be inexact.
+
+ .. versionadded:: 1.8.0
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``. If provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary.
+ See `doc.ufuncs` for details. The casting of NaN to integer can
+ yield unexpected results.
+
+ .. versionadded:: 1.8.0
+ keepdims : bool, optional
+ If True, the axes which are reduced are left in the result as
+ dimensions with size one. With this option, the result will
+ broadcast correctly against the original `arr`.
+
+ .. versionadded:: 1.8.0
+
+ Returns
+ -------
+ y : ndarray or numpy scalar
+
+ See Also
+ --------
+ numpy.sum : Sum across array propagating NaNs.
+ isnan : Show which elements are NaN.
+ isfinite: Show which elements are not NaN or +/-inf.
+
+ Notes
+ -----
+ Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
+ If positive or negative infinity are present the result is positive or
+ negative infinity. But if both positive and negative infinity are present,
+ the result is Not A Number (NaN).
+
+ Arithmetic is modular when using integer types (all elements of `a` must
+ be finite i.e. no elements that are NaNs, positive infinity and negative
+ infinity because NaNs are floating point types), and no error is raised
+ on overflow.
+
+
+ Examples
+ --------
+ >>> np.nansum(1)
+ 1
+ >>> np.nansum([1])
+ 1
+ >>> np.nansum([1, np.nan])
+ 1.0
+ >>> a = np.array([[1, 1], [1, np.nan]])
+ >>> np.nansum(a)
+ 3.0
+ >>> np.nansum(a, axis=0)
+ array([ 2., 1.])
+ >>> np.nansum([1, np.nan, np.inf])
+ inf
+ >>> np.nansum([1, np.nan, np.NINF])
+ -inf
+ >>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
+ nan
+
+ """
+ a, mask = _replace_nan(a, 0)
+ # In version 1.9 uncomment the following line and delete the rest.
+ #return a.sum(axis, dtype, out, keepdims)
+ warnings.warn("In Numpy 1.9 the sum along empty slices will be zero.",
+ FutureWarning)
+
+ if mask is None:
+ return a.sum(axis, dtype, out, keepdims)
+ mask = mask.all(axis, keepdims=keepdims)
+ tot = np.add.reduce(a, axis, dtype, out, keepdims)
+ if mask.any():
+ tot = _copyto(tot, np.nan, mask)
+ return tot
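
The body above is two reductions: a sum over zero-filled data, plus an all-NaN mask used to stamp NaN back into fully-missing slices (the pre-1.9 semantics this warning announces will change). A standalone sketch:

    import numpy as np

    a = np.array([[1.0, np.nan], [np.nan, np.nan]])
    filled = np.where(np.isnan(a), 0.0, a)

    tot = filled.sum(axis=1)                 # [ 1.  0.]
    all_nan = np.isnan(a).all(axis=1)        # [False  True]
    tot[all_nan] = np.nan                    # all-NaN rows stay NaN, for now
    print(tot)                               # [  1.  nan]
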
+
+
+def nanmean(a, axis=None, dtype=None, out=None, keepdims=False):
+ """
+ Compute the arithmetic mean along the specified axis, ignoring NaNs.
+
+ Returns the average of the array elements. The average is taken over
+ the flattened array by default, otherwise over the specified axis.
+ `float64` intermediate and return values are used for integer inputs.
+
+ For all-NaN slices, NaN is returned and a `NanWarning` is raised.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose mean is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : int, optional
+ Axis along which the means are computed. The default is to compute
+ the mean of the flattened array.
+ dtype : data-type, optional
+ Type to use in computing the mean. For integer inputs, the default
+ is `float64`; for inexact inputs, it is the same as the
+ input dtype.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary.
+ See `doc.ufuncs` for details.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `arr`.
+
+ Returns
+ -------
+ m : ndarray, see dtype parameter above
+ If `out=None`, returns a new array containing the mean values,
+ otherwise a reference to the output array is returned. NaN is
+ returned for slices that contain only NaNs.
+
+ See Also
+ --------
+ average : Weighted average
+ mean : Arithmetic mean taken while not ignoring NaNs
+ var, nanvar
+
+ Notes
+ -----
+ The arithmetic mean is the sum of the non-NaN elements along the axis
+ divided by the number of non-NaN elements.
+
+ Note that for floating-point input, the mean is computed using the
+ same precision the input has. Depending on the input data, this can
+ cause the results to be inaccurate, especially for `float32`.
+ Specifying a higher-precision accumulator using the `dtype` keyword
+ can alleviate this issue.
+
+ Examples
+ --------
+ >>> a = np.array([[1, np.nan], [3, 4]])
+ >>> np.nanmean(a)
+ 2.6666666666666665
+ >>> np.nanmean(a, axis=0)
+ array([ 2., 4.])
+ >>> np.nanmean(a, axis=1)
+ array([ 1., 3.5])
+
+ """
+ arr, mask = _replace_nan(a, 0)
+ if mask is None:
+ return np.mean(arr, axis, dtype=dtype, out=out, keepdims=keepdims)
+
+ if dtype is not None:
+ dtype = np.dtype(dtype)
+ if dtype is not None and not issubclass(dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then dtype must be inexact")
+ if out is not None and not issubclass(out.dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then out must be inexact")
+
+ # The warning context speeds things up.
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore')
+ cnt = np.add.reduce(~mask, axis, dtype=np.intp, keepdims=keepdims)
+ tot = np.add.reduce(arr, axis, dtype=dtype, out=out, keepdims=keepdims)
+ avg = _divide_by_count(tot, cnt, out=out)
+
+ isbad = (cnt == 0)
+ if isbad.any():
+ warnings.warn("Mean of empty slice", NanWarning)
+ # NaN is the only possible bad value, so no further
+ # action is needed to handle bad results.
+ return avg
+
+
+def nanvar(a, axis=None, dtype=None, out=None, ddof=0,
+ keepdims=False):
+ """
+ Compute the variance along the specified axis, while ignoring NaNs.
+
+ Returns the variance of the array elements, a measure of the spread of a
+ distribution. The variance is computed for the flattened array by
+ default, otherwise over the specified axis.
+
+ For all-NaN slices, NaN is returned and a `NanWarning` is raised.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose variance is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : int, optional
+ Axis along which the variance is computed. The default is to compute
+ the variance of the flattened array.
+ dtype : data-type, optional
+ Type to use in computing the variance. For arrays of integer type
+ the default is `float64`; for arrays of float types it is the same as
+ the array type.
+ out : ndarray, optional
+ Alternate output array in which to place the result. It must have
+ the same shape as the expected output, but the type is cast if
+ necessary.
+ ddof : int, optional
+ "Delta Degrees of Freedom": the divisor used in the calculation is
+ ``N - ddof``, where ``N`` represents the number of non-NaN
+ elements. By default `ddof` is zero.
+ keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+ Returns
+ -------
+ variance : ndarray, see dtype parameter above
+ If `out` is None, return a new array containing the variance,
+ otherwise return a reference to the output array. If ddof is >= the
+ number of non-NaN elements in a slice or the slice contains only
+ NaNs, then the result for that slice is NaN.
+
+ See Also
+ --------
+ std : Standard deviation
+ mean : Average
+ var : Variance while not ignoring NaNs
+ nanstd, nanmean
+ numpy.doc.ufuncs : Section "Output arguments"
+
+ Notes
+ -----
+ The variance is the average of the squared deviations from the mean,
+ i.e., ``var = mean(abs(x - x.mean())**2)``.
+
+ The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
+ If, however, `ddof` is specified, the divisor ``N - ddof`` is used
+ instead. In standard statistical practice, ``ddof=1`` provides an
+ unbiased estimator of the variance of a hypothetical infinite population.
+ ``ddof=0`` provides a maximum likelihood estimate of the variance for
+ normally distributed variables.
+
+ Note that for complex numbers, the absolute value is taken before
+ squaring, so that the result is always real and nonnegative.
+
+    For floating-point input, the variance is computed using the same
+    precision the input has. Depending on the input data, this can cause
+    the results to be inaccurate, especially for `float32`. Specifying a
+    higher-accuracy accumulator using the ``dtype`` keyword can alleviate
+    this issue.
+
+ Examples
+ --------
+ >>> a = np.array([[1, np.nan], [3, 4]])
+    >>> np.nanvar(a)
+ 1.5555555555555554
+ >>> np.nanvar(a, axis=0)
+ array([ 1., 0.])
+ >>> np.nanvar(a, axis=1)
+ array([ 0., 0.25])
+
+ """
+ arr, mask = _replace_nan(a, 0)
+ if mask is None:
+ return np.var(arr, axis, dtype=dtype, out=out, keepdims=keepdims)
+
+ if dtype is not None:
+ dtype = np.dtype(dtype)
+ if dtype is not None and not issubclass(dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then dtype must be inexact")
+ if out is not None and not issubclass(out.dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then out must be inexact")
+
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore')
+
+ # Compute mean
+ cnt = np.add.reduce(~mask, axis, dtype=np.intp, keepdims=True)
+ tot = np.add.reduce(arr, axis, dtype=dtype, keepdims=True)
+ avg = np.divide(tot, cnt, out=tot)
+
+ # Compute squared deviation from mean.
+ x = arr - avg
+ np.copyto(x, 0, where=mask)
+ if issubclass(arr.dtype.type, np.complexfloating):
+ sqr = np.multiply(x, x.conj(), out=x).real
+ else:
+ sqr = np.multiply(x, x, out=x)
+
+ # adjust cnt.
+ if not keepdims:
+ cnt = cnt.squeeze(axis)
+ cnt -= ddof
+
+ # Compute variance.
+ var = np.add.reduce(sqr, axis, dtype=dtype, out=out, keepdims=keepdims)
+ var = _divide_by_count(var, cnt)
+
+ isbad = (cnt <= 0)
+ if isbad.any():
+ warnings.warn("Degrees of freedom <= 0 for slice.", NanWarning)
+ # NaN, inf, or negative numbers are all possible bad
+ # values, so explicitly replace them with NaN.
+ var = _copyto(var, np.nan, isbad)
+ return var
+
+
+def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
+ """
+ Compute the standard deviation along the specified axis, while
+ ignoring NaNs.
+
+ Returns the standard deviation, a measure of the spread of a distribution,
+ of the non-NaN array elements. The standard deviation is computed for the
+ flattened array by default, otherwise over the specified axis.
+
+ For all-NaN slices, NaN is returned and a `NanWarning` is raised.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Calculate the standard deviation of the non-NaN values.
+ axis : int, optional
+ Axis along which the standard deviation is computed. The default is
+ to compute the standard deviation of the flattened array.
+ dtype : dtype, optional
+ Type to use in computing the standard deviation. For arrays of
+        integer type the default is float64; for arrays of float types it is
+ the same as the array type.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output but the type (of the calculated
+ values) will be cast if necessary.
+ ddof : int, optional
+ Means Delta Degrees of Freedom. The divisor used in calculations
+ is ``N - ddof``, where ``N`` represents the number of non-NaN
+ elements. By default `ddof` is zero.
+ keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+ Returns
+ -------
+ standard_deviation : ndarray, see dtype parameter above.
+ If `out` is None, return a new array containing the standard
+ deviation, otherwise return a reference to the output array. If
+ ddof is >= the number of non-NaN elements in a slice or the slice
+ contains only NaNs, then the result for that slice is NaN.
+
+ See Also
+ --------
+ var, mean, std
+ nanvar, nanmean
+ numpy.doc.ufuncs : Section "Output arguments"
+
+ Notes
+ -----
+ The standard deviation is the square root of the average of the squared
+ deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
+
+ The average squared deviation is normally calculated as
+ ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
+ the divisor ``N - ddof`` is used instead. In standard statistical
+ practice, ``ddof=1`` provides an unbiased estimator of the variance
+ of the infinite population. ``ddof=0`` provides a maximum likelihood
+ estimate of the variance for normally distributed variables. The
+ standard deviation computed in this function is the square root of
+ the estimated variance, so even with ``ddof=1``, it will not be an
+ unbiased estimate of the standard deviation per se.
+
+ Note that, for complex numbers, `std` takes the absolute
+ value before squaring, so that the result is always real and nonnegative.
+
+    For floating-point input, the *std* is computed using the same
+    precision the input has. Depending on the input data, this can cause
+    the results to be inaccurate, especially for float32. Specifying a
+    higher-accuracy accumulator using the `dtype` keyword can alleviate
+    this issue.
+
+ Examples
+ --------
+ >>> a = np.array([[1, np.nan], [3, 4]])
+ >>> np.nanstd(a)
+ 1.247219128924647
+ >>> np.nanstd(a, axis=0)
+ array([ 1., 0.])
+ >>> np.nanstd(a, axis=1)
+ array([ 0., 0.5])
+
+ """
+ var = nanvar(a, axis, dtype, out, ddof, keepdims)
+ if isinstance(var, np.ndarray):
+ std = np.sqrt(var, out=var)
+ else:
+ std = var.dtype.type(np.sqrt(var))
+ return std
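
The nan-aware reductions added above all share one mechanism: zero out the NaNs, count the valid entries per slice, and divide. A minimal standalone sketch of that pattern (plain NumPy only; `_replace_nan`, `_divide_by_count` and `NanWarning` are internals of the new module and not used here):

    import numpy as np

    def nanmean_sketch(a, axis=None):
        # Mask the NaNs, sum with them replaced by zero, then divide
        # by the per-slice count of valid (non-NaN) elements.
        a = np.asanyarray(a, dtype=np.float64)
        mask = np.isnan(a)
        total = np.where(mask, 0.0, a).sum(axis)
        count = (~mask).sum(axis)
        return total / count  # 0/0 -> NaN for all-NaN slices

    a = np.array([[1, np.nan], [3, 4]])
    print(nanmean_sketch(a))          # 2.6666666666666665
    print(nanmean_sketch(a, axis=0))  # [ 2.  4.]
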
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index b63003f80..6873a4785 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -1,9 +1,7 @@
-__all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
- 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
- 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
+from __future__ import division, absolute_import, print_function
import numpy as np
-import format
+from . import format
import sys
import os
import re
@@ -13,21 +11,31 @@ import warnings
import weakref
from operator import itemgetter
-from cPickle import load as _cload, loads
-from _datasource import DataSource
-from _compiled_base import packbits, unpackbits
+from ._datasource import DataSource
+from ._compiled_base import packbits, unpackbits
-from _iotools import LineSplitter, NameValidator, StringConverter, \
- ConverterError, ConverterLockError, ConversionWarning, \
- _is_string_like, has_nested_fields, flatten_dtype, \
- easy_dtype, _bytes_to_name
+from ._iotools import (
+ LineSplitter, NameValidator, StringConverter,
+ ConverterError, ConverterLockError, ConversionWarning,
+ _is_string_like, has_nested_fields, flatten_dtype,
+ easy_dtype, _bytes_to_name
+ )
-from numpy.compat import asbytes, asstr, asbytes_nested, bytes
+from numpy.compat import (
+ asbytes, asstr, asbytes_nested, bytes, basestring, unicode
+ )
if sys.version_info[0] >= 3:
- from io import BytesIO
+ import pickle
else:
- from cStringIO import StringIO as BytesIO
+ import cPickle as pickle
+ from future_builtins import map
+
+loads = pickle.loads
+
+__all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
+ 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
+ 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
_string_like = _is_string_like
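
The reshuffled imports above gate `pickle` on the interpreter version so a single code path serves both; a minimal sketch of the same idiom (stdlib modules only):

    import sys

    if sys.version_info[0] >= 3:
        import pickle
    else:
        import cPickle as pickle  # C implementation on Python 2

    # The same name works afterwards on either interpreter.
    data = pickle.loads(pickle.dumps({'answer': 42}))
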
@@ -119,8 +127,7 @@ class BagObj(object):
def zipfile_factory(*args, **kwargs):
import zipfile
- if sys.version_info >= (2, 5):
- kwargs['allowZip64'] = True
+ kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
@@ -235,12 +242,14 @@ class NpzFile(object):
member = 1
key += '.npy'
if member:
- bytes = self.zip.read(key)
- if bytes.startswith(format.MAGIC_PREFIX):
- value = BytesIO(bytes)
- return format.read_array(value)
+ bytes = self.zip.open(key)
+ magic = bytes.read(len(format.MAGIC_PREFIX))
+ bytes.close()
+ if magic == format.MAGIC_PREFIX:
+ bytes = self.zip.open(key)
+ return format.read_array(bytes)
else:
- return bytes
+ return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
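
The rewritten branch avoids decompressing a whole archive member just to identify it: the member is opened as a file-like object, only the magic bytes are read, and it is reopened for `format.read_array`. A rough sketch of the same peek-at-magic pattern with the stdlib, assuming the documented ``.npy`` magic prefix:

    import zipfile

    MAGIC_PREFIX = b'\x93NUMPY'  # leading bytes of the .npy format

    def is_npy_member(archive_path, key):
        # Read just len(MAGIC_PREFIX) bytes of the member instead of
        # pulling the entire (possibly huge) entry into memory.
        with zipfile.ZipFile(archive_path) as zf:
            with zf.open(key) as member:
                return member.read(len(MAGIC_PREFIX)) == MAGIC_PREFIX
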
@@ -281,7 +290,7 @@ def load(file, mmap_mode=None):
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
- mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
+ mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap` for a detailed description of the modes).
A memory-mapped array is kept on disk. However, it can be accessed
@@ -368,17 +377,22 @@ def load(file, mmap_mode=None):
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
- if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
+ if magic.startswith(_ZIP_PREFIX):
+ # zip-file (assume .npz)
+ # Transfer file ownership to NpzFile
+ tmp = own_fid
own_fid = False
- return NpzFile(fid, own_fid=own_fid)
- elif magic == format.MAGIC_PREFIX: # .npy file
+ return NpzFile(fid, own_fid=tmp)
+ elif magic == format.MAGIC_PREFIX:
+ # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
- else: # Try a pickle
+ else:
+ # Try a pickle
try:
- return _cload(fid)
+ return pickle.load(fid)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
@@ -568,7 +582,7 @@ def _savez(file, args, kwds, compress):
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
- for key, val in namedict.iteritems():
+ for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
@@ -778,15 +792,15 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
- for i in xrange(skiprows):
- fh.next()
+ for i in range(skiprows):
+ next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
- first_line = fh.next()
+ first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
@@ -802,12 +816,12 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
- converters = [defconv for i in xrange(N)]
+ converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
- for i, conv in (user_converters or {}).iteritems():
+ for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
@@ -988,7 +1002,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
- elif hasattr(fname, 'seek'):
+ elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
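
Checking for `write` instead of `seek` means `savetxt` now accepts any writable duck-typed object, not only real, seekable files. A small sketch (the `Collector` class is purely illustrative):

    import numpy as np

    class Collector(object):
        # Minimal writable object: savetxt only requires .write().
        def __init__(self):
            self.chunks = []
        def write(self, s):
            self.chunks.append(s)

    sink = Collector()
    np.savetxt(sink, np.array([[1, 2], [3, 4]]), fmt='%d')
    print(sink.chunks)  # ['1 2\n', '3 4\n'] (bytes on Python 3)
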
@@ -1016,7 +1030,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
- elif type(fmt) is str:
+ elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
@@ -1337,14 +1351,14 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
- for i in xrange(skip_header):
- fhd.next()
+ for i in range(skip_header):
+ next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
- first_line = fhd.next()
+ first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
@@ -1598,12 +1612,12 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
- current_column = map(itemgetter(i), rows)
+ current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
- current_column = itertools.imap(itemgetter(i), rows)
+ current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
@@ -1649,19 +1663,20 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
-# if loose:
-# conversionfuncs = [conv._loose_call for conv in converters]
-# else:
-# conversionfuncs = [conv._strict_call for conv in converters]
-# for (i, vals) in enumerate(rows):
-# rows[i] = tuple([convert(val)
-# for (convert, val) in zip(conversionfuncs, vals)])
+ #
+ # if loose:
+ # conversionfuncs = [conv._loose_call for conv in converters]
+ # else:
+ # conversionfuncs = [conv._strict_call for conv in converters]
+ # for (i, vals) in enumerate(rows):
+ # rows[i] = tuple([convert(val)
+ # for (convert, val) in zip(conversionfuncs, vals)])
if loose:
- rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
- for (i, converter) in enumerate(converters)])
+ rows = list(zip(*[[converter._loose_call(_r) for _r in map(itemgetter(i), rows)]
+ for (i, converter) in enumerate(converters)]))
else:
- rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
- for (i, converter) in enumerate(converters)])
+ rows = list(zip(*[[converter._strict_call(_r) for _r in map(itemgetter(i), rows)]
+ for (i, converter) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
@@ -1686,8 +1701,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
- ddtype = zip(names, column_types)
- mdtype = zip(names, [np.bool] * len(column_types))
+ ddtype = list(zip(names, column_types))
+ mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
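
The loose/strict conversion above applies each converter column-wise and then transposes the columns back into rows with ``zip(*...)``; on Python 3 ``zip`` returns a one-shot iterator, hence the surrounding ``list()``. A toy version of the idiom:

    # Convert column-by-column, then transpose back to row tuples.
    rows = [('1', '2.5'), ('3', '4.5')]
    converters = [int, float]
    columns = [[conv(row[i]) for row in rows]
               for i, conv in enumerate(converters)]
    rows = list(zip(*columns))  # list() needed on Python 3
    print(rows)  # [(1, 2.5), (3, 4.5)]
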
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index fad06f4df..5402adc6d 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -1,6 +1,8 @@
"""
Functions to operate on polynomials.
+
"""
+from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
@@ -167,7 +169,7 @@ def roots(p):
Raises
------
- ValueError :
+ ValueError
When `p` cannot be converted to a rank-1 array.
See also
@@ -564,9 +566,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
- raise TypeError, "expected a 1-d array for weights"
+ raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0] :
- raise TypeError, "expected w and y to have the same length"
+ raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
@@ -903,7 +905,7 @@ def _raise_power(astr, wrap=70):
line1 = ''
line2 = ''
output = ' '
- while 1:
+ while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index 0127df9f9..827e60d70 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -4,8 +4,8 @@ Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for matplotlib.
They have been rewritten and extended for convenience.
-
"""
+from __future__ import division, absolute_import, print_function
import sys
import itertools
@@ -15,6 +15,10 @@ from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
+from numpy.compat import basestring
+
+if sys.version_info[0] < 3:
+ from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
@@ -287,7 +291,7 @@ def izip_records(seqarrays, fill_value=None, flatten=True):
zipfunc = _izip_fields
#
try:
- for tup in itertools.izip(*iters):
+ for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
@@ -317,7 +321,7 @@ def _fix_defaults(output, defaults=None):
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
- for (k, v) in (defaults or {}).iteritems():
+ for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
@@ -401,7 +405,7 @@ def merge_arrays(seqarrays,
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
- seqarrays = map(np.asanyarray, seqarrays)
+ seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
@@ -412,7 +416,7 @@ def merge_arrays(seqarrays,
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
- for (a, n) in itertools.izip(seqarrays, sizes):
+ for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
@@ -441,7 +445,7 @@ def merge_arrays(seqarrays,
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
- for (a, n) in itertools.izip(seqarrays, sizes):
+ for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
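
A note on the `future_builtins` import added near the top of this file: on Python 2 it replaces the eager, list-returning `zip` with the iterator version, so the merge loops above behave identically on both interpreters. Sketch:

    import sys

    if sys.version_info[0] < 3:
        from future_builtins import zip  # iterator zip, as on Python 3

    # Pairs are produced lazily on both interpreters; long sequences
    # of arrays are never materialized as a full list of tuples.
    pairs = zip(['x', 'y'], [3, 5])
    print(list(pairs))  # [('x', 3), ('y', 5)]
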
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index 48ed1dc25..2aa08d0ea 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -15,6 +15,7 @@ Similarly, `sqrt`, other base logarithms, `power` and trig functions are
correctly handled. See their respective docstrings for specific examples.
"""
+from __future__ import division, absolute_import, print_function
__all__ = ['sqrt', 'log', 'log2', 'logn','log10', 'power', 'arccos',
'arcsin', 'arctanh']
diff --git a/numpy/lib/setup.py b/numpy/lib/setup.py
index e85fdb517..edc653f9f 100644
--- a/numpy/lib/setup.py
+++ b/numpy/lib/setup.py
@@ -1,3 +1,5 @@
+from __future__ import division, print_function
+
from os.path import join
def configuration(parent_package='',top_path=None):
diff --git a/numpy/lib/setupscons.py b/numpy/lib/setupscons.py
deleted file mode 100644
index 4f31f6e8a..000000000
--- a/numpy/lib/setupscons.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from os.path import join
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
-
- config = Configuration('lib',parent_package,top_path)
-
- config.add_sconscript('SConstruct',
- source_files = [join('src', '_compiled_base.c')])
- config.add_data_dir('tests')
-
- return config
-
-if __name__=='__main__':
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index aefc81c2b..c63f8140d 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
__all__ = ['column_stack','row_stack', 'dstack','array_split','split','hsplit',
'vsplit','dsplit','apply_over_axes','expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap']
@@ -53,14 +55,11 @@ def apply_along_axis(func1d,axis,arr,*args):
For a function that doesn't return a scalar, the number of dimensions in
`outarr` is the same as `arr`.
- >>> def new_func(a):
- ... \"\"\"Divide elements of a by 2.\"\"\"
- ... return a * 0.5
- >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
- >>> np.apply_along_axis(new_func, 0, b)
- array([[ 0.5, 1. , 1.5],
- [ 2. , 2.5, 3. ],
- [ 3.5, 4. , 4.5]])
+ >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
+ >>> np.apply_along_axis(sorted, 1, b)
+ array([[1, 7, 8],
+ [3, 4, 9],
+ [2, 5, 6]])
"""
arr = asarray(arr)
@@ -72,7 +71,7 @@ def apply_along_axis(func1d,axis,arr,*args):
% (axis,nd))
ind = [0]*(nd-1)
i = zeros(nd,'O')
- indlist = range(nd)
+ indlist = list(range(nd))
indlist.remove(axis)
i[axis] = slice(None,None)
outshape = asarray(arr.shape).take(indlist)
@@ -343,7 +342,7 @@ def dstack(tup):
[[3, 4]]])
"""
- return _nx.concatenate(map(atleast_3d,tup),2)
+ return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
@@ -641,10 +640,9 @@ def get_array_prepare(*args):
In case of ties, leftmost wins. If no wrapper is found, return None
"""
- wrappers = [(getattr(x, '__array_priority__', 0), -i,
+ wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
x.__array_prepare__) for i, x in enumerate(args)
- if hasattr(x, '__array_prepare__')]
- wrappers.sort()
+ if hasattr(x, '__array_prepare__'))
if wrappers:
return wrappers[-1][-1]
return None
@@ -654,10 +652,9 @@ def get_array_wrap(*args):
In case of ties, leftmost wins. If no wrapper is found, return None
"""
- wrappers = [(getattr(x, '__array_priority__', 0), -i,
+ wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
x.__array_wrap__) for i, x in enumerate(args)
- if hasattr(x, '__array_wrap__')]
- wrappers.sort()
+ if hasattr(x, '__array_wrap__'))
if wrappers:
return wrappers[-1][-1]
return None
@@ -679,12 +676,10 @@ def kron(a,b):
See Also
--------
-
outer : The outer product
Notes
-----
-
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
@@ -752,7 +747,7 @@ def kron(a,b):
nd = nda
result = outer(a,b).reshape(as_+bs)
axis = nd-1
- for _ in xrange(nd):
+ for _ in range(nd):
result = concatenate(result, axis=axis)
wrapper = get_array_prepare(a, b)
if wrapper is not None:
@@ -835,5 +830,5 @@ def tile(A, reps):
dim_in = shape[i]
dim_out = dim_in*nrep
shape[i] = dim_out
- n /= max(dim_in,1)
+ n //= max(dim_in, 1)
return c.reshape(shape)
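
With ``from __future__ import division`` in effect at the top of the file, ``/`` always returns a float, so the running element count in `tile` has to use floor division to remain a valid integer. A one-liner showing the difference:

    from __future__ import division

    n, dim_in = 30, 5
    print(n / dim_in)   # 6.0 -- true division, float result
    print(n // dim_in)  # 6   -- floor division, stays an int
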
diff --git a/numpy/lib/src/_compiled_base.c b/numpy/lib/src/_compiled_base.c
index 41caca962..66a765868 100644
--- a/numpy/lib/src/_compiled_base.c
+++ b/numpy/lib/src/_compiled_base.c
@@ -7,9 +7,6 @@
#include "numpy/ufuncobject.h"
#include "string.h"
-#if (PY_VERSION_HEX < 0x02060000)
-#define Py_TYPE(o) (((PyObject*)(o))->ob_type)
-#endif
static npy_intp
incr_slot_(double x, double *bins, npy_intp lbins)
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 7358be222..7b6b06fdc 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -5,6 +5,8 @@ An explanation of strides can be found in the "ndarray.rst" file in the
NumPy reference guide.
"""
+from __future__ import division, absolute_import, print_function
+
import numpy as np
__all__ = ['broadcast_arrays']
@@ -25,7 +27,10 @@ def as_strided(x, shape=None, strides=None):
interface['shape'] = tuple(shape)
if strides is not None:
interface['strides'] = tuple(strides)
- return np.asarray(DummyArray(interface, base=x))
+ array = np.asarray(DummyArray(interface, base=x))
+ # Make sure dtype is correct in case of custom dtype
+ array.dtype = x.dtype
+ return array
def broadcast_arrays(*args):
"""
@@ -58,7 +63,7 @@ def broadcast_arrays(*args):
Here is a useful idiom for getting contiguous copies instead of
non-contiguous views.
- >>> map(np.array, np.broadcast_arrays(x, y))
+ >>> [np.array(a) for a in np.broadcast_arrays(x, y)]
[array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]), array([[1, 1, 1],
@@ -66,7 +71,7 @@ def broadcast_arrays(*args):
[3, 3, 3]])]
"""
- args = map(np.asarray, args)
+ args = [np.asarray(_m) for _m in args]
shapes = [x.shape for x in args]
if len(set(shapes)) == 1:
# Common case where nothing needs to be broadcasted.
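
The dtype reassignment added to `as_strided` above matters for custom dtypes that do not survive the ``__array_interface__`` round trip intact. For context, a hedged usage sketch of `as_strided` itself (a power-user API: wrong shapes or strides read arbitrary memory):

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    x = np.arange(10)
    # Zero-stride view repeating x[0] five times without copying.
    v = as_strided(x, shape=(5,), strides=(0,))
    print(v)                   # [0 0 0 0 0]
    print(v.dtype == x.dtype)  # True -- preserved by the fix above
    x[0] = 9
    print(v)                   # [9 9 9 9 9] -- a view, not a copy
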
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index ed5af516f..8d3e32b1b 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -1,15 +1,21 @@
+from __future__ import division, absolute_import, print_function
+
import os
+import sys
+import numpy.lib._datasource as datasource
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
-from urlparse import urlparse
-from urllib2 import URLError
-import urllib2
-
-from numpy.testing import *
-
from numpy.compat import asbytes
+from numpy.testing import *
-import numpy.lib._datasource as datasource
+if sys.version_info[0] >= 3:
+ import urllib.request as urllib_request
+ from urllib.parse import urlparse
+ from urllib.error import URLError
+else:
+ import urllib2 as urllib_request
+ from urlparse import urlparse
+ from urllib2 import URLError
def urlopen_stub(url, data=None):
'''Stub to replace urlopen for testing.'''
@@ -19,14 +25,17 @@ def urlopen_stub(url, data=None):
else:
raise URLError('Name or service not known')
+# setup and teardown
old_urlopen = None
+
def setup():
global old_urlopen
- old_urlopen = urllib2.urlopen
- urllib2.urlopen = urlopen_stub
+
+ old_urlopen = urllib_request.urlopen
+ urllib_request.urlopen = urlopen_stub
def teardown():
- urllib2.urlopen = old_urlopen
+ urllib_request.urlopen = old_urlopen
# A valid website for more robust testing
http_path = 'http://www.google.com/'
@@ -92,7 +101,7 @@ class TestDataSourceOpen(TestCase):
self.assertRaises(IOError, self.ds.open, url)
try:
self.ds.open(url)
- except IOError, e:
+ except IOError as e:
# Regression test for bug fixed in r4342.
assert_(e.errno is None)
@@ -300,7 +309,7 @@ class TestRepositoryExists(TestCase):
# would do.
scheme, netloc, upath, pms, qry, frg = urlparse(localfile)
local_path = os.path.join(self.repos._destpath, netloc)
- os.mkdir(local_path, 0700)
+ os.mkdir(local_path, 0o0700)
tmpfile = valid_textfile(local_path)
assert_(self.repos.exists(tmpfile))
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index a0f1546e2..421616ccd 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -1,19 +1,13 @@
-import sys
-if sys.version_info[0] >= 3:
- from io import BytesIO
- def StringIO(s=""):
- return BytesIO(asbytes(s))
-else:
- from StringIO import StringIO
+from __future__ import division, absolute_import, print_function
-from datetime import date
+import sys
import time
+from datetime import date
import numpy as np
from numpy.lib._iotools import LineSplitter, NameValidator, StringConverter, \
has_nested_fields, easy_dtype, flatten_dtype
from numpy.testing import *
-
from numpy.compat import asbytes, asbytes_nested
class TestLineSplitter(TestCase):
@@ -181,11 +175,11 @@ class TestStringConverter(TestCase):
StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
convert = StringConverter(dateparser, date(2000, 1, 1))
test = convert(asbytes('2001-01-01'))
- assert_equal(test, date(2001, 01, 01))
+ assert_equal(test, date(2001, 1, 1))
test = convert(asbytes('2009-01-01'))
- assert_equal(test, date(2009, 01, 01))
+ assert_equal(test, date(2009, 1, 1))
test = convert(asbytes(''))
- assert_equal(test, date(2000, 01, 01))
+ assert_equal(test, date(2000, 1, 1))
#
def test_string_to_object(self):
"Make sure that string-to-object functions are properly recognized"
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 01cb5be4c..ec96f5c8c 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -1,6 +1,7 @@
-'''
-Tests for the pad functions.
-'''
+"""Tests for the pad functions.
+
+"""
+from __future__ import division, absolute_import, print_function
from numpy.testing import TestCase, run_module_suite, assert_array_equal
from numpy.testing import assert_raises, assert_array_almost_equal
@@ -177,21 +178,21 @@ class TestStatistic(TestCase):
a = [[4, 5, 6]]
a = pad(a, (5, 7), 'mean', stat_length=2)
b = np.array([
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
-
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
-
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5],
- [4, 4, 4, 4, 4, 4, 5, 6, 5, 5, 5, 5, 5, 5, 5]])
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]])
assert_array_equal(a, b)
def test_check_mean_2(self):
@@ -485,6 +486,14 @@ class TestEdge(TestCase):
assert_array_equal(a, b)
+class TestZeroPadWidth(TestCase):
+ def test_zero_pad_width(self):
+ arr = np.arange(30)
+ arr = np.reshape(arr, (6, 5))
+ for pad_width in (0, (0, 0), ((0, 0), (0, 0))):
+ assert_array_equal(arr, pad(arr, pad_width, mode='constant'))
+
+
class ValueError1(TestCase):
def test_check_simple(self):
arr = np.arange(30)
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index b0d2ca7c3..4ba6529e4 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -1,6 +1,7 @@
-""" Test functions for 1D array set operations.
+"""Test functions for 1D array set operations.
"""
+from __future__ import division, absolute_import, print_function
from numpy.testing import *
import numpy as np
@@ -60,8 +61,8 @@ class TestSetOps(TestCase):
# test for structured arrays
dt = [('', 'i'), ('', 'i')]
- aa = np.array(zip(a,a), dt)
- bb = np.array(zip(b,b), dt)
+ aa = np.array(list(zip(a,a)), dt)
+ bb = np.array(list(zip(b,b)), dt)
check_all(aa, bb, i1, i2, dt)
@@ -124,8 +125,9 @@ class TestSetOps(TestCase):
# we use two different sizes for the b array here to test the
# two different paths in in1d().
for mult in (1, 10):
- a = np.array([5, 7, 1, 2])
- b = np.array([2, 4, 3, 1, 5] * mult)
+            # One check without np.array, to make sure lists are handled correctly
+ a = [5, 7, 1, 2]
+ b = [2, 4, 3, 1, 5] * mult
ec = np.array([True, False, True, True])
c = in1d(a, b, assume_unique=True)
assert_array_equal(c, ec)
@@ -188,6 +190,28 @@ class TestSetOps(TestCase):
assert_array_equal(c, ec)
+ def test_in1d_invert(self):
+ "Test in1d's invert parameter"
+ # We use two different sizes for the b array here to test the
+ # two different paths in in1d().
+ for mult in (1, 10):
+ a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+ b = [2, 3, 4] * mult
+ assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
+
+ def test_in1d_ravel(self):
+        # Test that in1d ravels its input arrays. This is not documented
+        # behavior, however; the test is to ensure consistency.
+ a = np.arange(6).reshape(2,3)
+ b = np.arange(3,9).reshape(3,2)
+ long_b = np.arange(3, 63).reshape(30,2)
+ ec = np.array([False, False, False, True, True, True])
+
+ assert_array_equal(in1d(a, b, assume_unique=True), ec)
+ assert_array_equal(in1d(a, b, assume_unique=False), ec)
+ assert_array_equal(in1d(a, long_b, assume_unique=True), ec)
+ assert_array_equal(in1d(a, long_b, assume_unique=False), ec)
+
def test_union1d( self ):
a = np.array( [5, 4, 7, 1, 2] )
b = np.array( [2, 4, 3, 3, 2, 1, 5] )
diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py
index c1c59ba31..64ad7f4de 100644
--- a/numpy/lib/tests/test_arrayterator.py
+++ b/numpy/lib/tests/test_arrayterator.py
@@ -1,13 +1,13 @@
+from __future__ import division, absolute_import, print_function
+
from operator import mul
+from functools import reduce
import numpy as np
from numpy.random import randint
from numpy.lib import Arrayterator
from numpy.testing import assert_
-import sys
-if sys.version_info[0] >= 3:
- from functools import reduce
def test():
np.random.seed(np.arange(10))
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index 5fe976143..1a276a429 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
import numpy as np
@@ -39,7 +41,7 @@ class TestFinancial(TestCase):
def test_npv(self):
assert_almost_equal(np.npv(0.05,[-15000,1500,2500,3500,4500,6000]),
- 117.04, 2)
+ 122.89, 2)
def test_mirr(self):
val = [-4500,-800,800,800,600,600,800,800,700,3000]
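
The expected value changes because `np.npv` now treats ``values[0]`` as occurring at time zero (undiscounted) rather than discounting every cash flow by one period. Worked out for the test data, assuming the standard convention ``npv = sum(values[i] / (1 + rate)**i)``:

    rate = 0.05
    values = [-15000, 1500, 2500, 3500, 4500, 6000]
    npv = sum(v / (1 + rate)**i for i, v in enumerate(values))
    print(round(npv, 2))         # 122.89 -- new convention
    print(round(npv / 1.05, 2))  # 117.04 -- old result: one extra discount
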
@@ -122,15 +124,15 @@ class TestFinancial(TestCase):
assert_almost_equal(np.nper(0.075,-2000,0,100000.,[0,1]),
[ 21.5449442 , 20.76156441], 4)
- assert_almost_equal(np.ipmt(0.1/12,range(5), 24, 2000),
+ assert_almost_equal(np.ipmt(0.1/12,list(range(5)), 24, 2000),
[-17.29165168, -16.66666667, -16.03647345,
-15.40102862, -14.76028842], 4)
- assert_almost_equal(np.ppmt(0.1/12,range(5), 24, 2000),
+ assert_almost_equal(np.ppmt(0.1/12,list(range(5)), 24, 2000),
[-74.998201 , -75.62318601, -76.25337923,
-76.88882405, -77.52956425], 4)
- assert_almost_equal(np.ppmt(0.1/12,range(5), 24, 2000, 0,
+ assert_almost_equal(np.ppmt(0.1/12,list(range(5)), 24, 2000, 0,
[0,0,1,'end','begin']),
[-74.998201 , -75.62318601, -75.62318601,
-76.88882405, -76.88882405], 4)
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 213d69760..8b809891f 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -1,12 +1,11 @@
+from __future__ import division, absolute_import, print_function
+
r''' Test the .npy file format.
Set up:
>>> import sys
- >>> if sys.version_info[0] >= 3:
- ... from io import BytesIO as StringIO
- ... else:
- ... from cStringIO import StringIO
+ >>> from io import BytesIO
>>> from numpy.lib import format
>>>
>>> scalars = [
@@ -99,19 +98,19 @@ Test the magic string writing.
Test the magic string reading.
- >>> format.read_magic(StringIO(format.magic(1, 0)))
+ >>> format.read_magic(BytesIO(format.magic(1, 0)))
(1, 0)
- >>> format.read_magic(StringIO(format.magic(0, 0)))
+ >>> format.read_magic(BytesIO(format.magic(0, 0)))
(0, 0)
- >>> format.read_magic(StringIO(format.magic(255, 255)))
+ >>> format.read_magic(BytesIO(format.magic(255, 255)))
(255, 255)
- >>> format.read_magic(StringIO(format.magic(2, 5)))
+ >>> format.read_magic(BytesIO(format.magic(2, 5)))
(2, 5)
Test the header writing.
>>> for arr in basic_arrays + record_arrays:
- ... f = StringIO()
+ ... f = BytesIO()
... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it
... print repr(f.getvalue())
...
@@ -277,22 +276,15 @@ Test the header writing.
"\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
'''
-
import sys
import os
import shutil
import tempfile
-
-if sys.version_info[0] >= 3:
- from io import BytesIO as StringIO
-else:
- from cStringIO import StringIO
+from io import BytesIO
import numpy as np
from numpy.testing import *
-
from numpy.lib import format
-
from numpy.compat import asbytes, asbytes_nested
@@ -414,9 +406,9 @@ record_arrays = [
]
def roundtrip(arr):
- f = StringIO()
+ f = BytesIO()
format.write_array(f, arr)
- f2 = StringIO(f.getvalue())
+ f2 = BytesIO(f.getvalue())
arr2 = format.read_array(f2)
return arr2
@@ -467,7 +459,7 @@ def test_memmap_roundtrip():
def test_write_version_1_0():
- f = StringIO()
+ f = BytesIO()
arr = np.arange(1)
# These should pass.
format.write_array(f, arr, version=(1, 0))
@@ -511,12 +503,12 @@ malformed_magic = asbytes_nested([
def test_read_magic_bad_magic():
for magic in malformed_magic:
- f = StringIO(magic)
+ f = BytesIO(magic)
yield raises(ValueError)(format.read_magic), f
def test_read_version_1_0_bad_magic():
for magic in bad_version_magic + malformed_magic:
- f = StringIO(magic)
+ f = BytesIO(magic)
yield raises(ValueError)(format.read_array), f
def test_bad_magic_args():
@@ -526,29 +518,29 @@ def test_bad_magic_args():
assert_raises(ValueError, format.magic, 1, 256)
def test_large_header():
- s = StringIO()
+ s = BytesIO()
d = {'a':1,'b':2}
format.write_array_header_1_0(s,d)
- s = StringIO()
+ s = BytesIO()
d = {'a':1,'b':2,'c':'x'*256*256}
assert_raises(ValueError, format.write_array_header_1_0, s, d)
def test_bad_header():
# header of length less than 2 should fail
- s = StringIO()
+ s = BytesIO()
assert_raises(ValueError, format.read_array_header_1_0, s)
- s = StringIO(asbytes('1'))
+ s = BytesIO(asbytes('1'))
assert_raises(ValueError, format.read_array_header_1_0, s)
# header shorter than indicated size should fail
- s = StringIO(asbytes('\x01\x00'))
+ s = BytesIO(asbytes('\x01\x00'))
assert_raises(ValueError, format.read_array_header_1_0, s)
# headers without the exact keys required should fail
d = {"shape":(1,2),
"descr":"x"}
- s = StringIO()
+ s = BytesIO()
format.write_array_header_1_0(s,d)
assert_raises(ValueError, format.read_array_header_1_0, s)
@@ -556,7 +548,7 @@ def test_bad_header():
"fortran_order":False,
"descr":"x",
"extrakey":-1}
- s = StringIO()
+ s = BytesIO()
format.write_array_header_1_0(s,d)
assert_raises(ValueError, format.read_array_header_1_0, s)
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 49544b22b..cf303993b 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1,12 +1,15 @@
+from __future__ import division, absolute_import, print_function
+
import warnings
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal,
- assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- assert_raises, assert_allclose, assert_array_max_ulp
- )
+ run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
+ assert_almost_equal, assert_array_almost_equal, assert_raises,
+ assert_allclose, assert_array_max_ulp, assert_warns
+ )
from numpy.random import rand
from numpy.lib import *
+from numpy.compat import long
class TestAny(TestCase):
@@ -172,17 +175,79 @@ class TestInsert(TestCase):
assert_equal(insert(a, 3, 1), [1, 2, 3, 1])
assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])
assert_equal(insert(a, 1,[1,2,3]), [1, 1, 2, 3, 2, 3])
- assert_equal(insert(a,[1,2,3],9),[1,9,2,9,3,9])
+ assert_equal(insert(a,[1,-1,3],9),[1,9,2,9,3,9])
+ assert_equal(insert(a,slice(-1,None,-1), 9),[9,1,9,2,9,3])
+ assert_equal(insert(a,[-1,1,3], [7,8,9]),[1,8,2,7,3,9])
b = np.array([0, 1], dtype=np.float64)
assert_equal(insert(b, 0, b[0]), [0., 0., 1.])
+ assert_equal(insert(b, [], []), b)
+ # Bools will be treated differently in the future:
+ #assert_equal(insert(a, np.array([True]*4), 9), [9,1,9,2,9,3,9])
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', FutureWarning)
+ assert_equal(insert(a, np.array([True]*4), 9), [1,9,9,9,9,2,3])
+ assert_(w[0].category is FutureWarning)
+
def test_multidim(self):
a = [[1, 1, 1]]
r = [[2, 2, 2],
[1, 1, 1]]
+ assert_equal(insert(a, 0, [1]), [1,1,1,1])
assert_equal(insert(a, 0, [2, 2, 2], axis=0), r)
assert_equal(insert(a, 0, 2, axis=0), r)
assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]])
+ a = np.array([[1, 1], [2, 2], [3, 3]])
+ b = np.arange(1,4).repeat(3).reshape(3,3)
+ c = np.concatenate((a[:,0:1], np.arange(1,4).repeat(3).reshape(3,3).T,
+ a[:,1:2]), axis=1)
+ assert_equal(insert(a, [1], [[1],[2],[3]], axis=1), b)
+ assert_equal(insert(a, [1], [1, 2, 3], axis=1), c)
+ # scalars behave differently, in this case exactly opposite:
+ assert_equal(insert(a, 1, [1, 2, 3], axis=1), b)
+ assert_equal(insert(a, 1, [[1],[2],[3]], axis=1), c)
+
+ a = np.arange(4).reshape(2,2)
+ assert_equal(insert(a[:,:1], 1, a[:,1], axis=1), a)
+ assert_equal(insert(a[:1,:], 1, a[1,:], axis=0), a)
+
+ # negative axis value
+ a = np.arange(24).reshape((2,3,4))
+ assert_equal(insert(a, 1, a[:,:,3], axis=-1),
+ insert(a, 1, a[:,:,3], axis=2))
+ assert_equal(insert(a, 1, a[:,2,:], axis=-2),
+ insert(a, 1, a[:,2,:], axis=1))
+
+ # invalid axis value
+ assert_raises(IndexError, insert, a, 1, a[:,2,:], axis=3)
+ assert_raises(IndexError, insert, a, 1, a[:,2,:], axis=-4)
+
+ def test_0d(self):
+ # This is an error in the future
+ a = np.array(1)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ assert_equal(insert(a, [], 2, axis=0), np.array(2))
+ assert_(w[0].category is DeprecationWarning)
+
+ def test_subclass(self):
+ class SubClass(np.ndarray):
+ pass
+ a = np.arange(10).view(SubClass)
+ assert_(isinstance(np.insert(a, 0, [0]), SubClass))
+ assert_(isinstance(np.insert(a, [], []), SubClass))
+ assert_(isinstance(np.insert(a, [0,1], [1,2]), SubClass))
+ assert_(isinstance(np.insert(a, slice(1,2), [1,2]), SubClass))
+ assert_(isinstance(np.insert(a, slice(1,-2), []), SubClass))
+ # This is an error in the future:
+ a = np.array(1).view(SubClass)
+ assert_(isinstance(np.insert(a, 0, [0]), SubClass))
+
+ def test_index_array_copied(self):
+ x = np.array([1, 1, 1])
+ np.insert([0, 1, 2], x, [3, 4, 5])
+ assert_equal(x, np.array([1, 1, 1]))
+
class TestAmax(TestCase):
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
@@ -300,6 +365,67 @@ class TestDiff(TestCase):
assert_array_equal(diff(x, n=2, axis=0), out4)
+class TestDelete(TestCase):
+ def setUp(self):
+ self.a = np.arange(5)
+ self.nd_a = np.arange(5).repeat(2).reshape(1,5,2)
+
+ def _check_inverse_of_slicing(self, indices):
+ a_del = delete(self.a, indices)
+ nd_a_del = delete(self.nd_a, indices, axis=1)
+ msg = 'Delete failed for obj: %r' % indices
+ # NOTE: The cast should be removed after warning phase for bools
+ if not isinstance(indices, (slice, int, long, np.integer)):
+ indices = np.asarray(indices, dtype=np.intp)
+ indices = indices[(indices >= 0) & (indices < 5)]
+ assert_array_equal(setxor1d(a_del, self.a[indices,]), self.a,
+ err_msg=msg)
+ xor = setxor1d(nd_a_del[0,:,0], self.nd_a[0,indices,0])
+ assert_array_equal(xor, self.nd_a[0,:,0], err_msg=msg)
+
+ def test_slices(self):
+ lims = [-6, -2, 0, 1, 2, 4, 5]
+ steps = [-3, -1, 1, 3]
+ for start in lims:
+ for stop in lims:
+ for step in steps:
+ s = slice(start, stop, step)
+ self._check_inverse_of_slicing(s)
+
+ def test_fancy(self):
+ # Deprecation/FutureWarning tests should be kept after change.
+ self._check_inverse_of_slicing(np.array([[0,1],[2,1]]))
+ assert_raises(DeprecationWarning, delete, self.a, [100])
+ assert_raises(DeprecationWarning, delete, self.a, [-100])
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', FutureWarning)
+ self._check_inverse_of_slicing([0, -1, 2, 2])
+ obj = np.array([True, False, False], dtype=bool)
+ self._check_inverse_of_slicing(obj)
+ assert_(w[0].category is FutureWarning)
+ assert_(w[1].category is FutureWarning)
+
+ def test_single(self):
+ self._check_inverse_of_slicing(0)
+ self._check_inverse_of_slicing(-4)
+
+ def test_0d(self):
+ a = np.array(1)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ assert_equal(delete(a, [], axis=0), a)
+ assert_(w[0].category is DeprecationWarning)
+
+ def test_subclass(self):
+ class SubClass(np.ndarray):
+ pass
+ a = self.a.view(SubClass)
+ assert_(isinstance(delete(a, 0), SubClass))
+ assert_(isinstance(delete(a, []), SubClass))
+ assert_(isinstance(delete(a, [0,1]), SubClass))
+ assert_(isinstance(delete(a, slice(1,2)), SubClass))
+ assert_(isinstance(delete(a, slice(1,-2)), SubClass))
+
class TestGradient(TestCase):
def test_basic(self):
v = [[1, 1], [3, 4]]
@@ -529,7 +655,7 @@ class TestVectorize(TestCase):
res2a = f2(np.arange(3))
assert_equal(res1a, res2a)
assert_equal(res1b, res2b)
-
+
def test_string_ticket_1892(self):
"""Test vectorization over strings: issue 1892."""
f = np.vectorize(lambda x:x)
@@ -885,7 +1011,7 @@ class TestHistogramdd(TestCase):
assert_array_equal(H, answer)
Z = np.zeros((5, 5, 5))
- Z[range(5), range(5), range(5)] = 1.
+ Z[list(range(5)), list(range(5)), list(range(5))] = 1.
H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
assert_array_equal(H, Z)
@@ -947,8 +1073,7 @@ class TestHistogramdd(TestCase):
def test_inf_edges(self):
"""Test using +/-inf bin edges works. See #1788."""
- olderr = np.seterr(invalid='ignore')
- try:
+ with np.errstate(invalid='ignore'):
x = np.arange(6).reshape(3, 2)
expected = np.array([[1, 0], [0, 1], [0, 1]])
h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
@@ -957,8 +1082,6 @@ class TestHistogramdd(TestCase):
assert_allclose(h, expected)
h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
assert_allclose(h, expected)
- finally:
- np.seterr(**olderr)
class TestUnique(TestCase):
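
The `np.errstate` context manager used above is equivalent to the old save/`seterr`/restore dance in the `finally` block, but exception-safe by construction. Minimal sketch:

    import numpy as np

    x = np.array([np.inf, 1.0])
    with np.errstate(invalid='ignore'):
        # inf - inf yields NaN without the usual invalid-value warning.
        print(x - x)  # [ nan   0.]
    # The previous error state is restored on exit, even after errors.
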
@@ -988,127 +1111,6 @@ class TestCheckFinite(TestCase):
assert_(a.dtype == np.float64)
-class TestNaNFuncts(TestCase):
- def setUp(self):
- self.A = np.array([[[ np.nan, 0.01319214, 0.01620964],
- [ 0.11704017, np.nan, 0.75157887],
- [ 0.28333658, 0.1630199 , np.nan ]],
- [[ 0.59541557, np.nan, 0.37910852],
- [ np.nan, 0.87964135, np.nan ],
- [ 0.70543747, np.nan, 0.34306596]],
- [[ 0.72687499, 0.91084584, np.nan ],
- [ 0.84386844, 0.38944762, 0.23913896],
- [ np.nan, 0.37068164, 0.33850425]]])
-
- def test_nansum(self):
- assert_almost_equal(nansum(self.A), 8.0664079100000006)
- assert_almost_equal(nansum(self.A, 0),
- np.array([[ 1.32229056, 0.92403798, 0.39531816],
- [ 0.96090861, 1.26908897, 0.99071783],
- [ 0.98877405, 0.53370154, 0.68157021]]))
- assert_almost_equal(nansum(self.A, 1),
- np.array([[ 0.40037675, 0.17621204, 0.76778851],
- [ 1.30085304, 0.87964135, 0.72217448],
- [ 1.57074343, 1.6709751 , 0.57764321]]))
- assert_almost_equal(nansum(self.A, 2),
- np.array([[ 0.02940178, 0.86861904, 0.44635648],
- [ 0.97452409, 0.87964135, 1.04850343],
- [ 1.63772083, 1.47245502, 0.70918589]]))
-
- def test_nanmin(self):
- assert_almost_equal(nanmin(self.A), 0.01319214)
- assert_almost_equal(nanmin(self.A, 0),
- np.array([[ 0.59541557, 0.01319214, 0.01620964],
- [ 0.11704017, 0.38944762, 0.23913896],
- [ 0.28333658, 0.1630199 , 0.33850425]]))
- assert_almost_equal(nanmin(self.A, 1),
- np.array([[ 0.11704017, 0.01319214, 0.01620964],
- [ 0.59541557, 0.87964135, 0.34306596],
- [ 0.72687499, 0.37068164, 0.23913896]]))
- assert_almost_equal(nanmin(self.A, 2),
- np.array([[ 0.01319214, 0.11704017, 0.1630199 ],
- [ 0.37910852, 0.87964135, 0.34306596],
- [ 0.72687499, 0.23913896, 0.33850425]]))
- assert_(np.isnan(nanmin([np.nan, np.nan])))
-
- def test_nanargmin(self):
- assert_almost_equal(nanargmin(self.A), 1)
- assert_almost_equal(nanargmin(self.A, 0),
- np.array([[1, 0, 0],
- [0, 2, 2],
- [0, 0, 2]]))
- assert_almost_equal(nanargmin(self.A, 1),
- np.array([[1, 0, 0],
- [0, 1, 2],
- [0, 2, 1]]))
- assert_almost_equal(nanargmin(self.A, 2),
- np.array([[1, 0, 1],
- [2, 1, 2],
- [0, 2, 2]]))
-
- def test_nanmax(self):
- assert_almost_equal(nanmax(self.A), 0.91084584000000002)
- assert_almost_equal(nanmax(self.A, 0),
- np.array([[ 0.72687499, 0.91084584, 0.37910852],
- [ 0.84386844, 0.87964135, 0.75157887],
- [ 0.70543747, 0.37068164, 0.34306596]]))
- assert_almost_equal(nanmax(self.A, 1),
- np.array([[ 0.28333658, 0.1630199 , 0.75157887],
- [ 0.70543747, 0.87964135, 0.37910852],
- [ 0.84386844, 0.91084584, 0.33850425]]))
- assert_almost_equal(nanmax(self.A, 2),
- np.array([[ 0.01620964, 0.75157887, 0.28333658],
- [ 0.59541557, 0.87964135, 0.70543747],
- [ 0.91084584, 0.84386844, 0.37068164]]))
- assert_(np.isnan(nanmax([np.nan, np.nan])))
-
- def test_nanmin_allnan_on_axis(self):
- assert_array_equal(np.isnan(nanmin([[np.nan] * 2] * 3, axis=1)),
- [True, True, True])
-
- def test_nanmin_masked(self):
- a = np.ma.fix_invalid([[2, 1, 3, np.nan], [5, 2, 3, np.nan]])
- ctrl_mask = a._mask.copy()
- test = np.nanmin(a, axis=1)
- assert_equal(test, [1, 2])
- assert_equal(a._mask, ctrl_mask)
- assert_equal(np.isinf(a), np.zeros((2, 4), dtype=bool))
-
-
-class TestNanFunctsIntTypes(TestCase):
-
- int_types = (
- np.int8, np.int16, np.int32, np.int64, np.uint8,
- np.uint16, np.uint32, np.uint64)
-
- def setUp(self, *args, **kwargs):
- self.A = np.array([127, 39, 93, 87, 46])
-
- def integer_arrays(self):
- for dtype in self.int_types:
- yield self.A.astype(dtype)
-
- def test_nanmin(self):
- min_value = min(self.A)
- for A in self.integer_arrays():
- assert_equal(nanmin(A), min_value)
-
- def test_nanmax(self):
- max_value = max(self.A)
- for A in self.integer_arrays():
- assert_equal(nanmax(A), max_value)
-
- def test_nanargmin(self):
- min_arg = np.argmin(self.A)
- for A in self.integer_arrays():
- assert_equal(nanargmin(A), min_arg)
-
- def test_nanargmax(self):
- max_arg = np.argmax(self.A)
- for A in self.integer_arrays():
- assert_equal(nanargmax(A), max_arg)
-
-
class TestCorrCoef(TestCase):
A = np.array([[ 0.15391142, 0.18045767, 0.14197213],
[ 0.70461506, 0.96474128, 0.27906989],
@@ -1155,7 +1157,7 @@ class TestCov(TestCase):
assert_equal(cov(np.array([]).reshape(0, 2)).shape, (0, 2))
-class Test_i0(TestCase):
+class Test_I0(TestCase):
def test_simple(self):
assert_almost_equal(i0(0.5), np.array(1.0634833707413234))
A = np.array([ 0.49842636, 0.6969809 , 0.22011976, 0.0155549])
@@ -1414,6 +1416,53 @@ def test_median():
assert_allclose(np.median(a2), 2.5)
assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5])
assert_allclose(np.median(a2, axis=1), [1, 4])
+ assert_allclose(np.median(a2, axis=None), 2.5)
+ a3 = np.array([[2, 3],
+ [0, 1],
+ [6, 7],
+ [4, 5]])
+ #check no overwrite
+ for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]:
+ orig = a.copy()
+ np.median(a, axis=None)
+ for ax in range(a.ndim):
+ np.median(a, axis=ax)
+ assert_array_equal(a, orig)
+
+ assert_allclose(np.median(a3, axis=0), [3, 4])
+ assert_allclose(np.median(a3.T, axis=1), [3, 4])
+ assert_allclose(np.median(a3), 3.5)
+ assert_allclose(np.median(a3, axis=None), 3.5)
+ assert_allclose(np.median(a3.T), 3.5)
+
+ a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775])
+ assert_almost_equal((a[1] + a[3]) / 2., np.median(a))
+ a = np.array([0.0463301, 0.0444502, 0.141249])
+ assert_almost_equal(a[0], np.median(a))
+ a = np.array([0.0444502, 0.141249, 0.0463301])
+ assert_almost_equal(a[-1], np.median(a))
+
+ assert_allclose(np.median(a0.copy(), overwrite_input=True), 1)
+ assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5)
+ assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5)
+ assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0),
+ [1.5, 2.5, 3.5])
+ assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4])
+ assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=None), 2.5)
+ assert_allclose(np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4])
+ assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1),
+ [3, 4])
+
+ a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5))
+    for sub in a4:
+        np.random.shuffle(sub)
+ assert_allclose(np.median(a4, axis=None),
+ np.median(a4.copy(), axis=None, overwrite_input=True))
+ assert_allclose(np.median(a4, axis=0),
+ np.median(a4.copy(), axis=0, overwrite_input=True))
+ assert_allclose(np.median(a4, axis=1),
+ np.median(a4.copy(), axis=1, overwrite_input=True))
+ assert_allclose(np.median(a4, axis=2),
+ np.median(a4.copy(), axis=2, overwrite_input=True))
class TestAdd_newdoc_ufunc(TestCase):
@@ -1426,7 +1475,5 @@ class TestAdd_newdoc_ufunc(TestCase):
assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
-
-
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index 0ede40d5a..32b3c7036 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
import numpy as np
from numpy import ( array, ones, r_, mgrid, unravel_index, zeros, where,
@@ -242,6 +244,20 @@ def test_ndindex():
expected = [ix for ix, e in np.ndenumerate(np.zeros((1, 2, 3)))]
assert_array_equal(x, expected)
+ x = list(np.ndindex((1, 2, 3)))
+ assert_array_equal(x, expected)
+
+ # Test use of scalars and tuples
+ x = list(np.ndindex((3,)))
+ assert_array_equal(x, list(np.ndindex(3)))
+
+ # Make sure size argument is optional
+ x = list(np.ndindex())
+ assert_equal(x, [()])
+
+ x = list(np.ndindex(()))
+ assert_equal(x, [()])
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index f8caeedb6..4095dd813 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -1,34 +1,49 @@
+from __future__ import division, absolute_import, print_function
+
import sys
import gzip
import os
import threading
-from tempfile import mkstemp, NamedTemporaryFile
+from tempfile import mkstemp, mktemp, NamedTemporaryFile
import time
-from datetime import datetime
import warnings
import gc
-from numpy.testing.utils import WarningManager
+from io import BytesIO
+from datetime import datetime
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConverterLockError, \
ConversionWarning
-from numpy.compat import asbytes, asbytes_nested, bytes
-
+from numpy.compat import asbytes, asbytes_nested, bytes, asstr
from nose import SkipTest
from numpy.ma.testutils import (TestCase, assert_equal, assert_array_equal,
assert_raises, run_module_suite)
from numpy.testing import assert_warns, assert_, build_err_msg
-if sys.version_info[0] >= 3:
- from io import BytesIO
- def StringIO(s=""):
- return BytesIO(asbytes(s))
-else:
- from StringIO import StringIO
- BytesIO = StringIO
+
+class TextIO(BytesIO):
+ """Helper IO class.
+
+    Writes encode strings to bytes if needed; reads return bytes.
+ This makes it easier to emulate files opened in binary mode
+ without needing to explicitly convert strings to bytes in
+ setting up the test data.
+
+ """
+ def __init__(self, s=""):
+ BytesIO.__init__(self, asbytes(s))
+
+ def write(self, s):
+ BytesIO.write(self, asbytes(s))
+
+ def writelines(self, lines):
+ BytesIO.writelines(self, [asbytes(s) for s in lines])
+
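
A usage sketch for the `TextIO` helper defined above (assuming the class is in scope): tests keep writing native strings while the underlying buffer stays bytes, as with a file opened in binary mode:

    c = TextIO()
    c.write("1 2\n")         # str is encoded to bytes on the way in
    c.writelines(["3 4\n"])
    c.seek(0)
    print(c.readlines())     # [b'1 2\n', b'3 4\n'] on Python 3
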
MAJVER, MINVER = sys.version_info[:2]
+IS_64BIT = sys.maxsize > 2**32
+
def strptime(s, fmt=None):
"""This function is available in the datetime module only
@@ -71,7 +86,7 @@ class RoundtripTest(object):
target_file = NamedTemporaryFile()
load_file = target_file.name
else:
- target_file = StringIO()
+ target_file = BytesIO()
load_file = target_file
arr = args
@@ -125,6 +140,19 @@ class TestSavezLoad(RoundtripTest, TestCase):
for n, arr in enumerate(self.arr):
assert_equal(arr, self.arr_reloaded['arr_%d' % n])
+ @np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems")
+ @np.testing.dec.slow
+ def test_big_arrays(self):
+ L = (1 << 31) + 100000
+ tmp = mktemp(suffix='.npz')
+ a = np.empty(L, dtype=np.uint8)
+ np.savez(tmp, a=a)
+ del a
+ npfile = np.load(tmp)
+ a = npfile['a']
+ npfile.close()
+ os.remove(tmp)
+
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
@@ -133,7 +161,7 @@ class TestSavezLoad(RoundtripTest, TestCase):
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
- c = StringIO()
+ c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
@@ -151,14 +179,14 @@ class TestSavezLoad(RoundtripTest, TestCase):
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
- except OSError, err:
+ except OSError as err:
error_list.append(err)
finally:
os.remove(tmp)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
- for j in xrange(3)]
+ for j in range(3)]
for t in threads:
t.start()
for t in threads:
@@ -192,7 +220,7 @@ class TestSavezLoad(RoundtripTest, TestCase):
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
- # It might be a "week" test since failed to get triggered on
+        # It might be a "weak" test since it failed to get triggered on
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
@@ -203,109 +231,129 @@ class TestSavezLoad(RoundtripTest, TestCase):
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
-
- for i in range(1, 1025):
- try:
- np.load(tmp)["data"]
- except Exception, e:
- raise AssertionError("Failed to load data from a file: %s" % e)
+        # We need to check that the garbage collector can properly close
+        # the npz files returned by np.load when their reference count
+ # goes to zero. Python 3 running in debug mode raises a
+ # ResourceWarning when file closing is left to the garbage
+ # collector, so we catch the warnings. Because ResourceWarning
+ # is unknown in Python < 3.x, we take the easy way out and
+ # catch all warnings.
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ for i in range(1, 1025):
+ try:
+ np.load(tmp)["data"]
+ except Exception as e:
+ msg = "Failed to load data from a file: %s" % e
+ raise AssertionError(msg)
finally:
os.remove(tmp)
+ def test_closing_zipfile_after_load(self):
+ # Check that zipfile owns file and can close it.
+ # This needs to pass a file name to load for the
+ # test.
+ fd, tmp = mkstemp(suffix='.npz')
+ os.close(fd)
+ np.savez(tmp, lab='place holder')
+ data = np.load(tmp)
+ fp = data.zip.fp
+ data.close()
+ assert_(fp.closed)
+
class TestSaveTxt(TestCase):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
- asbytes_nested(
- [(fmt + ' ' + fmt + '\n') % (1, 2),
- (fmt + ' ' + fmt + '\n') % (3, 4)]))
+ [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
+ asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
- assert_equal(c.readlines(), asbytes_nested(['1 2\n', '3 4\n']))
+ assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
- assert_equal(lines, asbytes_nested(['1\n', '2\n', '3\n', '4\n']))
+ assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
- assert_equal(c.readlines(), asbytes_nested(['1 2\n', '3 4\n']))
+ assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
- c = StringIO()
- np.savetxt(c, a, delimiter=asbytes(','), fmt='%d')
+ c = BytesIO()
+ np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
- assert_equal(c.readlines(), asbytes_nested(['1,2\n', '3,4\n']))
+ assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
- c = StringIO()
+ c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
- assert_equal(c.readlines(), asbytes_nested(['01 2.0\n', '03 4.0\n']))
+ assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
- assert_equal(lines, asbytes_nested(['01 : 2.0\n', '03 : 4.0\n']))
+ assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
        # Specify delimiter, should be overridden
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
- assert_equal(lines, asbytes_nested(['01 : 2.0\n', '03 : 4.0\n']))
+ assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
def test_header_footer(self):
"""
Test the functionality of the header and footer keyword argument.
"""
- c = StringIO()
+ c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=np.int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
- asbytes('# ' + test_header_footer +'\n1 2\n3 4\n' ))
+ asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
- c = StringIO()
+ c = BytesIO()
commentstr = '% '
- np.savetxt(c, a, fmt='%1d', header=test_header_footer,
- comments=commentstr)
+ np.savetxt(c, a, fmt='%1d',
+ header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
- c = StringIO()
+ c = BytesIO()
commentstr = '% '
- np.savetxt(c, a, fmt='%1d', footer=test_header_footer,
- comments=commentstr)
+ np.savetxt(c, a, fmt='%1d',
+ footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
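For reference, the header, footer and comments keywords exercised above
compose as follows; a small sketch against a plain BytesIO, with the expected
output matching the assertions in this hunk:

    import numpy as np
    from io import BytesIO

    a = np.array([[1, 2], [3, 4]], dtype=int)
    c = BytesIO()
    # comments= is prepended to every header and footer line ('# ' by default).
    np.savetxt(c, a, fmt='%1d', header='Test header / footer',
               footer='Test header / footer', comments='% ')
    c.seek(0)
    print(c.read().decode())
    # % Test header / footer
    # 1 2
    # 3 4
    # % Test header / footer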
@@ -329,29 +377,41 @@ class TestSaveTxt(TestCase):
im = np.e
a[:] = re + 1.0j * im
# One format only
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
- _assert_floatstr_lines_equal(lines, asbytes_nested([
- ' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
- ' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']))
+ _assert_floatstr_lines_equal(lines,
+ [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
+ b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
- _assert_floatstr_lines_equal(lines, asbytes_nested([
- ' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
- ' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n']))
+ _assert_floatstr_lines_equal(lines,
+ [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
+ b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
- _assert_floatstr_lines_equal(lines, asbytes_nested([
- '(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
- '(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']))
+ _assert_floatstr_lines_equal(lines,
+ [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
+ b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
+
+ def test_custom_writer(self):
+
+ class CustomWriter(list):
+ def write(self, text):
+ self.extend(text.split(b'\n'))
+
+ w = CustomWriter()
+ a = np.array([(1, 2), (3, 4)])
+ np.savetxt(w, a)
+ b = np.loadtxt(w)
+ assert_array_equal(a, b)
def _assert_floatstr_lines_equal(actual_lines, expected_lines):
@@ -376,15 +436,15 @@ def _assert_floatstr_lines_equal(actual_lines, expected_lines):
class TestLoadTxt(TestCase):
def test_record(self):
- c = StringIO()
- c.write(asbytes('1 2\n3 4'))
+ c = TextIO()
+ c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
- d = StringIO()
- d.write(asbytes('M 64.0 75.0\nF 25.0 60.0'))
+ d = TextIO()
+ d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1',
@@ -395,8 +455,8 @@ class TestLoadTxt(TestCase):
assert_array_equal(y, b)
def test_array(self):
- c = StringIO()
- c.write(asbytes('1 2\n3 4'))
+ c = TextIO()
+ c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=int)
@@ -409,23 +469,23 @@ class TestLoadTxt(TestCase):
assert_array_equal(x, a)
def test_1D(self):
- c = StringIO()
- c.write(asbytes('1\n2\n3\n4\n'))
+ c = TextIO()
+ c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
- c = StringIO()
- c.write(asbytes('1,2,3,4\n'))
+ c = TextIO()
+ c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
- c = StringIO()
- c.write(asbytes('1,2,3,,5\n'))
+ c = TextIO()
+ c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)})
@@ -433,8 +493,8 @@ class TestLoadTxt(TestCase):
assert_array_equal(x, a)
def test_converters_with_usecols(self):
- c = StringIO()
- c.write(asbytes('1,2,3,,5\n6,7,8,9,10\n'))
+ c = TextIO()
+ c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)}, \
@@ -443,8 +503,8 @@ class TestLoadTxt(TestCase):
assert_array_equal(x, a)
def test_comments(self):
- c = StringIO()
- c.write(asbytes('# comment\n1,2,3,5\n'))
+ c = TextIO()
+ c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
comments='#')
@@ -452,16 +512,16 @@ class TestLoadTxt(TestCase):
assert_array_equal(x, a)
def test_skiprows(self):
- c = StringIO()
- c.write(asbytes('comment\n1,2,3,5\n'))
+ c = TextIO()
+ c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
- c = StringIO()
- c.write(asbytes('# comment\n1,2,3,5\n'))
+ c = TextIO()
+ c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
@@ -470,14 +530,14 @@ class TestLoadTxt(TestCase):
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
- c = StringIO()
+ c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
@@ -492,16 +552,16 @@ class TestLoadTxt(TestCase):
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
- c = StringIO(data)
+ c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
- arr = np.loadtxt(c, usecols=(0, 2), dtype=zip(names, dtypes))
- assert_equal(arr['stid'], asbytes_nested(["JOE", "BOB"]))
+ arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
+ assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
- c = StringIO()
- c.write(asbytes('1,2,3.0\n4,5,6.0\n'))
+ c = TextIO()
+ c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
@@ -509,7 +569,7 @@ class TestLoadTxt(TestCase):
assert_array_equal(x, a)
def test_shaped_dtype(self):
- c = StringIO("aaaa 1.0 8.0 1 2 3 4 5 6")
+ c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
@@ -518,33 +578,30 @@ class TestLoadTxt(TestCase):
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
- c = StringIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
+ c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
- a = np.array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]],[[7, 8, 9], [10, 11, 12]]])],
- dtype=dt)
+ a = np.array([('aaaa', 1.0, 8.0,
+ [[[1, 2, 3], [4, 5, 6]],[[7, 8, 9], [10, 11, 12]]])],
+ dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
- warn_ctx = WarningManager()
- warn_ctx.__enter__()
- try:
+ with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
- c = StringIO()
+ c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
- finally:
- warn_ctx.__exit__()
def test_unused_converter(self):
- c = StringIO()
- c.writelines([asbytes('1 21\n'), asbytes('3 42\n')])
+ c = TextIO()
+ c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
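All of the WarningManager conversions in this file follow the same pattern:
warnings.catch_warnings scopes the filter to a block and restores the previous
warning state on exit, which WarningManager used to do by hand. A minimal
sketch of the replacement idiom, using the empty-file warning these tests
filter:

    import warnings
    import numpy as np
    from io import BytesIO

    with warnings.catch_warnings():
        # The filter is active only inside this block; the warning
        # state is restored automatically on exit.
        warnings.filterwarnings("ignore", message="loadtxt: Empty input file:")
        x = np.loadtxt(BytesIO())
    print(x.shape)  # (0,)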
@@ -559,12 +616,12 @@ class TestLoadTxt(TestCase):
"Test using an explicit dtype with an object"
from datetime import date
import time
- data = asbytes(""" 1; 2001-01-01
- 2; 2002-01-31 """)
+ data = """ 1; 2001-01-01
+ 2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
- test = np.loadtxt(StringIO(data), delimiter=";", dtype=ndtype,
+ test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
@@ -572,23 +629,23 @@ class TestLoadTxt(TestCase):
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
- c = StringIO()
- c.write(asbytes("%s %s" % tgt))
+ c = TextIO()
+ c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
- c = StringIO()
- c.write(asbytes("%s %s" % tgt))
+ c = TextIO()
+ c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_universal_newline(self):
f, name = mkstemp()
- os.write(f, asbytes('1 21\r3 42\r'))
+ os.write(f, b'1 21\r3 42\r')
os.close(f)
try:
@@ -598,29 +655,29 @@ class TestLoadTxt(TestCase):
os.unlink(name)
def test_empty_field_after_tab(self):
- c = StringIO()
- c.write(asbytes('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t'))
+ c = TextIO()
+ c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
dt = { 'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
- a = np.array([asbytes('start '), asbytes(' '), asbytes('')])
+ a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_structure_unpack(self):
- txt = StringIO(asbytes("M 21 72\nF 35 58"))
+ txt = TextIO("M 21 72\nF 35 58")
dt = { 'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
- assert_array_equal(a, np.array([asbytes('M'), asbytes('F')]))
+ assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([ 72., 58.]))
def test_ndmin_keyword(self):
- c = StringIO()
- c.write(asbytes('1,2,3\n4,5,6'))
+ c = TextIO()
+ c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
@@ -629,8 +686,9 @@ class TestLoadTxt(TestCase):
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
- d = StringIO()
- d.write(asbytes('0,1,2'))
+
+ d = TextIO()
+ d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
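The ndmin checks above fix the promised output shapes; a compact sketch of the
contract, assuming the behavior these assertions encode:

    import numpy as np
    from io import BytesIO

    buf = lambda s: BytesIO(s.encode())
    # ndmin=2 promotes a single row to 2-D...
    print(np.loadtxt(buf('0,1,2'), dtype=int, delimiter=',', ndmin=2).shape)   # (1, 3)
    # ...and a single column to a column vector.
    print(np.loadtxt(buf('0\n1\n2'), dtype=int, delimiter=',', ndmin=2).shape) # (3, 1)
    print(np.loadtxt(buf('0\n1\n2'), dtype=int, delimiter=',', ndmin=1).shape) # (3,)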
@@ -640,8 +698,9 @@ class TestLoadTxt(TestCase):
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
- e = StringIO()
- e.write(asbytes('0\n1\n2'))
+
+ e = TextIO()
+ e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
@@ -653,29 +712,26 @@ class TestLoadTxt(TestCase):
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
- warn_ctx = WarningManager()
- warn_ctx.__enter__()
- try:
+ with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
- f = StringIO()
+ f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
- finally:
- warn_ctx.__exit__()
def test_generator_source(self):
def count():
for i in range(10):
- yield asbytes("%d" % i)
+ yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
class Testfromregex(TestCase):
+ # np.fromregex expects files opened in binary mode.
def test_record(self):
- c = StringIO()
- c.write(asbytes('1.312 foo\n1.534 bar\n4.444 qux'))
+ c = TextIO()
+ c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
@@ -685,8 +741,8 @@ class Testfromregex(TestCase):
assert_array_equal(x, a)
def test_record_2(self):
- c = StringIO()
- c.write(asbytes('1312 foo\n1534 bar\n4444 qux'))
+ c = TextIO()
+ c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
@@ -696,8 +752,8 @@ class Testfromregex(TestCase):
assert_array_equal(x, a)
def test_record_3(self):
- c = StringIO()
- c.write(asbytes('1312 foo\n1534 bar\n4444 qux'))
+ c = TextIO()
+ c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
@@ -713,13 +769,13 @@ class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
- data = StringIO(asbytes('1 2\n3 4'))
+ data = TextIO('1 2\n3 4')
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
- data = StringIO('M 64.0 75.0\nF 25.0 60.0')
+ data = TextIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
@@ -730,7 +786,7 @@ class TestFromTxt(TestCase):
def test_array(self):
"Test outputing a standard ndarray"
- data = StringIO('1 2\n3 4')
+ data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
@@ -744,36 +800,36 @@ class TestFromTxt(TestCase):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
- data = StringIO('1\n2\n3\n4\n')
+ data = TextIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
- data = StringIO('1,2,3,4\n')
- test = np.ndfromtxt(data, dtype=int, delimiter=asbytes(','))
+ data = TextIO('1,2,3,4\n')
+ test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
- data = StringIO('# comment\n1,2,3,5\n')
- test = np.ndfromtxt(data, dtype=int, delimiter=asbytes(','), comments=asbytes('#'))
+ data = TextIO('# comment\n1,2,3,5\n')
+ test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
- data = StringIO('1,2,3,5# comment\n')
- test = np.ndfromtxt(data, dtype=int, delimiter=asbytes(','), comments=asbytes('#'))
+ data = TextIO('1,2,3,5# comment\n')
+ test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
- kwargs = dict(dtype=int, delimiter=asbytes(','))
+ kwargs = dict(dtype=int, delimiter=',')
#
- data = StringIO('comment\n1,2,3,5\n')
+ data = TextIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
- data = StringIO('# comment\n1,2,3,5\n')
+ data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
@@ -783,42 +839,37 @@ class TestFromTxt(TestCase):
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
- test = np.genfromtxt(StringIO(asbytes("\n".join(data))), **kwargs)
+ test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
- warn_ctx = WarningManager()
- warn_ctx.__enter__()
- try:
- basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
+ with warnings.catch_warnings():
warnings.filterwarnings("ignore")
+ basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
- StringIO(basestr), skip_footer=1)
+ TextIO(basestr), skip_footer=1)
# except ValueError:
# pass
- a = np.genfromtxt(StringIO(basestr), skip_footer=1, invalid_raise=False)
+ a = np.genfromtxt(TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
- a = np.genfromtxt(StringIO(basestr), skip_footer=3)
+ a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
- a = np.genfromtxt(StringIO(basestr), skip_footer=1, invalid_raise=False)
+ a = np.genfromtxt(TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
- a = np.genfromtxt(StringIO(basestr), skip_footer=3, invalid_raise=False)
+ a = np.genfromtxt(TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
- finally:
- warn_ctx.__exit__()
-
def test_header(self):
"Test retrieving a header"
- data = StringIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
+ data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
- control = {'gender': np.array(asbytes_nested(['M', 'F'])),
+ control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
@@ -827,9 +878,9 @@ class TestFromTxt(TestCase):
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
- data = StringIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
+ data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
- control = [np.array(asbytes_nested(['A', 'BCD'])),
+ control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
@@ -841,7 +892,7 @@ class TestFromTxt(TestCase):
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
- data = StringIO('1 2 3 4\n5 6 7 8\n')
+ data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
@@ -849,7 +900,7 @@ class TestFromTxt(TestCase):
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
- data = StringIO('1,2,3.0\n4,5,6.0\n')
+ data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
@@ -860,7 +911,7 @@ class TestFromTxt(TestCase):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
- data = StringIO('M 64.0 75.0\nF 25.0 60.0')
+ data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
@@ -871,7 +922,7 @@ class TestFromTxt(TestCase):
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
- data = StringIO("""
+ data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
@@ -883,7 +934,7 @@ M 33 21.99
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
- data = StringIO("""
+ data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
@@ -895,7 +946,7 @@ M 33 21.99
def test_autonames_and_usecols(self):
"Tests names and usecols"
- data = StringIO('A B C D\n aaaa 121 45 9.1')
+ data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
@@ -905,7 +956,7 @@ M 33 21.99
def test_converters_with_usecols(self):
"Test the combination user-defined converters and usecol"
- data = StringIO('1,2,3,,5\n6,7,8,9,10\n')
+ data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3:lambda s: int(s or - 999)},
usecols=(1, 3,))
@@ -914,7 +965,7 @@ M 33 21.99
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
- data = StringIO('A B C D\n aaaa 121 45 9.1')
+ data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C':lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
@@ -924,26 +975,26 @@ M 33 21.99
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
- data = StringIO('2009-02-03 12:00:00Z, 72214.0')
+ data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
- control = np.array((datetime(2009, 02, 03), 72214.),
+ control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
"Test the conversion to datetime64."
converter = {'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
- data = StringIO('2009-02-03 12:00:00Z, 72214.0')
+ data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
- control = np.array((datetime(2009, 02, 03), 72214.),
+ control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
- data = StringIO("1 21\n 3 42\n")
+ data = TextIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
@@ -955,11 +1006,11 @@ M 33 21.99
def test_invalid_converter(self):
- strip_rand = lambda x : float((asbytes('r') in x.lower() and x.split()[-1]) or
- (not asbytes('r') in x.lower() and x.strip() or 0.0))
- strip_per = lambda x : float((asbytes('%') in x.lower() and x.split()[0]) or
- (not asbytes('%') in x.lower() and x.strip() or 0.0))
- s = StringIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" \
+ strip_rand = lambda x : float((b'r' in x.lower() and x.split()[-1]) or
+ (b'r' not in x.lower() and x.strip() or 0.0))
+ strip_per = lambda x : float((b'%' in x.lower() and x.split()[0]) or
+ (b'%' not in x.lower() and x.strip() or 0.0))
+ s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(converters={2 : strip_per, 3 : strip_rand}, delimiter=",",
@@ -968,7 +1019,7 @@ M 33 21.99
def test_tricky_converter_bug1666(self):
"Test some corner case"
- s = StringIO('q1,2\nq3,4')
+ s = TextIO('q1,2\nq3,4')
cnv = lambda s:float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0:cnv})
control = np.array([[1., 2.], [3., 4.]])
@@ -978,12 +1029,12 @@ M 33 21.99
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
- test = np.ndfromtxt(StringIO(dstr,),
+ test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0:bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
- test = np.ndfromtxt(StringIO(dstr,),
+ test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0:float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
@@ -993,12 +1044,12 @@ M 33 21.99
"Test using an explicit dtype with an object"
from datetime import date
import time
- data = asbytes(""" 1; 2001-01-01
- 2; 2002-01-31 """)
+ data = """ 1; 2001-01-01
+ 2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
- test = np.genfromtxt(StringIO(data), delimiter=";", dtype=ndtype,
+ test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
@@ -1006,7 +1057,7 @@ M 33 21.99
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
- test = np.genfromtxt(StringIO(data), delimiter=";",
+ test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
@@ -1017,7 +1068,7 @@ M 33 21.99
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
- data = StringIO('skip,skip,2001-01-01,1.0,skip')
+ data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
@@ -1027,7 +1078,7 @@ M 33 21.99
def test_spacedelimiter(self):
"Test space delimiter"
- data = StringIO("1 2 3 4 5\n6 7 8 9 10")
+ data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[ 1., 2., 3., 4., 5.],
[ 6., 7., 8., 9., 10.]])
@@ -1036,13 +1087,13 @@ M 33 21.99
def test_integer_delimiter(self):
"Test using an integer for delimiter"
data = " 1 2 3\n 4 5 67\n890123 4"
- test = np.genfromtxt(StringIO(data), delimiter=3)
+ test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
- data = StringIO('1,2,3,,5\n')
+ data = TextIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
@@ -1052,7 +1103,7 @@ M 33 21.99
def test_missing_with_tabs(self):
"Test w/ a delimiter tab"
txt = "1\t2\t3\n\t2\t\n1\t\t3"
- test = np.genfromtxt(StringIO(txt), delimiter="\t",
+ test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
@@ -1064,14 +1115,14 @@ M 33 21.99
"Test the selection of columns"
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
- data = StringIO()
+ data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
- data = StringIO()
+ data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
@@ -1084,23 +1135,23 @@ M 33 21.99
def test_usecols_as_css(self):
"Test giving usecols with a comma-separated string"
data = "1 2 3\n4 5 6"
- test = np.genfromtxt(StringIO(data),
+ test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
"Test usecols with an explicit structured dtype"
- data = StringIO("""JOE 70.1 25.3\nBOB 60.5 27.9""")
+ data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
- test = np.ndfromtxt(data, usecols=(0, 2), dtype=zip(names, dtypes))
- assert_equal(test['stid'], asbytes_nested(["JOE", "BOB"]))
+ test = np.ndfromtxt(data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
+ assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
"Test usecols with an integer"
- test = np.genfromtxt(StringIO("1 2 3\n4 5 6"), usecols=0)
+ test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
@@ -1108,27 +1159,24 @@ M 33 21.99
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
- test = np.genfromtxt(StringIO(data), usecols=(0, -1), **kwargs)
+ test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
- test = np.genfromtxt(StringIO(data),
+ test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
"Test that an empty file raises the proper warning."
- warn_ctx = WarningManager()
- warn_ctx.__enter__()
- try:
- warnings.filterwarnings("ignore", message="genfromtxt: Empty input file:")
- data = StringIO()
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore",
+ message="genfromtxt: Empty input file:")
+ data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
- finally:
- warn_ctx.__exit__()
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
- data = StringIO('1,2,3.0\n4,5,6.0\n')
+ data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
@@ -1136,7 +1184,7 @@ M 33 21.99
def test_shaped_dtype(self):
- c = StringIO("aaaa 1.0 8.0 1 2 3 4 5 6")
+ c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
@@ -1145,7 +1193,7 @@ M 33 21.99
assert_array_equal(x, a)
def test_withmissing(self):
- data = StringIO('A,B\n0,1\n2,N/A')
+ data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
@@ -1168,7 +1216,7 @@ M 33 21.99
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
- test = np.mafromtxt(StringIO(data), missing_values="N/A",
+ test = np.mafromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
@@ -1177,7 +1225,7 @@ M 33 21.99
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
- test = np.mafromtxt(StringIO(data),
+ test = np.mafromtxt(TextIO(data),
missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
@@ -1185,7 +1233,7 @@ M 33 21.99
dtype=mdtype)
assert_equal(test, control)
#
- test = np.mafromtxt(StringIO(data),
+ test = np.mafromtxt(TextIO(data),
missing_values={0:-9, 'B':-99, 'C':-999j},
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
@@ -1203,18 +1251,18 @@ M 33 21.99
names="a,b,c",
missing_values={0:"N/A", 'b':" ", 2:"???"},
filling_values={0:0, 'b':0, 2:-999})
- test = np.genfromtxt(StringIO(data), **kwargs)
+ test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
- test = np.genfromtxt(StringIO(data), usecols=(0, -1), **kwargs)
+ test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
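The user_filling_values test above pairs per-column missing_values with
per-column filling_values, keyed either by column index or by field name. A
hedged sketch of how genfromtxt resolves them (the input line below is an
illustrative guess consistent with the visible control array, not quoted from
the test):

    import numpy as np
    from io import BytesIO

    data = b"N/A, 2, 3\n4, ,???"   # assumed input; 'N/A', ' ' and '???' are missing
    test = np.genfromtxt(BytesIO(data), delimiter=",", dtype=int, names="a,b,c",
                         missing_values={0: "N/A", 'b': " ", 2: "???"},
                         filling_values={0: 0, 'b': 0, 2: -999})
    print(test)  # [(0, 2, 3) (4, 0, -999)]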
def test_withmissing_float(self):
- data = StringIO('A,B\n0,1.5\n2,-999.00')
+ data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
@@ -1226,7 +1274,7 @@ M 33 21.99
def test_with_masked_column_uniform(self):
"Test masked column"
- data = StringIO('1 2 3\n4 5 6\n')
+ data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
@@ -1234,7 +1282,7 @@ M 33 21.99
def test_with_masked_column_various(self):
"Test masked column"
- data = StringIO('True 2 3\nFalse 5 6\n')
+ data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
@@ -1249,7 +1297,7 @@ M 33 21.99
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
- mdata = StringIO("\n".join(data))
+ mdata = TextIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the callable in
@@ -1272,7 +1320,7 @@ M 33 21.99
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
- mdata = StringIO("\n".join(data))
+ mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the callable in
@@ -1296,7 +1344,7 @@ M 33 21.99
def test_inconsistent_dtype(self):
"Test inconsistent dtype"
data = ["1, 1, 1, 1, -1.1"] * 50
- mdata = StringIO("\n".join(data))
+ mdata = TextIO("\n".join(data))
converters = {4: lambda x:"(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
@@ -1307,7 +1355,7 @@ M 33 21.99
def test_default_field_format(self):
"Test default format"
data = "0, 1, 2.3\n4, 5, 6.7"
- mtest = np.ndfromtxt(StringIO(data),
+ mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
@@ -1316,7 +1364,7 @@ M 33 21.99
def test_single_dtype_wo_names(self):
"Test single dtype w/o names"
data = "0, 1, 2.3\n4, 5, 6.7"
- mtest = np.ndfromtxt(StringIO(data),
+ mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
@@ -1324,7 +1372,7 @@ M 33 21.99
def test_single_dtype_w_explicit_names(self):
"Test single dtype w explicit names"
data = "0, 1, 2.3\n4, 5, 6.7"
- mtest = np.ndfromtxt(StringIO(data),
+ mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
@@ -1333,7 +1381,7 @@ M 33 21.99
def test_single_dtype_w_implicit_names(self):
"Test single dtype w implicit names"
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
- mtest = np.ndfromtxt(StringIO(data),
+ mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
@@ -1342,7 +1390,7 @@ M 33 21.99
def test_easy_structured_dtype(self):
"Test easy structured dtype"
data = "0, 1, 2.3\n4, 5, 6.7"
- mtest = np.ndfromtxt(StringIO(data), delimiter=",",
+ mtest = np.ndfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
@@ -1352,11 +1400,11 @@ M 33 21.99
"Test autostrip"
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
- mtest = np.ndfromtxt(StringIO(data), **kwargs)
+ mtest = np.ndfromtxt(TextIO(data), **kwargs)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
- mtest = np.ndfromtxt(StringIO(data), autostrip=True, **kwargs)
+ mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
@@ -1365,20 +1413,20 @@ M 33 21.99
"Test the 'replace_space' option"
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
- test = np.genfromtxt(StringIO(txt),
+ test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
- test = np.genfromtxt(StringIO(txt),
+ test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
- test = np.genfromtxt(StringIO(txt),
+ test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
@@ -1392,17 +1440,17 @@ M 33 21.99
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
- test = np.ndfromtxt(StringIO(data), dtype=None, **kwargs)
+ test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
- test = np.ndfromtxt(StringIO(data), **kwargs)
+ test = np.ndfromtxt(TextIO(data), **kwargs)
def test_names_auto_completion(self):
"Make sure that names are properly completed"
data = "1 2 3\n 4 5 6"
- test = np.genfromtxt(StringIO(data),
+ test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
@@ -1412,17 +1460,17 @@ M 33 21.99
"Make sure we pick up the right names w/ usecols"
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
- test = np.genfromtxt(StringIO(data),
+ test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
- test = np.genfromtxt(StringIO(data),
+ test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
- test = np.genfromtxt(StringIO(data),
+ test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
@@ -1433,35 +1481,35 @@ M 33 21.99
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
- test = np.ndfromtxt(StringIO(data), **kwargs)
+ test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
- test = np.ndfromtxt(StringIO(data), **kwargs)
+ test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
"Test missing values"
- data = "1, 2, 3\n1, , 5\n0, 6, \n"
+ data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values= -999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
- test = np.ndfromtxt(StringIO(data), **kwargs)
+ test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
- test = np.genfromtxt(StringIO("test1,testNonetherestofthedata"),
+ test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
- assert_equal(test[1], asbytes('testNonetherestofthedata'))
- test = np.genfromtxt(StringIO("test1, testNonetherestofthedata"),
+ assert_equal(test[1], b'testNonetherestofthedata')
+ test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
- assert_equal(test[1], asbytes(' testNonetherestofthedata'))
+ assert_equal(test[1], b' testNonetherestofthedata')
def test_recfromtxt(self):
#
- data = StringIO('A,B\n0,1\n2,3')
+ data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
@@ -1469,7 +1517,7 @@ M 33 21.99
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
- data = StringIO('A,B\n0,1\n2,N/A')
+ data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
@@ -1480,7 +1528,7 @@ M 33 21.99
def test_recfromcsv(self):
#
- data = StringIO('A,B\n0,1\n2,3')
+ data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
@@ -1488,7 +1536,7 @@ M 33 21.99
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
- data = StringIO('A,B\n0,1\n2,N/A')
+ data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
@@ -1497,7 +1545,7 @@ M 33 21.99
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
- data = StringIO('A,B\n0,1\n2,3')
+ data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
@@ -1526,6 +1574,7 @@ M 33 21.99
os.unlink(name)
def test_gft_using_generator(self):
+        # genfromtxt doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
@@ -1537,7 +1586,7 @@ M 33 21.99
def test_gzip_load():
a = np.random.random((5, 5))
- s = StringIO()
+ s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
@@ -1554,9 +1603,9 @@ def test_gzip_loadtxt():
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
# which is then read from by the loadtxt function
- s = StringIO()
+ s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
- g.write(asbytes('1 2 3\n'))
+ g.write(b'1 2 3\n')
g.close()
s.seek(0)
@@ -1570,9 +1619,9 @@ def test_gzip_loadtxt():
os.unlink(name)
def test_gzip_loadtxt_from_string():
- s = StringIO()
+ s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
- f.write(asbytes('1 2 3\n'))
+ f.write(b'1 2 3\n')
f.close()
s.seek(0)
@@ -1580,7 +1629,7 @@ def test_gzip_loadtxt_from_string():
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
- s = StringIO()
+ s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
@@ -1594,7 +1643,7 @@ def test_npzfile_dict():
assert_('x' in z.keys())
assert_('y' in z.keys())
- for f, a in z.iteritems():
+ for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
@@ -1603,13 +1652,13 @@ def test_npzfile_dict():
for f in z:
assert_(f in ['x', 'y'])
- assert_('x' in list(z.iterkeys()))
+ assert_('x' in z.keys())
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
- f = StringIO()
+ f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
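The closing hunks of test_io.py replace the Python-2-only iteritems/iterkeys
calls with the items()/keys() API shared by both Python lines, and StringIO
with BytesIO. A quick roundtrip in the portable style the tests now use:

    import numpy as np
    from io import BytesIO

    s = BytesIO()
    np.savez(s, x=np.zeros((3, 3)), y=np.ones((3, 3)))
    s.seek(0)
    z = np.load(s)
    for name, arr in z.items():   # works on Python 2 and 3 alike
        print(name, arr.shape)    # x (3, 3) / y (3, 3)
    assert 'x' in z.keys()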
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
new file mode 100644
index 000000000..93a5ef855
--- /dev/null
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -0,0 +1,417 @@
+from __future__ import division, absolute_import, print_function
+
+import warnings
+
+import numpy as np
+from numpy.testing import (
+ run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
+ assert_raises
+ )
+from numpy.lib import (
+ nansum, nanmax, nanargmax, nanargmin, nanmin, nanmean, nanvar, nanstd,
+ NanWarning
+ )
+
+
+_ndat = np.array(
+ [[ 0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
+ [ 0.5351, 0.9403, np.nan, 0.2100, 0.4759, 0.2833],
+ [ np.nan, np.nan, np.nan, 0.1042, np.nan, 0.5954],
+ [ 0.161 , np.nan, np.nan, 0.1859, 0.3146, np.nan]]
+ )
+
+# rows of _ndat with nans removed
+_rdat = [
+ np.array([ 0.6244, 0.2692, 0.0116, 0.1170]),
+ np.array([ 0.5351, 0.9403, 0.2100, 0.4759, 0.2833]),
+ np.array([ 0.1042, 0.5954]),
+ np.array([ 0.1610, 0.1859, 0.3146])
+ ]
+
+
+class TestNanFunctions_MinMax(TestCase):
+
+ nanfuncs = [nanmin, nanmax]
+ stdfuncs = [np.min, np.max]
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ for f in self.nanfuncs:
+ f(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for axis in [None, 0, 1]:
+ tgt = rf(mat, axis=axis, keepdims=True)
+ res = nf(mat, axis=axis, keepdims=True)
+ assert_(res.ndim == tgt.ndim)
+
+ def test_out(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ resout = np.zeros(3)
+ tgt = rf(mat, axis=1)
+ res = nf(mat, axis=1, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+ def test_dtype_from_input(self):
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ mat = np.eye(3, dtype=c)
+ tgt = rf(mat, axis=1).dtype.type
+ res = nf(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = rf(mat, axis=None).dtype.type
+ res = nf(mat, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_result_values(self):
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ tgt = [rf(d) for d in _rdat]
+ res = nf(_ndat, axis=1)
+ assert_almost_equal(res, tgt)
+
+ def test_allnans(self):
+ mat = np.array([np.nan]*9).reshape(3, 3)
+ for f in self.nanfuncs:
+ for axis in [None, 0, 1]:
+ assert_(np.isnan(f(mat, axis=axis)).all())
+
+ def test_masked(self):
+ mat = np.ma.fix_invalid(_ndat)
+ msk = mat._mask.copy()
+ for f in [nanmin]:
+ res = f(mat, axis=1)
+ tgt = f(_ndat, axis=1)
+ assert_equal(res, tgt)
+ assert_equal(mat._mask, msk)
+ assert_(not np.isinf(mat).any())
+
+
+class TestNanFunctions_ArgminArgmax(TestCase):
+
+ nanfuncs = [nanargmin, nanargmax]
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ for f in self.nanfuncs:
+ f(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_result_values(self):
+ for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
+ for row in _ndat:
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore')
+ ind = f(row)
+ val = row[ind]
+ # comparing with NaN is tricky as the result
+ # is always false except for NaN != NaN
+ assert_(not np.isnan(val))
+ assert_(not fcmp(val, row).any())
+ assert_(not np.equal(val, row[:ind]).any())
+
+ def test_allnans(self):
+ mat = np.array([np.nan]*9).reshape(3, 3)
+ tgt = np.iinfo(np.intp).min
+ for f in self.nanfuncs:
+ for axis in [None, 0, 1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat, axis=axis)
+ assert_((res == tgt).all())
+ assert_(len(w) == 1)
+ assert_(issubclass(w[0].category, NanWarning))
+
+ def test_empty(self):
+ mat = np.zeros((0,3))
+ for f in self.nanfuncs:
+ for axis in [0, None]:
+ assert_raises(ValueError, f, mat, axis=axis)
+ for axis in [1]:
+ res = f(mat, axis=axis)
+ assert_equal(res, np.zeros(0))
+
+
+class TestNanFunctions_IntTypes(TestCase):
+
+ int_types = (
+ np.int8, np.int16, np.int32, np.int64, np.uint8,
+ np.uint16, np.uint32, np.uint64)
+
+ def setUp(self, *args, **kwargs):
+ self.mat = np.array([127, 39, 93, 87, 46])
+
+ def integer_arrays(self):
+ for dtype in self.int_types:
+ yield self.mat.astype(dtype)
+
+ def test_nanmin(self):
+ min_value = min(self.mat)
+ for mat in self.integer_arrays():
+ assert_equal(nanmin(mat), min_value)
+
+ def test_nanmax(self):
+ max_value = max(self.mat)
+ for mat in self.integer_arrays():
+ assert_equal(nanmax(mat), max_value)
+
+ def test_nanargmin(self):
+ min_arg = np.argmin(self.mat)
+ for mat in self.integer_arrays():
+ assert_equal(nanargmin(mat), min_arg)
+
+ def test_nanargmax(self):
+ max_arg = np.argmax(self.mat)
+ for mat in self.integer_arrays():
+ assert_equal(nanargmax(mat), max_arg)
+
+
+class TestNanFunctions_Sum(TestCase):
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ nansum(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for axis in [None, 0, 1]:
+ tgt = np.sum(mat, axis=axis, keepdims=True)
+ res = nansum(mat, axis=axis, keepdims=True)
+ assert_(res.ndim == tgt.ndim)
+
+ def test_out(self):
+ mat = np.eye(3)
+ resout = np.zeros(3)
+ tgt = np.sum(mat, axis=1)
+ res = nansum(mat, axis=1, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+ def test_dtype_from_dtype(self):
+ mat = np.eye(3)
+ codes = 'efdgFDG'
+ for c in codes:
+ tgt = np.sum(mat, dtype=np.dtype(c), axis=1).dtype.type
+ res = nansum(mat, dtype=np.dtype(c), axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = np.sum(mat, dtype=np.dtype(c), axis=None).dtype.type
+ res = nansum(mat, dtype=np.dtype(c), axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_dtype_from_char(self):
+ mat = np.eye(3)
+ codes = 'efdgFDG'
+ for c in codes:
+ tgt = np.sum(mat, dtype=c, axis=1).dtype.type
+ res = nansum(mat, dtype=c, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = np.sum(mat, dtype=c, axis=None).dtype.type
+ res = nansum(mat, dtype=c, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_dtype_from_input(self):
+ codes = 'efdgFDG'
+ for c in codes:
+ mat = np.eye(3, dtype=c)
+ tgt = np.sum(mat, axis=1).dtype.type
+ res = nansum(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = np.sum(mat, axis=None).dtype.type
+ res = nansum(mat, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_result_values(self):
+ tgt = [np.sum(d) for d in _rdat]
+ res = nansum(_ndat, axis=1)
+ assert_almost_equal(res, tgt)
+
+ def test_allnans(self):
+ # Check for FutureWarning and later change of return from
+ # NaN to zero.
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = nansum([np.nan]*3, axis=None)
+ if np.__version__[:3] < '1.9':
+ assert_(np.isnan(res), 'result is not NaN')
+ assert_(len(w) == 1, 'no warning raised')
+ assert_(issubclass(w[0].category, FutureWarning))
+ else:
+ assert_(res == 0, 'result is not 0')
+ assert_(len(w) == 0, 'warning raised')
+
+ def test_empty(self):
+ mat = np.zeros((0,3))
+ if np.__version__[:3] < '1.9':
+ tgt = [np.nan]*3
+ res = nansum(mat, axis=0)
+ assert_equal(res, tgt)
+ tgt = []
+ res = nansum(mat, axis=1)
+ assert_equal(res, tgt)
+ tgt = np.nan
+ res = nansum(mat, axis=None)
+ assert_equal(res, tgt)
+ else:
+ tgt = [0]*3
+ res = nansum(mat, axis=0)
+ assert_equal(res, tgt)
+ tgt = []
+ res = nansum(mat, axis=1)
+ assert_equal(res, tgt)
+ tgt = 0
+ res = nansum(mat, axis=None)
+ assert_equal(res, tgt)
+
+
+class TestNanFunctions_MeanVarStd(TestCase):
+
+ nanfuncs = [nanmean, nanvar, nanstd]
+ stdfuncs = [np.mean, np.var, np.std]
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ for f in self.nanfuncs:
+ f(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_dtype_error(self):
+ for f in self.nanfuncs:
+ for dtype in [np.bool_, np.int_, np.object]:
+                assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
+
+ def test_out_dtype_error(self):
+ for f in self.nanfuncs:
+ for dtype in [np.bool_, np.int_, np.object]:
+ out = np.empty(_ndat.shape[0], dtype=dtype)
+                assert_raises(TypeError, f, _ndat, axis=1, out=out)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for axis in [None, 0, 1]:
+ tgt = rf(mat, axis=axis, keepdims=True)
+ res = nf(mat, axis=axis, keepdims=True)
+ assert_(res.ndim == tgt.ndim)
+
+ def test_out(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ resout = np.zeros(3)
+ tgt = rf(mat, axis=1)
+ res = nf(mat, axis=1, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+ def test_dtype_from_dtype(self):
+ mat = np.eye(3)
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
+ res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
+ res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_dtype_from_char(self):
+ mat = np.eye(3)
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ tgt = rf(mat, dtype=c, axis=1).dtype.type
+ res = nf(mat, dtype=c, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = rf(mat, dtype=c, axis=None).dtype.type
+ res = nf(mat, dtype=c, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_dtype_from_input(self):
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ mat = np.eye(3, dtype=c)
+ tgt = rf(mat, axis=1).dtype.type
+ res = nf(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = rf(mat, axis=None).dtype.type
+ res = nf(mat, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_ddof(self):
+ nanfuncs = [nanvar, nanstd]
+ stdfuncs = [np.var, np.std]
+ for nf, rf in zip(nanfuncs, stdfuncs):
+ for ddof in [0, 1]:
+ tgt = [rf(d, ddof=ddof) for d in _rdat]
+ res = nf(_ndat, axis=1, ddof=ddof)
+ assert_almost_equal(res, tgt)
+
+ def test_ddof_too_big(self):
+ nanfuncs = [nanvar, nanstd]
+ stdfuncs = [np.var, np.std]
+ dsize = [len(d) for d in _rdat]
+ for nf, rf in zip(nanfuncs, stdfuncs):
+ for ddof in range(5):
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ tgt = [ddof >= d for d in dsize]
+ res = nf(_ndat, axis=1, ddof=ddof)
+ assert_equal(np.isnan(res), tgt)
+ if any(tgt):
+ assert_(len(w) == 1)
+ assert_(issubclass(w[0].category, NanWarning))
+ else:
+ assert_(len(w) == 0)
+
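(Illustrating the convention test_ddof_too_big checks, as a sketch under the behavior it asserts: when the number of valid samples does not exceed ddof, the result is NaN and a NanWarning is emitted:)

    >>> nanvar([1., 2., np.nan], axis=None, ddof=2)   # only 2 valid samples
    nan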
+ def test_result_values(self):
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ tgt = [rf(d) for d in _rdat]
+ res = nf(_ndat, axis=1)
+ assert_almost_equal(res, tgt)
+
+ def test_allnans(self):
+ mat = np.array([np.nan]*9).reshape(3, 3)
+ for f in self.nanfuncs:
+ for axis in [None, 0, 1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_(np.isnan(f(mat, axis=axis)).all())
+ assert_(len(w) == 1)
+ assert_(issubclass(w[0].category, NanWarning))
+
+ def test_empty(self):
+ mat = np.zeros((0,3))
+ for f in self.nanfuncs:
+ for axis in [0, None]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_(np.isnan(f(mat, axis=axis)).all())
+ assert_(len(w) == 1)
+ assert_(issubclass(w[0].category, NanWarning))
+ for axis in [1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_equal(f(mat, axis=axis), np.zeros([]))
+ assert_(len(w) == 0)
+
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 3c69cb93b..fa166b7f1 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -1,4 +1,6 @@
-"""
+from __future__ import division, absolute_import, print_function
+
+'''
>>> p = np.poly1d([1.,2,3])
>>> p
poly1d([ 1., 2., 3.])
@@ -74,8 +76,8 @@ poly1d([ 2.])
>>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1]))
(poly1d([ 1., -1.]), poly1d([ 0.]))
-"""
+'''
from numpy.testing import *
import numpy as np
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index 3d2d1e983..ef22ca413 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
import numpy as np
@@ -541,11 +543,11 @@ class TestStackArrays(TestCase):
class TestJoinBy(TestCase):
def setUp(self):
- self.a = np.array(zip(np.arange(10), np.arange(50, 60),
- np.arange(100, 110)),
+ self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
+ np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
- self.b = np.array(zip(np.arange(5, 15), np.arange(65, 75),
- np.arange(100, 110)),
+ self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
+ np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('d', int)])
#
def test_inner_join(self):
@@ -618,11 +620,11 @@ class TestJoinBy(TestCase):
class TestJoinBy2(TestCase):
@classmethod
def setUp(cls):
- cls.a = np.array(zip(np.arange(10), np.arange(50, 60),
- np.arange(100, 110)),
+ cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
+ np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
- cls.b = np.array(zip(np.arange(10), np.arange(65, 75),
- np.arange(100, 110)),
+ cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
+ np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('d', int)])
def test_no_r1postfix(self):
@@ -658,12 +660,12 @@ class TestJoinBy2(TestCase):
assert_equal(test, control)
def test_two_keys_two_vars(self):
- a = np.array(zip(np.tile([10,11],5),np.repeat(np.arange(5),2),
- np.arange(50, 60), np.arange(10,20)),
+ a = np.array(list(zip(np.tile([10,11],5),np.repeat(np.arange(5),2),
+ np.arange(50, 60), np.arange(10,20))),
dtype=[('k', int), ('a', int), ('b', int),('c',int)])
- b = np.array(zip(np.tile([10,11],5),np.repeat(np.arange(5),2),
- np.arange(65, 75), np.arange(0,10)),
+ b = np.array(list(zip(np.tile([10,11],5),np.repeat(np.arange(5),2),
+ np.arange(65, 75), np.arange(0,10))),
dtype=[('k', int), ('a', int), ('b', int), ('c',int)])
control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index 71400d112..1e9bacdf5 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -1,6 +1,11 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+import numpy as np
from numpy.testing import *
from numpy.testing.utils import _assert_valid_refcount
-import numpy as np
+from numpy.compat import unicode
rlevel = 1
@@ -200,7 +205,11 @@ class TestRegression(TestCase):
def test_loadtxt_fields_subarrays(self):
# For ticket #1936
- from StringIO import StringIO
+ if sys.version_info[0] >= 3:
+ from io import StringIO
+ else:
+ from StringIO import StringIO
+
dt = [("a", 'u1', 2), ("b", 'u1', 2)]
x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
@@ -218,5 +227,14 @@ class TestRegression(TestCase):
data = [((((0,1), (2,3), (4,5)), ((6,7), (8,9), (10,11))),)]
assert_equal(x, np.array(data, dtype=dt))
+ def test_nansum_with_boolean(self):
+ # gh-2978
+ a = np.zeros(2, dtype=np.bool)
+ try:
+ np.nansum(a)
+ except:
+ raise AssertionError()
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 56178e8af..a92ddde83 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
from numpy.lib import *
from numpy.core import *
@@ -313,6 +315,21 @@ class TestTile(TestCase):
assert_equal(large, klarge)
+class TestMayShareMemory(TestCase):
+ def test_basic(self):
+ d = ones((50, 60))
+ d2 = ones((30, 60, 6))
+ self.assertTrue(may_share_memory(d, d))
+ self.assertTrue(may_share_memory(d, d[::-1]))
+ self.assertTrue(may_share_memory(d, d[::2]))
+ self.assertTrue(may_share_memory(d, d[1:, ::-1]))
+
+ self.assertFalse(may_share_memory(d[::-1], d2))
+ self.assertFalse(may_share_memory(d[::2], d2))
+ self.assertFalse(may_share_memory(d[1:, ::-1], d2))
+ self.assertTrue(may_share_memory(d2[1:, ::-1], d2))
+
+
# Utility
def compare_results(res,desired):
for i in range(len(desired)):
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index 814f2d614..f815b247f 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.testing import *
from numpy.lib.stride_tricks import broadcast_arrays
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index 7e7900090..7e590c1db 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -1,6 +1,7 @@
-""" Test functions for matrix module
+"""Test functions for matrix module
"""
+from __future__ import division, absolute_import, print_function
from numpy.testing import *
@@ -190,7 +191,7 @@ class TestHistogram2d(TestCase):
assert_array_equal(H.T, answer)
H = histogram2d(x, y, xedges)[0]
assert_array_equal(H.T, answer)
- H,xedges,yedges = histogram2d(range(10),range(10))
+ H,xedges,yedges = histogram2d(list(range(10)),list(range(10)))
assert_array_equal(H, eye(10,10))
assert_array_equal(xedges, np.linspace(0,9,11))
assert_array_equal(yedges, np.linspace(0,9,11))
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index 0f8927614..8b01a974a 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -1,7 +1,11 @@
-from numpy.testing import *
+from __future__ import division, absolute_import, print_function
+
from numpy.lib import *
from numpy.core import *
-from numpy.compat import asbytes
+from numpy.random import rand
+from numpy.compat import asbytes, long
+from numpy.testing import (
+ TestCase, assert_, assert_equal, assert_array_equal, run_module_suite)
try:
import ctypes
@@ -85,7 +89,7 @@ class TestIsscalar(TestCase):
assert_(not isscalar([3]))
assert_(not isscalar((3,)))
assert_(isscalar(3j))
- assert_(isscalar(10L))
+ assert_(isscalar(long(10)))
assert_(isscalar(4.0))
@@ -161,25 +165,16 @@ class TestIsnan(TestCase):
assert_all(alltrue(res,axis=0))
def test_posinf(self):
- olderr = seterr(divide='ignore')
- try:
+ with errstate(divide='ignore'):
assert_all(isnan(array((1.,))/0.) == 0)
- finally:
- seterr(**olderr)
def test_neginf(self):
- olderr = seterr(divide='ignore')
- try:
+ with errstate(divide='ignore'):
assert_all(isnan(array((-1.,))/0.) == 0)
- finally:
- seterr(**olderr)
def test_ind(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
assert_all(isnan(array((0.,))/0.) == 1)
- finally:
- seterr(**olderr)
#def test_qnan(self): log(-1) return pi*j now
# assert_all(isnan(log(-1.)) == 1)
@@ -191,11 +186,8 @@ class TestIsnan(TestCase):
assert_all(isnan(1+1j) == 0)
def test_complex1(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
assert_all(isnan(array(0+0j)/0.) == 1)
- finally:
- seterr(**olderr)
class TestIsfinite(TestCase):
@@ -206,25 +198,16 @@ class TestIsfinite(TestCase):
assert_all(alltrue(res,axis=0))
def test_posinf(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
assert_all(isfinite(array((1.,))/0.) == 0)
- finally:
- seterr(**olderr)
def test_neginf(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
assert_all(isfinite(array((-1.,))/0.) == 0)
- finally:
- seterr(**olderr)
def test_ind(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
assert_all(isfinite(array((0.,))/0.) == 0)
- finally:
- seterr(**olderr)
#def test_qnan(self):
# assert_all(isfinite(log(-1.)) == 0)
@@ -236,11 +219,8 @@ class TestIsfinite(TestCase):
assert_all(isfinite(1+1j) == 1)
def test_complex1(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
assert_all(isfinite(array(1+1j)/0.) == 0)
- finally:
- seterr(**olderr)
class TestIsinf(TestCase):
@@ -251,39 +231,24 @@ class TestIsinf(TestCase):
assert_all(alltrue(res,axis=0))
def test_posinf(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
assert_all(isinf(array((1.,))/0.) == 1)
- finally:
- seterr(**olderr)
def test_posinf_scalar(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
assert_all(isinf(array(1.,)/0.) == 1)
- finally:
- seterr(**olderr)
def test_neginf(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
assert_all(isinf(array((-1.,))/0.) == 1)
- finally:
- seterr(**olderr)
def test_neginf_scalar(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
assert_all(isinf(array(-1.)/0.) == 1)
- finally:
- seterr(**olderr)
def test_ind(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
assert_all(isinf(array((0.,))/0.) == 0)
- finally:
- seterr(**olderr)
#def test_qnan(self):
# assert_all(isinf(log(-1.)) == 0)
@@ -293,23 +258,18 @@ class TestIsinf(TestCase):
class TestIsposinf(TestCase):
def test_generic(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
vals = isposinf(array((-1.,0,1))/0.)
- finally:
- seterr(**olderr)
assert_(vals[0] == 0)
assert_(vals[1] == 0)
assert_(vals[2] == 1)
class TestIsneginf(TestCase):
+
def test_generic(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
vals = isneginf(array((-1.,0,1))/0.)
- finally:
- seterr(**olderr)
assert_(vals[0] == 1)
assert_(vals[1] == 0)
assert_(vals[2] == 0)
@@ -318,11 +278,8 @@ class TestIsneginf(TestCase):
class TestNanToNum(TestCase):
def test_generic(self):
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
vals = nan_to_num(array((-1.,0,1))/0.)
- finally:
- seterr(**olderr)
assert_all(vals[0] < -1e10) and assert_all(isfinite(vals[0]))
assert_(vals[1] == 0)
assert_all(vals[2] > 1e10) and assert_all(isfinite(vals[2]))
@@ -336,23 +293,17 @@ class TestNanToNum(TestCase):
assert_all(vals == 1+1j)
def test_complex_bad(self):
- v = 1+1j
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
+ v = 1 + 1j
v += array(0+1.j)/0.
- finally:
- seterr(**olderr)
vals = nan_to_num(v)
# !! This is actually (unexpectedly) zero
assert_all(isfinite(vals))
def test_complex_bad2(self):
- v = 1+1j
- olderr = seterr(divide='ignore', invalid='ignore')
- try:
+ with errstate(divide='ignore', invalid='ignore'):
+ v = 1 + 1j
v += array(-1+1.j)/0.
- finally:
- seterr(**olderr)
vals = nan_to_num(v)
assert_all(isfinite(vals))
#assert_all(vals.imag > 1e10) and assert_all(isfinite(vals))
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 29b47f257..50f3229e8 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
import numpy.core as nx
import numpy.lib.ufunclike as ufl
diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py
index 80f90f04b..4062d35b5 100644
--- a/numpy/lib/tests/test_utils.py
+++ b/numpy/lib/tests/test_utils.py
@@ -1,8 +1,14 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
from numpy.testing import *
import numpy.lib.utils as utils
from numpy.lib import deprecate
-from StringIO import StringIO
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
def test_lookfor():
out = StringIO()
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index c1f75c630..a39f60220 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -1,6 +1,7 @@
""" Basic functions for manipulating 2d arrays
"""
+from __future__ import division, absolute_import, print_function
__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
'tril','vander','histogram2d','mask_indices',
@@ -521,17 +522,19 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
Parameters
----------
- x : array_like, shape(N,)
- A sequence of values to be histogrammed along the first dimension.
- y : array_like, shape(M,)
- A sequence of values to be histogrammed along the second dimension.
+ x : array_like, shape (N,)
+ An array containing the x coordinates of the points to be histogrammed.
+ y : array_like, shape (N,)
+ An array containing the y coordinates of the points to be histogrammed.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension (nx, ny = bins).
- * If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins).
- * If [array, array], the bin edges in each dimension (x_edges, y_edges = bins).
+ * If array_like, the bin edges for the two dimensions
+ (x_edges=y_edges=bins).
+ * If [array, array], the bin edges in each dimension
+ (x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
@@ -560,8 +563,8 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
See Also
--------
- histogram: 1D histogram
- histogramdd: Multidimensional histogram
+ histogram : 1D histogram
+ histogramdd : Multidimensional histogram
Notes
-----
@@ -572,7 +575,7 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
\\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1
where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_i`
- the area of bin `{i,j}`.
+ the area of bin ``{i,j}``.
Please note that the histogram does not follow the Cartesian convention
    where `x` values are on the abscissa and `y` values on the ordinate axis.
@@ -582,19 +585,58 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
Examples
--------
- >>> x, y = np.random.randn(2, 100)
- >>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
- >>> H.shape, xedges.shape, yedges.shape
- ((5, 8), (6,), (9,))
+ >>> import matplotlib as mpl
+ >>> import matplotlib.pyplot as plt
- We can now use the Matplotlib to visualize this 2-dimensional histogram:
+ Construct a 2D-histogram with variable bin width. First define the bin
+ edges:
- >>> extent = [yedges[0], yedges[-1], xedges[-1], xedges[0]]
- >>> import matplotlib.pyplot as plt
- >>> plt.imshow(H, extent=extent, interpolation='nearest')
- <matplotlib.image.AxesImage object at ...>
- >>> plt.colorbar()
- <matplotlib.colorbar.Colorbar instance at ...>
+ >>> xedges = [0, 1, 1.5, 3, 5]
+ >>> yedges = [0, 2, 3, 4, 6]
+
+ Next we create a histogram H with random bin content:
+
+ >>> x = np.random.normal(3, 1, 100)
+ >>> y = np.random.normal(1, 1, 100)
+ >>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
+
+    Or we fill the histogram H with a predetermined bin content:
+
+ >>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
+    >>> print(H[::-1])  # This shows the bin content in the order as plotted
+ [[ 13. 14. 15. 16.]
+ [ 9. 10. 11. 12.]
+ [ 5. 6. 7. 8.]
+ [ 1. 2. 3. 4.]]
+
+ Imshow can only do an equidistant representation of bins:
+
+ >>> fig = plt.figure(figsize=(7, 3))
+ >>> ax = fig.add_subplot(131)
+ >>> ax.set_title('imshow:\nequidistant')
+    >>> im = plt.imshow(H, interpolation='nearest', origin='lower',
+    ...                 extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
+
+    pcolormesh can display exact bin edges:
+
+ >>> ax = fig.add_subplot(132)
+ >>> ax.set_title('pcolormesh:\nexact bin edges')
+ >>> X, Y = np.meshgrid(xedges, yedges)
+ >>> ax.pcolormesh(X, Y, H)
+ >>> ax.set_aspect('equal')
+
+ NonUniformImage displays exact bin edges with interpolation:
+
+ >>> ax = fig.add_subplot(133)
+ >>> ax.set_title('NonUniformImage:\ninterpolated')
+ >>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
+ >>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
+ >>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
+ >>> im.set_data(xcenters, ycenters, H)
+ >>> ax.images.append(im)
+ >>> ax.set_xlim(xedges[0], xedges[-1])
+ >>> ax.set_ylim(yedges[0], yedges[-1])
+ >>> ax.set_aspect('equal')
>>> plt.show()
"""
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index e22d63156..da9e13847 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -1,4 +1,7 @@
-## Automatically adapted for numpy Sep 19, 2005 by convertcode.py
+"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py
+
+"""
+from __future__ import division, absolute_import, print_function
__all__ = ['iscomplexobj','isrealobj','imag','iscomplex',
'isreal','nan_to_num','real','real_if_close',
@@ -8,7 +11,7 @@ __all__ = ['iscomplexobj','isrealobj','imag','iscomplex',
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, asanyarray, array, isnan, \
obj2sctype, zeros
-from ufunclike import isneginf, isposinf
+from .ufunclike import isneginf, isposinf
_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
@@ -55,7 +58,7 @@ def mintypecode(typechars,typeset='GDFgdf',default='d'):
'G'
"""
- typecodes = [(type(t) is type('') and t) or asarray(t).dtype.char\
+ typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char\
for t in typechars]
intersection = [t for t in typecodes if t in typeset]
if not intersection:
@@ -448,7 +451,8 @@ def asscalar(a):
Returns
-------
out : scalar
- Scalar representation of `a`. The input data type is preserved.
+ Scalar representation of `a`. The output data type is the same type
+ returned by the input's `item` method.
Examples
--------
diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py
index b51365f41..e91f64d0e 100644
--- a/numpy/lib/ufunclike.py
+++ b/numpy/lib/ufunclike.py
@@ -1,7 +1,10 @@
"""
Module of functions that are like ufuncs in acting on arrays and optionally
storing results in an output array.
+
"""
+from __future__ import division, absolute_import, print_function
+
__all__ = ['fix', 'isneginf', 'isposinf']
import numpy.core.numeric as nx
diff --git a/numpy/lib/user_array.py b/numpy/lib/user_array.py
index 01b0b1c19..d675d3702 100644
--- a/numpy/lib/user_array.py
+++ b/numpy/lib/user_array.py
@@ -2,12 +2,17 @@
Standard container-class for easy multiple-inheritance.
Try to inherit from the ndarray instead of using this class as this is not
complete.
+
"""
+from __future__ import division, absolute_import, print_function
-from numpy.core import array, asarray, absolute, add, subtract, multiply, \
- divide, remainder, power, left_shift, right_shift, bitwise_and, \
- bitwise_or, bitwise_xor, invert, less, less_equal, not_equal, equal, \
- greater, greater_equal, shape, reshape, arange, sin, sqrt, transpose
+from numpy.core import (
+ array, asarray, absolute, add, subtract, multiply, divide,
+ remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
+ bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
+ greater_equal, shape, reshape, arange, sin, sqrt, transpose
+ )
+from numpy.compat import long
class container(object):
def __init__(self, data, dtype=None, copy=True):
@@ -203,15 +208,15 @@ if __name__ == '__main__':
ua=container(temp)
# new object created begin test
- print dir(ua)
- print shape(ua),ua.shape # I have changed Numeric.py
+ print(dir(ua))
+ print(shape(ua),ua.shape) # I have changed Numeric.py
ua_small=ua[:3,:5]
- print ua_small
+ print(ua_small)
ua_small[0,0]=10 # this did not change ua[0,0], which is not normal behavior
- print ua_small[0,0],ua[0,0]
- print sin(ua_small)/3.*6.+sqrt(ua_small**2)
- print less(ua_small,103),type(less(ua_small,103))
- print type(ua_small*reshape(arange(15),shape(ua_small)))
- print reshape(ua_small,(5,3))
- print transpose(ua_small)
+ print(ua_small[0,0],ua[0,0])
+ print(sin(ua_small)/3.*6.+sqrt(ua_small**2))
+ print(less(ua_small,103),type(less(ua_small,103)))
+ print(type(ua_small*reshape(arange(15),shape(ua_small))))
+ print(reshape(ua_small,(5,3)))
+ print(transpose(ua_small))
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index dc6c16767..f54946722 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import sys
import types
@@ -9,7 +11,7 @@ from numpy.core import product, ndarray, ufunc
__all__ = ['issubclass_', 'issubsctype', 'issubdtype',
'deprecate', 'deprecate_with_doc', 'get_numarray_include',
'get_include', 'info', 'source', 'who', 'lookfor', 'byte_bounds',
- 'may_share_memory', 'safe_eval']
+ 'safe_eval']
def get_include():
"""
@@ -82,17 +84,10 @@ def get_numarray_include(type=None):
return include_dirs + [get_include()]
-if sys.version_info < (2, 4):
- # Can't set __name__ in 2.3
- import new
- def _set_function_name(func, name):
- func = new.function(func.func_code, func.func_globals,
- name, func.func_defaults, func.func_closure)
- return func
-else:
- def _set_function_name(func, name):
- func.__name__ = name
- return func
+def _set_function_name(func, name):
+ func.__name__ = name
+ return func
+
class _Deprecate(object):
"""
@@ -122,7 +117,7 @@ class _Deprecate(object):
import warnings
if old_name is None:
try:
- old_name = func.func_name
+ old_name = func.__name__
except AttributeError:
old_name = func.__name__
if new_name is None:
@@ -255,12 +250,11 @@ def byte_bounds(a):
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
- nd_a = len(ashape)
bytes_a = int(ai['typestr'][2:])
a_low = a_high = a_data
if astrides is None: # contiguous case
- a_high += product(ashape, dtype=int)*bytes_a
+ a_high += a.size * bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
@@ -271,36 +265,6 @@ def byte_bounds(a):
return a_low, a_high
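(A quick sketch of the quantity computed above for the contiguous case: the byte span of a contiguous array is simply size * itemsize:)

    >>> import numpy as np
    >>> from numpy.lib.utils import byte_bounds
    >>> a = np.arange(6, dtype=np.int16)
    >>> low, high = byte_bounds(a)
    >>> high - low == a.size * a.itemsize
    True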
-def may_share_memory(a, b):
- """
- Determine if two arrays can share memory
-
- The memory-bounds of a and b are computed. If they overlap then
- this function returns True. Otherwise, it returns False.
-
- A return of True does not necessarily mean that the two arrays
- share any element. It just means that they *might*.
-
- Parameters
- ----------
- a, b : ndarray
-
- Returns
- -------
- out : bool
-
- Examples
- --------
- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
- False
-
- """
- a_low, a_high = byte_bounds(a)
- b_low, b_high = byte_bounds(b)
- if b_low >= a_high or a_low >= b_high:
- return False
- return True
-
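(The semantics documented by the removed Python implementation still hold for its replacement: a True result only means the byte bounds overlap, i.e. the arrays *might* share elements. A short sketch:)

    >>> import numpy as np
    >>> a = np.arange(12)
    >>> np.may_share_memory(a, a[::2])          # a view over the same buffer
    True
    >>> np.may_share_memory(a, np.arange(12))   # separate allocation
    False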
#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------
@@ -391,15 +355,15 @@ def who(vardict=None):
sp2 = max(10,maxshape)
sp3 = max(10,maxbyte)
prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
- print prval + "\n" + "="*(len(prval)+5) + "\n"
+ print(prval + "\n" + "="*(len(prval)+5) + "\n")
for k in range(len(sta)):
val = sta[k]
- print "%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
+ print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
val[1], ' '*(sp2-len(val[1])+5),
val[2], ' '*(sp3-len(val[2])+5),
- val[3])
- print "\nUpper bound on total bytes = %d" % totalbytes
+ val[3]))
+ print("\nUpper bound on total bytes = %d" % totalbytes)
return
#-----------------------------------------------------------------------------
@@ -440,7 +404,7 @@ def _makenamedict(module='numpy'):
thedict = {module.__name__:module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
- while 1:
+ while True:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
@@ -526,22 +490,22 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
- print >> output, "\n *** Repeat reference found in %s *** " % namestr
+ print("\n *** Repeat reference found in %s *** " % namestr, file=output)
else:
objlist.append(id(obj))
- print >> output, " *** Found in %s ***" % namestr
+ print(" *** Found in %s ***" % namestr, file=output)
info(obj)
- print >> output, "-"*maxwidth
+ print("-"*maxwidth, file=output)
numfound += 1
except KeyError:
pass
if numfound == 0:
- print >> output, "Help for %s not found." % object
+ print("Help for %s not found." % object, file=output)
else:
- print >> output, "\n *** Total of %d references found. ***" % numfound
+ print("\n *** Total of %d references found. ***" % numfound, file=output)
elif inspect.isfunction(object):
- name = object.func_name
+ name = object.__name__
arguments = inspect.formatargspec(*inspect.getargspec(object))
if len(name+arguments) > maxwidth:
@@ -549,15 +513,15 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
else:
argstr = name + arguments
- print >> output, " " + argstr + "\n"
- print >> output, inspect.getdoc(object)
+ print(" " + argstr + "\n", file=output)
+ print(inspect.getdoc(object), file=output)
elif inspect.isclass(object):
name = object.__name__
arguments = "()"
try:
if hasattr(object, '__init__'):
- arguments = inspect.formatargspec(*inspect.getargspec(object.__init__.im_func))
+ arguments = inspect.formatargspec(*inspect.getargspec(object.__init__.__func__))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
@@ -570,30 +534,30 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
else:
argstr = name + arguments
- print >> output, " " + argstr + "\n"
+ print(" " + argstr + "\n", file=output)
doc1 = inspect.getdoc(object)
if doc1 is None:
if hasattr(object,'__init__'):
- print >> output, inspect.getdoc(object.__init__)
+ print(inspect.getdoc(object.__init__), file=output)
else:
- print >> output, inspect.getdoc(object)
+ print(inspect.getdoc(object), file=output)
methods = pydoc.allmethods(object)
if methods != []:
- print >> output, "\n\nMethods:\n"
+ print("\n\nMethods:\n", file=output)
for meth in methods:
if meth[0] == '_':
continue
thisobj = getattr(object, meth, None)
if thisobj is not None:
methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None")
- print >> output, " %s -- %s" % (meth, methstr)
+ print(" %s -- %s" % (meth, methstr), file=output)
- elif type(object) is types.InstanceType: ## check for __call__ method
- print >> output, "Instance of class: ", object.__class__.__name__
- print >> output
+ elif isinstance(object, types.InstanceType): ## check for __call__ method
+ print("Instance of class: ", object.__class__.__name__, file=output)
+ print(file=output)
if hasattr(object, '__call__'):
- arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.im_func))
+ arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.__func__))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
@@ -610,18 +574,18 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
else:
argstr = name + arguments
- print >> output, " " + argstr + "\n"
+ print(" " + argstr + "\n", file=output)
doc = inspect.getdoc(object.__call__)
if doc is not None:
- print >> output, inspect.getdoc(object.__call__)
- print >> output, inspect.getdoc(object)
+ print(inspect.getdoc(object.__call__), file=output)
+ print(inspect.getdoc(object), file=output)
else:
- print >> output, inspect.getdoc(object)
+ print(inspect.getdoc(object), file=output)
elif inspect.ismethod(object):
name = object.__name__
- arguments = inspect.formatargspec(*inspect.getargspec(object.im_func))
+ arguments = inspect.formatargspec(*inspect.getargspec(object.__func__))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
@@ -634,11 +598,11 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
else:
argstr = name + arguments
- print >> output, " " + argstr + "\n"
- print >> output, inspect.getdoc(object)
+ print(" " + argstr + "\n", file=output)
+ print(inspect.getdoc(object), file=output)
elif hasattr(object, '__doc__'):
- print >> output, inspect.getdoc(object)
+ print(inspect.getdoc(object), file=output)
def source(object, output=sys.stdout):
@@ -682,10 +646,10 @@ def source(object, output=sys.stdout):
# Local import to speed up numpy's import time.
import inspect
try:
- print >> output, "In file: %s\n" % inspect.getsourcefile(object)
- print >> output, inspect.getsource(object)
+ print("In file: %s\n" % inspect.getsourcefile(object), file=output)
+ print(inspect.getsource(object), file=output)
except:
- print >> output, "Not available for this object."
+ print("Not available for this object.", file=output)
# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
@@ -753,7 +717,7 @@ def lookfor(what, module=None, import_modules=True, regenerate=False,
whats = str(what).lower().split()
if not whats: return
- for name, (docstring, kind, index) in cache.iteritems():
+ for name, (docstring, kind, index) in cache.items():
if kind in ('module', 'object'):
# don't show modules or objects
continue
@@ -821,7 +785,7 @@ def lookfor(what, module=None, import_modules=True, regenerate=False,
pager = pydoc.getpager()
pager("\n".join(help_text))
else:
- print "\n".join(help_text)
+ print("\n".join(help_text))
def _lookfor_generate_cache(module, import_modules, regenerate):
"""
@@ -833,7 +797,7 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
- regenerate: bool
+ regenerate : bool
Re-generate the docstring cache
Returns
@@ -846,7 +810,12 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
global _lookfor_caches
# Local import to speed up numpy's import time.
import inspect
- from cStringIO import StringIO
+
+ if sys.version_info[0] >= 3:
+ # In Python3 stderr, stdout are text files.
+ from io import StringIO
+ else:
+ from StringIO import StringIO
if module is None:
module = "numpy"
@@ -1138,27 +1107,23 @@ def safe_eval(source):
"""
# Local imports to speed up numpy's import time.
import warnings
- from numpy.testing.utils import WarningManager
- warn_ctx = WarningManager()
- warn_ctx.__enter__()
- try:
+
+ with warnings.catch_warnings():
# compiler package is deprecated for 3.x, which is already solved here
warnings.simplefilter('ignore', DeprecationWarning)
try:
import compiler
except ImportError:
import ast as compiler
- finally:
- warn_ctx.__exit__()
walker = SafeEval()
try:
ast = compiler.parse(source, mode="eval")
- except SyntaxError, err:
+ except SyntaxError as err:
raise
try:
return walker.visit(ast)
- except SyntaxError, err:
+ except SyntaxError as err:
raise
#-----------------------------------------------------------------------------
diff --git a/numpy/linalg/SConscript b/numpy/linalg/SConscript
deleted file mode 100644
index 78c4d569b..000000000
--- a/numpy/linalg/SConscript
+++ /dev/null
@@ -1,23 +0,0 @@
-# Last Change: Thu Jun 12 06:00 PM 2008 J
-# vim:syntax=python
-from numscons import GetNumpyEnvironment, scons_get_mathlib
-from numscons import CheckF77LAPACK
-from numscons import write_info
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK})
-
-use_lapack = config.CheckLAPACK()
-
-mlib = scons_get_mathlib(env)
-env.AppendUnique(LIBS = mlib)
-
-config.Finish()
-write_info(env)
-
-sources = ['lapack_litemodule.c']
-if not use_lapack:
- sources.extend(['python_xerbla.c', 'zlapack_lite.c', 'dlapack_lite.c',
- 'blas_lite.c', 'dlamch.c', 'f2c_lite.c'])
-env.NumpyPythonExtension('lapack_lite', source = sources)
diff --git a/numpy/linalg/SConstruct b/numpy/linalg/SConstruct
deleted file mode 100644
index a377d8391..000000000
--- a/numpy/linalg/SConstruct
+++ /dev/null
@@ -1,2 +0,0 @@
-from numscons import GetInitEnvironment
-GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript')
diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py
index a74a31950..2513e35c0 100644
--- a/numpy/linalg/__init__.py
+++ b/numpy/linalg/__init__.py
@@ -42,10 +42,12 @@ LinAlgError Indicates a failed linear algebra operation
=============== ==========================================================
"""
+from __future__ import division, absolute_import, print_function
+
# To get sub-modules
-from info import __doc__
+from .info import __doc__
-from linalg import *
+from .linalg import *
from numpy.testing import Tester
test = Tester().test
diff --git a/numpy/linalg/_gufuncs_linalg.py b/numpy/linalg/_gufuncs_linalg.py
new file mode 100644
index 000000000..169d39f86
--- /dev/null
+++ b/numpy/linalg/_gufuncs_linalg.py
@@ -0,0 +1,1454 @@
+"""Linear Algebra functions implemented as gufuncs, so they broadcast.
+
+.. warning:: This module is only for testing, the functionality will be
+ integrated into numpy.linalg proper.
+
+=======================
+ gufuncs_linalg module
+=======================
+
+gufuncs_linalg implements a series of linear algebra functions as gufuncs.
+Most of these functions are already present in numpy.linalg, but as they
+are implemented using gufunc kernels they support broadcasting. Some parts
+that are Python in numpy.linalg are implemented inside C functions, as is
+the iteration used when operating on vectors. This can result in faster
+execution as well.
+
+In addition, there are some ufuncs thrown in that implement fused operations
+over numpy vectors that can result in faster execution on large vectors
+compared to non-fused versions (for example: multiply_add, multiply3).
+
+In fact, gufuncs_linalg is a very thin wrapper of python code that wraps
+the actual kernels (gufuncs). This wrapper was needed in order to provide
+a sane interface for some functions, mostly working around limitations on
+what can be described in a gufunc signature: things like having one dimension
+of a result depend on the minimum of two dimensions of the sources (like
+in svd), or passing a uniform keyword parameter to the whole operation
+(like UPLO on functions over symmetric/hermitian matrices).
+
+The gufunc kernels are in a c module named _umath_linalg, that is imported
+privately in gufuncs_linalg.
+
+==========
+ Contents
+==========
+
+Here is an enumeration of the functions. These are the functions exported by
+the module and should appear in its __all__ attribute. All the functions
+contain a docstring explaining them in detail.
+
+General
+=======
+- inner1d
+- innerwt
+- matrix_multiply
+- quadratic_form
+
+Linear Algebra
+==============
+- det
+- slogdet
+- cholesky
+- eig
+- eigvals
+- eigh
+- eigvalsh
+- solve
+- svd
+- chosolve
+- inv
+- poinv
+
+Fused Operations
+================
+- add3
+- multiply3
+- multiply3_add
+- multiply_add
+- multiply_add2
+- multiply4
+- multiply4_add
+
+================
+ Error Handling
+================
+Unlike the numpy.linalg module, this module does not use exceptions to signal
+errors in the execution of the kernels. As these functions are thought to be
+used in a vectorized way, it didn't seem appropriate to raise exceptions on
+failure of an element. Instead, when an error occurs computing an element, its
+associated result will be set to an invalid value (all NaNs).
+
+Exceptions can occur if the arguments fail to map properly to the underlying
+gufunc (due to signature mismatch, for example).
+
+================================
+ Notes about the implementation
+================================
+Where possible, the wrapper functions map directly into a gufunc implementing
+the computation.
+
+That's not always the case, as due to limitations of the gufunc interface some
+functions cannot be mapped straight into a kernel.
+
+Two cases come to mind:
+- A uniform parameter is needed to configure the way the computation is
+performed (like UPLO in the functions working on symmetric/hermitian matrices)
+- svd, where it was impossible to map the function to a gufunc signature.
+
+In the case of uniform parameters like UPLO, there are two separate entry points
+in the C module that imply either 'U' or 'L'. The wrapper just selects the
+kernel to use by checking the appropriate keyword parameter. This way a
+function interface similar to numpy.linalg can be kept.
+
+In the case of svd, not only were there problems with the support of keyword
+arguments; there was the added problem of the signature system not being able
+to cope with the needs of this function. Just for the singular values a
+signature like (m,n)->(min(m,n)) would be needed. This has been worked around by
+implementing different kernels for the cases where min(m,n) == m and where
+min(m,n) == n. The wrapper code automatically selects the appropriate one.
+
+
+"""
+
+from __future__ import division, absolute_import, print_function
+
+
+__all__ = ['inner1d', 'dotc1d', 'innerwt', 'matrix_multiply', 'det', 'slogdet',
+ 'inv', 'cholesky', 'quadratic_form', 'add3', 'multiply3',
+ 'multiply3_add', 'multiply_add', 'multiply_add2', 'multiply4',
+ 'multiply4_add', 'eig', 'eigvals', 'eigh', 'eigvalsh', 'solve',
+ 'svd', 'chosolve', 'poinv']
+
+import numpy as np
+
+from . import _umath_linalg as _impl
+
+
+def inner1d(a, b, **kwargs):
+ """
+ Compute the dot product of vectors over the inner dimension, with
+ broadcasting.
+
+ Parameters
+ ----------
+ a : (..., N) array
+ Input array
+ b : (..., N) array
+ Input array
+
+ Returns
+ -------
+ inner : (...) array
+ dot product over the inner dimension.
+
+ Notes
+ -----
+ Numpy broadcasting rules apply when matching dimensions.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ For single and double types this is equivalent to dotc1d.
+
+ Maps to Blas functions sdot, ddot, cdotu and zdotu.
+
+ See Also
+ --------
+ dotc1d : dot product conjugating first vector.
+ innerwt : weighted (i.e. triple) inner product.
+
+ Examples
+ --------
+ >>> a = np.arange(1,5).reshape(2,2)
+ >>> b = np.arange(1,8,2).reshape(2,2)
+ >>> res = inner1d(a,b)
+ >>> res.shape
+ (2,)
+    >>> print(res)
+ [ 7. 43.]
+
+ """
+ return _impl.inner1d(a, b, **kwargs)
+
+
+def dotc1d(a, b, **kwargs):
+ """
+ Compute the dot product of vectors over the inner dimension, conjugating
+ the first vector, with broadcasting
+
+ Parameters
+ ----------
+ a : (..., N) array
+ Input array
+ b : (..., N) array
+ Input array
+
+ Returns
+ -------
+ dotc : (...) array
+ dot product conjugating the first vector over the inner
+ dimension.
+
+ Notes
+ -----
+ Numpy broadcasting rules apply when matching dimensions.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ For single and double types this is equivalent to inner1d.
+
+ Maps to Blas functions sdot, ddot, cdotc and zdotc.
+
+ See Also
+ --------
+ inner1d : dot product
+ innerwt : weighted (i.e. triple) inner product.
+
+ Examples
+ --------
+ >>> a = np.arange(1,5).reshape(2,2)
+ >>> b = np.arange(1,8,2).reshape(2,2)
+    >>> res = dotc1d(a,b)
+ >>> res.shape
+ (2,)
+    >>> print(res)
+ [ 7. 43.]
+
+ """
+ return _impl.dotc1d(a, b, **kwargs)
+
+
+def innerwt(a, b, c, **kwargs):
+ """
+ Compute the weighted (i.e. triple) inner product, with
+ broadcasting.
+
+ Parameters
+ ----------
+ a, b, c : (..., N) array
+ Input arrays
+
+ Returns
+ -------
+ inner : (...) array
+ The weighted (i.e. triple) inner product.
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ See Also
+ --------
+ inner1d : inner product.
+ dotc1d : dot product conjugating first vector.
+
+ Examples
+ --------
+ >>> a = np.arange(1,5).reshape(2,2)
+ >>> b = np.arange(1,8,2).reshape(2,2)
+ >>> c = np.arange(0.25,1.20,0.25).reshape(2,2)
+ >>> res = innerwt(a,b,c)
+ >>> res.shape
+ (2,)
+ >>> res
+ array([ 3.25, 39.25])
+
+ """
+ return _impl.innerwt(a, b, c, **kwargs)
+
+
+def matrix_multiply(a,b,**kwargs):
+ """
+ Compute matrix multiplication, with broadcasting
+
+ Parameters
+ ----------
+ a : (..., M, N) array
+ Input array.
+ b : (..., N, P) array
+ Input array.
+
+ Returns
+ -------
+    r : (..., M, P) array
+        matrix multiplication of a and b over any number of outer
+        dimensions
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Matrix multiplication is computed using BLAS _gemm functions.
+
+ Implemented for single, double, csingle and cdouble. Numpy conversion
+ rules apply.
+
+ Examples
+ --------
+ >>> a = np.arange(1,17).reshape(2,2,4)
+ >>> b = np.arange(1,25).reshape(2,4,3)
+ >>> res = matrix_multiply(a,b)
+ >>> res.shape
+ (2, 2, 3)
+ >>> res
+ array([[[ 70., 80., 90.],
+ [ 158., 184., 210.]],
+ <BLANKLINE>
+ [[ 750., 792., 834.],
+ [ 1030., 1088., 1146.]]])
+
+ """
+ return _impl.matrix_multiply(a,b,**kwargs)
+
+
+def det(a, **kwargs):
+ """
+ Compute the determinant of arrays, with broadcasting.
+
+ Parameters
+ ----------
+    a : (..., M, M) array
+ Input array. Its inner dimensions must be those of a square 2-D array.
+
+ Returns
+ -------
+    det : (...) array
+ Determinants of `a`
+
+ See Also
+ --------
+ slogdet : Another representation for the determinant, more suitable
+ for large matrices where underflow/overflow may occur
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ The determinants are computed via LU factorization using the LAPACK
+ routine _getrf.
+
+ Implemented for single, double, csingle and cdouble. Numpy conversion
+ rules apply.
+
+ Examples
+ --------
+ The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
+
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> np.allclose(-2.0, det(a))
+ True
+
+ >>> a = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]] ])
+ >>> np.allclose(-2.0, det(a))
+ True
+
+ """
+ return _impl.det(a, **kwargs)
+
+
+def slogdet(a, **kwargs):
+ """
+ Compute the sign and (natural) logarithm of the determinant of an array,
+ with broadcasting.
+
+ If an array has a very small or very large determinant, then a call to
+ `det` may overflow or underflow. This routine is more robust against such
+ issues, because it computes the logarithm of the determinant rather than
+ the determinant itself
+
+ Parameters
+ ----------
+ a : (..., M, M) array
+ Input array. Its inner dimensions must be those of a square 2-D array.
+
+ Returns
+ -------
+ sign : (...) array
+ An array of numbers representing the sign of the determinants. For real
+ matrices, this is 1, 0, or -1. For complex matrices, this is a complex
+ number with absolute value 1 (i.e., it is on the unit circle), or else
+ 0.
+ logdet : (...) array
+ The natural log of the absolute value of the determinant. This is always
+ a real type.
+
+ If the determinant is zero, then `sign` will be 0 and `logdet` will be -Inf.
+ In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
+
+ See Also
+ --------
+ det
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ The determinants are computed via LU factorization using the LAPACK
+ routine _getrf.
+
+ Implemented for types single, double, csingle and cdouble. Numpy conversion
+ rules apply. For complex versions `logdet` will be of the associated real
+ type (single for csingle, double for cdouble).
+
+ Examples
+ --------
+ The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
+
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> (sign, logdet) = slogdet(a)
+ >>> sign.shape
+ ()
+ >>> logdet.shape
+ ()
+ >>> np.allclose(-2.0, sign * np.exp(logdet))
+ True
+
+ >>> a = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]] ])
+ >>> (sign, logdet) = slogdet(a)
+ >>> sign.shape
+ (2,)
+ >>> logdet.shape
+ (2,)
+ >>> np.allclose(-2.0, sign * np.exp(logdet))
+ True
+
+ """
+ return _impl.slogdet(a, **kwargs)
+
+
+def inv(a, **kwargs):
+ """
+ Compute the (multiplicative) inverse of matrices, with broadcasting.
+
+ Given a square matrix `a`, return the matrix `ainv` satisfying
+ ``matrix_multiply(a, ainv) = matrix_multiply(ainv, a) = Identity matrix``
+
+ Parameters
+ ----------
+ a : (..., M, M) array
+ Matrices to be inverted
+
+ Returns
+ -------
+ ainv : (..., M, M) array
+ (Multiplicative) inverse of the `a` matrices.
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy conversion
+ rules apply.
+
+    Singular matrices, which are not invertible, result in an array of NaNs.
+
+ See Also
+ --------
+ poinv : compute the multiplicative inverse of hermitian/symmetric matrices,
+ using cholesky decomposition.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> ainv = inv(a)
+ >>> np.allclose(matrix_multiply(a, ainv), np.eye(2))
+ True
+ >>> np.allclose(matrix_multiply(ainv, a), np.eye(2))
+ True
+
+ """
+ return _impl.inv(a, **kwargs)
+
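(A sketch of the NaN-on-failure convention described above; assuming, as the docstring states, that the kernel flags a singular input instead of raising:)

    >>> m = np.zeros((2, 2))    # singular, hence not invertible
    >>> res = inv(m)            # no exception is raised
    >>> np.isnan(res).all()
    True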
+
+def cholesky(a, UPLO='L', **kwargs):
+ """
+ Compute the cholesky decomposition of `a`, with broadcasting
+
+ The Cholesky decomposition (or Cholesky triangle) is a decomposition of a
+ Hermitian, positive-definite matrix into the product of a lower triangular
+ matrix and its conjugate transpose.
+
+ A = LL*
+
+    where L* is the conjugate transpose of L.
+
+ Parameters
+ ----------
+ a : (..., M, M) array
+        Matrices for which to compute the cholesky decomposition
+
+ Returns
+ -------
+ l : (..., M, M) array
+        Lower triangular matrices with strictly positive diagonal entries
+        such that a = ll* for all outer dimensions
+
+ See Also
+ --------
+ chosolve : solve a system using cholesky decomposition
+ poinv : compute the inverse of a matrix using cholesky decomposition
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy conversion
+ rules apply.
+
+ Decomposition is performed using LAPACK routine _potrf.
+
+ For elements where the LAPACK routine fails, the result will be set to NaNs.
+
+ If an element of the source array is not a positive-definite matrix the
+ result for that element is undefined.
+
+ Examples
+ --------
+ >>> A = np.array([[1,-2j],[2j,5]])
+ >>> A
+ array([[ 1.+0.j, 0.-2.j],
+ [ 0.+2.j, 5.+0.j]])
+ >>> L = cholesky(A)
+ >>> L
+ array([[ 1.+0.j, 0.+0.j],
+ [ 0.+2.j, 1.+0.j]])
+
+ """
+ if 'L' == UPLO:
+ gufunc = _impl.cholesky_lo
+ else:
+ gufunc = _impl.cholesky_up
+
+ return gufunc(a, **kwargs)
+
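(A sketch of the broadcasting behavior over a stack of matrices, with values chosen so the factors are easy to verify by hand:)

    >>> a = np.array([np.eye(2), 4.*np.eye(2)])    # two SPD matrices, shape (2, 2, 2)
    >>> L = cholesky(a)                            # broadcasts over the leading axis
    >>> np.allclose(matrix_multiply(L, L.transpose(0, 2, 1)), a)
    True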
+
+def eig(a, **kwargs):
+ """
+ Compute the eigenvalues and right eigenvectors of square arrays,
+ with broadcasting
+
+ Parameters
+ ----------
+ a : (..., M, M) array
+ Matrices for which the eigenvalues and right eigenvectors will
+ be computed
+
+ Returns
+ -------
+ w : (..., M) array
+ The eigenvalues, each repeated according to its multiplicity.
+ The eigenvalues are not necessarily ordered. The resulting
+        array will always be of complex type. When `a` is real
+        the resulting eigenvalues will be real (0 imaginary part) or
+        occur in conjugate pairs.
+
+ v : (..., M, M) array
+ The normalized (unit "length") eigenvectors, such that the
+ column ``v[:,i]`` is the eigenvector corresponding to the
+ eigenvalue ``w[i]``.
+
+ See Also
+ --------
+ eigvals : eigenvalues of general arrays.
+ eigh : eigenvalues and right eigenvectors of symmetric/hermitian
+ arrays.
+ eigvalsh : eigenvalues of symmetric/hermitian arrays.
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ Eigenvalues and eigenvectors for single and double versions will
+ always be typed csingle and cdouble, even if all the results are
+ real (imaginary part will be 0).
+
+ This is implemented using the _geev LAPACK routines which compute
+ the eigenvalues and eigenvectors of general square arrays.
+
+ For elements where the LAPACK routine fails, the result will be set
+ to NaNs.
+
+ Examples
+ --------
+ First, a utility function to check if eigvals/eigvectors are correct.
+ This checks the definition of eigenvectors. For each eigenvector v
+ with associated eigenvalue w of a matrix M the following equality must
+ hold: Mv == wv
+
+ >>> def check_eigen(M, w, v):
+ ... '''vectorial check of Mv==wv'''
+ ... lhs = matrix_multiply(M, v)
+ ... rhs = w*v
+ ... return np.allclose(lhs, rhs)
+
+ (Almost) Trivial example with real e-values and e-vectors. Note
+    the complex types of the results.
+
+ >>> M = np.diag((1,2,3)).astype(float)
+ >>> w, v = eig(M)
+ >>> check_eigen(M, w, v)
+ True
+
+ Real matrix possessing complex e-values and e-vectors; note that the
+ e-values are complex conjugates of each other.
+
+ >>> M = np.array([[1, -1], [1, 1]])
+ >>> w, v = eig(M)
+ >>> check_eigen(M, w, v)
+ True
+
+ Complex-valued matrix with real e-values (but complex-valued e-vectors);
+ note that a.conj().T = a, i.e., a is Hermitian.
+
+ >>> M = np.array([[1, 1j], [-1j, 1]])
+ >>> w, v = eig(M)
+ >>> check_eigen(M, w, v)
+ True
+
+ """
+ return _impl.eig(a, **kwargs)
+
+
+def eigvals(a, **kwargs):
+ """
+ Compute the eigenvalues of general matrices, with broadcasting.
+
+ Main difference between `eigvals` and `eig`: the eigenvectors aren't
+ returned.
+
+ Parameters
+ ----------
+ a : (..., M, M) array
+ Matrices whose eigenvalues will be computed
+
+ Returns
+ -------
+ w : (..., M) array
+ The eigenvalues, each repeated according to its multiplicity.
+ The eigenvalues are not necessarily ordered. The resulting
+        array will always be of complex type. When `a` is real
+        the resulting eigenvalues will be real (0 imaginary part) or
+        occur in conjugate pairs.
+
+ See Also
+ --------
+ eig : eigenvalues and right eigenvectors of general arrays.
+ eigh : eigenvalues and right eigenvectors of symmetric/hermitian
+ arrays.
+ eigvalsh : eigenvalues of symmetric/hermitian arrays.
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ Eigenvalues for single and double versions will always be typed
+ csingle and cdouble, even if all the results are real (imaginary
+ part will be 0).
+
+ This is implemented using the _geev LAPACK routines which compute
+ the eigenvalues and eigenvectors of general square arrays.
+
+ For elements where the LAPACK routine fails, the result will be set
+ to NaNs.
+
+ Examples
+ --------
+
+ Eigenvalues for a diagonal matrix are its diagonal elements
+
+ >>> D = np.diag((-1,1))
+ >>> eigvals(D)
+ array([-1.+0.j, 1.+0.j])
+
+ Multiplying on the left by an orthogonal matrix, `Q`, and on the
+    right by `Q.T` (the transpose of `Q`) preserves the eigenvalues of
+    the original matrix:
+
+ >>> x = np.random.random()
+ >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
+ >>> A = matrix_multiply(Q, D)
+ >>> A = matrix_multiply(A, Q.T)
+ >>> eigvals(A)
+ array([-1.+0.j, 1.+0.j])
+
+ """
+ return _impl.eigvals(a, **kwargs)
+
+
+def quadratic_form(u,Q,v, **kwargs):
+ """
+ Compute the quadratic form uQv, with broadcasting
+
+ Parameters
+ ----------
+ u : (..., M) array
+ The u vectors of the quadratic form uQv
+
+ Q : (..., M, N) array
+ The Q matrices of the quadratic form uQv
+
+ v : (..., N) array
+ The v vectors of the quadratic form uQv
+
+ Returns
+ -------
+ qf : (...) array
+ The result of the quadratic forms
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+    This is similar to PDL inner2.
+
+ Examples
+ --------
+
+    The result in the absence of broadcasting is the same as np.dot(np.dot(u,Q),v)
+ or np.dot(u, np.dot(Q,v))
+
+ >>> u = np.array([2., 3.])
+ >>> Q = np.array([[1.,1.], [0.,1.]])
+ >>> v = np.array([1.,2.])
+ >>> quadratic_form(u,Q,v)
+ 12.0
+
+ >>> np.dot(np.dot(u,Q),v)
+ 12.0
+
+ >>> np.dot(u, np.dot(Q,v))
+ 12.0
+
+ """
+ return _impl.quadratic_form(u, Q, v, **kwargs)
+
+
+def add3(a, b, c, **kwargs):
+ """
+ Element-wise addition of 3 arrays: a + b + c.
+
+ Parameters
+ ----------
+ a, b, c : (...) array
+ arrays with the addends
+
+ Returns
+ -------
+ add3 : (...) array
+ resulting element-wise addition.
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ See Also
+ --------
+ multiply3 : element-wise three-way multiplication.
+ multiply3_add : element-wise three-way multiplication and addition.
+ multiply_add : element-wise multiply-add.
+ multiply_add2 : element-wise multiplication with two additions.
+ multiply4 : element-wise four-way multiplication
+    multiply4_add : element-wise four-way multiplication and addition.
+
+ Examples
+ --------
+ >>> a = np.linspace(1.0, 30.0, 30)
+ >>> add3(a[0::3], a[1::3], a[2::3])
+ array([ 6., 15., 24., 33., 42., 51., 60., 69., 78., 87.])
+
+ """
+ return _impl.add3(a, b, c, **kwargs)
+
+
+def multiply3(a, b, c, **kwargs):
+ """
+ Element-wise multiplication of 3 arrays: a*b*c.
+
+ Parameters
+ ----------
+ a, b, c : (...) array
+ arrays with the factors
+
+ Returns
+ -------
+ m3 : (...) array
+ resulting element-wise product
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ See Also
+ --------
+    add3 : element-wise three-way addition
+ multiply3_add : element-wise three-way multiplication and addition.
+ multiply_add : element-wise multiply-add.
+ multiply_add2 : element-wise multiplication with two additions.
+ multiply4 : element-wise four-way multiplication
+    multiply4_add : element-wise four-way multiplication and addition.
+
+ Examples
+ --------
+ >>> a = np.linspace(1.0, 10.0, 10)
+ >>> multiply3(a, 1.01, a)
+ array([ 1.01, 4.04, 9.09, 16.16, 25.25, 36.36, 49.49,
+ 64.64, 81.81, 101. ])
+
+ """
+ return _impl.multiply3(a, b, c, **kwargs)
+
+
+def multiply3_add(a, b, c, d, **kwargs):
+ """
+    Element-wise multiplication of 3 arrays adding an element
+    of a 4th array to the result: a*b*c + d
+
+ Parameters
+ ----------
+ a, b, c : (...) array
+ arrays with the factors
+
+ d : (...) array
+ array with the addend
+
+ Returns
+ -------
+ m3a : (...) array
+ result of the element-wise operation a*b*c + d
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ See Also
+ --------
+ add3 : element-wise three-way addition.
+ multiply3 : element-wise three-way multiplication.
+ multiply_add : element-wise multiply-add.
+ multiply_add2 : element-wise multiplication with two additions.
+ multiply4 : element-wise four-way multiplication.
+ multiply4_add : element-wise four-way multiplication and addition.
+
+ Examples
+ --------
+ >>> a = np.linspace(1.0, 10.0, 10)
+ >>> multiply3_add(a, 1.01, a, 42e-4)
+ array([ 1.0142, 4.0442, 9.0942, 16.1642, 25.2542, 36.3642,
+ 49.4942, 64.6442, 81.8142, 101.0042])
+
+ """
+ return _impl.multiply3_add(a, b, c, d, **kwargs)
+
+
+def multiply_add(a, b, c, **kwargs):
+ """
+ Element-wise multiply-add: a*b + c.
+
+ Parameters
+ ----------
+ a, b : (...) array
+ arrays with the factors
+
+ c : (...) array
+ array with the addend
+
+ Returns
+ -------
+ madd : (...) array
+ result of the element-wise operation a*b + c
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ See Also
+ --------
+ add3 : element-wise three-way addition.
+ multiply3 : element-wise three-way multiplication.
+ multiply3_add : element-wise three-way multiplication and addition.
+ multiply_add2 : element-wise multiplication with two additions.
+ multiply4 : element-wise four-way multiplication.
+ multiply4_add : element-wise four-way multiplication and addition.
+
+ Examples
+ --------
+ >>> a = np.linspace(1.0, 10.0, 10)
+ >>> multiply_add(a, a, 42e-4)
+ array([ 1.0042, 4.0042, 9.0042, 16.0042, 25.0042, 36.0042,
+ 49.0042, 64.0042, 81.0042, 100.0042])
+
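+ As a consistency check (illustrative, not part of the original
+ examples), the result should match the unfused numpy expression:
+
+ >>> np.allclose(multiply_add(a, a, 42e-4), a*a + 42e-4)
+ True
+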
+ """
+ return _impl.multiply_add(a, b, c, **kwargs)
+
+
+def multiply_add2(a, b, c, d, **kwargs):
+ """
+ Element-wise multiplication with two additions: a*b + c + d.
+
+ Parameters
+ ----------
+ a, b : (...) array
+ arrays with the factors
+
+ c, d : (...) array
+ arrays with the addends
+
+ Returns
+ -------
+ madd2 : (...) array
+ result of the element-wise operation a*b + c + d
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ See Also
+ --------
+ add3 : element-wise three-way addition.
+ multiply3 : element-wise three-way multiplication.
+ multiply3_add : element-wise three-way multiplication and addition.
+ multiply_add : element-wise multiply-add.
+ multiply4 : element-wise four-way multiplication.
+ multiply4_add : element-wise four-way multiplication and addition.
+
+ Examples
+ --------
+ >>> a = np.linspace(1.0, 10.0, 10)
+ >>> multiply_add2(a, a, a, 42e-4)
+ array([ 2.0042, 6.0042, 12.0042, 20.0042, 30.0042, 42.0042,
+ 56.0042, 72.0042, 90.0042, 110.0042])
+
+ """
+ return _impl.multiply_add2(a, b, c, d, **kwargs)
+
+
+def multiply4(a, b, c, d, **kwargs):
+ """
+ Element-wise multiplication of 4 arrays: a*b*c*d.
+
+ Parameters
+ ----------
+ a, b, c, d : (...) array
+ arrays with the factors
+
+ Returns
+ -------
+ m4 : (...) array
+ resulting element-wise product
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ See Also
+ --------
+ add3 : element-wise three-way addition.
+ multiply3 : element-wise three-way multiplication.
+ multiply3_add : element-wise three-way multiplication and addition.
+ multiply_add : element-wise multiply-add.
+ multiply_add2 : element-wise multiplication with two additions.
+ multiply4_add : element-wise four-way multiplication and addition.
+
+ Examples
+ --------
+ >>> a = np.linspace(1.0, 10.0, 10)
+ >>> multiply4(a, a, a[::-1], 1.0001)
+ array([ 10.001 , 36.0036, 72.0072, 112.0112, 150.015 , 180.018 ,
+ 196.0196, 192.0192, 162.0162, 100.01 ])
+
+ """
+ return _impl.multiply4(a, b, c, d, **kwargs)
+
+
+def multiply4_add(a, b, c, d, e, **kwargs):
+ """
+ Element-wise multiplication of 4 arrays, adding an element
+ of a 5th array to the result: a*b*c*d + e.
+
+ Parameters
+ ----------
+ a, b, c, d : (...) array
+ arrays with the factors
+
+ e : (...) array
+ array with the addend
+
+ Returns
+ -------
+ m4a : (...) array
+ result of the element-wise operation a*b*c*d + e
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Implemented for types single, double, csingle and cdouble. Numpy
+ conversion rules apply.
+
+ See Also
+ --------
+ add3 : element-wise three-way addition.
+ multiply3 : element-wise three-way multiplication.
+ multiply3_add : element-wise three-way multiplication and addition.
+ multiply_add : element-wise multiply-add.
+ multiply_add2 : element-wise multiplication with two additions.
+ multiply4 : element-wise four-way multiplication.
+
+ Examples
+ --------
+ >>> a = np.linspace(1.0, 10.0, 10)
+ >>> multiply4_add(a, a, a[::-1], 1.01, 42e-4)
+ array([ 10.1042, 36.3642, 72.7242, 113.1242, 151.5042, 181.8042,
+ 197.9642, 193.9242, 163.6242, 101.0042])
+
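+ As a consistency check (illustrative, not part of the original
+ examples), the result should match the unfused numpy expression:
+
+ >>> np.allclose(multiply4_add(a, a, a[::-1], 1.01, 42e-4),
+ ... a*a*a[::-1]*1.01 + 42e-4)
+ True
+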
+ """
+ return _impl.multiply4_add(a, b, c, d, e, **kwargs)
+
+
+def eigh(A, UPLO='L', **kw_args):
+ """
+ Computes the eigenvalues and eigenvectors for the square matrices
+ in the inner dimensions of A, assuming those matrices are
+ symmetric/hermitian.
+
+ Parameters
+ ----------
+ A : (..., M, M) array
+ Hermitian/Symmetric matrices whose eigenvalues and
+ eigenvectors are to be computed.
+ UPLO : {'L', 'U'}, optional
+ Specifies whether the calculation is done with the lower
+ triangular part of the elements in `A` ('L', default) or
+ the upper triangular part ('U').
+
+ Returns
+ -------
+ w : (..., M) array
+ The eigenvalues, not necessarily ordered.
+ v : (..., M, M) array
+ The inner dimensions contain matrices with the normalized
+ eigenvectors as columns; column i holds the eigenvector
+ associated with the eigenvalue w[..., i].
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ The eigenvalues/eigenvectors are computed using LAPACK routines
+ _syevd, _heevd.
+
+ For elements where the LAPACK routine fails, the result will be set
+ to NaNs.
+
+ Implemented for single, double, csingle and cdouble. Numpy conversion
+ rules apply.
+
+ Unlike eig, the results for single and double inputs will always be
+ single and double, respectively: a symmetric real matrix cannot
+ produce complex eigenvalues/eigenvectors.
+
+ See Also
+ --------
+ eigvalsh : eigenvalues of symmetric/hermitian arrays.
+ eig : eigenvalues and right eigenvectors for general matrices.
+ eigvals : eigenvalues for general matrices.
+
+ Examples
+ --------
+ First, a utility function to check if eigenvalues/eigenvectors are correct.
+ This checks the definition of eigenvectors. For each eigenvector v
+ with associated eigenvalue w of a matrix M the following equality must
+ hold: Mv == wv
+
+ >>> def check_eigen(M, w, v):
+ ... '''vectorial check of Mv==wv'''
+ ... lhs = matrix_multiply(M, v)
+ ... rhs = w*v
+ ... return np.allclose(lhs, rhs)
+
+ A simple example that computes the eigenvalues and eigenvectors of
+ a hermitian matrix and checks that M*v == w*v for each pair of
+ eigenvalues (w) and eigenvectors (v):
+
+ >>> M = np.array([[1, -2j], [2j, 1]])
+ >>> w, v = eigh(M)
+ >>> check_eigen(M, w, v)
+ True
+
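+ Only the triangle selected by UPLO is referenced, so the opposite
+ triangle may hold arbitrary values (illustrative sketch of the UPLO
+ behaviour described above):
+
+ >>> L = np.array([[1., 99.], [-2., 1.]])
+ >>> w_lo, v_lo = eigh(L, UPLO='L')
+ >>> w_full, v_full = eigh(np.array([[1., -2.], [-2., 1.]]))
+ >>> np.allclose(w_lo, w_full)
+ True
+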
+ """
+ if 'L' == UPLO:
+ gufunc = _impl.eigh_lo
+ else:
+ gufunc = _impl.eigh_up
+
+ return gufunc(A, **kw_args)
+
+
+def eigvalsh(A, UPLO='L', **kw_args):
+ """
+ Computes the eigenvalues for the square matrices in the inner
+ dimensions of A, assuming those matrices are symmetric/hermitian.
+
+ Parameters
+ ----------
+ A : (..., M, M) array
+ Hermitian/Symmetric matrices whose eigenvalues are to
+ be computed.
+ UPLO : {'L', 'U'}, optional
+ Specifies whether the calculation is done with the lower
+ triangular part of the elements in `A` ('L', default) or
+ the upper triangular part ('U').
+
+ Returns
+ -------
+ w : (..., M) array
+ The eigenvalues, not necessarily ordered.
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ The eigenvalues are computed using LAPACK routines _syevd, _heevd.
+
+ For elements where the LAPACK routine fails, the result will be set
+ to NaNs.
+
+ Implemented for single, double, csingle and cdouble. Numpy conversion
+ rules apply.
+
+ Unlike eigvals, the results for single and double inputs will always
+ be single and double, respectively: a symmetric real matrix cannot
+ produce complex eigenvalues.
+
+ See Also
+ --------
+ eigh : eigenvalues of symmetric/hermitian arrays.
+ eig : eigenvalues and right eigenvectors for general matrices.
+ eigvals : eigenvalues for general matrices.
+
+ Examples
+ --------
+ eigvalsh results should be the same as the eigenvalues returned by eigh
+
+ >>> a = np.array([[1, -2j], [2j, 5]])
+ >>> w, v = eigh(a)
+ >>> np.allclose(w, eigvalsh(a))
+ True
+
+ The eigenvalues of an identity matrix are all ones:
+
+ >>> eigvalsh(np.eye(6))
+ array([ 1., 1., 1., 1., 1., 1.])
+
+ """
+ if 'L' == UPLO:
+ gufunc = _impl.eigvalsh_lo
+ else:
+ gufunc = _impl.eigvalsh_up
+
+ return gufunc(A, **kw_args)
+
+
+def solve(A, B, **kw_args):
+ """
+ Solve the linear matrix equations on the inner dimensions.
+
+ Computes the "exact" solution, `x`. of the well-determined,
+ i.e., full rank, linear matrix equations `ax = b`.
+
+ Parameters
+ ----------
+ A : (..., M, M) array
+ Coefficient matrices.
+ B : (..., M, N) array
+ Ordinate or "dependent variable" values.
+
+ Returns
+ -------
+ X : (..., M, N) array
+ Solutions to the system A X = B for all the outer dimensions
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ The solutions are computed using LAPACK routine _gesv
+
+ For elements where the LAPACK routine fails, the result will be set
+ to NaNs.
+
+ Implemented for single, double, csingle and cdouble. Numpy conversion
+ rules apply.
+
+ See Also
+ --------
+ chosolve : solve a system using Cholesky decomposition (for equations
+ having symmetric/hermitian coefficient matrices)
+
+ Examples
+ --------
+ Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
+
+ >>> a = np.array([[3,1], [1,2]])
+ >>> b = np.array([9,8])
+ >>> x = solve(a, b)
+ >>> x
+ array([ 2., 3.])
+
+ Check that the solution is correct:
+
+ >>> np.allclose(np.dot(a, x), b)
+ True
+
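+ Numpy broadcasting applies along the leading dimensions; as an
+ illustrative sketch, a stack of coefficient matrices is solved in
+ one call:
+
+ >>> As = np.array([a, np.eye(2)])
+ >>> Xs = solve(As, np.array([b, b]))
+ >>> np.allclose(Xs[0], x) and np.allclose(Xs[1], b)
+ True
+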
+ """
+ if len(B.shape) == (len(A.shape) - 1):
+ gufunc = _impl.solve1
+ else:
+ gufunc = _impl.solve
+
+ return gufunc(A, B, **kw_args)
+
+
+def svd(a, full_matrices=1, compute_uv=1, **kw_args):
+ """
+ Singular Value Decomposition on the inner dimensions.
+
+ Factors the matrices in `a` as ``u * np.diag(s) * v``, where `u`
+ and `v` are unitary and `s` is a 1-d array of `a`'s singular
+ values.
+
+ Parameters
+ ----------
+ a : (..., M, N) array
+ The array of matrices to decompose.
+ full_matrices : bool, optional
+ If True (default), `u` and `v` have the shapes (`M`, `M`) and
+ (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
+ and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
+ compute_uv : bool, optional
+ Whether or not to compute `u` and `v` in addition to `s`. True
+ by default.
+
+ Returns
+ -------
+ u : { (..., M, M), (..., M, K) } array
+ Unitary matrices. The actual shape depends on the value of
+ ``full_matrices``. Only returned when ``compute_uv`` is True.
+ s : (..., K) array
+ The singular values for every matrix, sorted in descending order.
+ v : { (..., N, N), (..., K, N) } array
+ Unitary matrices. The actual shape depends on the value of
+ ``full_matrices``. Only returned when ``compute_uv`` is True.
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ Singular Value Decomposition is performed using LAPACK routine _gesdd
+
+ For elements where the LAPACK routine fails, the result will be set
+ to NaNs.
+
+ Implemented for types single, double, csingle and cdouble. Numpy conversion
+ rules apply.
+
+ Examples
+ --------
+ >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
+
+ Reconstruction based on full SVD:
+
+ >>> U, s, V = svd(a, full_matrices=True)
+ >>> U.shape, V.shape, s.shape
+ ((9, 9), (6, 6), (6,))
+ >>> S = np.zeros((9, 6), dtype=complex)
+ >>> S[:6, :6] = np.diag(s)
+ >>> np.allclose(a, np.dot(U, np.dot(S, V)))
+ True
+
+ Reconstruction based on reduced SVD:
+
+ >>> U, s, V = svd(a, full_matrices=False)
+ >>> U.shape, V.shape, s.shape
+ ((9, 6), (6, 6), (6,))
+ >>> S = np.diag(s)
+ >>> np.allclose(a, np.dot(U, np.dot(S, V)))
+ True
+
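+ Singular values only, skipping the computation of u and v
+ (illustrative):
+
+ >>> s2 = svd(a, compute_uv=False)
+ >>> np.allclose(s, s2)
+ True
+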
+ """
+ m = a.shape[-2]
+ n = a.shape[-1]
+ if 1 == compute_uv:
+ if 1 == full_matrices:
+ if m < n:
+ gufunc = _impl.svd_m_f
+ else:
+ gufunc = _impl.svd_n_f
+ else:
+ if m < n:
+ gufunc = _impl.svd_m_s
+ else:
+ gufunc = _impl.svd_n_s
+ else:
+ if m < n:
+ gufunc = _impl.svd_m
+ else:
+ gufunc = _impl.svd_n
+ return gufunc(a, **kw_args)
+
+
+def chosolve(A, B, UPLO='L', **kw_args):
+ """
+ Solve the linear matrix equations on the inner dimensions, using
+ Cholesky decomposition.
+
+ Computes the "exact" solution, `x`. of the well-determined,
+ i.e., full rank, linear matrix equations `ax = b`, where a is
+ a symmetric/hermitian positive-definite matrix.
+
+ Parameters
+ ----------
+ A : (..., M, M) array
+ Coefficient symmetric/hermitian positive-definite matrices.
+ B : (..., M, N) array
+ Ordinate or "dependent variable" values.
+ UPLO : {'L', 'U'}, optional
+ Specifies whether the calculation is done with the lower
+ triangular part of the elements in `A` ('L', default) or
+ the upper triangular part ('U').
+
+ Returns
+ -------
+ X : (..., M, N) array
+ Solutions to the system A X = B for all elements in the outer
+ dimensions
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ The solutions are computed using LAPACK routines _potrf, _potrs
+
+ For elements where the LAPACK routine fails, the result will be set
+ to NaNs.
+
+ Implemented for single, double, csingle and cdouble. Numpy conversion
+ rules apply.
+
+ See Also
+ --------
+ solve : solve a system of linear equations (for general,
+ not necessarily symmetric, coefficient matrices)
+
+ Examples
+ --------
+ Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
+ (note the coefficient matrix is symmetric positive-definite in this system)
+
+ >>> a = np.array([[3,1], [1,2]])
+ >>> b = np.array([9,8])
+ >>> x = solve(a, b)
+ >>> x
+ array([ 2., 3.])
+
+ Check that the solution is correct:
+
+ >>> np.allclose(np.dot(a, x), b)
+ True
+
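+ For symmetric/hermitian positive-definite coefficients the result
+ should agree with the general solver (illustrative):
+
+ >>> np.allclose(chosolve(a, b), solve(a, b))
+ True
+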
+ """
+ if len(B.shape) == (len(A.shape) - 1):
+ if 'L' == UPLO:
+ gufunc = _impl.chosolve1_lo
+ else:
+ gufunc = _impl.chosolve1_up
+ else:
+ if 'L' == UPLO:
+ gufunc = _impl.chosolve_lo
+ else:
+ gufunc = _impl.chosolve_up
+
+ return gufunc(A, B, **kw_args)
+
+
+def poinv(A, UPLO='L', **kw_args):
+ """
+ Compute the (multiplicative) inverse of symmetric/hermitian positive
+ definite matrices, with broadcasting.
+
+ Given a square symmetric/hermitian positive-definite matrix `a`, return
+ the matrix `ainv` satisfying ``matrix_multiply(a, ainv) =
+ matrix_multiply(ainv, a) = Identity matrix``.
+
+ Parameters
+ ----------
+ a : (..., M, M) array
+ Symmetric/hermitian positive definite matrices to be inverted.
+
+ Returns
+ -------
+ ainv : (..., M, M) array
+ (Multiplicative) inverse of the `a` matrices.
+
+ Notes
+ -----
+ Numpy broadcasting rules apply.
+
+ The inverse is computed using LAPACK routines _potrf, _potri
+
+ For elements where the LAPACK routine fails, the result will be set
+ to NaNs.
+
+ Implemented for types single, double, csingle and cdouble. Numpy conversion
+ rules apply.
+
+ See Also
+ --------
+ inv : compute the multiplicative inverse of general matrices.
+
+ Examples
+ --------
+ >>> a = np.array([[5, 3], [3, 5]])
+ >>> ainv = poinv(a)
+ >>> np.allclose(matrix_multiply(a, ainv), np.eye(2))
+ True
+ >>> np.allclose(matrix_multiply(ainv, a), np.eye(2))
+ True
+
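+ For positive-definite input the result should match the general
+ inverse (illustrative, assuming the inv function from this module):
+
+ >>> np.allclose(poinv(a), inv(a))
+ True
+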
+ """
+ if 'L' == UPLO:
+ gufunc = _impl.poinv_lo
+ else:
+ gufunc = _impl.poinv_up
+
+ return gufunc(A, **kw_args)
+
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/numpy/linalg/bento.info b/numpy/linalg/bento.info
index 1e367e314..52d036753 100644
--- a/numpy/linalg/bento.info
+++ b/numpy/linalg/bento.info
@@ -1,12 +1,21 @@
HookFile: bscript
Library:
+ Extension: _umath_linalg
+ Sources:
+ umath_linalg.c.src,
+ lapack_lite/blas_lite.c,
+ lapack_lite/dlamch.c,
+ lapack_lite/dlapack_lite.c,
+ lapack_lite/f2c_lite.c,
+ lapack_lite/python_xerbla.c,
+ lapack_lite/zlapack_lite.c
Extension: lapack_lite
Sources:
- blas_lite.c,
- dlamch.c,
- dlapack_lite.c,
- f2c_lite.c,
+ lapack_lite/blas_lite.c,
+ lapack_lite/dlamch.c,
+ lapack_lite/dlapack_lite.c,
+ lapack_lite/f2c_lite.c,
lapack_litemodule.c,
- python_xerbla.c,
- zlapack_lite.c
+ lapack_lite/python_xerbla.c,
+ lapack_lite/zlapack_lite.c
diff --git a/numpy/linalg/blas_lite.c b/numpy/linalg/blas_lite.c
deleted file mode 100644
index d0de43478..000000000
--- a/numpy/linalg/blas_lite.c
+++ /dev/null
@@ -1,10660 +0,0 @@
-/*
-NOTE: This is generated code. Look in Misc/lapack_lite for information on
- remaking this file.
-*/
-#include "f2c.h"
-
-#ifdef HAVE_CONFIG
-#include "config.h"
-#else
-extern doublereal dlamch_(char *);
-#define EPSILON dlamch_("Epsilon")
-#define SAFEMINIMUM dlamch_("Safe minimum")
-#define PRECISION dlamch_("Precision")
-#define BASE dlamch_("Base")
-#endif
-
-extern doublereal dlapy2_(doublereal *x, doublereal *y);
-
-
-
-/* Table of constant values */
-
-static integer c__1 = 1;
-static doublecomplex c_b359 = {1.,0.};
-
-/* Subroutine */ int daxpy_(integer *n, doublereal *da, doublereal *dx,
- integer *incx, doublereal *dy, integer *incy)
-{
- /* System generated locals */
- integer i__1;
-
- /* Local variables */
- static integer i__, m, ix, iy, mp1;
-
-
-/*
- constant times a vector plus a vector.
- uses unrolled loops for increments equal to one.
- jack dongarra, linpack, 3/11/78.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --dy;
- --dx;
-
- /* Function Body */
- if (*n <= 0) {
- return 0;
- }
- if (*da == 0.) {
- return 0;
- }
- if ((*incx == 1 && *incy == 1)) {
- goto L20;
- }
-
-/*
- code for unequal increments or equal increments
- not equal to 1
-*/
-
- ix = 1;
- iy = 1;
- if (*incx < 0) {
- ix = (-(*n) + 1) * *incx + 1;
- }
- if (*incy < 0) {
- iy = (-(*n) + 1) * *incy + 1;
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dy[iy] += *da * dx[ix];
- ix += *incx;
- iy += *incy;
-/* L10: */
- }
- return 0;
-
-/*
- code for both increments equal to 1
-
-
- clean-up loop
-*/
-
-L20:
- m = *n % 4;
- if (m == 0) {
- goto L40;
- }
- i__1 = m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dy[i__] += *da * dx[i__];
-/* L30: */
- }
- if (*n < 4) {
- return 0;
- }
-L40:
- mp1 = m + 1;
- i__1 = *n;
- for (i__ = mp1; i__ <= i__1; i__ += 4) {
- dy[i__] += *da * dx[i__];
- dy[i__ + 1] += *da * dx[i__ + 1];
- dy[i__ + 2] += *da * dx[i__ + 2];
- dy[i__ + 3] += *da * dx[i__ + 3];
-/* L50: */
- }
- return 0;
-} /* daxpy_ */
-
-doublereal dcabs1_(doublecomplex *z__)
-{
- /* System generated locals */
- doublereal ret_val;
- static doublecomplex equiv_0[1];
-
- /* Local variables */
-#define t ((doublereal *)equiv_0)
-#define zz (equiv_0)
-
- zz->r = z__->r, zz->i = z__->i;
- ret_val = abs(t[0]) + abs(t[1]);
- return ret_val;
-} /* dcabs1_ */
-
-#undef zz
-#undef t
-
-
-/* Subroutine */ int dcopy_(integer *n, doublereal *dx, integer *incx,
- doublereal *dy, integer *incy)
-{
- /* System generated locals */
- integer i__1;
-
- /* Local variables */
- static integer i__, m, ix, iy, mp1;
-
-
-/*
- copies a vector, x, to a vector, y.
- uses unrolled loops for increments equal to one.
- jack dongarra, linpack, 3/11/78.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --dy;
- --dx;
-
- /* Function Body */
- if (*n <= 0) {
- return 0;
- }
- if ((*incx == 1 && *incy == 1)) {
- goto L20;
- }
-
-/*
- code for unequal increments or equal increments
- not equal to 1
-*/
-
- ix = 1;
- iy = 1;
- if (*incx < 0) {
- ix = (-(*n) + 1) * *incx + 1;
- }
- if (*incy < 0) {
- iy = (-(*n) + 1) * *incy + 1;
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dy[iy] = dx[ix];
- ix += *incx;
- iy += *incy;
-/* L10: */
- }
- return 0;
-
-/*
- code for both increments equal to 1
-
-
- clean-up loop
-*/
-
-L20:
- m = *n % 7;
- if (m == 0) {
- goto L40;
- }
- i__1 = m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dy[i__] = dx[i__];
-/* L30: */
- }
- if (*n < 7) {
- return 0;
- }
-L40:
- mp1 = m + 1;
- i__1 = *n;
- for (i__ = mp1; i__ <= i__1; i__ += 7) {
- dy[i__] = dx[i__];
- dy[i__ + 1] = dx[i__ + 1];
- dy[i__ + 2] = dx[i__ + 2];
- dy[i__ + 3] = dx[i__ + 3];
- dy[i__ + 4] = dx[i__ + 4];
- dy[i__ + 5] = dx[i__ + 5];
- dy[i__ + 6] = dx[i__ + 6];
-/* L50: */
- }
- return 0;
-} /* dcopy_ */
-
-doublereal ddot_(integer *n, doublereal *dx, integer *incx, doublereal *dy,
- integer *incy)
-{
- /* System generated locals */
- integer i__1;
- doublereal ret_val;
-
- /* Local variables */
- static integer i__, m, ix, iy, mp1;
- static doublereal dtemp;
-
-
-/*
- forms the dot product of two vectors.
- uses unrolled loops for increments equal to one.
- jack dongarra, linpack, 3/11/78.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --dy;
- --dx;
-
- /* Function Body */
- ret_val = 0.;
- dtemp = 0.;
- if (*n <= 0) {
- return ret_val;
- }
- if ((*incx == 1 && *incy == 1)) {
- goto L20;
- }
-
-/*
- code for unequal increments or equal increments
- not equal to 1
-*/
-
- ix = 1;
- iy = 1;
- if (*incx < 0) {
- ix = (-(*n) + 1) * *incx + 1;
- }
- if (*incy < 0) {
- iy = (-(*n) + 1) * *incy + 1;
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dtemp += dx[ix] * dy[iy];
- ix += *incx;
- iy += *incy;
-/* L10: */
- }
- ret_val = dtemp;
- return ret_val;
-
-/*
- code for both increments equal to 1
-
-
- clean-up loop
-*/
-
-L20:
- m = *n % 5;
- if (m == 0) {
- goto L40;
- }
- i__1 = m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dtemp += dx[i__] * dy[i__];
-/* L30: */
- }
- if (*n < 5) {
- goto L60;
- }
-L40:
- mp1 = m + 1;
- i__1 = *n;
- for (i__ = mp1; i__ <= i__1; i__ += 5) {
- dtemp = dtemp + dx[i__] * dy[i__] + dx[i__ + 1] * dy[i__ + 1] + dx[
- i__ + 2] * dy[i__ + 2] + dx[i__ + 3] * dy[i__ + 3] + dx[i__ +
- 4] * dy[i__ + 4];
-/* L50: */
- }
-L60:
- ret_val = dtemp;
- return ret_val;
-} /* ddot_ */
-
-/* Subroutine */ int dgemm_(char *transa, char *transb, integer *m, integer *
- n, integer *k, doublereal *alpha, doublereal *a, integer *lda,
- doublereal *b, integer *ldb, doublereal *beta, doublereal *c__,
- integer *ldc)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
- i__3;
-
- /* Local variables */
- static integer i__, j, l, info;
- static logical nota, notb;
- static doublereal temp;
- static integer ncola;
- extern logical lsame_(char *, char *);
- static integer nrowa, nrowb;
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- DGEMM performs one of the matrix-matrix operations
-
- C := alpha*op( A )*op( B ) + beta*C,
-
- where op( X ) is one of
-
- op( X ) = X or op( X ) = X',
-
- alpha and beta are scalars, and A, B and C are matrices, with op( A )
- an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
-
- Parameters
- ==========
-
- TRANSA - CHARACTER*1.
- On entry, TRANSA specifies the form of op( A ) to be used in
- the matrix multiplication as follows:
-
- TRANSA = 'N' or 'n', op( A ) = A.
-
- TRANSA = 'T' or 't', op( A ) = A'.
-
- TRANSA = 'C' or 'c', op( A ) = A'.
-
- Unchanged on exit.
-
- TRANSB - CHARACTER*1.
- On entry, TRANSB specifies the form of op( B ) to be used in
- the matrix multiplication as follows:
-
- TRANSB = 'N' or 'n', op( B ) = B.
-
- TRANSB = 'T' or 't', op( B ) = B'.
-
- TRANSB = 'C' or 'c', op( B ) = B'.
-
- Unchanged on exit.
-
- M - INTEGER.
- On entry, M specifies the number of rows of the matrix
- op( A ) and of the matrix C. M must be at least zero.
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the number of columns of the matrix
- op( B ) and the number of columns of the matrix C. N must be
- at least zero.
- Unchanged on exit.
-
- K - INTEGER.
- On entry, K specifies the number of columns of the matrix
- op( A ) and the number of rows of the matrix op( B ). K must
- be at least zero.
- Unchanged on exit.
-
- ALPHA - DOUBLE PRECISION.
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
- k when TRANSA = 'N' or 'n', and is m otherwise.
- Before entry with TRANSA = 'N' or 'n', the leading m by k
- part of the array A must contain the matrix A, otherwise
- the leading k by m part of the array A must contain the
- matrix A.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. When TRANSA = 'N' or 'n' then
- LDA must be at least max( 1, m ), otherwise LDA must be at
- least max( 1, k ).
- Unchanged on exit.
-
- B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
- n when TRANSB = 'N' or 'n', and is k otherwise.
- Before entry with TRANSB = 'N' or 'n', the leading k by n
- part of the array B must contain the matrix B, otherwise
- the leading n by k part of the array B must contain the
- matrix B.
- Unchanged on exit.
-
- LDB - INTEGER.
- On entry, LDB specifies the first dimension of B as declared
- in the calling (sub) program. When TRANSB = 'N' or 'n' then
- LDB must be at least max( 1, k ), otherwise LDB must be at
- least max( 1, n ).
- Unchanged on exit.
-
- BETA - DOUBLE PRECISION.
- On entry, BETA specifies the scalar beta. When BETA is
- supplied as zero then C need not be set on input.
- Unchanged on exit.
-
- C - DOUBLE PRECISION array of DIMENSION ( LDC, n ).
- Before entry, the leading m by n part of the array C must
- contain the matrix C, except when beta is zero, in which
- case C need not be set on entry.
- On exit, the array C is overwritten by the m by n matrix
- ( alpha*op( A )*op( B ) + beta*C ).
-
- LDC - INTEGER.
- On entry, LDC specifies the first dimension of C as declared
- in the calling (sub) program. LDC must be at least
- max( 1, m ).
- Unchanged on exit.
-
-
- Level 3 Blas routine.
-
- -- Written on 8-February-1989.
- Jack Dongarra, Argonne National Laboratory.
- Iain Duff, AERE Harwell.
- Jeremy Du Croz, Numerical Algorithms Group Ltd.
- Sven Hammarling, Numerical Algorithms Group Ltd.
-
-
- Set NOTA and NOTB as true if A and B respectively are not
- transposed and set NROWA, NCOLA and NROWB as the number of rows
- and columns of A and the number of rows of B respectively.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
-
- /* Function Body */
- nota = lsame_(transa, "N");
- notb = lsame_(transb, "N");
- if (nota) {
- nrowa = *m;
- ncola = *k;
- } else {
- nrowa = *k;
- ncola = *m;
- }
- if (notb) {
- nrowb = *k;
- } else {
- nrowb = *n;
- }
-
-/* Test the input parameters. */
-
- info = 0;
- if (((! nota && ! lsame_(transa, "C")) && ! lsame_(
- transa, "T"))) {
- info = 1;
- } else if (((! notb && ! lsame_(transb, "C")) && !
- lsame_(transb, "T"))) {
- info = 2;
- } else if (*m < 0) {
- info = 3;
- } else if (*n < 0) {
- info = 4;
- } else if (*k < 0) {
- info = 5;
- } else if (*lda < max(1,nrowa)) {
- info = 8;
- } else if (*ldb < max(1,nrowb)) {
- info = 10;
- } else if (*ldc < max(1,*m)) {
- info = 13;
- }
- if (info != 0) {
- xerbla_("DGEMM ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*m == 0 || *n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) {
- return 0;
- }
-
-/* And if alpha.eq.zero. */
-
- if (*alpha == 0.) {
- if (*beta == 0.) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = 0.;
-/* L10: */
- }
-/* L20: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
-/* L30: */
- }
-/* L40: */
- }
- }
- return 0;
- }
-
-/* Start the operations. */
-
- if (notb) {
- if (nota) {
-
-/* Form C := alpha*A*B + beta*C. */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*beta == 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = 0.;
-/* L50: */
- }
- } else if (*beta != 1.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
-/* L60: */
- }
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- if (b[l + j * b_dim1] != 0.) {
- temp = *alpha * b[l + j * b_dim1];
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- c__[i__ + j * c_dim1] += temp * a[i__ + l *
- a_dim1];
-/* L70: */
- }
- }
-/* L80: */
- }
-/* L90: */
- }
- } else {
-
-/* Form C := alpha*A'*B + beta*C */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- temp += a[l + i__ * a_dim1] * b[l + j * b_dim1];
-/* L100: */
- }
- if (*beta == 0.) {
- c__[i__ + j * c_dim1] = *alpha * temp;
- } else {
- c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
- i__ + j * c_dim1];
- }
-/* L110: */
- }
-/* L120: */
- }
- }
- } else {
- if (nota) {
-
-/* Form C := alpha*A*B' + beta*C */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*beta == 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = 0.;
-/* L130: */
- }
- } else if (*beta != 1.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
-/* L140: */
- }
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- if (b[j + l * b_dim1] != 0.) {
- temp = *alpha * b[j + l * b_dim1];
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- c__[i__ + j * c_dim1] += temp * a[i__ + l *
- a_dim1];
-/* L150: */
- }
- }
-/* L160: */
- }
-/* L170: */
- }
- } else {
-
-/* Form C := alpha*A'*B' + beta*C */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- temp += a[l + i__ * a_dim1] * b[j + l * b_dim1];
-/* L180: */
- }
- if (*beta == 0.) {
- c__[i__ + j * c_dim1] = *alpha * temp;
- } else {
- c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
- i__ + j * c_dim1];
- }
-/* L190: */
- }
-/* L200: */
- }
- }
- }
-
- return 0;
-
-/* End of DGEMM . */
-
-} /* dgemm_ */
-
-/* Subroutine */ int dgemv_(char *trans, integer *m, integer *n, doublereal *
- alpha, doublereal *a, integer *lda, doublereal *x, integer *incx,
- doublereal *beta, doublereal *y, integer *incy)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, j, ix, iy, jx, jy, kx, ky, info;
- static doublereal temp;
- static integer lenx, leny;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- DGEMV performs one of the matrix-vector operations
-
- y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y,
-
- where alpha and beta are scalars, x and y are vectors and A is an
- m by n matrix.
-
- Parameters
- ==========
-
- TRANS - CHARACTER*1.
- On entry, TRANS specifies the operation to be performed as
- follows:
-
- TRANS = 'N' or 'n' y := alpha*A*x + beta*y.
-
- TRANS = 'T' or 't' y := alpha*A'*x + beta*y.
-
- TRANS = 'C' or 'c' y := alpha*A'*x + beta*y.
-
- Unchanged on exit.
-
- M - INTEGER.
- On entry, M specifies the number of rows of the matrix A.
- M must be at least zero.
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the number of columns of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- ALPHA - DOUBLE PRECISION.
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
- Before entry, the leading m by n part of the array A must
- contain the matrix of coefficients.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, m ).
- Unchanged on exit.
-
- X - DOUBLE PRECISION array of DIMENSION at least
- ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'
- and at least
- ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.
- Before entry, the incremented array X must contain the
- vector x.
- Unchanged on exit.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
- BETA - DOUBLE PRECISION.
- On entry, BETA specifies the scalar beta. When BETA is
- supplied as zero then Y need not be set on input.
- Unchanged on exit.
-
- Y - DOUBLE PRECISION array of DIMENSION at least
- ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'
- and at least
- ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.
- Before entry with BETA non-zero, the incremented array Y
- must contain the vector y. On exit, Y is overwritten by the
- updated vector y.
-
- INCY - INTEGER.
- On entry, INCY specifies the increment for the elements of
- Y. INCY must not be zero.
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --x;
- --y;
-
- /* Function Body */
- info = 0;
- if (((! lsame_(trans, "N") && ! lsame_(trans, "T")) && ! lsame_(trans, "C"))) {
- info = 1;
- } else if (*m < 0) {
- info = 2;
- } else if (*n < 0) {
- info = 3;
- } else if (*lda < max(1,*m)) {
- info = 6;
- } else if (*incx == 0) {
- info = 8;
- } else if (*incy == 0) {
- info = 11;
- }
- if (info != 0) {
- xerbla_("DGEMV ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*m == 0 || *n == 0 || (*alpha == 0. && *beta == 1.)) {
- return 0;
- }
-
-/*
- Set LENX and LENY, the lengths of the vectors x and y, and set
- up the start points in X and Y.
-*/
-
- if (lsame_(trans, "N")) {
- lenx = *n;
- leny = *m;
- } else {
- lenx = *m;
- leny = *n;
- }
- if (*incx > 0) {
- kx = 1;
- } else {
- kx = 1 - (lenx - 1) * *incx;
- }
- if (*incy > 0) {
- ky = 1;
- } else {
- ky = 1 - (leny - 1) * *incy;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through A.
-
- First form y := beta*y.
-*/
-
- if (*beta != 1.) {
- if (*incy == 1) {
- if (*beta == 0.) {
- i__1 = leny;
- for (i__ = 1; i__ <= i__1; ++i__) {
- y[i__] = 0.;
-/* L10: */
- }
- } else {
- i__1 = leny;
- for (i__ = 1; i__ <= i__1; ++i__) {
- y[i__] = *beta * y[i__];
-/* L20: */
- }
- }
- } else {
- iy = ky;
- if (*beta == 0.) {
- i__1 = leny;
- for (i__ = 1; i__ <= i__1; ++i__) {
- y[iy] = 0.;
- iy += *incy;
-/* L30: */
- }
- } else {
- i__1 = leny;
- for (i__ = 1; i__ <= i__1; ++i__) {
- y[iy] = *beta * y[iy];
- iy += *incy;
-/* L40: */
- }
- }
- }
- }
- if (*alpha == 0.) {
- return 0;
- }
- if (lsame_(trans, "N")) {
-
-/* Form y := alpha*A*x + y. */
-
- jx = kx;
- if (*incy == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (x[jx] != 0.) {
- temp = *alpha * x[jx];
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- y[i__] += temp * a[i__ + j * a_dim1];
-/* L50: */
- }
- }
- jx += *incx;
-/* L60: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (x[jx] != 0.) {
- temp = *alpha * x[jx];
- iy = ky;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- y[iy] += temp * a[i__ + j * a_dim1];
- iy += *incy;
-/* L70: */
- }
- }
- jx += *incx;
-/* L80: */
- }
- }
- } else {
-
-/* Form y := alpha*A'*x + y. */
-
- jy = ky;
- if (*incx == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp = 0.;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp += a[i__ + j * a_dim1] * x[i__];
-/* L90: */
- }
- y[jy] += *alpha * temp;
- jy += *incy;
-/* L100: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp = 0.;
- ix = kx;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp += a[i__ + j * a_dim1] * x[ix];
- ix += *incx;
-/* L110: */
- }
- y[jy] += *alpha * temp;
- jy += *incy;
-/* L120: */
- }
- }
- }
-
- return 0;
-
-/* End of DGEMV . */
-
-} /* dgemv_ */
-
-/* Subroutine */ int dger_(integer *m, integer *n, doublereal *alpha,
- doublereal *x, integer *incx, doublereal *y, integer *incy,
- doublereal *a, integer *lda)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, j, ix, jy, kx, info;
- static doublereal temp;
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- DGER performs the rank 1 operation
-
- A := alpha*x*y' + A,
-
- where alpha is a scalar, x is an m element vector, y is an n element
- vector and A is an m by n matrix.
-
- Parameters
- ==========
-
- M - INTEGER.
- On entry, M specifies the number of rows of the matrix A.
- M must be at least zero.
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the number of columns of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- ALPHA - DOUBLE PRECISION.
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- X - DOUBLE PRECISION array of dimension at least
- ( 1 + ( m - 1 )*abs( INCX ) ).
- Before entry, the incremented array X must contain the m
- element vector x.
- Unchanged on exit.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
- Y - DOUBLE PRECISION array of dimension at least
- ( 1 + ( n - 1 )*abs( INCY ) ).
- Before entry, the incremented array Y must contain the n
- element vector y.
- Unchanged on exit.
-
- INCY - INTEGER.
- On entry, INCY specifies the increment for the elements of
- Y. INCY must not be zero.
- Unchanged on exit.
-
- A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
- Before entry, the leading m by n part of the array A must
- contain the matrix of coefficients. On exit, A is
- overwritten by the updated matrix.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, m ).
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --x;
- --y;
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- info = 0;
- if (*m < 0) {
- info = 1;
- } else if (*n < 0) {
- info = 2;
- } else if (*incx == 0) {
- info = 5;
- } else if (*incy == 0) {
- info = 7;
- } else if (*lda < max(1,*m)) {
- info = 9;
- }
- if (info != 0) {
- xerbla_("DGER ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*m == 0 || *n == 0 || *alpha == 0.) {
- return 0;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through A.
-*/
-
- if (*incy > 0) {
- jy = 1;
- } else {
- jy = 1 - (*n - 1) * *incy;
- }
- if (*incx == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (y[jy] != 0.) {
- temp = *alpha * y[jy];
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] += x[i__] * temp;
-/* L10: */
- }
- }
- jy += *incy;
-/* L20: */
- }
- } else {
- if (*incx > 0) {
- kx = 1;
- } else {
- kx = 1 - (*m - 1) * *incx;
- }
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (y[jy] != 0.) {
- temp = *alpha * y[jy];
- ix = kx;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] += x[ix] * temp;
- ix += *incx;
-/* L30: */
- }
- }
- jy += *incy;
-/* L40: */
- }
- }
-
- return 0;
-
-/* End of DGER . */
-
-} /* dger_ */
-
-doublereal dnrm2_(integer *n, doublereal *x, integer *incx)
-{
- /* System generated locals */
- integer i__1, i__2;
- doublereal ret_val, d__1;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static integer ix;
- static doublereal ssq, norm, scale, absxi;
-
-
-/*
- DNRM2 returns the euclidean norm of a vector via the function
- name, so that
-
- DNRM2 := sqrt( x'*x )
-
-
- -- This version written on 25-October-1982.
- Modified on 14-October-1993 to inline the call to DLASSQ.
- Sven Hammarling, Nag Ltd.
-*/
-
-
- /* Parameter adjustments */
- --x;
-
- /* Function Body */
- if (*n < 1 || *incx < 1) {
- norm = 0.;
- } else if (*n == 1) {
- norm = abs(x[1]);
- } else {
- scale = 0.;
- ssq = 1.;
-/*
- The following loop is equivalent to this call to the LAPACK
- auxiliary routine:
- CALL DLASSQ( N, X, INCX, SCALE, SSQ )
-*/
-
- i__1 = (*n - 1) * *incx + 1;
- i__2 = *incx;
- for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
- if (x[ix] != 0.) {
- absxi = (d__1 = x[ix], abs(d__1));
- if (scale < absxi) {
-/* Computing 2nd power */
- d__1 = scale / absxi;
- ssq = ssq * (d__1 * d__1) + 1.;
- scale = absxi;
- } else {
-/* Computing 2nd power */
- d__1 = absxi / scale;
- ssq += d__1 * d__1;
- }
- }
-/* L10: */
- }
- norm = scale * sqrt(ssq);
- }
-
- ret_val = norm;
- return ret_val;
-
-/* End of DNRM2. */
-
-} /* dnrm2_ */
-
-/* Subroutine */ int drot_(integer *n, doublereal *dx, integer *incx,
- doublereal *dy, integer *incy, doublereal *c__, doublereal *s)
-{
- /* System generated locals */
- integer i__1;
-
- /* Local variables */
- static integer i__, ix, iy;
- static doublereal dtemp;
-
-
-/*
- applies a plane rotation.
- jack dongarra, linpack, 3/11/78.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --dy;
- --dx;
-
- /* Function Body */
- if (*n <= 0) {
- return 0;
- }
- if ((*incx == 1 && *incy == 1)) {
- goto L20;
- }
-
-/*
- code for unequal increments or equal increments not equal
- to 1
-*/
-
- ix = 1;
- iy = 1;
- if (*incx < 0) {
- ix = (-(*n) + 1) * *incx + 1;
- }
- if (*incy < 0) {
- iy = (-(*n) + 1) * *incy + 1;
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dtemp = *c__ * dx[ix] + *s * dy[iy];
- dy[iy] = *c__ * dy[iy] - *s * dx[ix];
- dx[ix] = dtemp;
- ix += *incx;
- iy += *incy;
-/* L10: */
- }
- return 0;
-
-/* code for both increments equal to 1 */
-
-L20:
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dtemp = *c__ * dx[i__] + *s * dy[i__];
- dy[i__] = *c__ * dy[i__] - *s * dx[i__];
- dx[i__] = dtemp;
-/* L30: */
- }
- return 0;
-} /* drot_ */
-
-/* Subroutine */ int dscal_(integer *n, doublereal *da, doublereal *dx,
- integer *incx)
-{
- /* System generated locals */
- integer i__1, i__2;
-
- /* Local variables */
- static integer i__, m, mp1, nincx;
-
-
-/*
- scales a vector by a constant.
- uses unrolled loops for increment equal to one.
- jack dongarra, linpack, 3/11/78.
- modified 3/93 to return if incx .le. 0.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --dx;
-
- /* Function Body */
- if (*n <= 0 || *incx <= 0) {
- return 0;
- }
- if (*incx == 1) {
- goto L20;
- }
-
-/* code for increment not equal to 1 */
-
- nincx = *n * *incx;
- i__1 = nincx;
- i__2 = *incx;
- for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
- dx[i__] = *da * dx[i__];
-/* L10: */
- }
- return 0;
-
-/*
- code for increment equal to 1
-
-
- clean-up loop
-*/
-
-L20:
- m = *n % 5;
- if (m == 0) {
- goto L40;
- }
- i__2 = m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- dx[i__] = *da * dx[i__];
-/* L30: */
- }
- if (*n < 5) {
- return 0;
- }
-L40:
- mp1 = m + 1;
- i__2 = *n;
- for (i__ = mp1; i__ <= i__2; i__ += 5) {
- dx[i__] = *da * dx[i__];
- dx[i__ + 1] = *da * dx[i__ + 1];
- dx[i__ + 2] = *da * dx[i__ + 2];
- dx[i__ + 3] = *da * dx[i__ + 3];
- dx[i__ + 4] = *da * dx[i__ + 4];
-/* L50: */
- }
- return 0;
-} /* dscal_ */
-
-/* Subroutine */ int dswap_(integer *n, doublereal *dx, integer *incx,
- doublereal *dy, integer *incy)
-{
- /* System generated locals */
- integer i__1;
-
- /* Local variables */
- static integer i__, m, ix, iy, mp1;
- static doublereal dtemp;
-
-
-/*
- interchanges two vectors.
- uses unrolled loops for increments equal one.
- jack dongarra, linpack, 3/11/78.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --dy;
- --dx;
-
- /* Function Body */
- if (*n <= 0) {
- return 0;
- }
- if ((*incx == 1 && *incy == 1)) {
- goto L20;
- }
-
-/*
- code for unequal increments or equal increments not equal
- to 1
-*/
-
- ix = 1;
- iy = 1;
- if (*incx < 0) {
- ix = (-(*n) + 1) * *incx + 1;
- }
- if (*incy < 0) {
- iy = (-(*n) + 1) * *incy + 1;
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dtemp = dx[ix];
- dx[ix] = dy[iy];
- dy[iy] = dtemp;
- ix += *incx;
- iy += *incy;
-/* L10: */
- }
- return 0;
-
-/*
- code for both increments equal to 1
-
-
- clean-up loop
-*/
-
-L20:
- m = *n % 3;
- if (m == 0) {
- goto L40;
- }
- i__1 = m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dtemp = dx[i__];
- dx[i__] = dy[i__];
- dy[i__] = dtemp;
-/* L30: */
- }
- if (*n < 3) {
- return 0;
- }
-L40:
- mp1 = m + 1;
- i__1 = *n;
- for (i__ = mp1; i__ <= i__1; i__ += 3) {
- dtemp = dx[i__];
- dx[i__] = dy[i__];
- dy[i__] = dtemp;
- dtemp = dx[i__ + 1];
- dx[i__ + 1] = dy[i__ + 1];
- dy[i__ + 1] = dtemp;
- dtemp = dx[i__ + 2];
- dx[i__ + 2] = dy[i__ + 2];
- dy[i__ + 2] = dtemp;
-/* L50: */
- }
- return 0;
-} /* dswap_ */
-
-/* Subroutine */ int dsymv_(char *uplo, integer *n, doublereal *alpha,
- doublereal *a, integer *lda, doublereal *x, integer *incx, doublereal
- *beta, doublereal *y, integer *incy)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, j, ix, iy, jx, jy, kx, ky, info;
- static doublereal temp1, temp2;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- DSYMV performs the matrix-vector operation
-
- y := alpha*A*x + beta*y,
-
- where alpha and beta are scalars, x and y are n element vectors and
- A is an n by n symmetric matrix.
-
- Parameters
- ==========
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the upper or lower
- triangular part of the array A is to be referenced as
- follows:
-
- UPLO = 'U' or 'u' Only the upper triangular part of A
- is to be referenced.
-
- UPLO = 'L' or 'l' Only the lower triangular part of A
- is to be referenced.
-
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the order of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- ALPHA - DOUBLE PRECISION.
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
- Before entry with UPLO = 'U' or 'u', the leading n by n
- upper triangular part of the array A must contain the upper
- triangular part of the symmetric matrix and the strictly
- lower triangular part of A is not referenced.
- Before entry with UPLO = 'L' or 'l', the leading n by n
- lower triangular part of the array A must contain the lower
- triangular part of the symmetric matrix and the strictly
- upper triangular part of A is not referenced.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, n ).
- Unchanged on exit.
-
- X - DOUBLE PRECISION array of dimension at least
- ( 1 + ( n - 1 )*abs( INCX ) ).
- Before entry, the incremented array X must contain the n
- element vector x.
- Unchanged on exit.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
- BETA - DOUBLE PRECISION.
- On entry, BETA specifies the scalar beta. When BETA is
- supplied as zero then Y need not be set on input.
- Unchanged on exit.
-
- Y - DOUBLE PRECISION array of dimension at least
- ( 1 + ( n - 1 )*abs( INCY ) ).
- Before entry, the incremented array Y must contain the n
- element vector y. On exit, Y is overwritten by the updated
- vector y.
-
- INCY - INTEGER.
- On entry, INCY specifies the increment for the elements of
- Y. INCY must not be zero.
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --x;
- --y;
-
- /* Function Body */
- info = 0;
- if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
- info = 1;
- } else if (*n < 0) {
- info = 2;
- } else if (*lda < max(1,*n)) {
- info = 5;
- } else if (*incx == 0) {
- info = 7;
- } else if (*incy == 0) {
- info = 10;
- }
- if (info != 0) {
- xerbla_("DSYMV ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0 || (*alpha == 0. && *beta == 1.)) {
- return 0;
- }
-
-/* Set up the start points in X and Y. */
-
- if (*incx > 0) {
- kx = 1;
- } else {
- kx = 1 - (*n - 1) * *incx;
- }
- if (*incy > 0) {
- ky = 1;
- } else {
- ky = 1 - (*n - 1) * *incy;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through the triangular part
- of A.
-
- First form y := beta*y.
-*/
-
- if (*beta != 1.) {
- if (*incy == 1) {
- if (*beta == 0.) {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- y[i__] = 0.;
-/* L10: */
- }
- } else {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- y[i__] = *beta * y[i__];
-/* L20: */
- }
- }
- } else {
- iy = ky;
- if (*beta == 0.) {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- y[iy] = 0.;
- iy += *incy;
-/* L30: */
- }
- } else {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- y[iy] = *beta * y[iy];
- iy += *incy;
-/* L40: */
- }
- }
- }
- }
- if (*alpha == 0.) {
- return 0;
- }
- if (lsame_(uplo, "U")) {
-
-/* Form y when A is stored in upper triangle. */
-
- if ((*incx == 1 && *incy == 1)) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp1 = *alpha * x[j];
- temp2 = 0.;
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- y[i__] += temp1 * a[i__ + j * a_dim1];
- temp2 += a[i__ + j * a_dim1] * x[i__];
-/* L50: */
- }
- y[j] = y[j] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
-/* L60: */
- }
- } else {
- jx = kx;
- jy = ky;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp1 = *alpha * x[jx];
- temp2 = 0.;
- ix = kx;
- iy = ky;
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- y[iy] += temp1 * a[i__ + j * a_dim1];
- temp2 += a[i__ + j * a_dim1] * x[ix];
- ix += *incx;
- iy += *incy;
-/* L70: */
- }
- y[jy] = y[jy] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
- jx += *incx;
- jy += *incy;
-/* L80: */
- }
- }
- } else {
-
-/* Form y when A is stored in lower triangle. */
-
- if ((*incx == 1 && *incy == 1)) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp1 = *alpha * x[j];
- temp2 = 0.;
- y[j] += temp1 * a[j + j * a_dim1];
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- y[i__] += temp1 * a[i__ + j * a_dim1];
- temp2 += a[i__ + j * a_dim1] * x[i__];
-/* L90: */
- }
- y[j] += *alpha * temp2;
-/* L100: */
- }
- } else {
- jx = kx;
- jy = ky;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp1 = *alpha * x[jx];
- temp2 = 0.;
- y[jy] += temp1 * a[j + j * a_dim1];
- ix = jx;
- iy = jy;
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- ix += *incx;
- iy += *incy;
- y[iy] += temp1 * a[i__ + j * a_dim1];
- temp2 += a[i__ + j * a_dim1] * x[ix];
-/* L110: */
- }
- y[jy] += *alpha * temp2;
- jx += *incx;
- jy += *incy;
-/* L120: */
- }
- }
- }
-
- return 0;
-
-/* End of DSYMV . */
-
-} /* dsymv_ */
-
-/* Subroutine */ int dsyr2_(char *uplo, integer *n, doublereal *alpha,
- doublereal *x, integer *incx, doublereal *y, integer *incy,
- doublereal *a, integer *lda)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, j, ix, iy, jx, jy, kx, ky, info;
- static doublereal temp1, temp2;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- DSYR2 performs the symmetric rank 2 operation
-
- A := alpha*x*y' + alpha*y*x' + A,
-
- where alpha is a scalar, x and y are n element vectors and A is an n
- by n symmetric matrix.
-
- Parameters
- ==========
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the upper or lower
- triangular part of the array A is to be referenced as
- follows:
-
- UPLO = 'U' or 'u' Only the upper triangular part of A
- is to be referenced.
-
- UPLO = 'L' or 'l' Only the lower triangular part of A
- is to be referenced.
-
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the order of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- ALPHA - DOUBLE PRECISION.
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- X - DOUBLE PRECISION array of dimension at least
- ( 1 + ( n - 1 )*abs( INCX ) ).
- Before entry, the incremented array X must contain the n
- element vector x.
- Unchanged on exit.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
- Y - DOUBLE PRECISION array of dimension at least
- ( 1 + ( n - 1 )*abs( INCY ) ).
- Before entry, the incremented array Y must contain the n
- element vector y.
- Unchanged on exit.
-
- INCY - INTEGER.
- On entry, INCY specifies the increment for the elements of
- Y. INCY must not be zero.
- Unchanged on exit.
-
- A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
- Before entry with UPLO = 'U' or 'u', the leading n by n
- upper triangular part of the array A must contain the upper
- triangular part of the symmetric matrix and the strictly
- lower triangular part of A is not referenced. On exit, the
- upper triangular part of the array A is overwritten by the
- upper triangular part of the updated matrix.
- Before entry with UPLO = 'L' or 'l', the leading n by n
- lower triangular part of the array A must contain the lower
- triangular part of the symmetric matrix and the strictly
- upper triangular part of A is not referenced. On exit, the
- lower triangular part of the array A is overwritten by the
- lower triangular part of the updated matrix.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, n ).
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --x;
- --y;
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- info = 0;
- if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
- info = 1;
- } else if (*n < 0) {
- info = 2;
- } else if (*incx == 0) {
- info = 5;
- } else if (*incy == 0) {
- info = 7;
- } else if (*lda < max(1,*n)) {
- info = 9;
- }
- if (info != 0) {
- xerbla_("DSYR2 ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0 || *alpha == 0.) {
- return 0;
- }
-
-/*
- Set up the start points in X and Y if the increments are not both
- unity.
-*/
-
- if (*incx != 1 || *incy != 1) {
- if (*incx > 0) {
- kx = 1;
- } else {
- kx = 1 - (*n - 1) * *incx;
- }
- if (*incy > 0) {
- ky = 1;
- } else {
- ky = 1 - (*n - 1) * *incy;
- }
- jx = kx;
- jy = ky;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through the triangular part
- of A.
-*/
-
- if (lsame_(uplo, "U")) {
-
-/* Form A when A is stored in the upper triangle. */
-
- if ((*incx == 1 && *incy == 1)) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (x[j] != 0. || y[j] != 0.) {
- temp1 = *alpha * y[j];
- temp2 = *alpha * x[j];
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
- temp1 + y[i__] * temp2;
-/* L10: */
- }
- }
-/* L20: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (x[jx] != 0. || y[jy] != 0.) {
- temp1 = *alpha * y[jy];
- temp2 = *alpha * x[jx];
- ix = kx;
- iy = ky;
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
- temp1 + y[iy] * temp2;
- ix += *incx;
- iy += *incy;
-/* L30: */
- }
- }
- jx += *incx;
- jy += *incy;
-/* L40: */
- }
- }
- } else {
-
-/* Form A when A is stored in the lower triangle. */
-
- if ((*incx == 1 && *incy == 1)) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (x[j] != 0. || y[j] != 0.) {
- temp1 = *alpha * y[j];
- temp2 = *alpha * x[j];
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
- temp1 + y[i__] * temp2;
-/* L50: */
- }
- }
-/* L60: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (x[jx] != 0. || y[jy] != 0.) {
- temp1 = *alpha * y[jy];
- temp2 = *alpha * x[jx];
- ix = jx;
- iy = jy;
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
- temp1 + y[iy] * temp2;
- ix += *incx;
- iy += *incy;
-/* L70: */
- }
- }
- jx += *incx;
- jy += *incy;
-/* L80: */
- }
- }
- }
-
- return 0;
-
-/* End of DSYR2 . */
-
-} /* dsyr2_ */
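For reference, a minimal hypothetical driver for the dsyr2_ routine removed above. It is not part of this diff: the typedefs assume the f2c mappings in NumPy's bundled f2c.h (integer as int, doublereal as double; stock f2c.h uses long), arrays are column-major as BLAS expects, and it links against the translation unit that provides dsyr2_.

#include <stdio.h>

typedef int integer;        /* assumption: NumPy's f2c.h; stock f2c uses long */
typedef double doublereal;

extern int dsyr2_(char *uplo, integer *n, doublereal *alpha,
                  doublereal *x, integer *incx, doublereal *y, integer *incy,
                  doublereal *a, integer *lda);

int main(void)
{
    doublereal a[4] = { 1., 0., 0., 1. };   /* A = I, 2x2, column-major */
    doublereal x[2] = { 1., 2. };
    doublereal y[2] = { 3., 4. };
    doublereal alpha = 1.;
    integer n = 2, inc = 1, lda = 2;

    /* Rank-2 update of the upper triangle: A := alpha*x*y' + alpha*y*x' + A. */
    dsyr2_("U", &n, &alpha, x, &inc, y, &inc, a, &lda);

    /* Expected: a11 = 7, a12 = 10, a22 = 17; a21 is never referenced. */
    printf("a11=%g a12=%g a22=%g\n", a[0], a[2], a[3]);
    return 0;
}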
-
-/* Subroutine */ int dsyr2k_(char *uplo, char *trans, integer *n, integer *k,
- doublereal *alpha, doublereal *a, integer *lda, doublereal *b,
- integer *ldb, doublereal *beta, doublereal *c__, integer *ldc)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
- i__3;
-
- /* Local variables */
- static integer i__, j, l, info;
- static doublereal temp1, temp2;
- extern logical lsame_(char *, char *);
- static integer nrowa;
- static logical upper;
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- DSYR2K performs one of the symmetric rank 2k operations
-
- C := alpha*A*B' + alpha*B*A' + beta*C,
-
- or
-
- C := alpha*A'*B + alpha*B'*A + beta*C,
-
- where alpha and beta are scalars, C is an n by n symmetric matrix
- and A and B are n by k matrices in the first case and k by n
- matrices in the second case.
-
- Parameters
- ==========
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the upper or lower
- triangular part of the array C is to be referenced as
- follows:
-
- UPLO = 'U' or 'u' Only the upper triangular part of C
- is to be referenced.
-
- UPLO = 'L' or 'l' Only the lower triangular part of C
- is to be referenced.
-
- Unchanged on exit.
-
- TRANS - CHARACTER*1.
- On entry, TRANS specifies the operation to be performed as
- follows:
-
- TRANS = 'N' or 'n' C := alpha*A*B' + alpha*B*A' +
- beta*C.
-
- TRANS = 'T' or 't' C := alpha*A'*B + alpha*B'*A +
- beta*C.
-
- TRANS = 'C' or 'c' C := alpha*A'*B + alpha*B'*A +
- beta*C.
-
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the order of the matrix C. N must be
- at least zero.
- Unchanged on exit.
-
- K - INTEGER.
- On entry with TRANS = 'N' or 'n', K specifies the number
- of columns of the matrices A and B, and on entry with
- TRANS = 'T' or 't' or 'C' or 'c', K specifies the number
- of rows of the matrices A and B. K must be at least zero.
- Unchanged on exit.
-
- ALPHA - DOUBLE PRECISION.
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
- k when TRANS = 'N' or 'n', and is n otherwise.
- Before entry with TRANS = 'N' or 'n', the leading n by k
- part of the array A must contain the matrix A, otherwise
- the leading k by n part of the array A must contain the
- matrix A.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. When TRANS = 'N' or 'n'
- then LDA must be at least max( 1, n ), otherwise LDA must
- be at least max( 1, k ).
- Unchanged on exit.
-
- B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
- k when TRANS = 'N' or 'n', and is n otherwise.
- Before entry with TRANS = 'N' or 'n', the leading n by k
- part of the array B must contain the matrix B, otherwise
- the leading k by n part of the array B must contain the
- matrix B.
- Unchanged on exit.
-
- LDB - INTEGER.
- On entry, LDB specifies the first dimension of B as declared
- in the calling (sub) program. When TRANS = 'N' or 'n'
- then LDB must be at least max( 1, n ), otherwise LDB must
- be at least max( 1, k ).
- Unchanged on exit.
-
- BETA - DOUBLE PRECISION.
- On entry, BETA specifies the scalar beta.
- Unchanged on exit.
-
- C - DOUBLE PRECISION array of DIMENSION ( LDC, n ).
- Before entry with UPLO = 'U' or 'u', the leading n by n
- upper triangular part of the array C must contain the upper
- triangular part of the symmetric matrix and the strictly
- lower triangular part of C is not referenced. On exit, the
- upper triangular part of the array C is overwritten by the
- upper triangular part of the updated matrix.
- Before entry with UPLO = 'L' or 'l', the leading n by n
- lower triangular part of the array C must contain the lower
- triangular part of the symmetric matrix and the strictly
- upper triangular part of C is not referenced. On exit, the
- lower triangular part of the array C is overwritten by the
- lower triangular part of the updated matrix.
-
- LDC - INTEGER.
- On entry, LDC specifies the first dimension of C as declared
- in the calling (sub) program. LDC must be at least
- max( 1, n ).
- Unchanged on exit.
-
-
- Level 3 Blas routine.
-
-
- -- Written on 8-February-1989.
- Jack Dongarra, Argonne National Laboratory.
- Iain Duff, AERE Harwell.
- Jeremy Du Croz, Numerical Algorithms Group Ltd.
- Sven Hammarling, Numerical Algorithms Group Ltd.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
-
- /* Function Body */
- if (lsame_(trans, "N")) {
- nrowa = *n;
- } else {
- nrowa = *k;
- }
- upper = lsame_(uplo, "U");
-
- info = 0;
- if ((! upper && ! lsame_(uplo, "L"))) {
- info = 1;
- } else if (((! lsame_(trans, "N") && ! lsame_(trans,
- "T")) && ! lsame_(trans, "C"))) {
- info = 2;
- } else if (*n < 0) {
- info = 3;
- } else if (*k < 0) {
- info = 4;
- } else if (*lda < max(1,nrowa)) {
- info = 7;
- } else if (*ldb < max(1,nrowa)) {
- info = 9;
- } else if (*ldc < max(1,*n)) {
- info = 12;
- }
- if (info != 0) {
- xerbla_("DSYR2K", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) {
- return 0;
- }
-
-/* And when alpha.eq.zero. */
-
- if (*alpha == 0.) {
- if (upper) {
- if (*beta == 0.) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = 0.;
-/* L10: */
- }
-/* L20: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
-/* L30: */
- }
-/* L40: */
- }
- }
- } else {
- if (*beta == 0.) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = 0.;
-/* L50: */
- }
-/* L60: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
-/* L70: */
- }
-/* L80: */
- }
- }
- }
- return 0;
- }
-
-/* Start the operations. */
-
- if (lsame_(trans, "N")) {
-
-/* Form C := alpha*A*B' + alpha*B*A' + C. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*beta == 0.) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = 0.;
-/* L90: */
- }
- } else if (*beta != 1.) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
-/* L100: */
- }
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) {
- temp1 = *alpha * b[j + l * b_dim1];
- temp2 = *alpha * a[j + l * a_dim1];
- i__3 = j;
- for (i__ = 1; i__ <= i__3; ++i__) {
- c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
- i__ + l * a_dim1] * temp1 + b[i__ + l *
- b_dim1] * temp2;
-/* L110: */
- }
- }
-/* L120: */
- }
-/* L130: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*beta == 0.) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = 0.;
-/* L140: */
- }
- } else if (*beta != 1.) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
-/* L150: */
- }
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- if (a[j + l * a_dim1] != 0. || b[j + l * b_dim1] != 0.) {
- temp1 = *alpha * b[j + l * b_dim1];
- temp2 = *alpha * a[j + l * a_dim1];
- i__3 = *n;
- for (i__ = j; i__ <= i__3; ++i__) {
- c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
- i__ + l * a_dim1] * temp1 + b[i__ + l *
- b_dim1] * temp2;
-/* L160: */
- }
- }
-/* L170: */
- }
-/* L180: */
- }
- }
- } else {
-
-/* Form C := alpha*A'*B + alpha*B'*A + C. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp1 = 0.;
- temp2 = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
- temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
-/* L190: */
- }
- if (*beta == 0.) {
- c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
- temp2;
- } else {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
- + *alpha * temp1 + *alpha * temp2;
- }
-/* L200: */
- }
-/* L210: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- temp1 = 0.;
- temp2 = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
- temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
-/* L220: */
- }
- if (*beta == 0.) {
- c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
- temp2;
- } else {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
- + *alpha * temp1 + *alpha * temp2;
- }
-/* L230: */
- }
-/* L240: */
- }
- }
- }
-
- return 0;
-
-/* End of DSYR2K. */
-
-} /* dsyr2k_ */
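With TRANS = 'N' and k = 1 the rank-2k update reduces to the rank-2 case, so the output is easy to check by hand. A hedged sketch of a call to the removed dsyr2k_, under the same assumed f2c typedefs as above:

#include <stdio.h>

typedef int integer;
typedef double doublereal;

extern int dsyr2k_(char *uplo, char *trans, integer *n, integer *k,
                   doublereal *alpha, doublereal *a, integer *lda,
                   doublereal *b, integer *ldb, doublereal *beta,
                   doublereal *c, integer *ldc);

int main(void)
{
    doublereal a[2] = { 1., 2. };           /* A is 2x1 (n=2, k=1) */
    doublereal b[2] = { 3., 4. };           /* B is 2x1 */
    doublereal c[4] = { 0., 0., 0., 0. };   /* C is 2x2, column-major */
    doublereal alpha = 1., beta = 0.;
    integer n = 2, k = 1, ld = 2;

    /* C := alpha*A*B' + alpha*B*A' + beta*C, upper triangle only. */
    dsyr2k_("U", "N", &n, &k, &alpha, a, &ld, b, &ld, &beta, c, &ld);

    /* Expected upper triangle: c11 = 6, c12 = 10, c22 = 16. */
    printf("c11=%g c12=%g c22=%g\n", c[0], c[2], c[3]);
    return 0;
}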
-
-/* Subroutine */ int dsyrk_(char *uplo, char *trans, integer *n, integer *k,
- doublereal *alpha, doublereal *a, integer *lda, doublereal *beta,
- doublereal *c__, integer *ldc)
-{
- /* System generated locals */
- integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, j, l, info;
- static doublereal temp;
- extern logical lsame_(char *, char *);
- static integer nrowa;
- static logical upper;
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- DSYRK performs one of the symmetric rank k operations
-
- C := alpha*A*A' + beta*C,
-
- or
-
- C := alpha*A'*A + beta*C,
-
- where alpha and beta are scalars, C is an n by n symmetric matrix
- and A is an n by k matrix in the first case and a k by n matrix
- in the second case.
-
- Parameters
- ==========
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the upper or lower
- triangular part of the array C is to be referenced as
- follows:
-
- UPLO = 'U' or 'u' Only the upper triangular part of C
- is to be referenced.
-
- UPLO = 'L' or 'l' Only the lower triangular part of C
- is to be referenced.
-
- Unchanged on exit.
-
- TRANS - CHARACTER*1.
- On entry, TRANS specifies the operation to be performed as
- follows:
-
- TRANS = 'N' or 'n' C := alpha*A*A' + beta*C.
-
- TRANS = 'T' or 't' C := alpha*A'*A + beta*C.
-
- TRANS = 'C' or 'c' C := alpha*A'*A + beta*C.
-
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the order of the matrix C. N must be
- at least zero.
- Unchanged on exit.
-
- K - INTEGER.
- On entry with TRANS = 'N' or 'n', K specifies the number
- of columns of the matrix A, and on entry with
- TRANS = 'T' or 't' or 'C' or 'c', K specifies the number
- of rows of the matrix A. K must be at least zero.
- Unchanged on exit.
-
- ALPHA - DOUBLE PRECISION.
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
- k when TRANS = 'N' or 'n', and is n otherwise.
- Before entry with TRANS = 'N' or 'n', the leading n by k
- part of the array A must contain the matrix A, otherwise
- the leading k by n part of the array A must contain the
- matrix A.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. When TRANS = 'N' or 'n'
- then LDA must be at least max( 1, n ), otherwise LDA must
- be at least max( 1, k ).
- Unchanged on exit.
-
- BETA - DOUBLE PRECISION.
- On entry, BETA specifies the scalar beta.
- Unchanged on exit.
-
- C - DOUBLE PRECISION array of DIMENSION ( LDC, n ).
- Before entry with UPLO = 'U' or 'u', the leading n by n
- upper triangular part of the array C must contain the upper
- triangular part of the symmetric matrix and the strictly
- lower triangular part of C is not referenced. On exit, the
- upper triangular part of the array C is overwritten by the
- upper triangular part of the updated matrix.
- Before entry with UPLO = 'L' or 'l', the leading n by n
- lower triangular part of the array C must contain the lower
- triangular part of the symmetric matrix and the strictly
- upper triangular part of C is not referenced. On exit, the
- lower triangular part of the array C is overwritten by the
- lower triangular part of the updated matrix.
-
- LDC - INTEGER.
- On entry, LDC specifies the first dimension of C as declared
- in the calling (sub) program. LDC must be at least
- max( 1, n ).
- Unchanged on exit.
-
-
- Level 3 Blas routine.
-
- -- Written on 8-February-1989.
- Jack Dongarra, Argonne National Laboratory.
- Iain Duff, AERE Harwell.
- Jeremy Du Croz, Numerical Algorithms Group Ltd.
- Sven Hammarling, Numerical Algorithms Group Ltd.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
-
- /* Function Body */
- if (lsame_(trans, "N")) {
- nrowa = *n;
- } else {
- nrowa = *k;
- }
- upper = lsame_(uplo, "U");
-
- info = 0;
- if ((! upper && ! lsame_(uplo, "L"))) {
- info = 1;
- } else if (((! lsame_(trans, "N") && ! lsame_(trans,
- "T")) && ! lsame_(trans, "C"))) {
- info = 2;
- } else if (*n < 0) {
- info = 3;
- } else if (*k < 0) {
- info = 4;
- } else if (*lda < max(1,nrowa)) {
- info = 7;
- } else if (*ldc < max(1,*n)) {
- info = 10;
- }
- if (info != 0) {
- xerbla_("DSYRK ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) {
- return 0;
- }
-
-/* And when alpha.eq.zero. */
-
- if (*alpha == 0.) {
- if (upper) {
- if (*beta == 0.) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = 0.;
-/* L10: */
- }
-/* L20: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
-/* L30: */
- }
-/* L40: */
- }
- }
- } else {
- if (*beta == 0.) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = 0.;
-/* L50: */
- }
-/* L60: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
-/* L70: */
- }
-/* L80: */
- }
- }
- }
- return 0;
- }
-
-/* Start the operations. */
-
- if (lsame_(trans, "N")) {
-
-/* Form C := alpha*A*A' + beta*C. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*beta == 0.) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = 0.;
-/* L90: */
- }
- } else if (*beta != 1.) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
-/* L100: */
- }
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- if (a[j + l * a_dim1] != 0.) {
- temp = *alpha * a[j + l * a_dim1];
- i__3 = j;
- for (i__ = 1; i__ <= i__3; ++i__) {
- c__[i__ + j * c_dim1] += temp * a[i__ + l *
- a_dim1];
-/* L110: */
- }
- }
-/* L120: */
- }
-/* L130: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*beta == 0.) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = 0.;
-/* L140: */
- }
- } else if (*beta != 1.) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
-/* L150: */
- }
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- if (a[j + l * a_dim1] != 0.) {
- temp = *alpha * a[j + l * a_dim1];
- i__3 = *n;
- for (i__ = j; i__ <= i__3; ++i__) {
- c__[i__ + j * c_dim1] += temp * a[i__ + l *
- a_dim1];
-/* L160: */
- }
- }
-/* L170: */
- }
-/* L180: */
- }
- }
- } else {
-
-/* Form C := alpha*A'*A + beta*C. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
-/* L190: */
- }
- if (*beta == 0.) {
- c__[i__ + j * c_dim1] = *alpha * temp;
- } else {
- c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
- i__ + j * c_dim1];
- }
-/* L200: */
- }
-/* L210: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- temp = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
-/* L220: */
- }
- if (*beta == 0.) {
- c__[i__ + j * c_dim1] = *alpha * temp;
- } else {
- c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
- i__ + j * c_dim1];
- }
-/* L230: */
- }
-/* L240: */
- }
- }
- }
-
- return 0;
-
-/* End of DSYRK . */
-
-} /* dsyrk_ */
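dsyrk_ is the single-matrix analogue: with k = 1 and TRANS = 'N', C := alpha*A*A' + beta*C is just the outer product of one column with itself. A sketch under the same assumed typedefs:

#include <stdio.h>

typedef int integer;
typedef double doublereal;

extern int dsyrk_(char *uplo, char *trans, integer *n, integer *k,
                  doublereal *alpha, doublereal *a, integer *lda,
                  doublereal *beta, doublereal *c, integer *ldc);

int main(void)
{
    doublereal a[2] = { 1., 2. };           /* A is 2x1, so A*A' is an outer product */
    doublereal c[4] = { 0., 0., 0., 0. };
    doublereal alpha = 1., beta = 0.;
    integer n = 2, k = 1, ld = 2;

    dsyrk_("U", "N", &n, &k, &alpha, a, &ld, &beta, c, &ld);

    /* Expected upper triangle of A*A': c11 = 1, c12 = 2, c22 = 4. */
    printf("c11=%g c12=%g c22=%g\n", c[0], c[2], c[3]);
    return 0;
}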
-
-/* Subroutine */ int dtrmm_(char *side, char *uplo, char *transa, char *diag,
- integer *m, integer *n, doublereal *alpha, doublereal *a, integer *
- lda, doublereal *b, integer *ldb)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, j, k, info;
- static doublereal temp;
- static logical lside;
- extern logical lsame_(char *, char *);
- static integer nrowa;
- static logical upper;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical nounit;
-
-
-/*
- Purpose
- =======
-
- DTRMM performs one of the matrix-matrix operations
-
- B := alpha*op( A )*B, or B := alpha*B*op( A ),
-
- where alpha is a scalar, B is an m by n matrix, A is a unit, or
- non-unit, upper or lower triangular matrix and op( A ) is one of
-
- op( A ) = A or op( A ) = A'.
-
- Parameters
- ==========
-
- SIDE - CHARACTER*1.
- On entry, SIDE specifies whether op( A ) multiplies B from
- the left or right as follows:
-
- SIDE = 'L' or 'l' B := alpha*op( A )*B.
-
- SIDE = 'R' or 'r' B := alpha*B*op( A ).
-
- Unchanged on exit.
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the matrix A is an upper or
- lower triangular matrix as follows:
-
- UPLO = 'U' or 'u' A is an upper triangular matrix.
-
- UPLO = 'L' or 'l' A is a lower triangular matrix.
-
- Unchanged on exit.
-
- TRANSA - CHARACTER*1.
- On entry, TRANSA specifies the form of op( A ) to be used in
- the matrix multiplication as follows:
-
- TRANSA = 'N' or 'n' op( A ) = A.
-
- TRANSA = 'T' or 't' op( A ) = A'.
-
- TRANSA = 'C' or 'c' op( A ) = A'.
-
- Unchanged on exit.
-
- DIAG - CHARACTER*1.
- On entry, DIAG specifies whether or not A is unit triangular
- as follows:
-
- DIAG = 'U' or 'u' A is assumed to be unit triangular.
-
- DIAG = 'N' or 'n' A is not assumed to be unit
- triangular.
-
- Unchanged on exit.
-
- M - INTEGER.
- On entry, M specifies the number of rows of B. M must be at
- least zero.
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the number of columns of B. N must be
- at least zero.
- Unchanged on exit.
-
- ALPHA - DOUBLE PRECISION.
- On entry, ALPHA specifies the scalar alpha. When alpha is
- zero then A is not referenced and B need not be set before
- entry.
- Unchanged on exit.
-
- A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m
- when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
- Before entry with UPLO = 'U' or 'u', the leading k by k
- upper triangular part of the array A must contain the upper
- triangular matrix and the strictly lower triangular part of
- A is not referenced.
- Before entry with UPLO = 'L' or 'l', the leading k by k
- lower triangular part of the array A must contain the lower
- triangular matrix and the strictly upper triangular part of
- A is not referenced.
- Note that when DIAG = 'U' or 'u', the diagonal elements of
- A are not referenced either, but are assumed to be unity.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. When SIDE = 'L' or 'l' then
- LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
- then LDA must be at least max( 1, n ).
- Unchanged on exit.
-
- B - DOUBLE PRECISION array of DIMENSION ( LDB, n ).
- Before entry, the leading m by n part of the array B must
- contain the matrix B, and on exit is overwritten by the
- transformed matrix.
-
- LDB - INTEGER.
- On entry, LDB specifies the first dimension of B as declared
- in the calling (sub) program. LDB must be at least
- max( 1, m ).
- Unchanged on exit.
-
-
- Level 3 Blas routine.
-
- -- Written on 8-February-1989.
- Jack Dongarra, Argonne National Laboratory.
- Iain Duff, AERE Harwell.
- Jeremy Du Croz, Numerical Algorithms Group Ltd.
- Sven Hammarling, Numerical Algorithms Group Ltd.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
-
- /* Function Body */
- lside = lsame_(side, "L");
- if (lside) {
- nrowa = *m;
- } else {
- nrowa = *n;
- }
- nounit = lsame_(diag, "N");
- upper = lsame_(uplo, "U");
-
- info = 0;
- if ((! lside && ! lsame_(side, "R"))) {
- info = 1;
- } else if ((! upper && ! lsame_(uplo, "L"))) {
- info = 2;
- } else if (((! lsame_(transa, "N") && ! lsame_(
- transa, "T")) && ! lsame_(transa, "C"))) {
- info = 3;
- } else if ((! lsame_(diag, "U") && ! lsame_(diag,
- "N"))) {
- info = 4;
- } else if (*m < 0) {
- info = 5;
- } else if (*n < 0) {
- info = 6;
- } else if (*lda < max(1,nrowa)) {
- info = 9;
- } else if (*ldb < max(1,*m)) {
- info = 11;
- }
- if (info != 0) {
- xerbla_("DTRMM ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0) {
- return 0;
- }
-
-/* And when alpha.eq.zero. */
-
- if (*alpha == 0.) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] = 0.;
-/* L10: */
- }
-/* L20: */
- }
- return 0;
- }
-
-/* Start the operations. */
-
- if (lside) {
- if (lsame_(transa, "N")) {
-
-/* Form B := alpha*A*B. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (k = 1; k <= i__2; ++k) {
- if (b[k + j * b_dim1] != 0.) {
- temp = *alpha * b[k + j * b_dim1];
- i__3 = k - 1;
- for (i__ = 1; i__ <= i__3; ++i__) {
- b[i__ + j * b_dim1] += temp * a[i__ + k *
- a_dim1];
-/* L30: */
- }
- if (nounit) {
- temp *= a[k + k * a_dim1];
- }
- b[k + j * b_dim1] = temp;
- }
-/* L40: */
- }
-/* L50: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- for (k = *m; k >= 1; --k) {
- if (b[k + j * b_dim1] != 0.) {
- temp = *alpha * b[k + j * b_dim1];
- b[k + j * b_dim1] = temp;
- if (nounit) {
- b[k + j * b_dim1] *= a[k + k * a_dim1];
- }
- i__2 = *m;
- for (i__ = k + 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] += temp * a[i__ + k *
- a_dim1];
-/* L60: */
- }
- }
-/* L70: */
- }
-/* L80: */
- }
- }
- } else {
-
-/* Form B := alpha*A'*B. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- for (i__ = *m; i__ >= 1; --i__) {
- temp = b[i__ + j * b_dim1];
- if (nounit) {
- temp *= a[i__ + i__ * a_dim1];
- }
- i__2 = i__ - 1;
- for (k = 1; k <= i__2; ++k) {
- temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
-/* L90: */
- }
- b[i__ + j * b_dim1] = *alpha * temp;
-/* L100: */
- }
-/* L110: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp = b[i__ + j * b_dim1];
- if (nounit) {
- temp *= a[i__ + i__ * a_dim1];
- }
- i__3 = *m;
- for (k = i__ + 1; k <= i__3; ++k) {
- temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
-/* L120: */
- }
- b[i__ + j * b_dim1] = *alpha * temp;
-/* L130: */
- }
-/* L140: */
- }
- }
- }
- } else {
- if (lsame_(transa, "N")) {
-
-/* Form B := alpha*B*A. */
-
- if (upper) {
- for (j = *n; j >= 1; --j) {
- temp = *alpha;
- if (nounit) {
- temp *= a[j + j * a_dim1];
- }
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
-/* L150: */
- }
- i__1 = j - 1;
- for (k = 1; k <= i__1; ++k) {
- if (a[k + j * a_dim1] != 0.) {
- temp = *alpha * a[k + j * a_dim1];
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] += temp * b[i__ + k *
- b_dim1];
-/* L160: */
- }
- }
-/* L170: */
- }
-/* L180: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp = *alpha;
- if (nounit) {
- temp *= a[j + j * a_dim1];
- }
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
-/* L190: */
- }
- i__2 = *n;
- for (k = j + 1; k <= i__2; ++k) {
- if (a[k + j * a_dim1] != 0.) {
- temp = *alpha * a[k + j * a_dim1];
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- b[i__ + j * b_dim1] += temp * b[i__ + k *
- b_dim1];
-/* L200: */
- }
- }
-/* L210: */
- }
-/* L220: */
- }
- }
- } else {
-
-/* Form B := alpha*B*A'. */
-
- if (upper) {
- i__1 = *n;
- for (k = 1; k <= i__1; ++k) {
- i__2 = k - 1;
- for (j = 1; j <= i__2; ++j) {
- if (a[j + k * a_dim1] != 0.) {
- temp = *alpha * a[j + k * a_dim1];
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- b[i__ + j * b_dim1] += temp * b[i__ + k *
- b_dim1];
-/* L230: */
- }
- }
-/* L240: */
- }
- temp = *alpha;
- if (nounit) {
- temp *= a[k + k * a_dim1];
- }
- if (temp != 1.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
-/* L250: */
- }
- }
-/* L260: */
- }
- } else {
- for (k = *n; k >= 1; --k) {
- i__1 = *n;
- for (j = k + 1; j <= i__1; ++j) {
- if (a[j + k * a_dim1] != 0.) {
- temp = *alpha * a[j + k * a_dim1];
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] += temp * b[i__ + k *
- b_dim1];
-/* L270: */
- }
- }
-/* L280: */
- }
- temp = *alpha;
- if (nounit) {
- temp *= a[k + k * a_dim1];
- }
- if (temp != 1.) {
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
-/* L290: */
- }
- }
-/* L300: */
- }
- }
- }
- }
-
- return 0;
-
-/* End of DTRMM . */
-
-} /* dtrmm_ */
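A hypothetical call to the removed dtrmm_ from the left with an upper triangular A; as throughout, the typedefs are an assumption matching NumPy's bundled f2c.h, and the strictly lower triangle of A can hold garbage because it is never referenced.

#include <stdio.h>

typedef int integer;
typedef double doublereal;

extern int dtrmm_(char *side, char *uplo, char *transa, char *diag,
                  integer *m, integer *n, doublereal *alpha,
                  doublereal *a, integer *lda, doublereal *b, integer *ldb);

int main(void)
{
    /* Upper triangular A = [1 2; 0 3], column-major; a21 is not referenced. */
    doublereal a[4] = { 1., 0., 2., 3. };
    doublereal b[2] = { 1., 1. };           /* B is 2x1 */
    doublereal alpha = 1.;
    integer m = 2, n = 1, lda = 2, ldb = 2;

    /* B := alpha*A*B from the left. */
    dtrmm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);

    /* Expected: B = [1*1 + 2*1, 3*1] = [3, 3]. */
    printf("b = [%g, %g]\n", b[0], b[1]);
    return 0;
}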
-
-/* Subroutine */ int dtrmv_(char *uplo, char *trans, char *diag, integer *n,
- doublereal *a, integer *lda, doublereal *x, integer *incx)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, j, ix, jx, kx, info;
- static doublereal temp;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical nounit;
-
-
-/*
- Purpose
- =======
-
- DTRMV performs one of the matrix-vector operations
-
- x := A*x, or x := A'*x,
-
- where x is an n element vector and A is an n by n unit, or non-unit,
- upper or lower triangular matrix.
-
- Parameters
- ==========
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the matrix is an upper or
- lower triangular matrix as follows:
-
- UPLO = 'U' or 'u' A is an upper triangular matrix.
-
- UPLO = 'L' or 'l' A is a lower triangular matrix.
-
- Unchanged on exit.
-
- TRANS - CHARACTER*1.
- On entry, TRANS specifies the operation to be performed as
- follows:
-
- TRANS = 'N' or 'n' x := A*x.
-
- TRANS = 'T' or 't' x := A'*x.
-
- TRANS = 'C' or 'c' x := A'*x.
-
- Unchanged on exit.
-
- DIAG - CHARACTER*1.
- On entry, DIAG specifies whether or not A is unit
- triangular as follows:
-
- DIAG = 'U' or 'u' A is assumed to be unit triangular.
-
- DIAG = 'N' or 'n' A is not assumed to be unit
- triangular.
-
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the order of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
- Before entry with UPLO = 'U' or 'u', the leading n by n
- upper triangular part of the array A must contain the upper
- triangular matrix and the strictly lower triangular part of
- A is not referenced.
- Before entry with UPLO = 'L' or 'l', the leading n by n
- lower triangular part of the array A must contain the lower
- triangular matrix and the strictly upper triangular part of
- A is not referenced.
- Note that when DIAG = 'U' or 'u', the diagonal elements of
- A are not referenced either, but are assumed to be unity.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, n ).
- Unchanged on exit.
-
- X - DOUBLE PRECISION array of dimension at least
- ( 1 + ( n - 1 )*abs( INCX ) ).
- Before entry, the incremented array X must contain the n
- element vector x. On exit, X is overwritten with the
- transformed vector x.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --x;
-
- /* Function Body */
- info = 0;
- if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
- info = 1;
- } else if (((! lsame_(trans, "N") && ! lsame_(trans,
- "T")) && ! lsame_(trans, "C"))) {
- info = 2;
- } else if ((! lsame_(diag, "U") && ! lsame_(diag,
- "N"))) {
- info = 3;
- } else if (*n < 0) {
- info = 4;
- } else if (*lda < max(1,*n)) {
- info = 6;
- } else if (*incx == 0) {
- info = 8;
- }
- if (info != 0) {
- xerbla_("DTRMV ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0) {
- return 0;
- }
-
- nounit = lsame_(diag, "N");
-
-/*
- Set up the start point in X if the increment is not unity. This
- will be ( N - 1 )*INCX too small for descending loops.
-*/
-
- if (*incx <= 0) {
- kx = 1 - (*n - 1) * *incx;
- } else if (*incx != 1) {
- kx = 1;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through A.
-*/
-
- if (lsame_(trans, "N")) {
-
-/* Form x := A*x. */
-
- if (lsame_(uplo, "U")) {
- if (*incx == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (x[j] != 0.) {
- temp = x[j];
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- x[i__] += temp * a[i__ + j * a_dim1];
-/* L10: */
- }
- if (nounit) {
- x[j] *= a[j + j * a_dim1];
- }
- }
-/* L20: */
- }
- } else {
- jx = kx;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (x[jx] != 0.) {
- temp = x[jx];
- ix = kx;
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- x[ix] += temp * a[i__ + j * a_dim1];
- ix += *incx;
-/* L30: */
- }
- if (nounit) {
- x[jx] *= a[j + j * a_dim1];
- }
- }
- jx += *incx;
-/* L40: */
- }
- }
- } else {
- if (*incx == 1) {
- for (j = *n; j >= 1; --j) {
- if (x[j] != 0.) {
- temp = x[j];
- i__1 = j + 1;
- for (i__ = *n; i__ >= i__1; --i__) {
- x[i__] += temp * a[i__ + j * a_dim1];
-/* L50: */
- }
- if (nounit) {
- x[j] *= a[j + j * a_dim1];
- }
- }
-/* L60: */
- }
- } else {
- kx += (*n - 1) * *incx;
- jx = kx;
- for (j = *n; j >= 1; --j) {
- if (x[jx] != 0.) {
- temp = x[jx];
- ix = kx;
- i__1 = j + 1;
- for (i__ = *n; i__ >= i__1; --i__) {
- x[ix] += temp * a[i__ + j * a_dim1];
- ix -= *incx;
-/* L70: */
- }
- if (nounit) {
- x[jx] *= a[j + j * a_dim1];
- }
- }
- jx -= *incx;
-/* L80: */
- }
- }
- }
- } else {
-
-/* Form x := A'*x. */
-
- if (lsame_(uplo, "U")) {
- if (*incx == 1) {
- for (j = *n; j >= 1; --j) {
- temp = x[j];
- if (nounit) {
- temp *= a[j + j * a_dim1];
- }
- for (i__ = j - 1; i__ >= 1; --i__) {
- temp += a[i__ + j * a_dim1] * x[i__];
-/* L90: */
- }
- x[j] = temp;
-/* L100: */
- }
- } else {
- jx = kx + (*n - 1) * *incx;
- for (j = *n; j >= 1; --j) {
- temp = x[jx];
- ix = jx;
- if (nounit) {
- temp *= a[j + j * a_dim1];
- }
- for (i__ = j - 1; i__ >= 1; --i__) {
- ix -= *incx;
- temp += a[i__ + j * a_dim1] * x[ix];
-/* L110: */
- }
- x[jx] = temp;
- jx -= *incx;
-/* L120: */
- }
- }
- } else {
- if (*incx == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp = x[j];
- if (nounit) {
- temp *= a[j + j * a_dim1];
- }
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- temp += a[i__ + j * a_dim1] * x[i__];
-/* L130: */
- }
- x[j] = temp;
-/* L140: */
- }
- } else {
- jx = kx;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp = x[jx];
- ix = jx;
- if (nounit) {
- temp *= a[j + j * a_dim1];
- }
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- ix += *incx;
- temp += a[i__ + j * a_dim1] * x[ix];
-/* L150: */
- }
- x[jx] = temp;
- jx += *incx;
-/* L160: */
- }
- }
- }
- }
-
- return 0;
-
-/* End of DTRMV . */
-
-} /* dtrmv_ */
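The matrix-vector variant multiplies by A or A' in place without ever forming the transpose. A sketch of the removed dtrmv_ with TRANS = 'T', same assumed typedefs:

#include <stdio.h>

typedef int integer;
typedef double doublereal;

extern int dtrmv_(char *uplo, char *trans, char *diag, integer *n,
                  doublereal *a, integer *lda, doublereal *x, integer *incx);

int main(void)
{
    doublereal a[4] = { 1., 0., 2., 3. };   /* upper triangular [1 2; 0 3] */
    doublereal x[2] = { 1., 1. };
    integer n = 2, lda = 2, incx = 1;

    /* x := A'*x, accessing only the stored upper triangle. */
    dtrmv_("U", "T", "N", &n, a, &lda, x, &incx);

    /* Expected: A'*x = [1, 2*1 + 3*1] = [1, 5]. */
    printf("x = [%g, %g]\n", x[0], x[1]);
    return 0;
}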
-
-/* Subroutine */ int dtrsm_(char *side, char *uplo, char *transa, char *diag,
- integer *m, integer *n, doublereal *alpha, doublereal *a, integer *
- lda, doublereal *b, integer *ldb)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, j, k, info;
- static doublereal temp;
- static logical lside;
- extern logical lsame_(char *, char *);
- static integer nrowa;
- static logical upper;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical nounit;
-
-
-/*
- Purpose
- =======
-
- DTRSM solves one of the matrix equations
-
- op( A )*X = alpha*B, or X*op( A ) = alpha*B,
-
- where alpha is a scalar, X and B are m by n matrices, A is a unit, or
- non-unit, upper or lower triangular matrix and op( A ) is one of
-
- op( A ) = A or op( A ) = A'.
-
- The matrix X is overwritten on B.
-
- Parameters
- ==========
-
- SIDE - CHARACTER*1.
- On entry, SIDE specifies whether op( A ) appears on the left
- or right of X as follows:
-
- SIDE = 'L' or 'l' op( A )*X = alpha*B.
-
- SIDE = 'R' or 'r' X*op( A ) = alpha*B.
-
- Unchanged on exit.
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the matrix A is an upper or
- lower triangular matrix as follows:
-
- UPLO = 'U' or 'u' A is an upper triangular matrix.
-
- UPLO = 'L' or 'l' A is a lower triangular matrix.
-
- Unchanged on exit.
-
- TRANSA - CHARACTER*1.
- On entry, TRANSA specifies the form of op( A ) to be used in
- the matrix multiplication as follows:
-
- TRANSA = 'N' or 'n' op( A ) = A.
-
- TRANSA = 'T' or 't' op( A ) = A'.
-
- TRANSA = 'C' or 'c' op( A ) = A'.
-
- Unchanged on exit.
-
- DIAG - CHARACTER*1.
- On entry, DIAG specifies whether or not A is unit triangular
- as follows:
-
- DIAG = 'U' or 'u' A is assumed to be unit triangular.
-
- DIAG = 'N' or 'n' A is not assumed to be unit
- triangular.
-
- Unchanged on exit.
-
- M - INTEGER.
- On entry, M specifies the number of rows of B. M must be at
- least zero.
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the number of columns of B. N must be
- at least zero.
- Unchanged on exit.
-
- ALPHA - DOUBLE PRECISION.
- On entry, ALPHA specifies the scalar alpha. When alpha is
- zero then A is not referenced and B need not be set before
- entry.
- Unchanged on exit.
-
- A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m
- when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
- Before entry with UPLO = 'U' or 'u', the leading k by k
- upper triangular part of the array A must contain the upper
- triangular matrix and the strictly lower triangular part of
- A is not referenced.
- Before entry with UPLO = 'L' or 'l', the leading k by k
- lower triangular part of the array A must contain the lower
- triangular matrix and the strictly upper triangular part of
- A is not referenced.
- Note that when DIAG = 'U' or 'u', the diagonal elements of
- A are not referenced either, but are assumed to be unity.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. When SIDE = 'L' or 'l' then
- LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
- then LDA must be at least max( 1, n ).
- Unchanged on exit.
-
- B - DOUBLE PRECISION array of DIMENSION ( LDB, n ).
- Before entry, the leading m by n part of the array B must
- contain the right-hand side matrix B, and on exit is
- overwritten by the solution matrix X.
-
- LDB - INTEGER.
- On entry, LDB specifies the first dimension of B as declared
- in the calling (sub) program. LDB must be at least
- max( 1, m ).
- Unchanged on exit.
-
-
- Level 3 Blas routine.
-
-
- -- Written on 8-February-1989.
- Jack Dongarra, Argonne National Laboratory.
- Iain Duff, AERE Harwell.
- Jeremy Du Croz, Numerical Algorithms Group Ltd.
- Sven Hammarling, Numerical Algorithms Group Ltd.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
-
- /* Function Body */
- lside = lsame_(side, "L");
- if (lside) {
- nrowa = *m;
- } else {
- nrowa = *n;
- }
- nounit = lsame_(diag, "N");
- upper = lsame_(uplo, "U");
-
- info = 0;
- if ((! lside && ! lsame_(side, "R"))) {
- info = 1;
- } else if ((! upper && ! lsame_(uplo, "L"))) {
- info = 2;
- } else if (((! lsame_(transa, "N") && ! lsame_(
- transa, "T")) && ! lsame_(transa, "C"))) {
- info = 3;
- } else if ((! lsame_(diag, "U") && ! lsame_(diag,
- "N"))) {
- info = 4;
- } else if (*m < 0) {
- info = 5;
- } else if (*n < 0) {
- info = 6;
- } else if (*lda < max(1,nrowa)) {
- info = 9;
- } else if (*ldb < max(1,*m)) {
- info = 11;
- }
- if (info != 0) {
- xerbla_("DTRSM ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0) {
- return 0;
- }
-
-/* And when alpha.eq.zero. */
-
- if (*alpha == 0.) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] = 0.;
-/* L10: */
- }
-/* L20: */
- }
- return 0;
- }
-
-/* Start the operations. */
-
- if (lside) {
- if (lsame_(transa, "N")) {
-
-/* Form B := alpha*inv( A )*B. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*alpha != 1.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
- ;
-/* L30: */
- }
- }
- for (k = *m; k >= 1; --k) {
- if (b[k + j * b_dim1] != 0.) {
- if (nounit) {
- b[k + j * b_dim1] /= a[k + k * a_dim1];
- }
- i__2 = k - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
- i__ + k * a_dim1];
-/* L40: */
- }
- }
-/* L50: */
- }
-/* L60: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*alpha != 1.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
- ;
-/* L70: */
- }
- }
- i__2 = *m;
- for (k = 1; k <= i__2; ++k) {
- if (b[k + j * b_dim1] != 0.) {
- if (nounit) {
- b[k + j * b_dim1] /= a[k + k * a_dim1];
- }
- i__3 = *m;
- for (i__ = k + 1; i__ <= i__3; ++i__) {
- b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
- i__ + k * a_dim1];
-/* L80: */
- }
- }
-/* L90: */
- }
-/* L100: */
- }
- }
- } else {
-
-/* Form B := alpha*inv( A' )*B. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp = *alpha * b[i__ + j * b_dim1];
- i__3 = i__ - 1;
- for (k = 1; k <= i__3; ++k) {
- temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
-/* L110: */
- }
- if (nounit) {
- temp /= a[i__ + i__ * a_dim1];
- }
- b[i__ + j * b_dim1] = temp;
-/* L120: */
- }
-/* L130: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- for (i__ = *m; i__ >= 1; --i__) {
- temp = *alpha * b[i__ + j * b_dim1];
- i__2 = *m;
- for (k = i__ + 1; k <= i__2; ++k) {
- temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
-/* L140: */
- }
- if (nounit) {
- temp /= a[i__ + i__ * a_dim1];
- }
- b[i__ + j * b_dim1] = temp;
-/* L150: */
- }
-/* L160: */
- }
- }
- }
- } else {
- if (lsame_(transa, "N")) {
-
-/* Form B := alpha*B*inv( A ). */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*alpha != 1.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
- ;
-/* L170: */
- }
- }
- i__2 = j - 1;
- for (k = 1; k <= i__2; ++k) {
- if (a[k + j * a_dim1] != 0.) {
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
- i__ + k * b_dim1];
-/* L180: */
- }
- }
-/* L190: */
- }
- if (nounit) {
- temp = 1. / a[j + j * a_dim1];
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
-/* L200: */
- }
- }
-/* L210: */
- }
- } else {
- for (j = *n; j >= 1; --j) {
- if (*alpha != 1.) {
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
- ;
-/* L220: */
- }
- }
- i__1 = *n;
- for (k = j + 1; k <= i__1; ++k) {
- if (a[k + j * a_dim1] != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
- i__ + k * b_dim1];
-/* L230: */
- }
- }
-/* L240: */
- }
- if (nounit) {
- temp = 1. / a[j + j * a_dim1];
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
-/* L250: */
- }
- }
-/* L260: */
- }
- }
- } else {
-
-/* Form B := alpha*B*inv( A' ). */
-
- if (upper) {
- for (k = *n; k >= 1; --k) {
- if (nounit) {
- temp = 1. / a[k + k * a_dim1];
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
-/* L270: */
- }
- }
- i__1 = k - 1;
- for (j = 1; j <= i__1; ++j) {
- if (a[j + k * a_dim1] != 0.) {
- temp = a[j + k * a_dim1];
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] -= temp * b[i__ + k *
- b_dim1];
-/* L280: */
- }
- }
-/* L290: */
- }
- if (*alpha != 1.) {
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
- ;
-/* L300: */
- }
- }
-/* L310: */
- }
- } else {
- i__1 = *n;
- for (k = 1; k <= i__1; ++k) {
- if (nounit) {
- temp = 1. / a[k + k * a_dim1];
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
-/* L320: */
- }
- }
- i__2 = *n;
- for (j = k + 1; j <= i__2; ++j) {
- if (a[j + k * a_dim1] != 0.) {
- temp = a[j + k * a_dim1];
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- b[i__ + j * b_dim1] -= temp * b[i__ + k *
- b_dim1];
-/* L330: */
- }
- }
-/* L340: */
- }
- if (*alpha != 1.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
- ;
-/* L350: */
- }
- }
-/* L360: */
- }
- }
- }
- }
-
- return 0;
-
-/* End of DTRSM . */
-
-} /* dtrsm_ */
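Because A is triangular, the solve is a single back (or forward) substitution sweep and X overwrites B in place. A hypothetical driver for the removed dtrsm_, under the same assumed f2c typedefs:

#include <stdio.h>

typedef int integer;
typedef double doublereal;

extern int dtrsm_(char *side, char *uplo, char *transa, char *diag,
                  integer *m, integer *n, doublereal *alpha,
                  doublereal *a, integer *lda, doublereal *b, integer *ldb);

int main(void)
{
    doublereal a[4] = { 2., 0., 1., 4. };   /* upper triangular [2 1; 0 4] */
    doublereal b[2] = { 3., 4. };           /* right-hand side, m=2, n=1 */
    doublereal alpha = 1.;
    integer m = 2, n = 1, lda = 2, ldb = 2;

    /* Solve A*X = alpha*B by back substitution; X overwrites B. */
    dtrsm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);

    /* Expected: x2 = 4/4 = 1, x1 = (3 - 1*1)/2 = 1. */
    printf("x = [%g, %g]\n", b[0], b[1]);
    return 0;
}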
-
-doublereal dzasum_(integer *n, doublecomplex *zx, integer *incx)
-{
- /* System generated locals */
- integer i__1;
- doublereal ret_val;
-
- /* Local variables */
- static integer i__, ix;
- static doublereal stemp;
- extern doublereal dcabs1_(doublecomplex *);
-
-
-/*
- takes the sum of the absolute values of the real and imaginary parts.

- jack dongarra, 3/11/78.
- modified 3/93 to return if incx .le. 0.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --zx;
-
- /* Function Body */
- ret_val = 0.;
- stemp = 0.;
- if (*n <= 0 || *incx <= 0) {
- return ret_val;
- }
- if (*incx == 1) {
- goto L20;
- }
-
-/* code for increment not equal to 1 */
-
- ix = 1;
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- stemp += dcabs1_(&zx[ix]);
- ix += *incx;
-/* L10: */
- }
- ret_val = stemp;
- return ret_val;
-
-/* code for increment equal to 1 */
-
-L20:
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- stemp += dcabs1_(&zx[i__]);
-/* L30: */
- }
- ret_val = stemp;
- return ret_val;
-} /* dzasum_ */
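Note that the "absolute value" here is dcabs1, i.e. |Re z| + |Im z| per element, not the complex modulus. A tiny hedged sketch, same assumed typedefs plus the f2c doublecomplex struct:

#include <stdio.h>

typedef int integer;
typedef double doublereal;
typedef struct { doublereal r, i; } doublecomplex;

extern doublereal dzasum_(integer *n, doublecomplex *zx, integer *incx);

int main(void)
{
    doublecomplex zx[2] = { { 3., -4. }, { -1., 2. } };
    integer n = 2, incx = 1;

    /* Sums |Re| + |Im| per element: (3+4) + (1+2) = 10. */
    printf("%g\n", dzasum_(&n, zx, &incx));
    return 0;
}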
-
-doublereal dznrm2_(integer *n, doublecomplex *x, integer *incx)
-{
- /* System generated locals */
- integer i__1, i__2, i__3;
- doublereal ret_val, d__1;
-
- /* Builtin functions */
- double d_imag(doublecomplex *), sqrt(doublereal);
-
- /* Local variables */
- static integer ix;
- static doublereal ssq, temp, norm, scale;
-
-
-/*
- DZNRM2 returns the Euclidean norm of a vector via the function
- name, so that
-
- DZNRM2 := sqrt( conjg( x' )*x )
-
-
- -- This version written on 25-October-1982.
- Modified on 14-October-1993 to inline the call to ZLASSQ.
- Sven Hammarling, Nag Ltd.
-*/
-
-
- /* Parameter adjustments */
- --x;
-
- /* Function Body */
- if (*n < 1 || *incx < 1) {
- norm = 0.;
- } else {
- scale = 0.;
- ssq = 1.;
-/*
- The following loop is equivalent to this call to the LAPACK
- auxiliary routine:
- CALL ZLASSQ( N, X, INCX, SCALE, SSQ )
-*/
-
- i__1 = (*n - 1) * *incx + 1;
- i__2 = *incx;
- for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
- i__3 = ix;
- if (x[i__3].r != 0.) {
- i__3 = ix;
- temp = (d__1 = x[i__3].r, abs(d__1));
- if (scale < temp) {
-/* Computing 2nd power */
- d__1 = scale / temp;
- ssq = ssq * (d__1 * d__1) + 1.;
- scale = temp;
- } else {
-/* Computing 2nd power */
- d__1 = temp / scale;
- ssq += d__1 * d__1;
- }
- }
- if (d_imag(&x[ix]) != 0.) {
- temp = (d__1 = d_imag(&x[ix]), abs(d__1));
- if (scale < temp) {
-/* Computing 2nd power */
- d__1 = scale / temp;
- ssq = ssq * (d__1 * d__1) + 1.;
- scale = temp;
- } else {
-/* Computing 2nd power */
- d__1 = temp / scale;
- ssq += d__1 * d__1;
- }
- }
-/* L10: */
- }
- norm = scale * sqrt(ssq);
- }
-
- ret_val = norm;
- return ret_val;
-
-/* End of DZNRM2. */
-
-} /* dznrm2_ */
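The inlined ZLASSQ recurrence is what makes this routine safe: it tracks a running scale and a sum of squares of scaled values, so no intermediate ever overflows even when a naive sum of squares would. A sketch demonstrating exactly that, under the same assumed typedefs:

#include <stdio.h>

typedef int integer;
typedef double doublereal;
typedef struct { doublereal r, i; } doublecomplex;

extern doublereal dznrm2_(integer *n, doublecomplex *x, integer *incx);

int main(void)
{
    /* Squaring 3e200 would overflow a double; the scale/ssq recurrence
       keeps every intermediate finite. */
    doublecomplex x[2] = { { 3e200, 0. }, { 4e200, 0. } };
    integer n = 2, incx = 1;

    printf("%g\n", dznrm2_(&n, x, &incx));  /* expected 5e+200, not inf */
    return 0;
}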
-
-integer idamax_(integer *n, doublereal *dx, integer *incx)
-{
- /* System generated locals */
- integer ret_val, i__1;
- doublereal d__1;
-
- /* Local variables */
- static integer i__, ix;
- static doublereal dmax__;
-
-
-/*
- finds the index of the element having the largest absolute value.
- jack dongarra, linpack, 3/11/78.
- modified 3/93 to return if incx .le. 0.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --dx;
-
- /* Function Body */
- ret_val = 0;
- if (*n < 1 || *incx <= 0) {
- return ret_val;
- }
- ret_val = 1;
- if (*n == 1) {
- return ret_val;
- }
- if (*incx == 1) {
- goto L20;
- }
-
-/* code for increment not equal to 1 */
-
- ix = 1;
- dmax__ = abs(dx[1]);
- ix += *incx;
- i__1 = *n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- if ((d__1 = dx[ix], abs(d__1)) <= dmax__) {
- goto L5;
- }
- ret_val = i__;
- dmax__ = (d__1 = dx[ix], abs(d__1));
-L5:
- ix += *incx;
-/* L10: */
- }
- return ret_val;
-
-/* code for increment equal to 1 */
-
-L20:
- dmax__ = abs(dx[1]);
- i__1 = *n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- if ((d__1 = dx[i__], abs(d__1)) <= dmax__) {
- goto L30;
- }
- ret_val = i__;
- dmax__ = (d__1 = dx[i__], abs(d__1));
-L30:
- ;
- }
- return ret_val;
-} /* idamax_ */
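One detail worth recording: like its Fortran original, the translated routine returns a 1-based index (and 0 only for empty or invalid input). A minimal sketch, same assumed typedefs:

#include <stdio.h>

typedef int integer;
typedef double doublereal;

extern integer idamax_(integer *n, doublereal *dx, integer *incx);

int main(void)
{
    doublereal dx[3] = { 1., -7., 3. };
    integer n = 3, incx = 1;

    /* Returns the 1-based (Fortran-style) index, so -7. gives 2, not 1. */
    printf("%d\n", (int)idamax_(&n, dx, &incx));
    return 0;
}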
-
-integer izamax_(integer *n, doublecomplex *zx, integer *incx)
-{
- /* System generated locals */
- integer ret_val, i__1;
-
- /* Local variables */
- static integer i__, ix;
- static doublereal smax;
- extern doublereal dcabs1_(doublecomplex *);
-
-
-/*
- finds the index of the element having the largest absolute value.
- jack dongarra, 1/15/85.
- modified 3/93 to return if incx .le. 0.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --zx;
-
- /* Function Body */
- ret_val = 0;
- if (*n < 1 || *incx <= 0) {
- return ret_val;
- }
- ret_val = 1;
- if (*n == 1) {
- return ret_val;
- }
- if (*incx == 1) {
- goto L20;
- }
-
-/* code for increment not equal to 1 */
-
- ix = 1;
- smax = dcabs1_(&zx[1]);
- ix += *incx;
- i__1 = *n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- if (dcabs1_(&zx[ix]) <= smax) {
- goto L5;
- }
- ret_val = i__;
- smax = dcabs1_(&zx[ix]);
-L5:
- ix += *incx;
-/* L10: */
- }
- return ret_val;
-
-/* code for increment equal to 1 */
-
-L20:
- smax = dcabs1_(&zx[1]);
- i__1 = *n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- if (dcabs1_(&zx[i__]) <= smax) {
- goto L30;
- }
- ret_val = i__;
- smax = dcabs1_(&zx[i__]);
-L30:
- ;
- }
- return ret_val;
-} /* izamax_ */
-
-logical lsame_(char *ca, char *cb)
-{
- /* System generated locals */
- logical ret_val;
-
- /* Local variables */
- static integer inta, intb, zcode;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- LSAME returns .TRUE. if CA is the same letter as CB regardless of
- case.
-
- Arguments
- =========
-
- CA (input) CHARACTER*1
- CB (input) CHARACTER*1
- CA and CB specify the single characters to be compared.
-
- =====================================================================
-
-
- Test if the characters are equal
-*/
-
- ret_val = *(unsigned char *)ca == *(unsigned char *)cb;
- if (ret_val) {
- return ret_val;
- }
-
-/* Now test for equivalence if both characters are alphabetic. */
-
- zcode = 'Z';
-
-/*
- Use 'Z' rather than 'A' so that ASCII can be detected on Prime
- machines, on which ICHAR returns a value with bit 8 set.
- ICHAR('A') on Prime machines returns 193 which is the same as
- ICHAR('A') on an EBCDIC machine.
-*/
-
- inta = *(unsigned char *)ca;
- intb = *(unsigned char *)cb;
-
- if (zcode == 90 || zcode == 122) {
-
-/*
- ASCII is assumed - ZCODE is the ASCII code of either lower or
- upper case 'Z'.
-*/
-
- if ((inta >= 97 && inta <= 122)) {
- inta += -32;
- }
- if ((intb >= 97 && intb <= 122)) {
- intb += -32;
- }
-
- } else if (zcode == 233 || zcode == 169) {
-
-/*
- EBCDIC is assumed - ZCODE is the EBCDIC code of either lower or
- upper case 'Z'.
-*/
-
- if ((inta >= 129 && inta <= 137) || (inta >= 145 && inta <= 153) || (
- inta >= 162 && inta <= 169)) {
- inta += 64;
- }
- if ((intb >= 129 && intb <= 137) || (intb >= 145 && intb <= 153) || (
- intb >= 162 && intb <= 169)) {
- intb += 64;
- }
-
- } else if (zcode == 218 || zcode == 250) {
-
-/*
- ASCII is assumed, on Prime machines - ZCODE is the ASCII code
- plus 128 of either lower or upper case 'Z'.
-*/
-
- if ((inta >= 225 && inta <= 250)) {
- inta += -32;
- }
- if ((intb >= 225 && intb <= 250)) {
- intb += -32;
- }
- }
- ret_val = inta == intb;
-
-/*
- RETURN
-
- End of LSAME
-*/
-
- return ret_val;
-} /* lsame_ */
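On ordinary ASCII platforms only the first branch above is taken. A trivial hedged check; the logical typedef is an assumption matching NumPy's f2c.h (stock f2c uses long):

#include <stdio.h>

typedef int logical;   /* assumption: NumPy's f2c.h; stock f2c uses long */

extern logical lsame_(char *ca, char *cb);

int main(void)
{
    /* Case-insensitive single-character comparison. */
    printf("%d %d\n", (int)lsame_("a", "A"), (int)lsame_("a", "B"));
    /* Expected: 1 0 */
    return 0;
}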
-
-/* Using xerbla_ from pythonxerbla.c */
-/* Subroutine */ int xerbla_DISABLE(char *srname, integer *info)
-{
- /* Format strings */
- static char fmt_9999[] = "(\002 ** On entry to \002,a6,\002 parameter nu"
- "mber \002,i2,\002 had \002,\002an illegal value\002)";
-
- /* Builtin functions */
- integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void);
- /* Subroutine */ int s_stop(char *, ftnlen);
-
- /* Fortran I/O blocks */
- static cilist io___147 = { 0, 6, 0, fmt_9999, 0 };
-
-
-/*
- -- LAPACK auxiliary routine (preliminary version) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- XERBLA is an error handler for the LAPACK routines.
- It is called by an LAPACK routine if an input parameter has an
- invalid value. A message is printed and execution stops.
-
- Installers may consider modifying the STOP statement in order to
- call system-specific exception-handling facilities.
-
- Arguments
- =========
-
- SRNAME (input) CHARACTER*6
- The name of the routine which called XERBLA.
-
- INFO (input) INTEGER
- The position of the invalid parameter in the parameter list
- of the calling routine.
-*/
-
-
- s_wsfe(&io___147);
- do_fio(&c__1, srname, (ftnlen)6);
- do_fio(&c__1, (char *)&(*info), (ftnlen)sizeof(integer));
- e_wsfe();
-
- s_stop("", (ftnlen)0);
-
-
-/* End of XERBLA */
-
- return 0;
-} /* xerbla_ */
-
-/* Subroutine */ int zaxpy_(integer *n, doublecomplex *za, doublecomplex *zx,
- integer *incx, doublecomplex *zy, integer *incy)
-{
- /* System generated locals */
- integer i__1, i__2, i__3, i__4;
- doublecomplex z__1, z__2;
-
- /* Local variables */
- static integer i__, ix, iy;
- extern doublereal dcabs1_(doublecomplex *);
-
-
-/*
- constant times a vector plus a vector.
- jack dongarra, 3/11/78.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
- /* Parameter adjustments */
- --zy;
- --zx;
-
- /* Function Body */
- if (*n <= 0) {
- return 0;
- }
- if (dcabs1_(za) == 0.) {
- return 0;
- }
- if ((*incx == 1 && *incy == 1)) {
- goto L20;
- }
-
-/*
- code for unequal increments or equal increments
- not equal to 1
-*/
-
- ix = 1;
- iy = 1;
- if (*incx < 0) {
- ix = (-(*n) + 1) * *incx + 1;
- }
- if (*incy < 0) {
- iy = (-(*n) + 1) * *incy + 1;
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = iy;
- i__3 = iy;
- i__4 = ix;
- z__2.r = za->r * zx[i__4].r - za->i * zx[i__4].i, z__2.i = za->r * zx[
- i__4].i + za->i * zx[i__4].r;
- z__1.r = zy[i__3].r + z__2.r, z__1.i = zy[i__3].i + z__2.i;
- zy[i__2].r = z__1.r, zy[i__2].i = z__1.i;
- ix += *incx;
- iy += *incy;
-/* L10: */
- }
- return 0;
-
-/* code for both increments equal to 1 */
-
-L20:
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__;
- i__3 = i__;
- i__4 = i__;
- z__2.r = za->r * zx[i__4].r - za->i * zx[i__4].i, z__2.i = za->r * zx[
- i__4].i + za->i * zx[i__4].r;
- z__1.r = zy[i__3].r + z__2.r, z__1.i = zy[i__3].i + z__2.i;
- zy[i__2].r = z__1.r, zy[i__2].i = z__1.i;
-/* L30: */
- }
- return 0;
-} /* zaxpy_ */
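The complex AXPY takes its scalar by pointer, like everything else in the f2c calling convention. A hypothetical driver, same assumed typedefs plus the f2c doublecomplex struct:

#include <stdio.h>

typedef int integer;
typedef double doublereal;
typedef struct { doublereal r, i; } doublecomplex;

extern int zaxpy_(integer *n, doublecomplex *za, doublecomplex *zx,
                  integer *incx, doublecomplex *zy, integer *incy);

int main(void)
{
    doublecomplex za = { 0., 1. };          /* za = i */
    doublecomplex zx[1] = { { 1., 2. } };   /* zx = 1 + 2i */
    doublecomplex zy[1] = { { 0., 0. } };
    integer n = 1, inc = 1;

    /* zy := za*zx + zy = i*(1 + 2i) = -2 + i. */
    zaxpy_(&n, &za, zx, &inc, zy, &inc);
    printf("%g%+gi\n", zy[0].r, zy[0].i);   /* expected -2+1i */
    return 0;
}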
-
-/* Subroutine */ int zcopy_(integer *n, doublecomplex *zx, integer *incx,
- doublecomplex *zy, integer *incy)
-{
- /* System generated locals */
- integer i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, ix, iy;
-
-
-/*
- copies a vector, x, to a vector, y.
- jack dongarra, linpack, 4/11/78.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --zy;
- --zx;
-
- /* Function Body */
- if (*n <= 0) {
- return 0;
- }
- if ((*incx == 1 && *incy == 1)) {
- goto L20;
- }
-
-/*
- code for unequal increments or equal increments
- not equal to 1
-*/
-
- ix = 1;
- iy = 1;
- if (*incx < 0) {
- ix = (-(*n) + 1) * *incx + 1;
- }
- if (*incy < 0) {
- iy = (-(*n) + 1) * *incy + 1;
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = iy;
- i__3 = ix;
- zy[i__2].r = zx[i__3].r, zy[i__2].i = zx[i__3].i;
- ix += *incx;
- iy += *incy;
-/* L10: */
- }
- return 0;
-
-/* code for both increments equal to 1 */
-
-L20:
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__;
- i__3 = i__;
- zy[i__2].r = zx[i__3].r, zy[i__2].i = zx[i__3].i;
-/* L30: */
- }
- return 0;
-} /* zcopy_ */
-
-/* Double Complex */ VOID zdotc_(doublecomplex * ret_val, integer *n,
- doublecomplex *zx, integer *incx, doublecomplex *zy, integer *incy)
-{
- /* System generated locals */
- integer i__1, i__2;
- doublecomplex z__1, z__2, z__3;
-
- /* Builtin functions */
- void d_cnjg(doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, ix, iy;
- static doublecomplex ztemp;
-
-
-/*
- forms the dot product of two vectors, conjugating the first.
- jack dongarra, 3/11/78.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
- /* Parameter adjustments */
- --zy;
- --zx;
-
- /* Function Body */
- ztemp.r = 0., ztemp.i = 0.;
- ret_val->r = 0., ret_val->i = 0.;
- if (*n <= 0) {
- return ;
- }
- if ((*incx == 1 && *incy == 1)) {
- goto L20;
- }
-
-/*
- code for unequal increments or equal increments
- not equal to 1
-*/
-
- ix = 1;
- iy = 1;
- if (*incx < 0) {
- ix = (-(*n) + 1) * *incx + 1;
- }
- if (*incy < 0) {
- iy = (-(*n) + 1) * *incy + 1;
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- d_cnjg(&z__3, &zx[ix]);
- i__2 = iy;
- z__2.r = z__3.r * zy[i__2].r - z__3.i * zy[i__2].i, z__2.i = z__3.r *
- zy[i__2].i + z__3.i * zy[i__2].r;
- z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
- ztemp.r = z__1.r, ztemp.i = z__1.i;
- ix += *incx;
- iy += *incy;
-/* L10: */
- }
- ret_val->r = ztemp.r, ret_val->i = ztemp.i;
- return ;
-
-/* code for both increments equal to 1 */
-
-L20:
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- d_cnjg(&z__3, &zx[i__]);
- i__2 = i__;
- z__2.r = z__3.r * zy[i__2].r - z__3.i * zy[i__2].i, z__2.i = z__3.r *
- zy[i__2].i + z__3.i * zy[i__2].r;
- z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
- ztemp.r = z__1.r, ztemp.i = z__1.i;
-/* L30: */
- }
- ret_val->r = ztemp.r, ret_val->i = ztemp.i;
- return ;
-} /* zdotc_ */
-
-/* Double Complex */ VOID zdotu_(doublecomplex * ret_val, integer *n,
- doublecomplex *zx, integer *incx, doublecomplex *zy, integer *incy)
-{
- /* System generated locals */
- integer i__1, i__2, i__3;
- doublecomplex z__1, z__2;
-
- /* Local variables */
- static integer i__, ix, iy;
- static doublecomplex ztemp;
-
-
-/*
- forms the dot product of two vectors (unconjugated).
- jack dongarra, 3/11/78.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
- /* Parameter adjustments */
- --zy;
- --zx;
-
- /* Function Body */
- ztemp.r = 0., ztemp.i = 0.;
- ret_val->r = 0., ret_val->i = 0.;
- if (*n <= 0) {
- return ;
- }
- if ((*incx == 1 && *incy == 1)) {
- goto L20;
- }
-
-/*
- code for unequal increments or equal increments
- not equal to 1
-*/
-
- ix = 1;
- iy = 1;
- if (*incx < 0) {
- ix = (-(*n) + 1) * *incx + 1;
- }
- if (*incy < 0) {
- iy = (-(*n) + 1) * *incy + 1;
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = ix;
- i__3 = iy;
- z__2.r = zx[i__2].r * zy[i__3].r - zx[i__2].i * zy[i__3].i, z__2.i =
- zx[i__2].r * zy[i__3].i + zx[i__2].i * zy[i__3].r;
- z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
- ztemp.r = z__1.r, ztemp.i = z__1.i;
- ix += *incx;
- iy += *incy;
-/* L10: */
- }
- ret_val->r = ztemp.r, ret_val->i = ztemp.i;
- return ;
-
-/* code for both increments equal to 1 */
-
-L20:
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__;
- i__3 = i__;
- z__2.r = zx[i__2].r * zy[i__3].r - zx[i__2].i * zy[i__3].i, z__2.i =
- zx[i__2].r * zy[i__3].i + zx[i__2].i * zy[i__3].r;
- z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
- ztemp.r = z__1.r, ztemp.i = z__1.i;
-/* L30: */
- }
- ret_val->r = ztemp.r, ret_val->i = ztemp.i;
- return ;
-} /* zdotu_ */
-
-/* Subroutine */ int zdscal_(integer *n, doublereal *da, doublecomplex *zx,
- integer *incx)
-{
- /* System generated locals */
- integer i__1, i__2, i__3;
- doublecomplex z__1, z__2;
-
- /* Local variables */
- static integer i__, ix;
-
-
-/*
- scales a vector by a constant.
- jack dongarra, 3/11/78.
- modified 3/93 to return if incx .le. 0.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --zx;
-
- /* Function Body */
- if (*n <= 0 || *incx <= 0) {
- return 0;
- }
- if (*incx == 1) {
- goto L20;
- }
-
-/* code for increment not equal to 1 */
-
- ix = 1;
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = ix;
- z__2.r = *da, z__2.i = 0.;
- i__3 = ix;
- z__1.r = z__2.r * zx[i__3].r - z__2.i * zx[i__3].i, z__1.i = z__2.r *
- zx[i__3].i + z__2.i * zx[i__3].r;
- zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
- ix += *incx;
-/* L10: */
- }
- return 0;
-
-/* code for increment equal to 1 */
-
-L20:
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__;
- z__2.r = *da, z__2.i = 0.;
- i__3 = i__;
- z__1.r = z__2.r * zx[i__3].r - z__2.i * zx[i__3].i, z__1.i = z__2.r *
- zx[i__3].i + z__2.i * zx[i__3].r;
- zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
-/* L30: */
- }
- return 0;
-} /* zdscal_ */
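/*
 * Usage sketch (editor's addition): zdscal_ scales a complex vector by a
 * real constant in place.  Under the f2c conventions assumed here,
 * doublereal is plain double.
 */
#include <stdio.h>

typedef long int integer;
typedef double doublereal;
typedef struct { double r, i; } doublecomplex;

extern int zdscal_(integer *n, doublereal *da, doublecomplex *zx,
                   integer *incx);

int main(void)
{
    doublecomplex x[2] = {{1., 2.}, {3., 4.}};
    doublereal da = 2.;
    integer n = 2, inc = 1;

    zdscal_(&n, &da, x, &inc);   /* x := 2*x, in place */
    printf("(%g, %g) (%g, %g)\n", x[0].r, x[0].i, x[1].r, x[1].i);
    /* expect (2, 4) (6, 8) */
    return 0;
}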
-
-/* Subroutine */ int zgemm_(char *transa, char *transb, integer *m, integer *
- n, integer *k, doublecomplex *alpha, doublecomplex *a, integer *lda,
- doublecomplex *b, integer *ldb, doublecomplex *beta, doublecomplex *
- c__, integer *ldc)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
- i__3, i__4, i__5, i__6;
- doublecomplex z__1, z__2, z__3, z__4;
-
- /* Builtin functions */
- void d_cnjg(doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, j, l, info;
- static logical nota, notb;
- static doublecomplex temp;
- static logical conja, conjb;
- static integer ncola;
- extern logical lsame_(char *, char *);
- static integer nrowa, nrowb;
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- ZGEMM performs one of the matrix-matrix operations
-
- C := alpha*op( A )*op( B ) + beta*C,
-
- where op( X ) is one of
-
- op( X ) = X or op( X ) = X' or op( X ) = conjg( X' ),
-
- alpha and beta are scalars, and A, B and C are matrices, with op( A )
- an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
-
- Parameters
- ==========
-
- TRANSA - CHARACTER*1.
- On entry, TRANSA specifies the form of op( A ) to be used in
- the matrix multiplication as follows:
-
- TRANSA = 'N' or 'n', op( A ) = A.
-
- TRANSA = 'T' or 't', op( A ) = A'.
-
- TRANSA = 'C' or 'c', op( A ) = conjg( A' ).
-
- Unchanged on exit.
-
- TRANSB - CHARACTER*1.
- On entry, TRANSB specifies the form of op( B ) to be used in
- the matrix multiplication as follows:
-
- TRANSB = 'N' or 'n', op( B ) = B.
-
- TRANSB = 'T' or 't', op( B ) = B'.
-
- TRANSB = 'C' or 'c', op( B ) = conjg( B' ).
-
- Unchanged on exit.
-
- M - INTEGER.
- On entry, M specifies the number of rows of the matrix
- op( A ) and of the matrix C. M must be at least zero.
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the number of columns of the matrix
- op( B ) and the number of columns of the matrix C. N must be
- at least zero.
- Unchanged on exit.
-
- K - INTEGER.
- On entry, K specifies the number of columns of the matrix
- op( A ) and the number of rows of the matrix op( B ). K must
- be at least zero.
- Unchanged on exit.
-
- ALPHA - COMPLEX*16 .
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is
- k when TRANSA = 'N' or 'n', and is m otherwise.
- Before entry with TRANSA = 'N' or 'n', the leading m by k
- part of the array A must contain the matrix A, otherwise
- the leading k by m part of the array A must contain the
- matrix A.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. When TRANSA = 'N' or 'n' then
- LDA must be at least max( 1, m ), otherwise LDA must be at
- least max( 1, k ).
- Unchanged on exit.
-
- B - COMPLEX*16 array of DIMENSION ( LDB, kb ), where kb is
- n when TRANSB = 'N' or 'n', and is k otherwise.
- Before entry with TRANSB = 'N' or 'n', the leading k by n
- part of the array B must contain the matrix B, otherwise
- the leading n by k part of the array B must contain the
- matrix B.
- Unchanged on exit.
-
- LDB - INTEGER.
- On entry, LDB specifies the first dimension of B as declared
- in the calling (sub) program. When TRANSB = 'N' or 'n' then
- LDB must be at least max( 1, k ), otherwise LDB must be at
- least max( 1, n ).
- Unchanged on exit.
-
- BETA - COMPLEX*16 .
- On entry, BETA specifies the scalar beta. When BETA is
- supplied as zero then C need not be set on input.
- Unchanged on exit.
-
- C - COMPLEX*16 array of DIMENSION ( LDC, n ).
- Before entry, the leading m by n part of the array C must
- contain the matrix C, except when beta is zero, in which
- case C need not be set on entry.
- On exit, the array C is overwritten by the m by n matrix
- ( alpha*op( A )*op( B ) + beta*C ).
-
- LDC - INTEGER.
- On entry, LDC specifies the first dimension of C as declared
- in the calling (sub) program. LDC must be at least
- max( 1, m ).
- Unchanged on exit.
-
-
- Level 3 Blas routine.
-
- -- Written on 8-February-1989.
- Jack Dongarra, Argonne National Laboratory.
- Iain Duff, AERE Harwell.
- Jeremy Du Croz, Numerical Algorithms Group Ltd.
- Sven Hammarling, Numerical Algorithms Group Ltd.
-
-
- Set NOTA and NOTB as true if A and B respectively are not
- conjugated or transposed, set CONJA and CONJB as true if A and
- B respectively are to be transposed but not conjugated and set
- NROWA, NCOLA and NROWB as the number of rows and columns of A
- and the number of rows of B respectively.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
-
- /* Function Body */
- nota = lsame_(transa, "N");
- notb = lsame_(transb, "N");
- conja = lsame_(transa, "C");
- conjb = lsame_(transb, "C");
- if (nota) {
- nrowa = *m;
- ncola = *k;
- } else {
- nrowa = *k;
- ncola = *m;
- }
- if (notb) {
- nrowb = *k;
- } else {
- nrowb = *n;
- }
-
-/* Test the input parameters. */
-
- info = 0;
- if (((! nota && ! conja) && ! lsame_(transa, "T")))
- {
- info = 1;
- } else if (((! notb && ! conjb) && ! lsame_(transb, "T"))) {
- info = 2;
- } else if (*m < 0) {
- info = 3;
- } else if (*n < 0) {
- info = 4;
- } else if (*k < 0) {
- info = 5;
- } else if (*lda < max(1,nrowa)) {
- info = 8;
- } else if (*ldb < max(1,nrowb)) {
- info = 10;
- } else if (*ldc < max(1,*m)) {
- info = 13;
- }
- if (info != 0) {
- xerbla_("ZGEMM ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*m == 0 || *n == 0 || (((alpha->r == 0. && alpha->i == 0.) || *k == 0)
- && ((beta->r == 1. && beta->i == 0.)))) {
- return 0;
- }
-
-/* And when alpha.eq.zero. */
-
- if ((alpha->r == 0. && alpha->i == 0.)) {
- if ((beta->r == 0. && beta->i == 0.)) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L10: */
- }
-/* L20: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
- z__1.i = beta->r * c__[i__4].i + beta->i * c__[
- i__4].r;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L30: */
- }
-/* L40: */
- }
- }
- return 0;
- }
-
-/* Start the operations. */
-
- if (notb) {
- if (nota) {
-
-/* Form C := alpha*A*B + beta*C. */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if ((beta->r == 0. && beta->i == 0.)) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L50: */
- }
- } else if (beta->r != 1. || beta->i != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
- .i, z__1.i = beta->r * c__[i__4].i + beta->i *
- c__[i__4].r;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L60: */
- }
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- i__3 = l + j * b_dim1;
- if (b[i__3].r != 0. || b[i__3].i != 0.) {
- i__3 = l + j * b_dim1;
- z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
- z__1.i = alpha->r * b[i__3].i + alpha->i * b[
- i__3].r;
- temp.r = z__1.r, temp.i = z__1.i;
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * c_dim1;
- i__5 = i__ + j * c_dim1;
- i__6 = i__ + l * a_dim1;
- z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
- z__2.i = temp.r * a[i__6].i + temp.i * a[
- i__6].r;
- z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
- .i + z__2.i;
- c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
-/* L70: */
- }
- }
-/* L80: */
- }
-/* L90: */
- }
- } else if (conja) {
-
-/* Form C := alpha*conjg( A' )*B + beta*C. */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp.r = 0., temp.i = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- d_cnjg(&z__3, &a[l + i__ * a_dim1]);
- i__4 = l + j * b_dim1;
- z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
- z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
- .r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L100: */
- }
- if ((beta->r == 0. && beta->i == 0.)) {
- i__3 = i__ + j * c_dim1;
- z__1.r = alpha->r * temp.r - alpha->i * temp.i,
- z__1.i = alpha->r * temp.i + alpha->i *
- temp.r;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- } else {
- i__3 = i__ + j * c_dim1;
- z__2.r = alpha->r * temp.r - alpha->i * temp.i,
- z__2.i = alpha->r * temp.i + alpha->i *
- temp.r;
- i__4 = i__ + j * c_dim1;
- z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
- .i, z__3.i = beta->r * c__[i__4].i + beta->i *
- c__[i__4].r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- }
-/* L110: */
- }
-/* L120: */
- }
- } else {
-
-/* Form C := alpha*A'*B + beta*C */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp.r = 0., temp.i = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- i__4 = l + i__ * a_dim1;
- i__5 = l + j * b_dim1;
- z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
- .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4]
- .i * b[i__5].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L130: */
- }
- if ((beta->r == 0. && beta->i == 0.)) {
- i__3 = i__ + j * c_dim1;
- z__1.r = alpha->r * temp.r - alpha->i * temp.i,
- z__1.i = alpha->r * temp.i + alpha->i *
- temp.r;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- } else {
- i__3 = i__ + j * c_dim1;
- z__2.r = alpha->r * temp.r - alpha->i * temp.i,
- z__2.i = alpha->r * temp.i + alpha->i *
- temp.r;
- i__4 = i__ + j * c_dim1;
- z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
- .i, z__3.i = beta->r * c__[i__4].i + beta->i *
- c__[i__4].r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- }
-/* L140: */
- }
-/* L150: */
- }
- }
- } else if (nota) {
- if (conjb) {
-
-/* Form C := alpha*A*conjg( B' ) + beta*C. */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if ((beta->r == 0. && beta->i == 0.)) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L160: */
- }
- } else if (beta->r != 1. || beta->i != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
- .i, z__1.i = beta->r * c__[i__4].i + beta->i *
- c__[i__4].r;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L170: */
- }
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- i__3 = j + l * b_dim1;
- if (b[i__3].r != 0. || b[i__3].i != 0.) {
- d_cnjg(&z__2, &b[j + l * b_dim1]);
- z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
- z__1.i = alpha->r * z__2.i + alpha->i *
- z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * c_dim1;
- i__5 = i__ + j * c_dim1;
- i__6 = i__ + l * a_dim1;
- z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
- z__2.i = temp.r * a[i__6].i + temp.i * a[
- i__6].r;
- z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
- .i + z__2.i;
- c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
-/* L180: */
- }
- }
-/* L190: */
- }
-/* L200: */
- }
- } else {
-
-/* Form C := alpha*A*B' + beta*C */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if ((beta->r == 0. && beta->i == 0.)) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L210: */
- }
- } else if (beta->r != 1. || beta->i != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
- .i, z__1.i = beta->r * c__[i__4].i + beta->i *
- c__[i__4].r;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L220: */
- }
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- i__3 = j + l * b_dim1;
- if (b[i__3].r != 0. || b[i__3].i != 0.) {
- i__3 = j + l * b_dim1;
- z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
- z__1.i = alpha->r * b[i__3].i + alpha->i * b[
- i__3].r;
- temp.r = z__1.r, temp.i = z__1.i;
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * c_dim1;
- i__5 = i__ + j * c_dim1;
- i__6 = i__ + l * a_dim1;
- z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
- z__2.i = temp.r * a[i__6].i + temp.i * a[
- i__6].r;
- z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
- .i + z__2.i;
- c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
-/* L230: */
- }
- }
-/* L240: */
- }
-/* L250: */
- }
- }
- } else if (conja) {
- if (conjb) {
-
-/* Form C := alpha*conjg( A' )*conjg( B' ) + beta*C. */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp.r = 0., temp.i = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- d_cnjg(&z__3, &a[l + i__ * a_dim1]);
- d_cnjg(&z__4, &b[j + l * b_dim1]);
- z__2.r = z__3.r * z__4.r - z__3.i * z__4.i, z__2.i =
- z__3.r * z__4.i + z__3.i * z__4.r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L260: */
- }
- if ((beta->r == 0. && beta->i == 0.)) {
- i__3 = i__ + j * c_dim1;
- z__1.r = alpha->r * temp.r - alpha->i * temp.i,
- z__1.i = alpha->r * temp.i + alpha->i *
- temp.r;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- } else {
- i__3 = i__ + j * c_dim1;
- z__2.r = alpha->r * temp.r - alpha->i * temp.i,
- z__2.i = alpha->r * temp.i + alpha->i *
- temp.r;
- i__4 = i__ + j * c_dim1;
- z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
- .i, z__3.i = beta->r * c__[i__4].i + beta->i *
- c__[i__4].r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- }
-/* L270: */
- }
-/* L280: */
- }
- } else {
-
-/* Form C := alpha*conjg( A' )*B' + beta*C */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp.r = 0., temp.i = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- d_cnjg(&z__3, &a[l + i__ * a_dim1]);
- i__4 = j + l * b_dim1;
- z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
- z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
- .r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L290: */
- }
- if ((beta->r == 0. && beta->i == 0.)) {
- i__3 = i__ + j * c_dim1;
- z__1.r = alpha->r * temp.r - alpha->i * temp.i,
- z__1.i = alpha->r * temp.i + alpha->i *
- temp.r;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- } else {
- i__3 = i__ + j * c_dim1;
- z__2.r = alpha->r * temp.r - alpha->i * temp.i,
- z__2.i = alpha->r * temp.i + alpha->i *
- temp.r;
- i__4 = i__ + j * c_dim1;
- z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
- .i, z__3.i = beta->r * c__[i__4].i + beta->i *
- c__[i__4].r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- }
-/* L300: */
- }
-/* L310: */
- }
- }
- } else {
- if (conjb) {
-
-/* Form C := alpha*A'*conjg( B' ) + beta*C */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp.r = 0., temp.i = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- i__4 = l + i__ * a_dim1;
- d_cnjg(&z__3, &b[j + l * b_dim1]);
- z__2.r = a[i__4].r * z__3.r - a[i__4].i * z__3.i,
- z__2.i = a[i__4].r * z__3.i + a[i__4].i *
- z__3.r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L320: */
- }
- if ((beta->r == 0. && beta->i == 0.)) {
- i__3 = i__ + j * c_dim1;
- z__1.r = alpha->r * temp.r - alpha->i * temp.i,
- z__1.i = alpha->r * temp.i + alpha->i *
- temp.r;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- } else {
- i__3 = i__ + j * c_dim1;
- z__2.r = alpha->r * temp.r - alpha->i * temp.i,
- z__2.i = alpha->r * temp.i + alpha->i *
- temp.r;
- i__4 = i__ + j * c_dim1;
- z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
- .i, z__3.i = beta->r * c__[i__4].i + beta->i *
- c__[i__4].r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- }
-/* L330: */
- }
-/* L340: */
- }
- } else {
-
-/* Form C := alpha*A'*B' + beta*C */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp.r = 0., temp.i = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- i__4 = l + i__ * a_dim1;
- i__5 = j + l * b_dim1;
- z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
- .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4]
- .i * b[i__5].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L350: */
- }
- if ((beta->r == 0. && beta->i == 0.)) {
- i__3 = i__ + j * c_dim1;
- z__1.r = alpha->r * temp.r - alpha->i * temp.i,
- z__1.i = alpha->r * temp.i + alpha->i *
- temp.r;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- } else {
- i__3 = i__ + j * c_dim1;
- z__2.r = alpha->r * temp.r - alpha->i * temp.i,
- z__2.i = alpha->r * temp.i + alpha->i *
- temp.r;
- i__4 = i__ + j * c_dim1;
- z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
- .i, z__3.i = beta->r * c__[i__4].i + beta->i *
- c__[i__4].r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- }
-/* L360: */
- }
-/* L370: */
- }
- }
- }
-
- return 0;
-
-/* End of ZGEMM . */
-
-} /* zgemm_ */
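/*
 * Usage sketch (editor's addition, not part of the original source): a
 * minimal C caller for the f2c-translated zgemm_.  Matrices are laid out
 * column-major, exactly as a Fortran caller would supply them, and the
 * transpose flags are passed as string literals -- safe with the
 * reference lsame_, which compares only single characters.  The typedefs
 * are assumptions mirroring f2c.h.
 */
#include <stdio.h>

typedef long int integer;
typedef struct { double r, i; } doublecomplex;

extern int zgemm_(char *transa, char *transb, integer *m, integer *n,
                  integer *k, doublecomplex *alpha, doublecomplex *a,
                  integer *lda, doublecomplex *b, integer *ldb,
                  doublecomplex *beta, doublecomplex *c, integer *ldc);

int main(void)
{
    /* A is the 2x2 identity; column-major: a[0]=A(1,1), a[1]=A(2,1), ... */
    doublecomplex a[4] = {{1., 0.}, {0., 0.}, {0., 0.}, {1., 0.}};
    doublecomplex b[4] = {{1., 1.}, {2., 0.}, {0., -1.}, {3., 2.}};
    doublecomplex c[4] = {{0., 0.}, {0., 0.}, {0., 0.}, {0., 0.}};
    doublecomplex alpha = {1., 0.}, beta = {0., 0.};
    integer m = 2, n = 2, k = 2, ld = 2;

    /* C := 1*A*B + 0*C, so C comes back equal to B */
    zgemm_("N", "N", &m, &n, &k, &alpha, a, &ld, b, &ld, &beta, c, &ld);
    printf("C(2,1) = (%g, %g)\n", c[1].r, c[1].i);   /* expect (2, 0) */
    return 0;
}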
-
-/* Subroutine */ int zgemv_(char *trans, integer *m, integer *n,
- doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
- x, integer *incx, doublecomplex *beta, doublecomplex *y, integer *
- incy)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
- doublecomplex z__1, z__2, z__3;
-
- /* Builtin functions */
- void d_cnjg(doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, j, ix, iy, jx, jy, kx, ky, info;
- static doublecomplex temp;
- static integer lenx, leny;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical noconj;
-
-
-/*
- Purpose
- =======
-
- ZGEMV performs one of the matrix-vector operations
-
- y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, or
-
- y := alpha*conjg( A' )*x + beta*y,
-
- where alpha and beta are scalars, x and y are vectors and A is an
- m by n matrix.
-
- Parameters
- ==========
-
- TRANS - CHARACTER*1.
- On entry, TRANS specifies the operation to be performed as
- follows:
-
- TRANS = 'N' or 'n' y := alpha*A*x + beta*y.
-
- TRANS = 'T' or 't' y := alpha*A'*x + beta*y.
-
- TRANS = 'C' or 'c' y := alpha*conjg( A' )*x + beta*y.
-
- Unchanged on exit.
-
- M - INTEGER.
- On entry, M specifies the number of rows of the matrix A.
- M must be at least zero.
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the number of columns of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- ALPHA - COMPLEX*16 .
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, n ).
- Before entry, the leading m by n part of the array A must
- contain the matrix of coefficients.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, m ).
- Unchanged on exit.
-
- X - COMPLEX*16 array of DIMENSION at least
- ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'
- and at least
- ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.
- Before entry, the incremented array X must contain the
- vector x.
- Unchanged on exit.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
- BETA - COMPLEX*16 .
- On entry, BETA specifies the scalar beta. When BETA is
- supplied as zero then Y need not be set on input.
- Unchanged on exit.
-
- Y - COMPLEX*16 array of DIMENSION at least
- ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'
- and at least
- ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.
- Before entry with BETA non-zero, the incremented array Y
- must contain the vector y. On exit, Y is overwritten by the
- updated vector y.
-
- INCY - INTEGER.
- On entry, INCY specifies the increment for the elements of
- Y. INCY must not be zero.
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --x;
- --y;
-
- /* Function Body */
- info = 0;
- if (((! lsame_(trans, "N") && ! lsame_(trans, "T")) && ! lsame_(trans, "C"))) {
- info = 1;
- } else if (*m < 0) {
- info = 2;
- } else if (*n < 0) {
- info = 3;
- } else if (*lda < max(1,*m)) {
- info = 6;
- } else if (*incx == 0) {
- info = 8;
- } else if (*incy == 0) {
- info = 11;
- }
- if (info != 0) {
- xerbla_("ZGEMV ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*m == 0 || *n == 0 || ((alpha->r == 0. && alpha->i == 0.) && ((
- beta->r == 1. && beta->i == 0.)))) {
- return 0;
- }
-
- noconj = lsame_(trans, "T");
-
-/*
- Set LENX and LENY, the lengths of the vectors x and y, and set
- up the start points in X and Y.
-*/
-
- if (lsame_(trans, "N")) {
- lenx = *n;
- leny = *m;
- } else {
- lenx = *m;
- leny = *n;
- }
- if (*incx > 0) {
- kx = 1;
- } else {
- kx = 1 - (lenx - 1) * *incx;
- }
- if (*incy > 0) {
- ky = 1;
- } else {
- ky = 1 - (leny - 1) * *incy;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through A.
-
- First form y := beta*y.
-*/
-
- if (beta->r != 1. || beta->i != 0.) {
- if (*incy == 1) {
- if ((beta->r == 0. && beta->i == 0.)) {
- i__1 = leny;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__;
- y[i__2].r = 0., y[i__2].i = 0.;
-/* L10: */
- }
- } else {
- i__1 = leny;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__;
- i__3 = i__;
- z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
- z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
- .r;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
-/* L20: */
- }
- }
- } else {
- iy = ky;
- if ((beta->r == 0. && beta->i == 0.)) {
- i__1 = leny;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = iy;
- y[i__2].r = 0., y[i__2].i = 0.;
- iy += *incy;
-/* L30: */
- }
- } else {
- i__1 = leny;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = iy;
- i__3 = iy;
- z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
- z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
- .r;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
- iy += *incy;
-/* L40: */
- }
- }
- }
- }
- if ((alpha->r == 0. && alpha->i == 0.)) {
- return 0;
- }
- if (lsame_(trans, "N")) {
-
-/* Form y := alpha*A*x + y. */
-
- jx = kx;
- if (*incy == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jx;
- if (x[i__2].r != 0. || x[i__2].i != 0.) {
- i__2 = jx;
- z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
- z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
- .r;
- temp.r = z__1.r, temp.i = z__1.i;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__;
- i__4 = i__;
- i__5 = i__ + j * a_dim1;
- z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
- z__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
- .r;
- z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i +
- z__2.i;
- y[i__3].r = z__1.r, y[i__3].i = z__1.i;
-/* L50: */
- }
- }
- jx += *incx;
-/* L60: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jx;
- if (x[i__2].r != 0. || x[i__2].i != 0.) {
- i__2 = jx;
- z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
- z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
- .r;
- temp.r = z__1.r, temp.i = z__1.i;
- iy = ky;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = iy;
- i__4 = iy;
- i__5 = i__ + j * a_dim1;
- z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
- z__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
- .r;
- z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i +
- z__2.i;
- y[i__3].r = z__1.r, y[i__3].i = z__1.i;
- iy += *incy;
-/* L70: */
- }
- }
- jx += *incx;
-/* L80: */
- }
- }
- } else {
-
-/* Form y := alpha*A'*x + y or y := alpha*conjg( A' )*x + y. */
-
- jy = ky;
- if (*incx == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp.r = 0., temp.i = 0.;
- if (noconj) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = i__;
- z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
- .i, z__2.i = a[i__3].r * x[i__4].i + a[i__3]
- .i * x[i__4].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L90: */
- }
- } else {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__3 = i__;
- z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
- z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3]
- .r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L100: */
- }
- }
- i__2 = jy;
- i__3 = jy;
- z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i =
- alpha->r * temp.i + alpha->i * temp.r;
- z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
- jy += *incy;
-/* L110: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp.r = 0., temp.i = 0.;
- ix = kx;
- if (noconj) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = ix;
- z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
- .i, z__2.i = a[i__3].r * x[i__4].i + a[i__3]
- .i * x[i__4].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
- ix += *incx;
-/* L120: */
- }
- } else {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__3 = ix;
- z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
- z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3]
- .r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
- ix += *incx;
-/* L130: */
- }
- }
- i__2 = jy;
- i__3 = jy;
- z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i =
- alpha->r * temp.i + alpha->i * temp.r;
- z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
- jy += *incy;
-/* L140: */
- }
- }
- }
-
- return 0;
-
-/* End of ZGEMV . */
-
-} /* zgemv_ */
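/*
 * Usage sketch (editor's addition): calling zgemv_ for
 * y := alpha*A*x + beta*y with a diagonal A.  Column-major storage and
 * f2c.h-style typedefs are assumed, as above.
 */
#include <stdio.h>

typedef long int integer;
typedef struct { double r, i; } doublecomplex;

extern int zgemv_(char *trans, integer *m, integer *n, doublecomplex *alpha,
                  doublecomplex *a, integer *lda, doublecomplex *x,
                  integer *incx, doublecomplex *beta, doublecomplex *y,
                  integer *incy);

int main(void)
{
    /* A = diag(1, 2), column-major */
    doublecomplex a[4] = {{1., 0.}, {0., 0.}, {0., 0.}, {2., 0.}};
    doublecomplex x[2] = {{1., 0.}, {0., 1.}};
    doublecomplex y[2] = {{0., 0.}, {0., 0.}};
    doublecomplex alpha = {1., 0.}, beta = {0., 0.};
    integer m = 2, n = 2, lda = 2, inc = 1;

    zgemv_("N", &m, &n, &alpha, a, &lda, x, &inc, &beta, y, &inc);
    printf("y = (%g, %g) (%g, %g)\n", y[0].r, y[0].i, y[1].r, y[1].i);
    /* expect (1, 0) (0, 2) */
    return 0;
}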
-
-/* Subroutine */ int zgerc_(integer *m, integer *n, doublecomplex *alpha,
- doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
- doublecomplex *a, integer *lda)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
- doublecomplex z__1, z__2;
-
- /* Builtin functions */
- void d_cnjg(doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, j, ix, jy, kx, info;
- static doublecomplex temp;
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- ZGERC performs the rank 1 operation
-
- A := alpha*x*conjg( y' ) + A,
-
- where alpha is a scalar, x is an m element vector, y is an n element
- vector and A is an m by n matrix.
-
- Parameters
- ==========
-
- M - INTEGER.
- On entry, M specifies the number of rows of the matrix A.
- M must be at least zero.
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the number of columns of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- ALPHA - COMPLEX*16 .
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- X - COMPLEX*16 array of dimension at least
- ( 1 + ( m - 1 )*abs( INCX ) ).
- Before entry, the incremented array X must contain the m
- element vector x.
- Unchanged on exit.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
- Y - COMPLEX*16 array of dimension at least
- ( 1 + ( n - 1 )*abs( INCY ) ).
- Before entry, the incremented array Y must contain the n
- element vector y.
- Unchanged on exit.
-
- INCY - INTEGER.
- On entry, INCY specifies the increment for the elements of
- Y. INCY must not be zero.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, n ).
- Before entry, the leading m by n part of the array A must
- contain the matrix of coefficients. On exit, A is
- overwritten by the updated matrix.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, m ).
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --x;
- --y;
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- info = 0;
- if (*m < 0) {
- info = 1;
- } else if (*n < 0) {
- info = 2;
- } else if (*incx == 0) {
- info = 5;
- } else if (*incy == 0) {
- info = 7;
- } else if (*lda < max(1,*m)) {
- info = 9;
- }
- if (info != 0) {
- xerbla_("ZGERC ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*m == 0 || *n == 0 || (alpha->r == 0. && alpha->i == 0.)) {
- return 0;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through A.
-*/
-
- if (*incy > 0) {
- jy = 1;
- } else {
- jy = 1 - (*n - 1) * *incy;
- }
- if (*incx == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jy;
- if (y[i__2].r != 0. || y[i__2].i != 0.) {
- d_cnjg(&z__2, &y[jy]);
- z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
- alpha->r * z__2.i + alpha->i * z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = i__ + j * a_dim1;
- i__5 = i__;
- z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
- x[i__5].r * temp.i + x[i__5].i * temp.r;
- z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
- a[i__3].r = z__1.r, a[i__3].i = z__1.i;
-/* L10: */
- }
- }
- jy += *incy;
-/* L20: */
- }
- } else {
- if (*incx > 0) {
- kx = 1;
- } else {
- kx = 1 - (*m - 1) * *incx;
- }
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jy;
- if (y[i__2].r != 0. || y[i__2].i != 0.) {
- d_cnjg(&z__2, &y[jy]);
- z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
- alpha->r * z__2.i + alpha->i * z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- ix = kx;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = i__ + j * a_dim1;
- i__5 = ix;
- z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
- x[i__5].r * temp.i + x[i__5].i * temp.r;
- z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
- a[i__3].r = z__1.r, a[i__3].i = z__1.i;
- ix += *incx;
-/* L30: */
- }
- }
- jy += *incy;
-/* L40: */
- }
- }
-
- return 0;
-
-/* End of ZGERC . */
-
-} /* zgerc_ */
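/*
 * Usage sketch (editor's addition): zgerc_ performs the rank-1 update
 * A := alpha*x*conjg(y') + A, so with A initially zero each entry becomes
 * A(i,j) = alpha*x(i)*conj(y(j)).  zgeru_ below is the same routine
 * without the conjugation of y.  f2c.h-style typedefs assumed.
 */
#include <stdio.h>

typedef long int integer;
typedef struct { double r, i; } doublecomplex;

extern int zgerc_(integer *m, integer *n, doublecomplex *alpha,
                  doublecomplex *x, integer *incx, doublecomplex *y,
                  integer *incy, doublecomplex *a, integer *lda);

int main(void)
{
    doublecomplex x[2] = {{1., 0.}, {0., 1.}};
    doublecomplex y[2] = {{0., 1.}, {2., 0.}};
    doublecomplex a[4] = {{0., 0.}, {0., 0.}, {0., 0.}, {0., 0.}};
    doublecomplex alpha = {1., 0.};
    integer m = 2, n = 2, lda = 2, inc = 1;

    zgerc_(&m, &n, &alpha, x, &inc, y, &inc, a, &lda);
    /* A(2,1) = x(2)*conj(y(1)) = i*(-i) = 1 */
    printf("A(2,1) = (%g, %g)\n", a[1].r, a[1].i);   /* expect (1, 0) */
    return 0;
}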
-
-/* Subroutine */ int zgeru_(integer *m, integer *n, doublecomplex *alpha,
- doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
- doublecomplex *a, integer *lda)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
- doublecomplex z__1, z__2;
-
- /* Local variables */
- static integer i__, j, ix, jy, kx, info;
- static doublecomplex temp;
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- ZGERU performs the rank 1 operation
-
- A := alpha*x*y' + A,
-
- where alpha is a scalar, x is an m element vector, y is an n element
- vector and A is an m by n matrix.
-
- Parameters
- ==========
-
- M - INTEGER.
- On entry, M specifies the number of rows of the matrix A.
- M must be at least zero.
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the number of columns of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- ALPHA - COMPLEX*16 .
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- X - COMPLEX*16 array of dimension at least
- ( 1 + ( m - 1 )*abs( INCX ) ).
- Before entry, the incremented array X must contain the m
- element vector x.
- Unchanged on exit.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
- Y - COMPLEX*16 array of dimension at least
- ( 1 + ( n - 1 )*abs( INCY ) ).
- Before entry, the incremented array Y must contain the n
- element vector y.
- Unchanged on exit.
-
- INCY - INTEGER.
- On entry, INCY specifies the increment for the elements of
- Y. INCY must not be zero.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, n ).
- Before entry, the leading m by n part of the array A must
- contain the matrix of coefficients. On exit, A is
- overwritten by the updated matrix.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, m ).
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --x;
- --y;
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- info = 0;
- if (*m < 0) {
- info = 1;
- } else if (*n < 0) {
- info = 2;
- } else if (*incx == 0) {
- info = 5;
- } else if (*incy == 0) {
- info = 7;
- } else if (*lda < max(1,*m)) {
- info = 9;
- }
- if (info != 0) {
- xerbla_("ZGERU ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*m == 0 || *n == 0 || (alpha->r == 0. && alpha->i == 0.)) {
- return 0;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through A.
-*/
-
- if (*incy > 0) {
- jy = 1;
- } else {
- jy = 1 - (*n - 1) * *incy;
- }
- if (*incx == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jy;
- if (y[i__2].r != 0. || y[i__2].i != 0.) {
- i__2 = jy;
- z__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, z__1.i =
- alpha->r * y[i__2].i + alpha->i * y[i__2].r;
- temp.r = z__1.r, temp.i = z__1.i;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = i__ + j * a_dim1;
- i__5 = i__;
- z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
- x[i__5].r * temp.i + x[i__5].i * temp.r;
- z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
- a[i__3].r = z__1.r, a[i__3].i = z__1.i;
-/* L10: */
- }
- }
- jy += *incy;
-/* L20: */
- }
- } else {
- if (*incx > 0) {
- kx = 1;
- } else {
- kx = 1 - (*m - 1) * *incx;
- }
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jy;
- if (y[i__2].r != 0. || y[i__2].i != 0.) {
- i__2 = jy;
- z__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, z__1.i =
- alpha->r * y[i__2].i + alpha->i * y[i__2].r;
- temp.r = z__1.r, temp.i = z__1.i;
- ix = kx;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = i__ + j * a_dim1;
- i__5 = ix;
- z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
- x[i__5].r * temp.i + x[i__5].i * temp.r;
- z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
- a[i__3].r = z__1.r, a[i__3].i = z__1.i;
- ix += *incx;
-/* L30: */
- }
- }
- jy += *incy;
-/* L40: */
- }
- }
-
- return 0;
-
-/* End of ZGERU . */
-
-} /* zgeru_ */
-
-/* Subroutine */ int zhemv_(char *uplo, integer *n, doublecomplex *alpha,
- doublecomplex *a, integer *lda, doublecomplex *x, integer *incx,
- doublecomplex *beta, doublecomplex *y, integer *incy)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
- doublereal d__1;
- doublecomplex z__1, z__2, z__3, z__4;
-
- /* Builtin functions */
- void d_cnjg(doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, j, ix, iy, jx, jy, kx, ky, info;
- static doublecomplex temp1, temp2;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- ZHEMV performs the matrix-vector operation
-
- y := alpha*A*x + beta*y,
-
- where alpha and beta are scalars, x and y are n element vectors and
- A is an n by n hermitian matrix.
-
- Parameters
- ==========
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the upper or lower
- triangular part of the array A is to be referenced as
- follows:
-
- UPLO = 'U' or 'u' Only the upper triangular part of A
- is to be referenced.
-
- UPLO = 'L' or 'l' Only the lower triangular part of A
- is to be referenced.
-
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the order of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- ALPHA - COMPLEX*16 .
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, n ).
- Before entry with UPLO = 'U' or 'u', the leading n by n
- upper triangular part of the array A must contain the upper
- triangular part of the hermitian matrix and the strictly
- lower triangular part of A is not referenced.
- Before entry with UPLO = 'L' or 'l', the leading n by n
- lower triangular part of the array A must contain the lower
- triangular part of the hermitian matrix and the strictly
- upper triangular part of A is not referenced.
- Note that the imaginary parts of the diagonal elements need
- not be set and are assumed to be zero.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, n ).
- Unchanged on exit.
-
- X - COMPLEX*16 array of dimension at least
- ( 1 + ( n - 1 )*abs( INCX ) ).
- Before entry, the incremented array X must contain the n
- element vector x.
- Unchanged on exit.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
- BETA - COMPLEX*16 .
- On entry, BETA specifies the scalar beta. When BETA is
- supplied as zero then Y need not be set on input.
- Unchanged on exit.
-
- Y - COMPLEX*16 array of dimension at least
- ( 1 + ( n - 1 )*abs( INCY ) ).
- Before entry, the incremented array Y must contain the n
- element vector y. On exit, Y is overwritten by the updated
- vector y.
-
- INCY - INTEGER.
- On entry, INCY specifies the increment for the elements of
- Y. INCY must not be zero.
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --x;
- --y;
-
- /* Function Body */
- info = 0;
- if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
- info = 1;
- } else if (*n < 0) {
- info = 2;
- } else if (*lda < max(1,*n)) {
- info = 5;
- } else if (*incx == 0) {
- info = 7;
- } else if (*incy == 0) {
- info = 10;
- }
- if (info != 0) {
- xerbla_("ZHEMV ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0 || ((alpha->r == 0. && alpha->i == 0.) && ((beta->r == 1. &&
- beta->i == 0.)))) {
- return 0;
- }
-
-/* Set up the start points in X and Y. */
-
- if (*incx > 0) {
- kx = 1;
- } else {
- kx = 1 - (*n - 1) * *incx;
- }
- if (*incy > 0) {
- ky = 1;
- } else {
- ky = 1 - (*n - 1) * *incy;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through the triangular part
- of A.
-
- First form y := beta*y.
-*/
-
- if (beta->r != 1. || beta->i != 0.) {
- if (*incy == 1) {
- if ((beta->r == 0. && beta->i == 0.)) {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__;
- y[i__2].r = 0., y[i__2].i = 0.;
-/* L10: */
- }
- } else {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__;
- i__3 = i__;
- z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
- z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
- .r;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
-/* L20: */
- }
- }
- } else {
- iy = ky;
- if ((beta->r == 0. && beta->i == 0.)) {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = iy;
- y[i__2].r = 0., y[i__2].i = 0.;
- iy += *incy;
-/* L30: */
- }
- } else {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = iy;
- i__3 = iy;
- z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
- z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
- .r;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
- iy += *incy;
-/* L40: */
- }
- }
- }
- }
- if ((alpha->r == 0. && alpha->i == 0.)) {
- return 0;
- }
- if (lsame_(uplo, "U")) {
-
-/* Form y when A is stored in upper triangle. */
-
- if ((*incx == 1 && *incy == 1)) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
- alpha->r * x[i__2].i + alpha->i * x[i__2].r;
- temp1.r = z__1.r, temp1.i = z__1.i;
- temp2.r = 0., temp2.i = 0.;
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__;
- i__4 = i__;
- i__5 = i__ + j * a_dim1;
- z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
- z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
- .r;
- z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
- y[i__3].r = z__1.r, y[i__3].i = z__1.i;
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__3 = i__;
- z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
- z__3.r * x[i__3].i + z__3.i * x[i__3].r;
- z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
- temp2.r = z__1.r, temp2.i = z__1.i;
-/* L50: */
- }
- i__2 = j;
- i__3 = j;
- i__4 = j + j * a_dim1;
- d__1 = a[i__4].r;
- z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
- z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
- z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
- alpha->r * temp2.i + alpha->i * temp2.r;
- z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
-/* L60: */
- }
- } else {
- jx = kx;
- jy = ky;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jx;
- z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
- alpha->r * x[i__2].i + alpha->i * x[i__2].r;
- temp1.r = z__1.r, temp1.i = z__1.i;
- temp2.r = 0., temp2.i = 0.;
- ix = kx;
- iy = ky;
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = iy;
- i__4 = iy;
- i__5 = i__ + j * a_dim1;
- z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
- z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
- .r;
- z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
- y[i__3].r = z__1.r, y[i__3].i = z__1.i;
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__3 = ix;
- z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
- z__3.r * x[i__3].i + z__3.i * x[i__3].r;
- z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
- temp2.r = z__1.r, temp2.i = z__1.i;
- ix += *incx;
- iy += *incy;
-/* L70: */
- }
- i__2 = jy;
- i__3 = jy;
- i__4 = j + j * a_dim1;
- d__1 = a[i__4].r;
- z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
- z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
- z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
- alpha->r * temp2.i + alpha->i * temp2.r;
- z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
- jx += *incx;
- jy += *incy;
-/* L80: */
- }
- }
- } else {
-
-/* Form y when A is stored in lower triangle. */
-
- if ((*incx == 1 && *incy == 1)) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
- alpha->r * x[i__2].i + alpha->i * x[i__2].r;
- temp1.r = z__1.r, temp1.i = z__1.i;
- temp2.r = 0., temp2.i = 0.;
- i__2 = j;
- i__3 = j;
- i__4 = j + j * a_dim1;
- d__1 = a[i__4].r;
- z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
- z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- i__3 = i__;
- i__4 = i__;
- i__5 = i__ + j * a_dim1;
- z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
- z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
- .r;
- z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
- y[i__3].r = z__1.r, y[i__3].i = z__1.i;
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__3 = i__;
- z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
- z__3.r * x[i__3].i + z__3.i * x[i__3].r;
- z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
- temp2.r = z__1.r, temp2.i = z__1.i;
-/* L90: */
- }
- i__2 = j;
- i__3 = j;
- z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
- alpha->r * temp2.i + alpha->i * temp2.r;
- z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
-/* L100: */
- }
- } else {
- jx = kx;
- jy = ky;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jx;
- z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
- alpha->r * x[i__2].i + alpha->i * x[i__2].r;
- temp1.r = z__1.r, temp1.i = z__1.i;
- temp2.r = 0., temp2.i = 0.;
- i__2 = jy;
- i__3 = jy;
- i__4 = j + j * a_dim1;
- d__1 = a[i__4].r;
- z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
- z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
- ix = jx;
- iy = jy;
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- ix += *incx;
- iy += *incy;
- i__3 = iy;
- i__4 = iy;
- i__5 = i__ + j * a_dim1;
- z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
- z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
- .r;
- z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
- y[i__3].r = z__1.r, y[i__3].i = z__1.i;
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__3 = ix;
- z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
- z__3.r * x[i__3].i + z__3.i * x[i__3].r;
- z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
- temp2.r = z__1.r, temp2.i = z__1.i;
-/* L110: */
- }
- i__2 = jy;
- i__3 = jy;
- z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
- alpha->r * temp2.i + alpha->i * temp2.r;
- z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
- y[i__2].r = z__1.r, y[i__2].i = z__1.i;
- jx += *incx;
- jy += *incy;
-/* L120: */
- }
- }
- }
-
- return 0;
-
-/* End of ZHEMV . */
-
-} /* zhemv_ */
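/*
 * Usage sketch (editor's addition): zhemv_ with UPLO = 'U' reads only the
 * upper triangle of A; the strictly lower part (a[1] here) is never
 * referenced, and the imaginary part of the diagonal is taken to be zero.
 * f2c.h-style typedefs assumed.
 */
#include <stdio.h>

typedef long int integer;
typedef struct { double r, i; } doublecomplex;

extern int zhemv_(char *uplo, integer *n, doublecomplex *alpha,
                  doublecomplex *a, integer *lda, doublecomplex *x,
                  integer *incx, doublecomplex *beta, doublecomplex *y,
                  integer *incy);

int main(void)
{
    /* Hermitian A = [[2, 1+i], [1-i, 3]]; only the upper triangle stored */
    doublecomplex a[4] = {{2., 0.}, {0., 0.}, {1., 1.}, {3., 0.}};
    doublecomplex x[2] = {{1., 0.}, {1., 0.}};
    doublecomplex y[2] = {{0., 0.}, {0., 0.}};
    doublecomplex alpha = {1., 0.}, beta = {0., 0.};
    integer n = 2, lda = 2, inc = 1;

    zhemv_("U", &n, &alpha, a, &lda, x, &inc, &beta, y, &inc);
    printf("y = (%g, %g) (%g, %g)\n", y[0].r, y[0].i, y[1].r, y[1].i);
    /* expect (3, 1) (4, -1) */
    return 0;
}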
-
-/* Subroutine */ int zher2_(char *uplo, integer *n, doublecomplex *alpha,
- doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
- doublecomplex *a, integer *lda)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
- doublereal d__1;
- doublecomplex z__1, z__2, z__3, z__4;
-
- /* Builtin functions */
- void d_cnjg(doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, j, ix, iy, jx, jy, kx, ky, info;
- static doublecomplex temp1, temp2;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- ZHER2 performs the hermitian rank 2 operation
-
- A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A,
-
- where alpha is a scalar, x and y are n element vectors and A is an n
- by n hermitian matrix.
-
- Parameters
- ==========
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the upper or lower
- triangular part of the array A is to be referenced as
- follows:
-
- UPLO = 'U' or 'u' Only the upper triangular part of A
- is to be referenced.
-
- UPLO = 'L' or 'l' Only the lower triangular part of A
- is to be referenced.
-
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the order of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- ALPHA - COMPLEX*16 .
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- X - COMPLEX*16 array of dimension at least
- ( 1 + ( n - 1 )*abs( INCX ) ).
- Before entry, the incremented array X must contain the n
- element vector x.
- Unchanged on exit.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
- Y - COMPLEX*16 array of dimension at least
- ( 1 + ( n - 1 )*abs( INCY ) ).
- Before entry, the incremented array Y must contain the n
- element vector y.
- Unchanged on exit.
-
- INCY - INTEGER.
- On entry, INCY specifies the increment for the elements of
- Y. INCY must not be zero.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, n ).
- Before entry with UPLO = 'U' or 'u', the leading n by n
- upper triangular part of the array A must contain the upper
- triangular part of the hermitian matrix and the strictly
- lower triangular part of A is not referenced. On exit, the
- upper triangular part of the array A is overwritten by the
- upper triangular part of the updated matrix.
- Before entry with UPLO = 'L' or 'l', the leading n by n
- lower triangular part of the array A must contain the lower
- triangular part of the hermitian matrix and the strictly
- upper triangular part of A is not referenced. On exit, the
- lower triangular part of the array A is overwritten by the
- lower triangular part of the updated matrix.
- Note that the imaginary parts of the diagonal elements need
- not be set, they are assumed to be zero, and on exit they
- are set to zero.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, n ).
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --x;
- --y;
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- info = 0;
- if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
- info = 1;
- } else if (*n < 0) {
- info = 2;
- } else if (*incx == 0) {
- info = 5;
- } else if (*incy == 0) {
- info = 7;
- } else if (*lda < max(1,*n)) {
- info = 9;
- }
- if (info != 0) {
- xerbla_("ZHER2 ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0 || (alpha->r == 0. && alpha->i == 0.)) {
- return 0;
- }
-
-/*
- Set up the start points in X and Y if the increments are not both
- unity.
-*/
-
- if (*incx != 1 || *incy != 1) {
- if (*incx > 0) {
- kx = 1;
- } else {
- kx = 1 - (*n - 1) * *incx;
- }
- if (*incy > 0) {
- ky = 1;
- } else {
- ky = 1 - (*n - 1) * *incy;
- }
- jx = kx;
- jy = ky;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through the triangular part
- of A.
-*/
-
- if (lsame_(uplo, "U")) {
-
-/* Form A when A is stored in the upper triangle. */
-
- if ((*incx == 1 && *incy == 1)) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- i__3 = j;
- if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
- y[i__3].i != 0.)) {
- d_cnjg(&z__2, &y[j]);
- z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
- alpha->r * z__2.i + alpha->i * z__2.r;
- temp1.r = z__1.r, temp1.i = z__1.i;
- i__2 = j;
- z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
- z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
- .r;
- d_cnjg(&z__1, &z__2);
- temp2.r = z__1.r, temp2.i = z__1.i;
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = i__ + j * a_dim1;
- i__5 = i__;
- z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
- z__3.i = x[i__5].r * temp1.i + x[i__5].i *
- temp1.r;
- z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
- z__3.i;
- i__6 = i__;
- z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
- z__4.i = y[i__6].r * temp2.i + y[i__6].i *
- temp2.r;
- z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
- a[i__3].r = z__1.r, a[i__3].i = z__1.i;
-/* L10: */
- }
- i__2 = j + j * a_dim1;
- i__3 = j + j * a_dim1;
- i__4 = j;
- z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
- z__2.i = x[i__4].r * temp1.i + x[i__4].i *
- temp1.r;
- i__5 = j;
- z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
- z__3.i = y[i__5].r * temp2.i + y[i__5].i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- d__1 = a[i__3].r + z__1.r;
- a[i__2].r = d__1, a[i__2].i = 0.;
- } else {
- i__2 = j + j * a_dim1;
- i__3 = j + j * a_dim1;
- d__1 = a[i__3].r;
- a[i__2].r = d__1, a[i__2].i = 0.;
- }
-/* L20: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jx;
- i__3 = jy;
- if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
- y[i__3].i != 0.)) {
- d_cnjg(&z__2, &y[jy]);
- z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
- alpha->r * z__2.i + alpha->i * z__2.r;
- temp1.r = z__1.r, temp1.i = z__1.i;
- i__2 = jx;
- z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
- z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
- .r;
- d_cnjg(&z__1, &z__2);
- temp2.r = z__1.r, temp2.i = z__1.i;
- ix = kx;
- iy = ky;
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = i__ + j * a_dim1;
- i__5 = ix;
- z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
- z__3.i = x[i__5].r * temp1.i + x[i__5].i *
- temp1.r;
- z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
- z__3.i;
- i__6 = iy;
- z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
- z__4.i = y[i__6].r * temp2.i + y[i__6].i *
- temp2.r;
- z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
- a[i__3].r = z__1.r, a[i__3].i = z__1.i;
- ix += *incx;
- iy += *incy;
-/* L30: */
- }
- i__2 = j + j * a_dim1;
- i__3 = j + j * a_dim1;
- i__4 = jx;
- z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
- z__2.i = x[i__4].r * temp1.i + x[i__4].i *
- temp1.r;
- i__5 = jy;
- z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
- z__3.i = y[i__5].r * temp2.i + y[i__5].i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- d__1 = a[i__3].r + z__1.r;
- a[i__2].r = d__1, a[i__2].i = 0.;
- } else {
- i__2 = j + j * a_dim1;
- i__3 = j + j * a_dim1;
- d__1 = a[i__3].r;
- a[i__2].r = d__1, a[i__2].i = 0.;
- }
- jx += *incx;
- jy += *incy;
-/* L40: */
- }
- }
- } else {
-
-/* Form A when A is stored in the lower triangle. */
-
- if ((*incx == 1 && *incy == 1)) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- i__3 = j;
- if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
- y[i__3].i != 0.)) {
- d_cnjg(&z__2, &y[j]);
- z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
- alpha->r * z__2.i + alpha->i * z__2.r;
- temp1.r = z__1.r, temp1.i = z__1.i;
- i__2 = j;
- z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
- z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
- .r;
- d_cnjg(&z__1, &z__2);
- temp2.r = z__1.r, temp2.i = z__1.i;
- i__2 = j + j * a_dim1;
- i__3 = j + j * a_dim1;
- i__4 = j;
- z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
- z__2.i = x[i__4].r * temp1.i + x[i__4].i *
- temp1.r;
- i__5 = j;
- z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
- z__3.i = y[i__5].r * temp2.i + y[i__5].i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- d__1 = a[i__3].r + z__1.r;
- a[i__2].r = d__1, a[i__2].i = 0.;
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = i__ + j * a_dim1;
- i__5 = i__;
- z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
- z__3.i = x[i__5].r * temp1.i + x[i__5].i *
- temp1.r;
- z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
- z__3.i;
- i__6 = i__;
- z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
- z__4.i = y[i__6].r * temp2.i + y[i__6].i *
- temp2.r;
- z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
- a[i__3].r = z__1.r, a[i__3].i = z__1.i;
-/* L50: */
- }
- } else {
- i__2 = j + j * a_dim1;
- i__3 = j + j * a_dim1;
- d__1 = a[i__3].r;
- a[i__2].r = d__1, a[i__2].i = 0.;
- }
-/* L60: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jx;
- i__3 = jy;
- if (x[i__2].r != 0. || x[i__2].i != 0. || (y[i__3].r != 0. ||
- y[i__3].i != 0.)) {
- d_cnjg(&z__2, &y[jy]);
- z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
- alpha->r * z__2.i + alpha->i * z__2.r;
- temp1.r = z__1.r, temp1.i = z__1.i;
- i__2 = jx;
- z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
- z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
- .r;
- d_cnjg(&z__1, &z__2);
- temp2.r = z__1.r, temp2.i = z__1.i;
- i__2 = j + j * a_dim1;
- i__3 = j + j * a_dim1;
- i__4 = jx;
- z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
- z__2.i = x[i__4].r * temp1.i + x[i__4].i *
- temp1.r;
- i__5 = jy;
- z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
- z__3.i = y[i__5].r * temp2.i + y[i__5].i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- d__1 = a[i__3].r + z__1.r;
- a[i__2].r = d__1, a[i__2].i = 0.;
- ix = jx;
- iy = jy;
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- ix += *incx;
- iy += *incy;
- i__3 = i__ + j * a_dim1;
- i__4 = i__ + j * a_dim1;
- i__5 = ix;
- z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
- z__3.i = x[i__5].r * temp1.i + x[i__5].i *
- temp1.r;
- z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
- z__3.i;
- i__6 = iy;
- z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
- z__4.i = y[i__6].r * temp2.i + y[i__6].i *
- temp2.r;
- z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
- a[i__3].r = z__1.r, a[i__3].i = z__1.i;
-/* L70: */
- }
- } else {
- i__2 = j + j * a_dim1;
- i__3 = j + j * a_dim1;
- d__1 = a[i__3].r;
- a[i__2].r = d__1, a[i__2].i = 0.;
- }
- jx += *incx;
- jy += *incy;
-/* L80: */
- }
- }
- }
-
- return 0;
-
-/* End of ZHER2 . */
-
-} /* zher2_ */
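-
-/*
-   A minimal usage sketch for the zher2_ translation above: the hermitian
-   rank-2 update A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A
-   on a 2 by 2 matrix. The typedefs and the prototype are assumptions: they
-   mirror a typical f2c.h and the standard ZHER2 argument order, since the
-   routine's header lies outside this hunk. Link against this translation
-   unit to run it.
-*/
-#include <stdio.h>
-
-typedef int integer;                               /* assumed f2c typedef */
-typedef double doublereal;                         /* assumed f2c typedef */
-typedef struct { doublereal r, i; } doublecomplex; /* assumed f2c typedef */
-
-extern int zher2_(char *uplo, integer *n, doublecomplex *alpha,
-                  doublecomplex *x, integer *incx,
-                  doublecomplex *y, integer *incy,
-                  doublecomplex *a, integer *lda);
-
-int main(void)
-{
-    /* Column-major 2x2 hermitian A; only the upper triangle is referenced. */
-    doublecomplex a[4] = {{4.,0.}, {0.,0.}, {1.,1.}, {5.,0.}};
-    doublecomplex x[2] = {{1.,0.}, {0.,1.}};
-    doublecomplex y[2] = {{2.,0.}, {1.,-1.}};
-    doublecomplex alpha = {1., 0.};
-    integer n = 2, inc1 = 1, lda = 2;
-
-    zher2_("U", &n, &alpha, x, &inc1, y, &inc1, a, &lda);
-
-    /* The routine forces the imaginary parts of the diagonal to zero. */
-    printf("A(1,1) = %g%+gi  A(1,2) = %g%+gi  A(2,2) = %g%+gi\n",
-           a[0].r, a[0].i, a[2].r, a[2].i, a[3].r, a[3].i);
-    return 0;
-}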
-
-/* Subroutine */ int zher2k_(char *uplo, char *trans, integer *n, integer *k,
- doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
- b, integer *ldb, doublereal *beta, doublecomplex *c__, integer *ldc)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
- i__3, i__4, i__5, i__6, i__7;
- doublereal d__1;
- doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;
-
- /* Builtin functions */
- void d_cnjg(doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, j, l, info;
- static doublecomplex temp1, temp2;
- extern logical lsame_(char *, char *);
- static integer nrowa;
- static logical upper;
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- ZHER2K performs one of the hermitian rank 2k operations
-
- C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) + beta*C,
-
- or
-
- C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A + beta*C,
-
- where alpha and beta are scalars with beta real, C is an n by n
- hermitian matrix and A and B are n by k matrices in the first case
- and k by n matrices in the second case.
-
- Parameters
- ==========
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the upper or lower
- triangular part of the array C is to be referenced as
- follows:
-
- UPLO = 'U' or 'u' Only the upper triangular part of C
- is to be referenced.
-
- UPLO = 'L' or 'l' Only the lower triangular part of C
- is to be referenced.
-
- Unchanged on exit.
-
- TRANS - CHARACTER*1.
- On entry, TRANS specifies the operation to be performed as
- follows:
-
- TRANS = 'N' or 'n' C := alpha*A*conjg( B' ) +
- conjg( alpha )*B*conjg( A' ) +
- beta*C.
-
- TRANS = 'C' or 'c' C := alpha*conjg( A' )*B +
- conjg( alpha )*conjg( B' )*A +
- beta*C.
-
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the order of the matrix C. N must be
- at least zero.
- Unchanged on exit.
-
- K - INTEGER.
- On entry with TRANS = 'N' or 'n', K specifies the number
- of columns of the matrices A and B, and on entry with
- TRANS = 'C' or 'c', K specifies the number of rows of the
- matrices A and B. K must be at least zero.
- Unchanged on exit.
-
- ALPHA - COMPLEX*16 .
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is
- k when TRANS = 'N' or 'n', and is n otherwise.
- Before entry with TRANS = 'N' or 'n', the leading n by k
- part of the array A must contain the matrix A, otherwise
- the leading k by n part of the array A must contain the
- matrix A.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. When TRANS = 'N' or 'n'
- then LDA must be at least max( 1, n ), otherwise LDA must
- be at least max( 1, k ).
- Unchanged on exit.
-
- B - COMPLEX*16 array of DIMENSION ( LDB, kb ), where kb is
- k when TRANS = 'N' or 'n', and is n otherwise.
- Before entry with TRANS = 'N' or 'n', the leading n by k
- part of the array B must contain the matrix B, otherwise
- the leading k by n part of the array B must contain the
- matrix B.
- Unchanged on exit.
-
- LDB - INTEGER.
- On entry, LDB specifies the first dimension of B as declared
- in the calling (sub) program. When TRANS = 'N' or 'n'
- then LDB must be at least max( 1, n ), otherwise LDB must
- be at least max( 1, k ).
- Unchanged on exit.
-
- BETA - DOUBLE PRECISION .
- On entry, BETA specifies the scalar beta.
- Unchanged on exit.
-
- C - COMPLEX*16 array of DIMENSION ( LDC, n ).
- Before entry with UPLO = 'U' or 'u', the leading n by n
- upper triangular part of the array C must contain the upper
- triangular part of the hermitian matrix and the strictly
- lower triangular part of C is not referenced. On exit, the
- upper triangular part of the array C is overwritten by the
- upper triangular part of the updated matrix.
- Before entry with UPLO = 'L' or 'l', the leading n by n
- lower triangular part of the array C must contain the lower
- triangular part of the hermitian matrix and the strictly
- upper triangular part of C is not referenced. On exit, the
- lower triangular part of the array C is overwritten by the
- lower triangular part of the updated matrix.
- Note that the imaginary parts of the diagonal elements need
- not be set, they are assumed to be zero, and on exit they
- are set to zero.
-
- LDC - INTEGER.
- On entry, LDC specifies the first dimension of C as declared
- in the calling (sub) program. LDC must be at least
- max( 1, n ).
- Unchanged on exit.
-
-
- Level 3 Blas routine.
-
- -- Written on 8-February-1989.
- Jack Dongarra, Argonne National Laboratory.
- Iain Duff, AERE Harwell.
- Jeremy Du Croz, Numerical Algorithms Group Ltd.
- Sven Hammarling, Numerical Algorithms Group Ltd.
-
- -- Modified 8-Nov-93 to set C(J,J) to DBLE( C(J,J) ) when BETA = 1.
- Ed Anderson, Cray Research Inc.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
-
- /* Function Body */
- if (lsame_(trans, "N")) {
- nrowa = *n;
- } else {
- nrowa = *k;
- }
- upper = lsame_(uplo, "U");
-
- info = 0;
- if ((! upper && ! lsame_(uplo, "L"))) {
- info = 1;
- } else if ((! lsame_(trans, "N") && ! lsame_(trans,
- "C"))) {
- info = 2;
- } else if (*n < 0) {
- info = 3;
- } else if (*k < 0) {
- info = 4;
- } else if (*lda < max(1,nrowa)) {
- info = 7;
- } else if (*ldb < max(1,nrowa)) {
- info = 9;
- } else if (*ldc < max(1,*n)) {
- info = 12;
- }
- if (info != 0) {
- xerbla_("ZHER2K", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0 || (((alpha->r == 0. && alpha->i == 0.) || *k == 0) && *beta
- == 1.)) {
- return 0;
- }
-
-/* And when alpha.eq.zero. */
-
- if ((alpha->r == 0. && alpha->i == 0.)) {
- if (upper) {
- if (*beta == 0.) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L10: */
- }
-/* L20: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
- i__4].i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L30: */
- }
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = *beta * c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
-/* L40: */
- }
- }
- } else {
- if (*beta == 0.) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L50: */
- }
-/* L60: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = *beta * c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
- i__4].i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L70: */
- }
-/* L80: */
- }
- }
- }
- return 0;
- }
-
-/* Start the operations. */
-
- if (lsame_(trans, "N")) {
-
-/*
- Form C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) +
- C.
-*/
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*beta == 0.) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L90: */
- }
- } else if (*beta != 1.) {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
- i__4].i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L100: */
- }
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = *beta * c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- } else {
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- i__3 = j + l * a_dim1;
- i__4 = j + l * b_dim1;
- if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r !=
- 0. || b[i__4].i != 0.)) {
- d_cnjg(&z__2, &b[j + l * b_dim1]);
- z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
- z__1.i = alpha->r * z__2.i + alpha->i *
- z__2.r;
- temp1.r = z__1.r, temp1.i = z__1.i;
- i__3 = j + l * a_dim1;
- z__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
- z__2.i = alpha->r * a[i__3].i + alpha->i * a[
- i__3].r;
- d_cnjg(&z__1, &z__2);
- temp2.r = z__1.r, temp2.i = z__1.i;
- i__3 = j - 1;
- for (i__ = 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * c_dim1;
- i__5 = i__ + j * c_dim1;
- i__6 = i__ + l * a_dim1;
- z__3.r = a[i__6].r * temp1.r - a[i__6].i *
- temp1.i, z__3.i = a[i__6].r * temp1.i + a[
- i__6].i * temp1.r;
- z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5]
- .i + z__3.i;
- i__7 = i__ + l * b_dim1;
- z__4.r = b[i__7].r * temp2.r - b[i__7].i *
- temp2.i, z__4.i = b[i__7].r * temp2.i + b[
- i__7].i * temp2.r;
- z__1.r = z__2.r + z__4.r, z__1.i = z__2.i +
- z__4.i;
- c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
-/* L110: */
- }
- i__3 = j + j * c_dim1;
- i__4 = j + j * c_dim1;
- i__5 = j + l * a_dim1;
- z__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
- z__2.i = a[i__5].r * temp1.i + a[i__5].i *
- temp1.r;
- i__6 = j + l * b_dim1;
- z__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
- z__3.i = b[i__6].r * temp2.i + b[i__6].i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- d__1 = c__[i__4].r + z__1.r;
- c__[i__3].r = d__1, c__[i__3].i = 0.;
- }
-/* L120: */
- }
-/* L130: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*beta == 0.) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L140: */
- }
- } else if (*beta != 1.) {
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
- i__4].i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L150: */
- }
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = *beta * c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- } else {
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- i__3 = j + l * a_dim1;
- i__4 = j + l * b_dim1;
- if (a[i__3].r != 0. || a[i__3].i != 0. || (b[i__4].r !=
- 0. || b[i__4].i != 0.)) {
- d_cnjg(&z__2, &b[j + l * b_dim1]);
- z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
- z__1.i = alpha->r * z__2.i + alpha->i *
- z__2.r;
- temp1.r = z__1.r, temp1.i = z__1.i;
- i__3 = j + l * a_dim1;
- z__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
- z__2.i = alpha->r * a[i__3].i + alpha->i * a[
- i__3].r;
- d_cnjg(&z__1, &z__2);
- temp2.r = z__1.r, temp2.i = z__1.i;
- i__3 = *n;
- for (i__ = j + 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * c_dim1;
- i__5 = i__ + j * c_dim1;
- i__6 = i__ + l * a_dim1;
- z__3.r = a[i__6].r * temp1.r - a[i__6].i *
- temp1.i, z__3.i = a[i__6].r * temp1.i + a[
- i__6].i * temp1.r;
- z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5]
- .i + z__3.i;
- i__7 = i__ + l * b_dim1;
- z__4.r = b[i__7].r * temp2.r - b[i__7].i *
- temp2.i, z__4.i = b[i__7].r * temp2.i + b[
- i__7].i * temp2.r;
- z__1.r = z__2.r + z__4.r, z__1.i = z__2.i +
- z__4.i;
- c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
-/* L160: */
- }
- i__3 = j + j * c_dim1;
- i__4 = j + j * c_dim1;
- i__5 = j + l * a_dim1;
- z__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
- z__2.i = a[i__5].r * temp1.i + a[i__5].i *
- temp1.r;
- i__6 = j + l * b_dim1;
- z__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
- z__3.i = b[i__6].r * temp2.i + b[i__6].i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- d__1 = c__[i__4].r + z__1.r;
- c__[i__3].r = d__1, c__[i__3].i = 0.;
- }
-/* L170: */
- }
-/* L180: */
- }
- }
- } else {
-
-/*
- Form C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A +
- C.
-*/
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp1.r = 0., temp1.i = 0.;
- temp2.r = 0., temp2.i = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- d_cnjg(&z__3, &a[l + i__ * a_dim1]);
- i__4 = l + j * b_dim1;
- z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
- z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
- .r;
- z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i;
- temp1.r = z__1.r, temp1.i = z__1.i;
- d_cnjg(&z__3, &b[l + i__ * b_dim1]);
- i__4 = l + j * a_dim1;
- z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
- z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
- .r;
- z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
- temp2.r = z__1.r, temp2.i = z__1.i;
-/* L190: */
- }
- if (i__ == j) {
- if (*beta == 0.) {
- i__3 = j + j * c_dim1;
- z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
- z__2.i = alpha->r * temp1.i + alpha->i *
- temp1.r;
- d_cnjg(&z__4, alpha);
- z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
- z__3.i = z__4.r * temp2.i + z__4.i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
- z__3.i;
- d__1 = z__1.r;
- c__[i__3].r = d__1, c__[i__3].i = 0.;
- } else {
- i__3 = j + j * c_dim1;
- i__4 = j + j * c_dim1;
- z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
- z__2.i = alpha->r * temp1.i + alpha->i *
- temp1.r;
- d_cnjg(&z__4, alpha);
- z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
- z__3.i = z__4.r * temp2.i + z__4.i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
- z__3.i;
- d__1 = *beta * c__[i__4].r + z__1.r;
- c__[i__3].r = d__1, c__[i__3].i = 0.;
- }
- } else {
- if (*beta == 0.) {
- i__3 = i__ + j * c_dim1;
- z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
- z__2.i = alpha->r * temp1.i + alpha->i *
- temp1.r;
- d_cnjg(&z__4, alpha);
- z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
- z__3.i = z__4.r * temp2.i + z__4.i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
- z__3.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- } else {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__3.r = *beta * c__[i__4].r, z__3.i = *beta *
- c__[i__4].i;
- z__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
- z__4.i = alpha->r * temp1.i + alpha->i *
- temp1.r;
- z__2.r = z__3.r + z__4.r, z__2.i = z__3.i +
- z__4.i;
- d_cnjg(&z__6, alpha);
- z__5.r = z__6.r * temp2.r - z__6.i * temp2.i,
- z__5.i = z__6.r * temp2.i + z__6.i *
- temp2.r;
- z__1.r = z__2.r + z__5.r, z__1.i = z__2.i +
- z__5.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- }
- }
-/* L200: */
- }
-/* L210: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- temp1.r = 0., temp1.i = 0.;
- temp2.r = 0., temp2.i = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- d_cnjg(&z__3, &a[l + i__ * a_dim1]);
- i__4 = l + j * b_dim1;
- z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
- z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
- .r;
- z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i;
- temp1.r = z__1.r, temp1.i = z__1.i;
- d_cnjg(&z__3, &b[l + i__ * b_dim1]);
- i__4 = l + j * a_dim1;
- z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
- z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
- .r;
- z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
- temp2.r = z__1.r, temp2.i = z__1.i;
-/* L220: */
- }
- if (i__ == j) {
- if (*beta == 0.) {
- i__3 = j + j * c_dim1;
- z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
- z__2.i = alpha->r * temp1.i + alpha->i *
- temp1.r;
- d_cnjg(&z__4, alpha);
- z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
- z__3.i = z__4.r * temp2.i + z__4.i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
- z__3.i;
- d__1 = z__1.r;
- c__[i__3].r = d__1, c__[i__3].i = 0.;
- } else {
- i__3 = j + j * c_dim1;
- i__4 = j + j * c_dim1;
- z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
- z__2.i = alpha->r * temp1.i + alpha->i *
- temp1.r;
- d_cnjg(&z__4, alpha);
- z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
- z__3.i = z__4.r * temp2.i + z__4.i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
- z__3.i;
- d__1 = *beta * c__[i__4].r + z__1.r;
- c__[i__3].r = d__1, c__[i__3].i = 0.;
- }
- } else {
- if (*beta == 0.) {
- i__3 = i__ + j * c_dim1;
- z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
- z__2.i = alpha->r * temp1.i + alpha->i *
- temp1.r;
- d_cnjg(&z__4, alpha);
- z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
- z__3.i = z__4.r * temp2.i + z__4.i *
- temp2.r;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
- z__3.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- } else {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__3.r = *beta * c__[i__4].r, z__3.i = *beta *
- c__[i__4].i;
- z__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
- z__4.i = alpha->r * temp1.i + alpha->i *
- temp1.r;
- z__2.r = z__3.r + z__4.r, z__2.i = z__3.i +
- z__4.i;
- d_cnjg(&z__6, alpha);
- z__5.r = z__6.r * temp2.r - z__6.i * temp2.i,
- z__5.i = z__6.r * temp2.i + z__6.i *
- temp2.r;
- z__1.r = z__2.r + z__5.r, z__1.i = z__2.i +
- z__5.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- }
- }
-/* L230: */
- }
-/* L240: */
- }
- }
- }
-
- return 0;
-
-/* End of ZHER2K. */
-
-} /* zher2k_ */
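-
-/*
-   A minimal usage sketch for zher2k_ above: with TRANS = 'N' and k = 1 it
-   forms C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) + beta*C
-   on a 2 by 2 hermitian C. The typedefs are assumptions mirroring a
-   typical f2c.h; the prototype matches the definition above.
-*/
-#include <stdio.h>
-
-typedef int integer;                               /* assumed f2c typedef */
-typedef double doublereal;                         /* assumed f2c typedef */
-typedef struct { doublereal r, i; } doublecomplex; /* assumed f2c typedef */
-
-extern int zher2k_(char *uplo, char *trans, integer *n, integer *k,
-                   doublecomplex *alpha, doublecomplex *a, integer *lda,
-                   doublecomplex *b, integer *ldb, doublereal *beta,
-                   doublecomplex *c__, integer *ldc);
-
-int main(void)
-{
-    integer n = 2, k = 1, ld = 2;
-    doublecomplex a[2] = {{1.,1.}, {0.,1.}};                   /* n by k */
-    doublecomplex b[2] = {{2.,0.}, {1.,0.}};                   /* n by k */
-    doublecomplex c[4] = {{1.,0.}, {0.,0.}, {0.,0.}, {1.,0.}}; /* C = I */
-    doublecomplex alpha = {1., 0.};
-    doublereal beta = 1.;
-
-    zher2k_("U", "N", &n, &k, &alpha, a, &ld, b, &ld, &beta, c, &ld);
-
-    printf("C(1,1) = %g%+gi  C(1,2) = %g%+gi  C(2,2) = %g%+gi\n",
-           c[0].r, c[0].i, c[2].r, c[2].i, c[3].r, c[3].i);
-    return 0;
-}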
-
-/* Subroutine */ int zherk_(char *uplo, char *trans, integer *n, integer *k,
- doublereal *alpha, doublecomplex *a, integer *lda, doublereal *beta,
- doublecomplex *c__, integer *ldc)
-{
- /* System generated locals */
- integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5,
- i__6;
- doublereal d__1;
- doublecomplex z__1, z__2, z__3;
-
- /* Builtin functions */
- void d_cnjg(doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, j, l, info;
- static doublecomplex temp;
- extern logical lsame_(char *, char *);
- static integer nrowa;
- static doublereal rtemp;
- static logical upper;
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- Purpose
- =======
-
- ZHERK performs one of the hermitian rank k operations
-
- C := alpha*A*conjg( A' ) + beta*C,
-
- or
-
- C := alpha*conjg( A' )*A + beta*C,
-
- where alpha and beta are real scalars, C is an n by n hermitian
- matrix and A is an n by k matrix in the first case and a k by n
- matrix in the second case.
-
- Parameters
- ==========
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the upper or lower
- triangular part of the array C is to be referenced as
- follows:
-
- UPLO = 'U' or 'u' Only the upper triangular part of C
- is to be referenced.
-
- UPLO = 'L' or 'l' Only the lower triangular part of C
- is to be referenced.
-
- Unchanged on exit.
-
- TRANS - CHARACTER*1.
- On entry, TRANS specifies the operation to be performed as
- follows:
-
- TRANS = 'N' or 'n' C := alpha*A*conjg( A' ) + beta*C.
-
- TRANS = 'C' or 'c' C := alpha*conjg( A' )*A + beta*C.
-
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the order of the matrix C. N must be
- at least zero.
- Unchanged on exit.
-
- K - INTEGER.
- On entry with TRANS = 'N' or 'n', K specifies the number
- of columns of the matrix A, and on entry with
- TRANS = 'C' or 'c', K specifies the number of rows of the
- matrix A. K must be at least zero.
- Unchanged on exit.
-
- ALPHA - DOUBLE PRECISION .
- On entry, ALPHA specifies the scalar alpha.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is
- k when TRANS = 'N' or 'n', and is n otherwise.
- Before entry with TRANS = 'N' or 'n', the leading n by k
- part of the array A must contain the matrix A, otherwise
- the leading k by n part of the array A must contain the
- matrix A.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. When TRANS = 'N' or 'n'
- then LDA must be at least max( 1, n ), otherwise LDA must
- be at least max( 1, k ).
- Unchanged on exit.
-
- BETA - DOUBLE PRECISION.
- On entry, BETA specifies the scalar beta.
- Unchanged on exit.
-
- C - COMPLEX*16 array of DIMENSION ( LDC, n ).
- Before entry with UPLO = 'U' or 'u', the leading n by n
- upper triangular part of the array C must contain the upper
- triangular part of the hermitian matrix and the strictly
- lower triangular part of C is not referenced. On exit, the
- upper triangular part of the array C is overwritten by the
- upper triangular part of the updated matrix.
- Before entry with UPLO = 'L' or 'l', the leading n by n
- lower triangular part of the array C must contain the lower
- triangular part of the hermitian matrix and the strictly
- upper triangular part of C is not referenced. On exit, the
- lower triangular part of the array C is overwritten by the
- lower triangular part of the updated matrix.
- Note that the imaginary parts of the diagonal elements need
- not be set, they are assumed to be zero, and on exit they
- are set to zero.
-
- LDC - INTEGER.
- On entry, LDC specifies the first dimension of C as declared
- in the calling (sub) program. LDC must be at least
- max( 1, n ).
- Unchanged on exit.
-
-
- Level 3 Blas routine.
-
- -- Written on 8-February-1989.
- Jack Dongarra, Argonne National Laboratory.
- Iain Duff, AERE Harwell.
- Jeremy Du Croz, Numerical Algorithms Group Ltd.
- Sven Hammarling, Numerical Algorithms Group Ltd.
-
- -- Modified 8-Nov-93 to set C(J,J) to DBLE( C(J,J) ) when BETA = 1.
- Ed Anderson, Cray Research Inc.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
-
- /* Function Body */
- if (lsame_(trans, "N")) {
- nrowa = *n;
- } else {
- nrowa = *k;
- }
- upper = lsame_(uplo, "U");
-
- info = 0;
- if ((! upper && ! lsame_(uplo, "L"))) {
- info = 1;
- } else if ((! lsame_(trans, "N") && ! lsame_(trans,
- "C"))) {
- info = 2;
- } else if (*n < 0) {
- info = 3;
- } else if (*k < 0) {
- info = 4;
- } else if (*lda < max(1,nrowa)) {
- info = 7;
- } else if (*ldc < max(1,*n)) {
- info = 10;
- }
- if (info != 0) {
- xerbla_("ZHERK ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0 || ((*alpha == 0. || *k == 0) && *beta == 1.)) {
- return 0;
- }
-
-/* And when alpha.eq.zero. */
-
- if (*alpha == 0.) {
- if (upper) {
- if (*beta == 0.) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L10: */
- }
-/* L20: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
- i__4].i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L30: */
- }
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = *beta * c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
-/* L40: */
- }
- }
- } else {
- if (*beta == 0.) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L50: */
- }
-/* L60: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = *beta * c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
- i__4].i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L70: */
- }
-/* L80: */
- }
- }
- }
- return 0;
- }
-
-/* Start the operations. */
-
- if (lsame_(trans, "N")) {
-
-/* Form C := alpha*A*conjg( A' ) + beta*C. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*beta == 0.) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L90: */
- }
- } else if (*beta != 1.) {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
- i__4].i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L100: */
- }
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = *beta * c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- } else {
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- i__3 = j + l * a_dim1;
- if (a[i__3].r != 0. || a[i__3].i != 0.) {
- d_cnjg(&z__2, &a[j + l * a_dim1]);
- z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
- i__3 = j - 1;
- for (i__ = 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * c_dim1;
- i__5 = i__ + j * c_dim1;
- i__6 = i__ + l * a_dim1;
- z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
- z__2.i = temp.r * a[i__6].i + temp.i * a[
- i__6].r;
- z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
- .i + z__2.i;
- c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
-/* L110: */
- }
- i__3 = j + j * c_dim1;
- i__4 = j + j * c_dim1;
- i__5 = i__ + l * a_dim1;
- z__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
- z__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
- .r;
- d__1 = c__[i__4].r + z__1.r;
- c__[i__3].r = d__1, c__[i__3].i = 0.;
- }
-/* L120: */
- }
-/* L130: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*beta == 0.) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- c__[i__3].r = 0., c__[i__3].i = 0.;
-/* L140: */
- }
- } else if (*beta != 1.) {
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = *beta * c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * c_dim1;
- i__4 = i__ + j * c_dim1;
- z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
- i__4].i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
-/* L150: */
- }
- } else {
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- }
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- i__3 = j + l * a_dim1;
- if (a[i__3].r != 0. || a[i__3].i != 0.) {
- d_cnjg(&z__2, &a[j + l * a_dim1]);
- z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
- i__3 = j + j * c_dim1;
- i__4 = j + j * c_dim1;
- i__5 = j + l * a_dim1;
- z__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
- z__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
- .r;
- d__1 = c__[i__4].r + z__1.r;
- c__[i__3].r = d__1, c__[i__3].i = 0.;
- i__3 = *n;
- for (i__ = j + 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * c_dim1;
- i__5 = i__ + j * c_dim1;
- i__6 = i__ + l * a_dim1;
- z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
- z__2.i = temp.r * a[i__6].i + temp.i * a[
- i__6].r;
- z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
- .i + z__2.i;
- c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
-/* L160: */
- }
- }
-/* L170: */
- }
-/* L180: */
- }
- }
- } else {
-
-/* Form C := alpha*conjg( A' )*A + beta*C. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp.r = 0., temp.i = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- d_cnjg(&z__3, &a[l + i__ * a_dim1]);
- i__4 = l + j * a_dim1;
- z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
- z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
- .r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L190: */
- }
- if (*beta == 0.) {
- i__3 = i__ + j * c_dim1;
- z__1.r = *alpha * temp.r, z__1.i = *alpha * temp.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- } else {
- i__3 = i__ + j * c_dim1;
- z__2.r = *alpha * temp.r, z__2.i = *alpha * temp.i;
- i__4 = i__ + j * c_dim1;
- z__3.r = *beta * c__[i__4].r, z__3.i = *beta * c__[
- i__4].i;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- }
-/* L200: */
- }
- rtemp = 0.;
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- d_cnjg(&z__3, &a[l + j * a_dim1]);
- i__3 = l + j * a_dim1;
- z__2.r = z__3.r * a[i__3].r - z__3.i * a[i__3].i, z__2.i =
- z__3.r * a[i__3].i + z__3.i * a[i__3].r;
- z__1.r = rtemp + z__2.r, z__1.i = z__2.i;
- rtemp = z__1.r;
-/* L210: */
- }
- if (*beta == 0.) {
- i__2 = j + j * c_dim1;
- d__1 = *alpha * rtemp;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- } else {
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = *alpha * rtemp + *beta * c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- }
-/* L220: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- rtemp = 0.;
- i__2 = *k;
- for (l = 1; l <= i__2; ++l) {
- d_cnjg(&z__3, &a[l + j * a_dim1]);
- i__3 = l + j * a_dim1;
- z__2.r = z__3.r * a[i__3].r - z__3.i * a[i__3].i, z__2.i =
- z__3.r * a[i__3].i + z__3.i * a[i__3].r;
- z__1.r = rtemp + z__2.r, z__1.i = z__2.i;
- rtemp = z__1.r;
-/* L230: */
- }
- if (*beta == 0.) {
- i__2 = j + j * c_dim1;
- d__1 = *alpha * rtemp;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- } else {
- i__2 = j + j * c_dim1;
- i__3 = j + j * c_dim1;
- d__1 = *alpha * rtemp + *beta * c__[i__3].r;
- c__[i__2].r = d__1, c__[i__2].i = 0.;
- }
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- temp.r = 0., temp.i = 0.;
- i__3 = *k;
- for (l = 1; l <= i__3; ++l) {
- d_cnjg(&z__3, &a[l + i__ * a_dim1]);
- i__4 = l + j * a_dim1;
- z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
- z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
- .r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L240: */
- }
- if (*beta == 0.) {
- i__3 = i__ + j * c_dim1;
- z__1.r = *alpha * temp.r, z__1.i = *alpha * temp.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- } else {
- i__3 = i__ + j * c_dim1;
- z__2.r = *alpha * temp.r, z__2.i = *alpha * temp.i;
- i__4 = i__ + j * c_dim1;
- z__3.r = *beta * c__[i__4].r, z__3.i = *beta * c__[
- i__4].i;
- z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
- c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
- }
-/* L250: */
- }
-/* L260: */
- }
- }
- }
-
- return 0;
-
-/* End of ZHERK . */
-
-} /* zherk_ */
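-
-/*
-   A minimal usage sketch for zherk_ above. Unlike ZHER2K, both ALPHA and
-   BETA are real (doublereal) here; with TRANS = 'N' and k = 1 the call
-   forms C := alpha*A*conjg( A' ) + beta*C. Typedefs are assumptions
-   mirroring a typical f2c.h.
-*/
-#include <stdio.h>
-
-typedef int integer;                               /* assumed f2c typedef */
-typedef double doublereal;                         /* assumed f2c typedef */
-typedef struct { doublereal r, i; } doublecomplex; /* assumed f2c typedef */
-
-extern int zherk_(char *uplo, char *trans, integer *n, integer *k,
-                  doublereal *alpha, doublecomplex *a, integer *lda,
-                  doublereal *beta, doublecomplex *c__, integer *ldc);
-
-int main(void)
-{
-    integer n = 2, k = 1, ld = 2;
-    doublecomplex a[2] = {{1.,1.}, {0.,1.}};                   /* n by k */
-    doublecomplex c[4] = {{1.,0.}, {0.,0.}, {0.,0.}, {1.,0.}}; /* C = I */
-    doublereal alpha = 1., beta = 1.;
-
-    zherk_("U", "N", &n, &k, &alpha, a, &ld, &beta, c, &ld);
-
-    printf("C(1,1) = %g%+gi  C(1,2) = %g%+gi  C(2,2) = %g%+gi\n",
-           c[0].r, c[0].i, c[2].r, c[2].i, c[3].r, c[3].i);
-    return 0;
-}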
-
-/* Subroutine */ int zscal_(integer *n, doublecomplex *za, doublecomplex *zx,
- integer *incx)
-{
- /* System generated locals */
- integer i__1, i__2, i__3;
- doublecomplex z__1;
-
- /* Local variables */
- static integer i__, ix;
-
-
-/*
- scales a vector by a constant.
- jack dongarra, 3/11/78.
- modified 3/93 to return if incx .le. 0.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --zx;
-
- /* Function Body */
- if (*n <= 0 || *incx <= 0) {
- return 0;
- }
- if (*incx == 1) {
- goto L20;
- }
-
-/* code for increment not equal to 1 */
-
- ix = 1;
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = ix;
- i__3 = ix;
- z__1.r = za->r * zx[i__3].r - za->i * zx[i__3].i, z__1.i = za->r * zx[
- i__3].i + za->i * zx[i__3].r;
- zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
- ix += *incx;
-/* L10: */
- }
- return 0;
-
-/* code for increment equal to 1 */
-
-L20:
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__;
- i__3 = i__;
- z__1.r = za->r * zx[i__3].r - za->i * zx[i__3].i, z__1.i = za->r * zx[
- i__3].i + za->i * zx[i__3].r;
- zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
-/* L30: */
- }
- return 0;
-} /* zscal_ */
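-
-/*
-   A minimal usage sketch for zscal_ above: zx := za*zx over n entries with
-   stride incx. Per the 3/93 modification noted in its header, the call
-   returns immediately when incx <= 0. Typedefs are assumptions mirroring
-   a typical f2c.h.
-*/
-#include <stdio.h>
-
-typedef int integer;                               /* assumed f2c typedef */
-typedef double doublereal;                         /* assumed f2c typedef */
-typedef struct { doublereal r, i; } doublecomplex; /* assumed f2c typedef */
-
-extern int zscal_(integer *n, doublecomplex *za, doublecomplex *zx,
-                  integer *incx);
-
-int main(void)
-{
-    doublecomplex zx[3] = {{1.,0.}, {2.,0.}, {3.,0.}};
-    doublecomplex za = {0., 1.};   /* scale by i */
-    integer n = 3, incx = 1;
-    int i;
-
-    zscal_(&n, &za, zx, &incx);
-    for (i = 0; i < 3; ++i)
-        printf("zx[%d] = %g%+gi\n", i, zx[i].r, zx[i].i);
-    return 0;
-}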
-
-/* Subroutine */ int zswap_(integer *n, doublecomplex *zx, integer *incx,
- doublecomplex *zy, integer *incy)
-{
- /* System generated locals */
- integer i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, ix, iy;
- static doublecomplex ztemp;
-
-
-/*
- interchanges two vectors.
- jack dongarra, 3/11/78.
- modified 12/3/93, array(1) declarations changed to array(*)
-*/
-
-
- /* Parameter adjustments */
- --zy;
- --zx;
-
- /* Function Body */
- if (*n <= 0) {
- return 0;
- }
- if ((*incx == 1 && *incy == 1)) {
- goto L20;
- }
-
-/*
- code for unequal increments or equal increments not equal
- to 1
-*/
-
- ix = 1;
- iy = 1;
- if (*incx < 0) {
- ix = (-(*n) + 1) * *incx + 1;
- }
- if (*incy < 0) {
- iy = (-(*n) + 1) * *incy + 1;
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = ix;
- ztemp.r = zx[i__2].r, ztemp.i = zx[i__2].i;
- i__2 = ix;
- i__3 = iy;
- zx[i__2].r = zy[i__3].r, zx[i__2].i = zy[i__3].i;
- i__2 = iy;
- zy[i__2].r = ztemp.r, zy[i__2].i = ztemp.i;
- ix += *incx;
- iy += *incy;
-/* L10: */
- }
- return 0;
-
-/* code for both increments equal to 1 */
-L20:
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__;
- ztemp.r = zx[i__2].r, ztemp.i = zx[i__2].i;
- i__2 = i__;
- i__3 = i__;
- zx[i__2].r = zy[i__3].r, zx[i__2].i = zy[i__3].i;
- i__2 = i__;
- zy[i__2].r = ztemp.r, zy[i__2].i = ztemp.i;
-/* L30: */
- }
- return 0;
-} /* zswap_ */
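-
-/*
-   A minimal usage sketch for zswap_ above, exchanging two length-3 vectors
-   with unit increments. A negative increment would make the routine walk
-   that vector from its far end, via the ix/iy setup in the general branch.
-   Typedefs are assumptions mirroring a typical f2c.h.
-*/
-#include <stdio.h>
-
-typedef int integer;                               /* assumed f2c typedef */
-typedef double doublereal;                         /* assumed f2c typedef */
-typedef struct { doublereal r, i; } doublecomplex; /* assumed f2c typedef */
-
-extern int zswap_(integer *n, doublecomplex *zx, integer *incx,
-                  doublecomplex *zy, integer *incy);
-
-int main(void)
-{
-    doublecomplex zx[3] = {{1.,0.}, {2.,0.}, {3.,0.}};
-    doublecomplex zy[3] = {{0.,1.}, {0.,2.}, {0.,3.}};
-    integer n = 3, inc1 = 1;
-    int i;
-
-    zswap_(&n, zx, &inc1, zy, &inc1);
-    for (i = 0; i < 3; ++i)
-        printf("zx[%d] = %g%+gi  zy[%d] = %g%+gi\n",
-               i, zx[i].r, zx[i].i, i, zy[i].r, zy[i].i);
-    return 0;
-}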
-
-/* Subroutine */ int ztrmm_(char *side, char *uplo, char *transa, char *diag,
- integer *m, integer *n, doublecomplex *alpha, doublecomplex *a,
- integer *lda, doublecomplex *b, integer *ldb)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
- i__6;
- doublecomplex z__1, z__2, z__3;
-
- /* Builtin functions */
- void d_cnjg(doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, j, k, info;
- static doublecomplex temp;
- static logical lside;
- extern logical lsame_(char *, char *);
- static integer nrowa;
- static logical upper;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical noconj, nounit;
-
-
-/*
- Purpose
- =======
-
- ZTRMM performs one of the matrix-matrix operations
-
- B := alpha*op( A )*B, or B := alpha*B*op( A )
-
- where alpha is a scalar, B is an m by n matrix, A is a unit, or
- non-unit, upper or lower triangular matrix and op( A ) is one of
-
- op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ).
-
- Parameters
- ==========
-
- SIDE - CHARACTER*1.
- On entry, SIDE specifies whether op( A ) multiplies B from
- the left or right as follows:
-
- SIDE = 'L' or 'l' B := alpha*op( A )*B.
-
- SIDE = 'R' or 'r' B := alpha*B*op( A ).
-
- Unchanged on exit.
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the matrix A is an upper or
- lower triangular matrix as follows:
-
- UPLO = 'U' or 'u' A is an upper triangular matrix.
-
- UPLO = 'L' or 'l' A is a lower triangular matrix.
-
- Unchanged on exit.
-
- TRANSA - CHARACTER*1.
- On entry, TRANSA specifies the form of op( A ) to be used in
- the matrix multiplication as follows:
-
- TRANSA = 'N' or 'n' op( A ) = A.
-
- TRANSA = 'T' or 't' op( A ) = A'.
-
- TRANSA = 'C' or 'c' op( A ) = conjg( A' ).
-
- Unchanged on exit.
-
- DIAG - CHARACTER*1.
- On entry, DIAG specifies whether or not A is unit triangular
- as follows:
-
- DIAG = 'U' or 'u' A is assumed to be unit triangular.
-
- DIAG = 'N' or 'n' A is not assumed to be unit
- triangular.
-
- Unchanged on exit.
-
- M - INTEGER.
- On entry, M specifies the number of rows of B. M must be at
- least zero.
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the number of columns of B. N must be
- at least zero.
- Unchanged on exit.
-
- ALPHA - COMPLEX*16 .
- On entry, ALPHA specifies the scalar alpha. When alpha is
- zero then A is not referenced and B need not be set before
- entry.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, k ), where k is m
- when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
- Before entry with UPLO = 'U' or 'u', the leading k by k
- upper triangular part of the array A must contain the upper
- triangular matrix and the strictly lower triangular part of
- A is not referenced.
- Before entry with UPLO = 'L' or 'l', the leading k by k
- lower triangular part of the array A must contain the lower
- triangular matrix and the strictly upper triangular part of
- A is not referenced.
- Note that when DIAG = 'U' or 'u', the diagonal elements of
- A are not referenced either, but are assumed to be unity.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. When SIDE = 'L' or 'l' then
- LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
- then LDA must be at least max( 1, n ).
- Unchanged on exit.
-
- B - COMPLEX*16 array of DIMENSION ( LDB, n ).
- Before entry, the leading m by n part of the array B must
- contain the matrix B, and on exit is overwritten by the
- transformed matrix.
-
- LDB - INTEGER.
- On entry, LDB specifies the first dimension of B as declared
- in the calling (sub) program. LDB must be at least
- max( 1, m ).
- Unchanged on exit.
-
-
- Level 3 Blas routine.
-
- -- Written on 8-February-1989.
- Jack Dongarra, Argonne National Laboratory.
- Iain Duff, AERE Harwell.
- Jeremy Du Croz, Numerical Algorithms Group Ltd.
- Sven Hammarling, Numerical Algorithms Group Ltd.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
-
- /* Function Body */
- lside = lsame_(side, "L");
- if (lside) {
- nrowa = *m;
- } else {
- nrowa = *n;
- }
- noconj = lsame_(transa, "T");
- nounit = lsame_(diag, "N");
- upper = lsame_(uplo, "U");
-
- info = 0;
- if ((! lside && ! lsame_(side, "R"))) {
- info = 1;
- } else if ((! upper && ! lsame_(uplo, "L"))) {
- info = 2;
- } else if (((! lsame_(transa, "N") && ! lsame_(
- transa, "T")) && ! lsame_(transa, "C"))) {
- info = 3;
- } else if ((! lsame_(diag, "U") && ! lsame_(diag,
- "N"))) {
- info = 4;
- } else if (*m < 0) {
- info = 5;
- } else if (*n < 0) {
- info = 6;
- } else if (*lda < max(1,nrowa)) {
- info = 9;
- } else if (*ldb < max(1,*m)) {
- info = 11;
- }
- if (info != 0) {
- xerbla_("ZTRMM ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0) {
- return 0;
- }
-
-/* And when alpha.eq.zero. */
-
- if ((alpha->r == 0. && alpha->i == 0.)) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- b[i__3].r = 0., b[i__3].i = 0.;
-/* L10: */
- }
-/* L20: */
- }
- return 0;
- }
-
-/* Start the operations. */
-
- if (lside) {
- if (lsame_(transa, "N")) {
-
-/* Form B := alpha*A*B. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (k = 1; k <= i__2; ++k) {
- i__3 = k + j * b_dim1;
- if (b[i__3].r != 0. || b[i__3].i != 0.) {
- i__3 = k + j * b_dim1;
- z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
- .i, z__1.i = alpha->r * b[i__3].i +
- alpha->i * b[i__3].r;
- temp.r = z__1.r, temp.i = z__1.i;
- i__3 = k - 1;
- for (i__ = 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * b_dim1;
- i__5 = i__ + j * b_dim1;
- i__6 = i__ + k * a_dim1;
- z__2.r = temp.r * a[i__6].r - temp.i * a[i__6]
- .i, z__2.i = temp.r * a[i__6].i +
- temp.i * a[i__6].r;
- z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
- .i + z__2.i;
- b[i__4].r = z__1.r, b[i__4].i = z__1.i;
-/* L30: */
- }
- if (nounit) {
- i__3 = k + k * a_dim1;
- z__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
- .i, z__1.i = temp.r * a[i__3].i +
- temp.i * a[i__3].r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__3 = k + j * b_dim1;
- b[i__3].r = temp.r, b[i__3].i = temp.i;
- }
-/* L40: */
- }
-/* L50: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- for (k = *m; k >= 1; --k) {
- i__2 = k + j * b_dim1;
- if (b[i__2].r != 0. || b[i__2].i != 0.) {
- i__2 = k + j * b_dim1;
- z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2]
- .i, z__1.i = alpha->r * b[i__2].i +
- alpha->i * b[i__2].r;
- temp.r = z__1.r, temp.i = z__1.i;
- i__2 = k + j * b_dim1;
- b[i__2].r = temp.r, b[i__2].i = temp.i;
- if (nounit) {
- i__2 = k + j * b_dim1;
- i__3 = k + j * b_dim1;
- i__4 = k + k * a_dim1;
- z__1.r = b[i__3].r * a[i__4].r - b[i__3].i *
- a[i__4].i, z__1.i = b[i__3].r * a[
- i__4].i + b[i__3].i * a[i__4].r;
- b[i__2].r = z__1.r, b[i__2].i = z__1.i;
- }
- i__2 = *m;
- for (i__ = k + 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- i__4 = i__ + j * b_dim1;
- i__5 = i__ + k * a_dim1;
- z__2.r = temp.r * a[i__5].r - temp.i * a[i__5]
- .i, z__2.i = temp.r * a[i__5].i +
- temp.i * a[i__5].r;
- z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
- .i + z__2.i;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L60: */
- }
- }
-/* L70: */
- }
-/* L80: */
- }
- }
- } else {
-
-/* Form B := alpha*A'*B or B := alpha*conjg( A' )*B. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- for (i__ = *m; i__ >= 1; --i__) {
- i__2 = i__ + j * b_dim1;
- temp.r = b[i__2].r, temp.i = b[i__2].i;
- if (noconj) {
- if (nounit) {
- i__2 = i__ + i__ * a_dim1;
- z__1.r = temp.r * a[i__2].r - temp.i * a[i__2]
- .i, z__1.i = temp.r * a[i__2].i +
- temp.i * a[i__2].r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__2 = i__ - 1;
- for (k = 1; k <= i__2; ++k) {
- i__3 = k + i__ * a_dim1;
- i__4 = k + j * b_dim1;
- z__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
- b[i__4].i, z__2.i = a[i__3].r * b[
- i__4].i + a[i__3].i * b[i__4].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L90: */
- }
- } else {
- if (nounit) {
- d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
- z__1.r = temp.r * z__2.r - temp.i * z__2.i,
- z__1.i = temp.r * z__2.i + temp.i *
- z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__2 = i__ - 1;
- for (k = 1; k <= i__2; ++k) {
- d_cnjg(&z__3, &a[k + i__ * a_dim1]);
- i__3 = k + j * b_dim1;
- z__2.r = z__3.r * b[i__3].r - z__3.i * b[i__3]
- .i, z__2.i = z__3.r * b[i__3].i +
- z__3.i * b[i__3].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L100: */
- }
- }
- i__2 = i__ + j * b_dim1;
- z__1.r = alpha->r * temp.r - alpha->i * temp.i,
- z__1.i = alpha->r * temp.i + alpha->i *
- temp.r;
- b[i__2].r = z__1.r, b[i__2].i = z__1.i;
-/* L110: */
- }
-/* L120: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- temp.r = b[i__3].r, temp.i = b[i__3].i;
- if (noconj) {
- if (nounit) {
- i__3 = i__ + i__ * a_dim1;
- z__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
- .i, z__1.i = temp.r * a[i__3].i +
- temp.i * a[i__3].r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__3 = *m;
- for (k = i__ + 1; k <= i__3; ++k) {
- i__4 = k + i__ * a_dim1;
- i__5 = k + j * b_dim1;
- z__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
- b[i__5].i, z__2.i = a[i__4].r * b[
- i__5].i + a[i__4].i * b[i__5].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L130: */
- }
- } else {
- if (nounit) {
- d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
- z__1.r = temp.r * z__2.r - temp.i * z__2.i,
- z__1.i = temp.r * z__2.i + temp.i *
- z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__3 = *m;
- for (k = i__ + 1; k <= i__3; ++k) {
- d_cnjg(&z__3, &a[k + i__ * a_dim1]);
- i__4 = k + j * b_dim1;
- z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4]
- .i, z__2.i = z__3.r * b[i__4].i +
- z__3.i * b[i__4].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L140: */
- }
- }
- i__3 = i__ + j * b_dim1;
- z__1.r = alpha->r * temp.r - alpha->i * temp.i,
- z__1.i = alpha->r * temp.i + alpha->i *
- temp.r;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L150: */
- }
-/* L160: */
- }
- }
- }
- } else {
- if (lsame_(transa, "N")) {
-
-/* Form B := alpha*B*A. */
-
- if (upper) {
- for (j = *n; j >= 1; --j) {
- temp.r = alpha->r, temp.i = alpha->i;
- if (nounit) {
- i__1 = j + j * a_dim1;
- z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
- z__1.i = temp.r * a[i__1].i + temp.i * a[i__1]
- .r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__ + j * b_dim1;
- i__3 = i__ + j * b_dim1;
- z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
- z__1.i = temp.r * b[i__3].i + temp.i * b[i__3]
- .r;
- b[i__2].r = z__1.r, b[i__2].i = z__1.i;
-/* L170: */
- }
- i__1 = j - 1;
- for (k = 1; k <= i__1; ++k) {
- i__2 = k + j * a_dim1;
- if (a[i__2].r != 0. || a[i__2].i != 0.) {
- i__2 = k + j * a_dim1;
- z__1.r = alpha->r * a[i__2].r - alpha->i * a[i__2]
- .i, z__1.i = alpha->r * a[i__2].i +
- alpha->i * a[i__2].r;
- temp.r = z__1.r, temp.i = z__1.i;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- i__4 = i__ + j * b_dim1;
- i__5 = i__ + k * b_dim1;
- z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
- .i, z__2.i = temp.r * b[i__5].i +
- temp.i * b[i__5].r;
- z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
- .i + z__2.i;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L180: */
- }
- }
-/* L190: */
- }
-/* L200: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- temp.r = alpha->r, temp.i = alpha->i;
- if (nounit) {
- i__2 = j + j * a_dim1;
- z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
- z__1.i = temp.r * a[i__2].i + temp.i * a[i__2]
- .r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- i__4 = i__ + j * b_dim1;
- z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
- z__1.i = temp.r * b[i__4].i + temp.i * b[i__4]
- .r;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L210: */
- }
- i__2 = *n;
- for (k = j + 1; k <= i__2; ++k) {
- i__3 = k + j * a_dim1;
- if (a[i__3].r != 0. || a[i__3].i != 0.) {
- i__3 = k + j * a_dim1;
- z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3]
- .i, z__1.i = alpha->r * a[i__3].i +
- alpha->i * a[i__3].r;
- temp.r = z__1.r, temp.i = z__1.i;
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * b_dim1;
- i__5 = i__ + j * b_dim1;
- i__6 = i__ + k * b_dim1;
- z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
- .i, z__2.i = temp.r * b[i__6].i +
- temp.i * b[i__6].r;
- z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
- .i + z__2.i;
- b[i__4].r = z__1.r, b[i__4].i = z__1.i;
-/* L220: */
- }
- }
-/* L230: */
- }
-/* L240: */
- }
- }
- } else {
-
-/* Form B := alpha*B*A' or B := alpha*B*conjg( A' ). */
-
- if (upper) {
- i__1 = *n;
- for (k = 1; k <= i__1; ++k) {
- i__2 = k - 1;
- for (j = 1; j <= i__2; ++j) {
- i__3 = j + k * a_dim1;
- if (a[i__3].r != 0. || a[i__3].i != 0.) {
- if (noconj) {
- i__3 = j + k * a_dim1;
- z__1.r = alpha->r * a[i__3].r - alpha->i * a[
- i__3].i, z__1.i = alpha->r * a[i__3]
- .i + alpha->i * a[i__3].r;
- temp.r = z__1.r, temp.i = z__1.i;
- } else {
- d_cnjg(&z__2, &a[j + k * a_dim1]);
- z__1.r = alpha->r * z__2.r - alpha->i *
- z__2.i, z__1.i = alpha->r * z__2.i +
- alpha->i * z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * b_dim1;
- i__5 = i__ + j * b_dim1;
- i__6 = i__ + k * b_dim1;
- z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
- .i, z__2.i = temp.r * b[i__6].i +
- temp.i * b[i__6].r;
- z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
- .i + z__2.i;
- b[i__4].r = z__1.r, b[i__4].i = z__1.i;
-/* L250: */
- }
- }
-/* L260: */
- }
- temp.r = alpha->r, temp.i = alpha->i;
- if (nounit) {
- if (noconj) {
- i__2 = k + k * a_dim1;
- z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
- z__1.i = temp.r * a[i__2].i + temp.i * a[
- i__2].r;
- temp.r = z__1.r, temp.i = z__1.i;
- } else {
- d_cnjg(&z__2, &a[k + k * a_dim1]);
- z__1.r = temp.r * z__2.r - temp.i * z__2.i,
- z__1.i = temp.r * z__2.i + temp.i *
- z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- }
- if (temp.r != 1. || temp.i != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + k * b_dim1;
- i__4 = i__ + k * b_dim1;
- z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
- z__1.i = temp.r * b[i__4].i + temp.i * b[
- i__4].r;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L270: */
- }
- }
-/* L280: */
- }
- } else {
- for (k = *n; k >= 1; --k) {
- i__1 = *n;
- for (j = k + 1; j <= i__1; ++j) {
- i__2 = j + k * a_dim1;
- if (a[i__2].r != 0. || a[i__2].i != 0.) {
- if (noconj) {
- i__2 = j + k * a_dim1;
- z__1.r = alpha->r * a[i__2].r - alpha->i * a[
- i__2].i, z__1.i = alpha->r * a[i__2]
- .i + alpha->i * a[i__2].r;
- temp.r = z__1.r, temp.i = z__1.i;
- } else {
- d_cnjg(&z__2, &a[j + k * a_dim1]);
- z__1.r = alpha->r * z__2.r - alpha->i *
- z__2.i, z__1.i = alpha->r * z__2.i +
- alpha->i * z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- i__4 = i__ + j * b_dim1;
- i__5 = i__ + k * b_dim1;
- z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
- .i, z__2.i = temp.r * b[i__5].i +
- temp.i * b[i__5].r;
- z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
- .i + z__2.i;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L290: */
- }
- }
-/* L300: */
- }
- temp.r = alpha->r, temp.i = alpha->i;
- if (nounit) {
- if (noconj) {
- i__1 = k + k * a_dim1;
- z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
- z__1.i = temp.r * a[i__1].i + temp.i * a[
- i__1].r;
- temp.r = z__1.r, temp.i = z__1.i;
- } else {
- d_cnjg(&z__2, &a[k + k * a_dim1]);
- z__1.r = temp.r * z__2.r - temp.i * z__2.i,
- z__1.i = temp.r * z__2.i + temp.i *
- z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- }
- if (temp.r != 1. || temp.i != 0.) {
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__ + k * b_dim1;
- i__3 = i__ + k * b_dim1;
- z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
- z__1.i = temp.r * b[i__3].i + temp.i * b[
- i__3].r;
- b[i__2].r = z__1.r, b[i__2].i = z__1.i;
-/* L310: */
- }
- }
-/* L320: */
- }
- }
- }
- }
-
- return 0;
-
-/* End of ZTRMM . */
-
-} /* ztrmm_ */
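-
-/*
-   A minimal usage sketch for ztrmm_ above: B := alpha*A*B with SIDE = 'L',
-   a non-unit upper triangular 2 by 2 A, and a 2 by 1 B. The strictly
-   lower triangle of A is never read, so a[1] may hold anything. Typedefs
-   are assumptions mirroring a typical f2c.h.
-*/
-#include <stdio.h>
-
-typedef int integer;                               /* assumed f2c typedef */
-typedef double doublereal;                         /* assumed f2c typedef */
-typedef struct { doublereal r, i; } doublecomplex; /* assumed f2c typedef */
-
-extern int ztrmm_(char *side, char *uplo, char *transa, char *diag,
-                  integer *m, integer *n, doublecomplex *alpha,
-                  doublecomplex *a, integer *lda,
-                  doublecomplex *b, integer *ldb);
-
-int main(void)
-{
-    /* Column-major A = [[1, 2], [0, 3]]; b = [1, 1]'. */
-    doublecomplex a[4] = {{1.,0.}, {0.,0.}, {2.,0.}, {3.,0.}};
-    doublecomplex b[2] = {{1.,0.}, {1.,0.}};
-    doublecomplex alpha = {1., 0.};
-    integer m = 2, n = 1, lda = 2, ldb = 2;
-
-    ztrmm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);
-
-    /* Expected result: B = [3, 3]'. */
-    printf("B = [ %g%+gi, %g%+gi ]\n", b[0].r, b[0].i, b[1].r, b[1].i);
-    return 0;
-}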
-
-/* Subroutine */ int ztrmv_(char *uplo, char *trans, char *diag, integer *n,
- doublecomplex *a, integer *lda, doublecomplex *x, integer *incx)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
- doublecomplex z__1, z__2, z__3;
-
- /* Builtin functions */
- void d_cnjg(doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, j, ix, jx, kx, info;
- static doublecomplex temp;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical noconj, nounit;
-
-
-/*
- Purpose
- =======
-
- ZTRMV performs one of the matrix-vector operations
-
- x := A*x, or x := A'*x, or x := conjg( A' )*x,
-
- where x is an n element vector and A is an n by n unit, or non-unit,
- upper or lower triangular matrix.
-
- Parameters
- ==========
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the matrix is an upper or
- lower triangular matrix as follows:
-
- UPLO = 'U' or 'u' A is an upper triangular matrix.
-
- UPLO = 'L' or 'l' A is a lower triangular matrix.
-
- Unchanged on exit.
-
- TRANS - CHARACTER*1.
- On entry, TRANS specifies the operation to be performed as
- follows:
-
- TRANS = 'N' or 'n' x := A*x.
-
- TRANS = 'T' or 't' x := A'*x.
-
- TRANS = 'C' or 'c' x := conjg( A' )*x.
-
- Unchanged on exit.
-
- DIAG - CHARACTER*1.
- On entry, DIAG specifies whether or not A is unit
- triangular as follows:
-
- DIAG = 'U' or 'u' A is assumed to be unit triangular.
-
- DIAG = 'N' or 'n' A is not assumed to be unit
- triangular.
-
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the order of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, n ).
- Before entry with UPLO = 'U' or 'u', the leading n by n
- upper triangular part of the array A must contain the upper
- triangular matrix and the strictly lower triangular part of
- A is not referenced.
- Before entry with UPLO = 'L' or 'l', the leading n by n
- lower triangular part of the array A must contain the lower
- triangular matrix and the strictly upper triangular part of
- A is not referenced.
- Note that when DIAG = 'U' or 'u', the diagonal elements of
- A are not referenced either, but are assumed to be unity.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, n ).
- Unchanged on exit.
-
- X - COMPLEX*16 array of dimension at least
- ( 1 + ( n - 1 )*abs( INCX ) ).
- Before entry, the incremented array X must contain the n
- element vector x. On exit, X is overwritten with the
- transformed vector x.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --x;
-
- /* Function Body */
- info = 0;
- if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
- info = 1;
- } else if (((! lsame_(trans, "N") && ! lsame_(trans,
- "T")) && ! lsame_(trans, "C"))) {
- info = 2;
- } else if ((! lsame_(diag, "U") && ! lsame_(diag,
- "N"))) {
- info = 3;
- } else if (*n < 0) {
- info = 4;
- } else if (*lda < max(1,*n)) {
- info = 6;
- } else if (*incx == 0) {
- info = 8;
- }
- if (info != 0) {
- xerbla_("ZTRMV ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0) {
- return 0;
- }
-
- noconj = lsame_(trans, "T");
- nounit = lsame_(diag, "N");
-
-/*
- Set up the start point in X if the increment is not unity. This
- will be ( N - 1 )*INCX too small for descending loops.
-*/
-
- if (*incx <= 0) {
- kx = 1 - (*n - 1) * *incx;
- } else if (*incx != 1) {
- kx = 1;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through A.
-*/
-
- if (lsame_(trans, "N")) {
-
-/* Form x := A*x. */
-
- if (lsame_(uplo, "U")) {
- if (*incx == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- if (x[i__2].r != 0. || x[i__2].i != 0.) {
- i__2 = j;
- temp.r = x[i__2].r, temp.i = x[i__2].i;
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__;
- i__4 = i__;
- i__5 = i__ + j * a_dim1;
- z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
- z__2.i = temp.r * a[i__5].i + temp.i * a[
- i__5].r;
- z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i +
- z__2.i;
- x[i__3].r = z__1.r, x[i__3].i = z__1.i;
-/* L10: */
- }
- if (nounit) {
- i__2 = j;
- i__3 = j;
- i__4 = j + j * a_dim1;
- z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
- i__4].i, z__1.i = x[i__3].r * a[i__4].i +
- x[i__3].i * a[i__4].r;
- x[i__2].r = z__1.r, x[i__2].i = z__1.i;
- }
- }
-/* L20: */
- }
- } else {
- jx = kx;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jx;
- if (x[i__2].r != 0. || x[i__2].i != 0.) {
- i__2 = jx;
- temp.r = x[i__2].r, temp.i = x[i__2].i;
- ix = kx;
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = ix;
- i__4 = ix;
- i__5 = i__ + j * a_dim1;
- z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
- z__2.i = temp.r * a[i__5].i + temp.i * a[
- i__5].r;
- z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i +
- z__2.i;
- x[i__3].r = z__1.r, x[i__3].i = z__1.i;
- ix += *incx;
-/* L30: */
- }
- if (nounit) {
- i__2 = jx;
- i__3 = jx;
- i__4 = j + j * a_dim1;
- z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
- i__4].i, z__1.i = x[i__3].r * a[i__4].i +
- x[i__3].i * a[i__4].r;
- x[i__2].r = z__1.r, x[i__2].i = z__1.i;
- }
- }
- jx += *incx;
-/* L40: */
- }
- }
- } else {
- if (*incx == 1) {
- for (j = *n; j >= 1; --j) {
- i__1 = j;
- if (x[i__1].r != 0. || x[i__1].i != 0.) {
- i__1 = j;
- temp.r = x[i__1].r, temp.i = x[i__1].i;
- i__1 = j + 1;
- for (i__ = *n; i__ >= i__1; --i__) {
- i__2 = i__;
- i__3 = i__;
- i__4 = i__ + j * a_dim1;
- z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
- z__2.i = temp.r * a[i__4].i + temp.i * a[
- i__4].r;
- z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
- z__2.i;
- x[i__2].r = z__1.r, x[i__2].i = z__1.i;
-/* L50: */
- }
- if (nounit) {
- i__1 = j;
- i__2 = j;
- i__3 = j + j * a_dim1;
- z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
- i__3].i, z__1.i = x[i__2].r * a[i__3].i +
- x[i__2].i * a[i__3].r;
- x[i__1].r = z__1.r, x[i__1].i = z__1.i;
- }
- }
-/* L60: */
- }
- } else {
- kx += (*n - 1) * *incx;
- jx = kx;
- for (j = *n; j >= 1; --j) {
- i__1 = jx;
- if (x[i__1].r != 0. || x[i__1].i != 0.) {
- i__1 = jx;
- temp.r = x[i__1].r, temp.i = x[i__1].i;
- ix = kx;
- i__1 = j + 1;
- for (i__ = *n; i__ >= i__1; --i__) {
- i__2 = ix;
- i__3 = ix;
- i__4 = i__ + j * a_dim1;
- z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
- z__2.i = temp.r * a[i__4].i + temp.i * a[
- i__4].r;
- z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
- z__2.i;
- x[i__2].r = z__1.r, x[i__2].i = z__1.i;
- ix -= *incx;
-/* L70: */
- }
- if (nounit) {
- i__1 = jx;
- i__2 = jx;
- i__3 = j + j * a_dim1;
- z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
- i__3].i, z__1.i = x[i__2].r * a[i__3].i +
- x[i__2].i * a[i__3].r;
- x[i__1].r = z__1.r, x[i__1].i = z__1.i;
- }
- }
- jx -= *incx;
-/* L80: */
- }
- }
- }
- } else {
-
-/* Form x := A'*x or x := conjg( A' )*x. */
-
- if (lsame_(uplo, "U")) {
- if (*incx == 1) {
- for (j = *n; j >= 1; --j) {
- i__1 = j;
- temp.r = x[i__1].r, temp.i = x[i__1].i;
- if (noconj) {
- if (nounit) {
- i__1 = j + j * a_dim1;
- z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
- z__1.i = temp.r * a[i__1].i + temp.i * a[
- i__1].r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- for (i__ = j - 1; i__ >= 1; --i__) {
- i__1 = i__ + j * a_dim1;
- i__2 = i__;
- z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
- i__2].i, z__2.i = a[i__1].r * x[i__2].i +
- a[i__1].i * x[i__2].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L90: */
- }
- } else {
- if (nounit) {
- d_cnjg(&z__2, &a[j + j * a_dim1]);
- z__1.r = temp.r * z__2.r - temp.i * z__2.i,
- z__1.i = temp.r * z__2.i + temp.i *
- z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- for (i__ = j - 1; i__ >= 1; --i__) {
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__1 = i__;
- z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
- z__2.i = z__3.r * x[i__1].i + z__3.i * x[
- i__1].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L100: */
- }
- }
- i__1 = j;
- x[i__1].r = temp.r, x[i__1].i = temp.i;
-/* L110: */
- }
- } else {
- jx = kx + (*n - 1) * *incx;
- for (j = *n; j >= 1; --j) {
- i__1 = jx;
- temp.r = x[i__1].r, temp.i = x[i__1].i;
- ix = jx;
- if (noconj) {
- if (nounit) {
- i__1 = j + j * a_dim1;
- z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
- z__1.i = temp.r * a[i__1].i + temp.i * a[
- i__1].r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- for (i__ = j - 1; i__ >= 1; --i__) {
- ix -= *incx;
- i__1 = i__ + j * a_dim1;
- i__2 = ix;
- z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
- i__2].i, z__2.i = a[i__1].r * x[i__2].i +
- a[i__1].i * x[i__2].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L120: */
- }
- } else {
- if (nounit) {
- d_cnjg(&z__2, &a[j + j * a_dim1]);
- z__1.r = temp.r * z__2.r - temp.i * z__2.i,
- z__1.i = temp.r * z__2.i + temp.i *
- z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- for (i__ = j - 1; i__ >= 1; --i__) {
- ix -= *incx;
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__1 = ix;
- z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
- z__2.i = z__3.r * x[i__1].i + z__3.i * x[
- i__1].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L130: */
- }
- }
- i__1 = jx;
- x[i__1].r = temp.r, x[i__1].i = temp.i;
- jx -= *incx;
-/* L140: */
- }
- }
- } else {
- if (*incx == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- temp.r = x[i__2].r, temp.i = x[i__2].i;
- if (noconj) {
- if (nounit) {
- i__2 = j + j * a_dim1;
- z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
- z__1.i = temp.r * a[i__2].i + temp.i * a[
- i__2].r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = i__;
- z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
- i__4].i, z__2.i = a[i__3].r * x[i__4].i +
- a[i__3].i * x[i__4].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L150: */
- }
- } else {
- if (nounit) {
- d_cnjg(&z__2, &a[j + j * a_dim1]);
- z__1.r = temp.r * z__2.r - temp.i * z__2.i,
- z__1.i = temp.r * z__2.i + temp.i *
- z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__3 = i__;
- z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
- z__2.i = z__3.r * x[i__3].i + z__3.i * x[
- i__3].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L160: */
- }
- }
- i__2 = j;
- x[i__2].r = temp.r, x[i__2].i = temp.i;
-/* L170: */
- }
- } else {
- jx = kx;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jx;
- temp.r = x[i__2].r, temp.i = x[i__2].i;
- ix = jx;
- if (noconj) {
- if (nounit) {
- i__2 = j + j * a_dim1;
- z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
- z__1.i = temp.r * a[i__2].i + temp.i * a[
- i__2].r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- ix += *incx;
- i__3 = i__ + j * a_dim1;
- i__4 = ix;
- z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
- i__4].i, z__2.i = a[i__3].r * x[i__4].i +
- a[i__3].i * x[i__4].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L180: */
- }
- } else {
- if (nounit) {
- d_cnjg(&z__2, &a[j + j * a_dim1]);
- z__1.r = temp.r * z__2.r - temp.i * z__2.i,
- z__1.i = temp.r * z__2.i + temp.i *
- z__2.r;
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- ix += *incx;
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__3 = ix;
- z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
- z__2.i = z__3.r * x[i__3].i + z__3.i * x[
- i__3].r;
- z__1.r = temp.r + z__2.r, z__1.i = temp.i +
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L190: */
- }
- }
- i__2 = jx;
- x[i__2].r = temp.r, x[i__2].i = temp.i;
- jx += *incx;
-/* L200: */
- }
- }
- }
- }
-
- return 0;
-
-/* End of ZTRMV . */
-
-} /* ztrmv_ */
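For reference, a minimal sketch of driving the deleted ztrmv_ through its f2c interface: it computes x := A*x for a small upper triangular A stored column-major. The typedefs below mirror the usual f2c.h definitions and are an assumption; adjust them to the local header and link against the translated BLAS sources.

#include <stdio.h>

typedef int integer;                        /* assumption: f2c.h may use long */
typedef double doublereal;
typedef struct { doublereal r, i; } doublecomplex;

extern int ztrmv_(char *uplo, char *trans, char *diag, integer *n,
                  doublecomplex *a, integer *lda, doublecomplex *x,
                  integer *incx);

int main(void)
{
    /* A = [ 1  2+i ]  column-major with LDA = 2; the strictly lower
           [ 0  3   ]  part is never referenced when UPLO = 'U'.     */
    doublecomplex a[4] = { {1,0}, {0,0}, {2,1}, {3,0} };
    doublecomplex x[2] = { {1,0}, {1,0} };
    integer n = 2, lda = 2, incx = 1;

    ztrmv_("U", "N", "N", &n, a, &lda, x, &incx);

    /* Expect x = (3+i, 3): x1 = 1*1 + (2+i)*1, x2 = 3*1. */
    printf("(%g,%g) (%g,%g)\n", x[0].r, x[0].i, x[1].r, x[1].i);
    return 0;
}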
-
-/* Subroutine */ int ztrsm_(char *side, char *uplo, char *transa, char *diag,
- integer *m, integer *n, doublecomplex *alpha, doublecomplex *a,
- integer *lda, doublecomplex *b, integer *ldb)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
- i__6, i__7;
- doublecomplex z__1, z__2, z__3;
-
- /* Builtin functions */
- void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
- doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, j, k, info;
- static doublecomplex temp;
- static logical lside;
- extern logical lsame_(char *, char *);
- static integer nrowa;
- static logical upper;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical noconj, nounit;
-
-
-/*
- Purpose
- =======
-
- ZTRSM solves one of the matrix equations
-
- op( A )*X = alpha*B, or X*op( A ) = alpha*B,
-
- where alpha is a scalar, X and B are m by n matrices, A is a unit, or
- non-unit, upper or lower triangular matrix and op( A ) is one of
-
- op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ).
-
- The matrix X is overwritten on B.
-
- Parameters
- ==========
-
- SIDE - CHARACTER*1.
- On entry, SIDE specifies whether op( A ) appears on the left
- or right of X as follows:
-
- SIDE = 'L' or 'l' op( A )*X = alpha*B.
-
- SIDE = 'R' or 'r' X*op( A ) = alpha*B.
-
- Unchanged on exit.
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the matrix A is an upper or
- lower triangular matrix as follows:
-
- UPLO = 'U' or 'u' A is an upper triangular matrix.
-
- UPLO = 'L' or 'l' A is a lower triangular matrix.
-
- Unchanged on exit.
-
- TRANSA - CHARACTER*1.
- On entry, TRANSA specifies the form of op( A ) to be used in
- the matrix multiplication as follows:
-
- TRANSA = 'N' or 'n' op( A ) = A.
-
- TRANSA = 'T' or 't' op( A ) = A'.
-
- TRANSA = 'C' or 'c' op( A ) = conjg( A' ).
-
- Unchanged on exit.
-
- DIAG - CHARACTER*1.
- On entry, DIAG specifies whether or not A is unit triangular
- as follows:
-
- DIAG = 'U' or 'u' A is assumed to be unit triangular.
-
- DIAG = 'N' or 'n' A is not assumed to be unit
- triangular.
-
- Unchanged on exit.
-
- M - INTEGER.
- On entry, M specifies the number of rows of B. M must be at
- least zero.
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the number of columns of B. N must be
- at least zero.
- Unchanged on exit.
-
- ALPHA - COMPLEX*16 .
- On entry, ALPHA specifies the scalar alpha. When alpha is
- zero then A is not referenced and B need not be set before
- entry.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, k ), where k is m
- when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
- Before entry with UPLO = 'U' or 'u', the leading k by k
- upper triangular part of the array A must contain the upper
- triangular matrix and the strictly lower triangular part of
- A is not referenced.
- Before entry with UPLO = 'L' or 'l', the leading k by k
- lower triangular part of the array A must contain the lower
- triangular matrix and the strictly upper triangular part of
- A is not referenced.
- Note that when DIAG = 'U' or 'u', the diagonal elements of
- A are not referenced either, but are assumed to be unity.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. When SIDE = 'L' or 'l' then
- LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
- then LDA must be at least max( 1, n ).
- Unchanged on exit.
-
- B - COMPLEX*16 array of DIMENSION ( LDB, n ).
- Before entry, the leading m by n part of the array B must
- contain the right-hand side matrix B, and on exit is
- overwritten by the solution matrix X.
-
- LDB - INTEGER.
- On entry, LDB specifies the first dimension of B as declared
- in the calling (sub) program. LDB must be at least
- max( 1, m ).
- Unchanged on exit.
-
-
- Level 3 Blas routine.
-
- -- Written on 8-February-1989.
- Jack Dongarra, Argonne National Laboratory.
- Iain Duff, AERE Harwell.
- Jeremy Du Croz, Numerical Algorithms Group Ltd.
- Sven Hammarling, Numerical Algorithms Group Ltd.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
-
- /* Function Body */
- lside = lsame_(side, "L");
- if (lside) {
- nrowa = *m;
- } else {
- nrowa = *n;
- }
- noconj = lsame_(transa, "T");
- nounit = lsame_(diag, "N");
- upper = lsame_(uplo, "U");
-
- info = 0;
- if ((! lside && ! lsame_(side, "R"))) {
- info = 1;
- } else if ((! upper && ! lsame_(uplo, "L"))) {
- info = 2;
- } else if (((! lsame_(transa, "N") && ! lsame_(
- transa, "T")) && ! lsame_(transa, "C"))) {
- info = 3;
- } else if ((! lsame_(diag, "U") && ! lsame_(diag,
- "N"))) {
- info = 4;
- } else if (*m < 0) {
- info = 5;
- } else if (*n < 0) {
- info = 6;
- } else if (*lda < max(1,nrowa)) {
- info = 9;
- } else if (*ldb < max(1,*m)) {
- info = 11;
- }
- if (info != 0) {
- xerbla_("ZTRSM ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0) {
- return 0;
- }
-
-/* And when alpha.eq.zero. */
-
- if ((alpha->r == 0. && alpha->i == 0.)) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- b[i__3].r = 0., b[i__3].i = 0.;
-/* L10: */
- }
-/* L20: */
- }
- return 0;
- }
-
-/* Start the operations. */
-
- if (lside) {
- if (lsame_(transa, "N")) {
-
-/* Form B := alpha*inv( A )*B. */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (alpha->r != 1. || alpha->i != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- i__4 = i__ + j * b_dim1;
- z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
- .i, z__1.i = alpha->r * b[i__4].i +
- alpha->i * b[i__4].r;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L30: */
- }
- }
- for (k = *m; k >= 1; --k) {
- i__2 = k + j * b_dim1;
- if (b[i__2].r != 0. || b[i__2].i != 0.) {
- if (nounit) {
- i__2 = k + j * b_dim1;
- z_div(&z__1, &b[k + j * b_dim1], &a[k + k *
- a_dim1]);
- b[i__2].r = z__1.r, b[i__2].i = z__1.i;
- }
- i__2 = k - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- i__4 = i__ + j * b_dim1;
- i__5 = k + j * b_dim1;
- i__6 = i__ + k * a_dim1;
- z__2.r = b[i__5].r * a[i__6].r - b[i__5].i *
- a[i__6].i, z__2.i = b[i__5].r * a[
- i__6].i + b[i__5].i * a[i__6].r;
- z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
- .i - z__2.i;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L40: */
- }
- }
-/* L50: */
- }
-/* L60: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (alpha->r != 1. || alpha->i != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- i__4 = i__ + j * b_dim1;
- z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
- .i, z__1.i = alpha->r * b[i__4].i +
- alpha->i * b[i__4].r;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L70: */
- }
- }
- i__2 = *m;
- for (k = 1; k <= i__2; ++k) {
- i__3 = k + j * b_dim1;
- if (b[i__3].r != 0. || b[i__3].i != 0.) {
- if (nounit) {
- i__3 = k + j * b_dim1;
- z_div(&z__1, &b[k + j * b_dim1], &a[k + k *
- a_dim1]);
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
- }
- i__3 = *m;
- for (i__ = k + 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * b_dim1;
- i__5 = i__ + j * b_dim1;
- i__6 = k + j * b_dim1;
- i__7 = i__ + k * a_dim1;
- z__2.r = b[i__6].r * a[i__7].r - b[i__6].i *
- a[i__7].i, z__2.i = b[i__6].r * a[
- i__7].i + b[i__6].i * a[i__7].r;
- z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
- .i - z__2.i;
- b[i__4].r = z__1.r, b[i__4].i = z__1.i;
-/* L80: */
- }
- }
-/* L90: */
- }
-/* L100: */
- }
- }
- } else {
-
-/*
- Form B := alpha*inv( A' )*B
- or B := alpha*inv( conjg( A' ) )*B.
-*/
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
- z__1.i = alpha->r * b[i__3].i + alpha->i * b[
- i__3].r;
- temp.r = z__1.r, temp.i = z__1.i;
- if (noconj) {
- i__3 = i__ - 1;
- for (k = 1; k <= i__3; ++k) {
- i__4 = k + i__ * a_dim1;
- i__5 = k + j * b_dim1;
- z__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
- b[i__5].i, z__2.i = a[i__4].r * b[
- i__5].i + a[i__4].i * b[i__5].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L110: */
- }
- if (nounit) {
- z_div(&z__1, &temp, &a[i__ + i__ * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- } else {
- i__3 = i__ - 1;
- for (k = 1; k <= i__3; ++k) {
- d_cnjg(&z__3, &a[k + i__ * a_dim1]);
- i__4 = k + j * b_dim1;
- z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4]
- .i, z__2.i = z__3.r * b[i__4].i +
- z__3.i * b[i__4].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L120: */
- }
- if (nounit) {
- d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
- z_div(&z__1, &temp, &z__2);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- }
- i__3 = i__ + j * b_dim1;
- b[i__3].r = temp.r, b[i__3].i = temp.i;
-/* L130: */
- }
-/* L140: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- for (i__ = *m; i__ >= 1; --i__) {
- i__2 = i__ + j * b_dim1;
- z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2].i,
- z__1.i = alpha->r * b[i__2].i + alpha->i * b[
- i__2].r;
- temp.r = z__1.r, temp.i = z__1.i;
- if (noconj) {
- i__2 = *m;
- for (k = i__ + 1; k <= i__2; ++k) {
- i__3 = k + i__ * a_dim1;
- i__4 = k + j * b_dim1;
- z__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
- b[i__4].i, z__2.i = a[i__3].r * b[
- i__4].i + a[i__3].i * b[i__4].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L150: */
- }
- if (nounit) {
- z_div(&z__1, &temp, &a[i__ + i__ * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- } else {
- i__2 = *m;
- for (k = i__ + 1; k <= i__2; ++k) {
- d_cnjg(&z__3, &a[k + i__ * a_dim1]);
- i__3 = k + j * b_dim1;
- z__2.r = z__3.r * b[i__3].r - z__3.i * b[i__3]
- .i, z__2.i = z__3.r * b[i__3].i +
- z__3.i * b[i__3].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L160: */
- }
- if (nounit) {
- d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
- z_div(&z__1, &temp, &z__2);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- }
- i__2 = i__ + j * b_dim1;
- b[i__2].r = temp.r, b[i__2].i = temp.i;
-/* L170: */
- }
-/* L180: */
- }
- }
- }
- } else {
- if (lsame_(transa, "N")) {
-
-/* Form B := alpha*B*inv( A ). */
-
- if (upper) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (alpha->r != 1. || alpha->i != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- i__4 = i__ + j * b_dim1;
- z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
- .i, z__1.i = alpha->r * b[i__4].i +
- alpha->i * b[i__4].r;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L190: */
- }
- }
- i__2 = j - 1;
- for (k = 1; k <= i__2; ++k) {
- i__3 = k + j * a_dim1;
- if (a[i__3].r != 0. || a[i__3].i != 0.) {
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * b_dim1;
- i__5 = i__ + j * b_dim1;
- i__6 = k + j * a_dim1;
- i__7 = i__ + k * b_dim1;
- z__2.r = a[i__6].r * b[i__7].r - a[i__6].i *
- b[i__7].i, z__2.i = a[i__6].r * b[
- i__7].i + a[i__6].i * b[i__7].r;
- z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
- .i - z__2.i;
- b[i__4].r = z__1.r, b[i__4].i = z__1.i;
-/* L200: */
- }
- }
-/* L210: */
- }
- if (nounit) {
- z_div(&z__1, &c_b359, &a[j + j * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- i__4 = i__ + j * b_dim1;
- z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
- z__1.i = temp.r * b[i__4].i + temp.i * b[
- i__4].r;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L220: */
- }
- }
-/* L230: */
- }
- } else {
- for (j = *n; j >= 1; --j) {
- if (alpha->r != 1. || alpha->i != 0.) {
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__ + j * b_dim1;
- i__3 = i__ + j * b_dim1;
- z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
- .i, z__1.i = alpha->r * b[i__3].i +
- alpha->i * b[i__3].r;
- b[i__2].r = z__1.r, b[i__2].i = z__1.i;
-/* L240: */
- }
- }
- i__1 = *n;
- for (k = j + 1; k <= i__1; ++k) {
- i__2 = k + j * a_dim1;
- if (a[i__2].r != 0. || a[i__2].i != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- i__4 = i__ + j * b_dim1;
- i__5 = k + j * a_dim1;
- i__6 = i__ + k * b_dim1;
- z__2.r = a[i__5].r * b[i__6].r - a[i__5].i *
- b[i__6].i, z__2.i = a[i__5].r * b[
- i__6].i + a[i__5].i * b[i__6].r;
- z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
- .i - z__2.i;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L250: */
- }
- }
-/* L260: */
- }
- if (nounit) {
- z_div(&z__1, &c_b359, &a[j + j * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__ + j * b_dim1;
- i__3 = i__ + j * b_dim1;
- z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
- z__1.i = temp.r * b[i__3].i + temp.i * b[
- i__3].r;
- b[i__2].r = z__1.r, b[i__2].i = z__1.i;
-/* L270: */
- }
- }
-/* L280: */
- }
- }
- } else {
-
-/*
- Form B := alpha*B*inv( A' )
- or B := alpha*B*inv( conjg( A' ) ).
-*/
-
- if (upper) {
- for (k = *n; k >= 1; --k) {
- if (nounit) {
- if (noconj) {
- z_div(&z__1, &c_b359, &a[k + k * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- } else {
- d_cnjg(&z__2, &a[k + k * a_dim1]);
- z_div(&z__1, &c_b359, &z__2);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__ + k * b_dim1;
- i__3 = i__ + k * b_dim1;
- z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
- z__1.i = temp.r * b[i__3].i + temp.i * b[
- i__3].r;
- b[i__2].r = z__1.r, b[i__2].i = z__1.i;
-/* L290: */
- }
- }
- i__1 = k - 1;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j + k * a_dim1;
- if (a[i__2].r != 0. || a[i__2].i != 0.) {
- if (noconj) {
- i__2 = j + k * a_dim1;
- temp.r = a[i__2].r, temp.i = a[i__2].i;
- } else {
- d_cnjg(&z__1, &a[j + k * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * b_dim1;
- i__4 = i__ + j * b_dim1;
- i__5 = i__ + k * b_dim1;
- z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
- .i, z__2.i = temp.r * b[i__5].i +
- temp.i * b[i__5].r;
- z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
- .i - z__2.i;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L300: */
- }
- }
-/* L310: */
- }
- if (alpha->r != 1. || alpha->i != 0.) {
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = i__ + k * b_dim1;
- i__3 = i__ + k * b_dim1;
- z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
- .i, z__1.i = alpha->r * b[i__3].i +
- alpha->i * b[i__3].r;
- b[i__2].r = z__1.r, b[i__2].i = z__1.i;
-/* L320: */
- }
- }
-/* L330: */
- }
- } else {
- i__1 = *n;
- for (k = 1; k <= i__1; ++k) {
- if (nounit) {
- if (noconj) {
- z_div(&z__1, &c_b359, &a[k + k * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- } else {
- d_cnjg(&z__2, &a[k + k * a_dim1]);
- z_div(&z__1, &c_b359, &z__2);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + k * b_dim1;
- i__4 = i__ + k * b_dim1;
- z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
- z__1.i = temp.r * b[i__4].i + temp.i * b[
- i__4].r;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L340: */
- }
- }
- i__2 = *n;
- for (j = k + 1; j <= i__2; ++j) {
- i__3 = j + k * a_dim1;
- if (a[i__3].r != 0. || a[i__3].i != 0.) {
- if (noconj) {
- i__3 = j + k * a_dim1;
- temp.r = a[i__3].r, temp.i = a[i__3].i;
- } else {
- d_cnjg(&z__1, &a[j + k * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- i__3 = *m;
- for (i__ = 1; i__ <= i__3; ++i__) {
- i__4 = i__ + j * b_dim1;
- i__5 = i__ + j * b_dim1;
- i__6 = i__ + k * b_dim1;
- z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
- .i, z__2.i = temp.r * b[i__6].i +
- temp.i * b[i__6].r;
- z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
- .i - z__2.i;
- b[i__4].r = z__1.r, b[i__4].i = z__1.i;
-/* L350: */
- }
- }
-/* L360: */
- }
- if (alpha->r != 1. || alpha->i != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + k * b_dim1;
- i__4 = i__ + k * b_dim1;
- z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
- .i, z__1.i = alpha->r * b[i__4].i +
- alpha->i * b[i__4].r;
- b[i__3].r = z__1.r, b[i__3].i = z__1.i;
-/* L370: */
- }
- }
-/* L380: */
- }
- }
- }
- }
-
- return 0;
-
-/* End of ZTRSM . */
-
-} /* ztrsm_ */
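In the same spirit, a hedged sketch of the Level 3 solve deleted above: ZTRSM overwrites B with the solution X of A*X = alpha*B. The f2c typedefs are again assumptions to be matched against the local f2c.h.

typedef int integer;
typedef double doublereal;
typedef struct { doublereal r, i; } doublecomplex;

extern int ztrsm_(char *side, char *uplo, char *transa, char *diag,
                  integer *m, integer *n, doublecomplex *alpha,
                  doublecomplex *a, integer *lda, doublecomplex *b,
                  integer *ldb);

int main(void)
{
    /* Solve A*X = B with A = [ 2 1 ] (upper triangular) and B = (3, 8)^T. */
    /*                        [ 0 4 ]                                      */
    doublecomplex a[4] = { {2,0}, {0,0}, {1,0}, {4,0} };
    doublecomplex b[2] = { {3,0}, {8,0} };
    doublecomplex alpha = { 1.0, 0.0 };
    integer m = 2, n = 1, lda = 2, ldb = 2;

    ztrsm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);

    /* Back-substitution: 4*x2 = 8 gives x2 = 2, then 2*x1 + x2 = 3
       gives x1 = 0.5, so b now holds X = (0.5, 2). */
    return 0;
}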
-
-/* Subroutine */ int ztrsv_(char *uplo, char *trans, char *diag, integer *n,
- doublecomplex *a, integer *lda, doublecomplex *x, integer *incx)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
- doublecomplex z__1, z__2, z__3;
-
- /* Builtin functions */
- void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
- doublecomplex *, doublecomplex *);
-
- /* Local variables */
- static integer i__, j, ix, jx, kx, info;
- static doublecomplex temp;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical noconj, nounit;
-
-
-/*
- Purpose
- =======
-
- ZTRSV solves one of the systems of equations
-
- A*x = b, or A'*x = b, or conjg( A' )*x = b,
-
- where b and x are n element vectors and A is an n by n unit, or
- non-unit, upper or lower triangular matrix.
-
- No test for singularity or near-singularity is included in this
- routine. Such tests must be performed before calling this routine.
-
- Parameters
- ==========
-
- UPLO - CHARACTER*1.
- On entry, UPLO specifies whether the matrix is an upper or
- lower triangular matrix as follows:
-
- UPLO = 'U' or 'u' A is an upper triangular matrix.
-
- UPLO = 'L' or 'l' A is a lower triangular matrix.
-
- Unchanged on exit.
-
- TRANS - CHARACTER*1.
- On entry, TRANS specifies the equations to be solved as
- follows:
-
- TRANS = 'N' or 'n' A*x = b.
-
- TRANS = 'T' or 't' A'*x = b.
-
- TRANS = 'C' or 'c' conjg( A' )*x = b.
-
- Unchanged on exit.
-
- DIAG - CHARACTER*1.
- On entry, DIAG specifies whether or not A is unit
- triangular as follows:
-
- DIAG = 'U' or 'u' A is assumed to be unit triangular.
-
- DIAG = 'N' or 'n' A is not assumed to be unit
- triangular.
-
- Unchanged on exit.
-
- N - INTEGER.
- On entry, N specifies the order of the matrix A.
- N must be at least zero.
- Unchanged on exit.
-
- A - COMPLEX*16 array of DIMENSION ( LDA, n ).
- Before entry with UPLO = 'U' or 'u', the leading n by n
- upper triangular part of the array A must contain the upper
- triangular matrix and the strictly lower triangular part of
- A is not referenced.
- Before entry with UPLO = 'L' or 'l', the leading n by n
- lower triangular part of the array A must contain the lower
- triangular matrix and the strictly upper triangular part of
- A is not referenced.
- Note that when DIAG = 'U' or 'u', the diagonal elements of
- A are not referenced either, but are assumed to be unity.
- Unchanged on exit.
-
- LDA - INTEGER.
- On entry, LDA specifies the first dimension of A as declared
- in the calling (sub) program. LDA must be at least
- max( 1, n ).
- Unchanged on exit.
-
- X - COMPLEX*16 array of dimension at least
- ( 1 + ( n - 1 )*abs( INCX ) ).
- Before entry, the incremented array X must contain the n
- element right-hand side vector b. On exit, X is overwritten
- with the solution vector x.
-
- INCX - INTEGER.
- On entry, INCX specifies the increment for the elements of
- X. INCX must not be zero.
- Unchanged on exit.
-
-
- Level 2 Blas routine.
-
- -- Written on 22-October-1986.
- Jack Dongarra, Argonne National Lab.
- Jeremy Du Croz, Nag Central Office.
- Sven Hammarling, Nag Central Office.
- Richard Hanson, Sandia National Labs.
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --x;
-
- /* Function Body */
- info = 0;
- if ((! lsame_(uplo, "U") && ! lsame_(uplo, "L"))) {
- info = 1;
- } else if (((! lsame_(trans, "N") && ! lsame_(trans,
- "T")) && ! lsame_(trans, "C"))) {
- info = 2;
- } else if ((! lsame_(diag, "U") && ! lsame_(diag,
- "N"))) {
- info = 3;
- } else if (*n < 0) {
- info = 4;
- } else if (*lda < max(1,*n)) {
- info = 6;
- } else if (*incx == 0) {
- info = 8;
- }
- if (info != 0) {
- xerbla_("ZTRSV ", &info);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0) {
- return 0;
- }
-
- noconj = lsame_(trans, "T");
- nounit = lsame_(diag, "N");
-
-/*
- Set up the start point in X if the increment is not unity. This
- will be ( N - 1 )*INCX too small for descending loops.
-*/
-
- if (*incx <= 0) {
- kx = 1 - (*n - 1) * *incx;
- } else if (*incx != 1) {
- kx = 1;
- }
-
-/*
- Start the operations. In this version the elements of A are
- accessed sequentially with one pass through A.
-*/
-
- if (lsame_(trans, "N")) {
-
-/* Form x := inv( A )*x. */
-
- if (lsame_(uplo, "U")) {
- if (*incx == 1) {
- for (j = *n; j >= 1; --j) {
- i__1 = j;
- if (x[i__1].r != 0. || x[i__1].i != 0.) {
- if (nounit) {
- i__1 = j;
- z_div(&z__1, &x[j], &a[j + j * a_dim1]);
- x[i__1].r = z__1.r, x[i__1].i = z__1.i;
- }
- i__1 = j;
- temp.r = x[i__1].r, temp.i = x[i__1].i;
- for (i__ = j - 1; i__ >= 1; --i__) {
- i__1 = i__;
- i__2 = i__;
- i__3 = i__ + j * a_dim1;
- z__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
- z__2.i = temp.r * a[i__3].i + temp.i * a[
- i__3].r;
- z__1.r = x[i__2].r - z__2.r, z__1.i = x[i__2].i -
- z__2.i;
- x[i__1].r = z__1.r, x[i__1].i = z__1.i;
-/* L10: */
- }
- }
-/* L20: */
- }
- } else {
- jx = kx + (*n - 1) * *incx;
- for (j = *n; j >= 1; --j) {
- i__1 = jx;
- if (x[i__1].r != 0. || x[i__1].i != 0.) {
- if (nounit) {
- i__1 = jx;
- z_div(&z__1, &x[jx], &a[j + j * a_dim1]);
- x[i__1].r = z__1.r, x[i__1].i = z__1.i;
- }
- i__1 = jx;
- temp.r = x[i__1].r, temp.i = x[i__1].i;
- ix = jx;
- for (i__ = j - 1; i__ >= 1; --i__) {
- ix -= *incx;
- i__1 = ix;
- i__2 = ix;
- i__3 = i__ + j * a_dim1;
- z__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
- z__2.i = temp.r * a[i__3].i + temp.i * a[
- i__3].r;
- z__1.r = x[i__2].r - z__2.r, z__1.i = x[i__2].i -
- z__2.i;
- x[i__1].r = z__1.r, x[i__1].i = z__1.i;
-/* L30: */
- }
- }
- jx -= *incx;
-/* L40: */
- }
- }
- } else {
- if (*incx == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- if (x[i__2].r != 0. || x[i__2].i != 0.) {
- if (nounit) {
- i__2 = j;
- z_div(&z__1, &x[j], &a[j + j * a_dim1]);
- x[i__2].r = z__1.r, x[i__2].i = z__1.i;
- }
- i__2 = j;
- temp.r = x[i__2].r, temp.i = x[i__2].i;
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- i__3 = i__;
- i__4 = i__;
- i__5 = i__ + j * a_dim1;
- z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
- z__2.i = temp.r * a[i__5].i + temp.i * a[
- i__5].r;
- z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
- z__2.i;
- x[i__3].r = z__1.r, x[i__3].i = z__1.i;
-/* L50: */
- }
- }
-/* L60: */
- }
- } else {
- jx = kx;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = jx;
- if (x[i__2].r != 0. || x[i__2].i != 0.) {
- if (nounit) {
- i__2 = jx;
- z_div(&z__1, &x[jx], &a[j + j * a_dim1]);
- x[i__2].r = z__1.r, x[i__2].i = z__1.i;
- }
- i__2 = jx;
- temp.r = x[i__2].r, temp.i = x[i__2].i;
- ix = jx;
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- ix += *incx;
- i__3 = ix;
- i__4 = ix;
- i__5 = i__ + j * a_dim1;
- z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
- z__2.i = temp.r * a[i__5].i + temp.i * a[
- i__5].r;
- z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
- z__2.i;
- x[i__3].r = z__1.r, x[i__3].i = z__1.i;
-/* L70: */
- }
- }
- jx += *incx;
-/* L80: */
- }
- }
- }
- } else {
-
-/* Form x := inv( A' )*x or x := inv( conjg( A' ) )*x. */
-
- if (lsame_(uplo, "U")) {
- if (*incx == 1) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- temp.r = x[i__2].r, temp.i = x[i__2].i;
- if (noconj) {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = i__;
- z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
- i__4].i, z__2.i = a[i__3].r * x[i__4].i +
- a[i__3].i * x[i__4].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L90: */
- }
- if (nounit) {
- z_div(&z__1, &temp, &a[j + j * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- } else {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__3 = i__;
- z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
- z__2.i = z__3.r * x[i__3].i + z__3.i * x[
- i__3].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L100: */
- }
- if (nounit) {
- d_cnjg(&z__2, &a[j + j * a_dim1]);
- z_div(&z__1, &temp, &z__2);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- }
- i__2 = j;
- x[i__2].r = temp.r, x[i__2].i = temp.i;
-/* L110: */
- }
- } else {
- jx = kx;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- ix = kx;
- i__2 = jx;
- temp.r = x[i__2].r, temp.i = x[i__2].i;
- if (noconj) {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- i__3 = i__ + j * a_dim1;
- i__4 = ix;
- z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
- i__4].i, z__2.i = a[i__3].r * x[i__4].i +
- a[i__3].i * x[i__4].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
- ix += *incx;
-/* L120: */
- }
- if (nounit) {
- z_div(&z__1, &temp, &a[j + j * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- } else {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__3 = ix;
- z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
- z__2.i = z__3.r * x[i__3].i + z__3.i * x[
- i__3].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
- ix += *incx;
-/* L130: */
- }
- if (nounit) {
- d_cnjg(&z__2, &a[j + j * a_dim1]);
- z_div(&z__1, &temp, &z__2);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- }
- i__2 = jx;
- x[i__2].r = temp.r, x[i__2].i = temp.i;
- jx += *incx;
-/* L140: */
- }
- }
- } else {
- if (*incx == 1) {
- for (j = *n; j >= 1; --j) {
- i__1 = j;
- temp.r = x[i__1].r, temp.i = x[i__1].i;
- if (noconj) {
- i__1 = j + 1;
- for (i__ = *n; i__ >= i__1; --i__) {
- i__2 = i__ + j * a_dim1;
- i__3 = i__;
- z__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
- i__3].i, z__2.i = a[i__2].r * x[i__3].i +
- a[i__2].i * x[i__3].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L150: */
- }
- if (nounit) {
- z_div(&z__1, &temp, &a[j + j * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- } else {
- i__1 = j + 1;
- for (i__ = *n; i__ >= i__1; --i__) {
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__2 = i__;
- z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
- z__2.i = z__3.r * x[i__2].i + z__3.i * x[
- i__2].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
-/* L160: */
- }
- if (nounit) {
- d_cnjg(&z__2, &a[j + j * a_dim1]);
- z_div(&z__1, &temp, &z__2);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- }
- i__1 = j;
- x[i__1].r = temp.r, x[i__1].i = temp.i;
-/* L170: */
- }
- } else {
- kx += (*n - 1) * *incx;
- jx = kx;
- for (j = *n; j >= 1; --j) {
- ix = kx;
- i__1 = jx;
- temp.r = x[i__1].r, temp.i = x[i__1].i;
- if (noconj) {
- i__1 = j + 1;
- for (i__ = *n; i__ >= i__1; --i__) {
- i__2 = i__ + j * a_dim1;
- i__3 = ix;
- z__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
- i__3].i, z__2.i = a[i__2].r * x[i__3].i +
- a[i__2].i * x[i__3].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
- ix -= *incx;
-/* L180: */
- }
- if (nounit) {
- z_div(&z__1, &temp, &a[j + j * a_dim1]);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- } else {
- i__1 = j + 1;
- for (i__ = *n; i__ >= i__1; --i__) {
- d_cnjg(&z__3, &a[i__ + j * a_dim1]);
- i__2 = ix;
- z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
- z__2.i = z__3.r * x[i__2].i + z__3.i * x[
- i__2].r;
- z__1.r = temp.r - z__2.r, z__1.i = temp.i -
- z__2.i;
- temp.r = z__1.r, temp.i = z__1.i;
- ix -= *incx;
-/* L190: */
- }
- if (nounit) {
- d_cnjg(&z__2, &a[j + j * a_dim1]);
- z_div(&z__1, &temp, &z__2);
- temp.r = z__1.r, temp.i = z__1.i;
- }
- }
- i__1 = jx;
- x[i__1].r = temp.r, x[i__1].i = temp.i;
- jx -= *incx;
-/* L200: */
- }
- }
- }
- }
-
- return 0;
-
-/* End of ZTRSV . */
-
-} /* ztrsv_ */
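And the Level 2 counterpart: ZTRSV performs the same triangular solve in place for a single right-hand side vector. A minimal sketch under the same f2c typedef assumptions, this time exercising the lower triangular (forward substitution) path.

typedef int integer;
typedef double doublereal;
typedef struct { doublereal r, i; } doublecomplex;

extern int ztrsv_(char *uplo, char *trans, char *diag, integer *n,
                  doublecomplex *a, integer *lda, doublecomplex *x,
                  integer *incx);

int main(void)
{
    /* Solve A*x = b with A = [ 2 0 ] (lower triangular) and b = (2, 9)^T. */
    /*                        [ 1 4 ]                                      */
    doublecomplex a[4] = { {2,0}, {1,0}, {0,0}, {4,0} };
    doublecomplex x[2] = { {2,0}, {9,0} };
    integer n = 2, lda = 2, incx = 1;

    ztrsv_("L", "N", "N", &n, a, &lda, x, &incx);

    /* Forward substitution: x1 = 2/2 = 1, then x2 = (9 - 1*1)/4 = 2. */
    return 0;
}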
-
diff --git a/numpy/linalg/bscript b/numpy/linalg/bscript
index 9905a81d3..deed4fd72 100644
--- a/numpy/linalg/bscript
+++ b/numpy/linalg/bscript
@@ -1,5 +1,3 @@
-import os
-
from bento.commands.hooks \
import \
pre_build
@@ -10,15 +8,19 @@ def pbuild(context):
def build_lapack_lite(extension):
kw = {}
+ kw["use"] = "npymath"
if bld.env.HAS_LAPACK:
for s in ['python_xerbla.c', 'zlapack_lite.c', 'dlapack_lite.c',
'blas_lite.c', 'dlamch.c', 'f2c_lite.c']:
- extension.sources.pop(extension.sources.index(s))
- kw["uselib"] = "LAPACK"
+ extension.sources.pop(extension.sources.index('lapack_lite/' + s))
+ kw["use"] = "npymath LAPACK"
includes = ["../core/include", "../core/include/numpy", "../core",
"../core/src/private"]
return context.default_builder(extension,
includes=includes,
**kw)
+
context.register_builder("lapack_lite", build_lapack_lite)
+ context.register_builder("_umath_linalg", build_lapack_lite)
+
diff --git a/numpy/linalg/dlapack_lite.c b/numpy/linalg/dlapack_lite.c
deleted file mode 100644
index d2c1d8129..000000000
--- a/numpy/linalg/dlapack_lite.c
+++ /dev/null
@@ -1,36008 +0,0 @@
-#define MAXITERLOOPS 100
-
-/*
-NOTE: This is generated code. Look in Misc/lapack_lite for information on
- remaking this file.
-*/
-#include "f2c.h"
-
-#ifdef HAVE_CONFIG
-#include "config.h"
-#else
-extern doublereal dlamch_(char *);
-#define EPSILON dlamch_("Epsilon")
-#define SAFEMINIMUM dlamch_("Safe minimum")
-#define PRECISION dlamch_("Precision")
-#define BASE dlamch_("Base")
-#endif
-
-extern doublereal dlapy2_(doublereal *x, doublereal *y);
-
-
-
-/* Table of constant values */
-
-static integer c__9 = 9;
-static integer c__0 = 0;
-static doublereal c_b15 = 1.;
-static integer c__1 = 1;
-static doublereal c_b29 = 0.;
-static doublereal c_b94 = -.125;
-static doublereal c_b151 = -1.;
-static integer c_n1 = -1;
-static integer c__3 = 3;
-static integer c__2 = 2;
-static integer c__8 = 8;
-static integer c__4 = 4;
-static integer c__65 = 65;
-static integer c__6 = 6;
-static integer c__15 = 15;
-static logical c_false = FALSE_;
-static integer c__10 = 10;
-static integer c__11 = 11;
-static doublereal c_b2804 = 2.;
-static logical c_true = TRUE_;
-static real c_b3825 = 0.f;
-static real c_b3826 = 1.f;
-
-/* Subroutine */ int dbdsdc_(char *uplo, char *compq, integer *n, doublereal *
- d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt,
- integer *ldvt, doublereal *q, integer *iq, doublereal *work, integer *
- iwork, integer *info)
-{
- /* System generated locals */
- integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
- doublereal d__1;
-
- /* Builtin functions */
- double d_sign(doublereal *, doublereal *), log(doublereal);
-
- /* Local variables */
- static integer i__, j, k;
- static doublereal p, r__;
- static integer z__, ic, ii, kk;
- static doublereal cs;
- static integer is, iu;
- static doublereal sn;
- static integer nm1;
- static doublereal eps;
- static integer ivt, difl, difr, ierr, perm, mlvl, sqre;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
- integer *, doublereal *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *
- , doublereal *, integer *), dswap_(integer *, doublereal *,
- integer *, doublereal *, integer *);
- static integer poles, iuplo, nsize, start;
- extern /* Subroutine */ int dlasd0_(integer *, integer *, doublereal *,
- doublereal *, doublereal *, integer *, doublereal *, integer *,
- integer *, integer *, doublereal *, integer *);
-
- extern /* Subroutine */ int dlasda_(integer *, integer *, integer *,
- integer *, doublereal *, doublereal *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, doublereal *,
- doublereal *, integer *, integer *, integer *, integer *,
- doublereal *, doublereal *, doublereal *, doublereal *, integer *,
- integer *), dlascl_(char *, integer *, integer *, doublereal *,
- doublereal *, integer *, integer *, doublereal *, integer *,
- integer *), dlasdq_(char *, integer *, integer *, integer
- *, integer *, integer *, doublereal *, doublereal *, doublereal *,
- integer *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, integer *), dlaset_(char *, integer *,
- integer *, doublereal *, doublereal *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *,
- doublereal *, doublereal *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static integer givcol;
- extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
- static integer icompq;
- static doublereal orgnrm;
- static integer givnum, givptr, qstart, smlsiz, wstart, smlszp;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- December 1, 1999
-
-
- Purpose
- =======
-
- DBDSDC computes the singular value decomposition (SVD) of a real
- N-by-N (upper or lower) bidiagonal matrix B: B = U * S * VT,
- using a divide and conquer method, where S is a diagonal matrix
- with non-negative diagonal elements (the singular values of B), and
- U and VT are orthogonal matrices of left and right singular vectors,
- respectively. DBDSDC can be used to compute all singular values,
-    and optionally, singular vectors, either explicitly or in compact form.
-
- This code makes very mild assumptions about floating point
- arithmetic. It will work on machines with a guard digit in
- add/subtract, or on those binary machines without guard digits
- which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
- It could conceivably fail on hexadecimal or decimal machines
- without guard digits, but we know of none. See DLASD3 for details.
-
-    The code currently calls DLASDQ if singular values only are desired.
- However, it can be slightly modified to compute singular values
- using the divide and conquer method.
-
- Arguments
- =========
-
- UPLO (input) CHARACTER*1
- = 'U': B is upper bidiagonal.
- = 'L': B is lower bidiagonal.
-
- COMPQ (input) CHARACTER*1
- Specifies whether singular vectors are to be computed
- as follows:
- = 'N': Compute singular values only;
- = 'P': Compute singular values and compute singular
- vectors in compact form;
- = 'I': Compute singular values and singular vectors.
-
- N (input) INTEGER
- The order of the matrix B. N >= 0.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the n diagonal elements of the bidiagonal matrix B.
- On exit, if INFO=0, the singular values of B.
-
- E (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the elements of E contain the offdiagonal
- elements of the bidiagonal matrix whose SVD is desired.
- On exit, E has been destroyed.
-
- U (output) DOUBLE PRECISION array, dimension (LDU,N)
- If COMPQ = 'I', then:
- On exit, if INFO = 0, U contains the left singular vectors
- of the bidiagonal matrix.
- For other values of COMPQ, U is not referenced.
-
- LDU (input) INTEGER
- The leading dimension of the array U. LDU >= 1.
- If singular vectors are desired, then LDU >= max( 1, N ).
-
- VT (output) DOUBLE PRECISION array, dimension (LDVT,N)
- If COMPQ = 'I', then:
- On exit, if INFO = 0, VT' contains the right singular
- vectors of the bidiagonal matrix.
- For other values of COMPQ, VT is not referenced.
-
- LDVT (input) INTEGER
- The leading dimension of the array VT. LDVT >= 1.
- If singular vectors are desired, then LDVT >= max( 1, N ).
-
- Q (output) DOUBLE PRECISION array, dimension (LDQ)
- If COMPQ = 'P', then:
- On exit, if INFO = 0, Q and IQ contain the left
- and right singular vectors in a compact form,
- requiring O(N log N) space instead of 2*N**2.
- In particular, Q contains all the DOUBLE PRECISION data in
- LDQ >= N*(11 + 2*SMLSIZ + 8*INT(LOG_2(N/(SMLSIZ+1))))
- words of memory, where SMLSIZ is returned by ILAENV and
- is equal to the maximum size of the subproblems at the
- bottom of the computation tree (usually about 25).
- For other values of COMPQ, Q is not referenced.
-
- IQ (output) INTEGER array, dimension (LDIQ)
- If COMPQ = 'P', then:
- On exit, if INFO = 0, Q and IQ contain the left
- and right singular vectors in a compact form,
- requiring O(N log N) space instead of 2*N**2.
- In particular, IQ contains all INTEGER data in
- LDIQ >= N*(3 + 3*INT(LOG_2(N/(SMLSIZ+1))))
- words of memory, where SMLSIZ is returned by ILAENV and
- is equal to the maximum size of the subproblems at the
- bottom of the computation tree (usually about 25).
- For other values of COMPQ, IQ is not referenced.
-
- WORK (workspace) DOUBLE PRECISION array, dimension (LWORK)
- If COMPQ = 'N' then LWORK >= (4 * N).
- If COMPQ = 'P' then LWORK >= (6 * N).
- If COMPQ = 'I' then LWORK >= (3 * N**2 + 4 * N).
-
- IWORK (workspace) INTEGER array, dimension (8*N)
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-          > 0:  The algorithm failed to compute a singular value.
- The update process of divide and conquer failed.
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --e;
- u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
- u -= u_offset;
- vt_dim1 = *ldvt;
- vt_offset = 1 + vt_dim1 * 1;
- vt -= vt_offset;
- --q;
- --iq;
- --work;
- --iwork;
-
- /* Function Body */
- *info = 0;
-
- iuplo = 0;
- if (lsame_(uplo, "U")) {
- iuplo = 1;
- }
- if (lsame_(uplo, "L")) {
- iuplo = 2;
- }
- if (lsame_(compq, "N")) {
- icompq = 0;
- } else if (lsame_(compq, "P")) {
- icompq = 1;
- } else if (lsame_(compq, "I")) {
- icompq = 2;
- } else {
- icompq = -1;
- }
- if (iuplo == 0) {
- *info = -1;
- } else if (icompq < 0) {
- *info = -2;
- } else if (*n < 0) {
- *info = -3;
- } else if (*ldu < 1 || (icompq == 2 && *ldu < *n)) {
- *info = -7;
- } else if (*ldvt < 1 || (icompq == 2 && *ldvt < *n)) {
- *info = -9;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DBDSDC", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
- smlsiz = ilaenv_(&c__9, "DBDSDC", " ", &c__0, &c__0, &c__0, &c__0, (
- ftnlen)6, (ftnlen)1);
- if (*n == 1) {
- if (icompq == 1) {
- q[1] = d_sign(&c_b15, &d__[1]);
- q[smlsiz * *n + 1] = 1.;
- } else if (icompq == 2) {
- u[u_dim1 + 1] = d_sign(&c_b15, &d__[1]);
- vt[vt_dim1 + 1] = 1.;
- }
- d__[1] = abs(d__[1]);
- return 0;
- }
- nm1 = *n - 1;
-
-/*
- If matrix lower bidiagonal, rotate to be upper bidiagonal
- by applying Givens rotations on the left
-*/
-
- wstart = 1;
- qstart = 3;
- if (icompq == 1) {
- dcopy_(n, &d__[1], &c__1, &q[1], &c__1);
- i__1 = *n - 1;
- dcopy_(&i__1, &e[1], &c__1, &q[*n + 1], &c__1);
- }
- if (iuplo == 2) {
- qstart = 5;
- wstart = ((*n) << (1)) - 1;
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
- d__[i__] = r__;
- e[i__] = sn * d__[i__ + 1];
- d__[i__ + 1] = cs * d__[i__ + 1];
- if (icompq == 1) {
- q[i__ + ((*n) << (1))] = cs;
- q[i__ + *n * 3] = sn;
- } else if (icompq == 2) {
- work[i__] = cs;
- work[nm1 + i__] = -sn;
- }
-/* L10: */
- }
- }
-
-/* If ICOMPQ = 0, use DLASDQ to compute the singular values. */
-
- if (icompq == 0) {
- dlasdq_("U", &c__0, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
- vt_offset], ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
- wstart], info);
- goto L40;
- }
-
-/*
- If N is smaller than the minimum divide size SMLSIZ, then solve
- the problem with another solver.
-*/
-
- if (*n <= smlsiz) {
- if (icompq == 2) {
- dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu);
- dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt);
- dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
- , ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
- wstart], info);
- } else if (icompq == 1) {
- iu = 1;
- ivt = iu + *n;
- dlaset_("A", n, n, &c_b29, &c_b15, &q[iu + (qstart - 1) * *n], n);
- dlaset_("A", n, n, &c_b29, &c_b15, &q[ivt + (qstart - 1) * *n], n);
- dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &q[ivt + (
- qstart - 1) * *n], n, &q[iu + (qstart - 1) * *n], n, &q[
- iu + (qstart - 1) * *n], n, &work[wstart], info);
- }
- goto L40;
- }
-
- if (icompq == 2) {
- dlaset_("A", n, n, &c_b29, &c_b15, &u[u_offset], ldu);
- dlaset_("A", n, n, &c_b29, &c_b15, &vt[vt_offset], ldvt);
- }
-
-/* Scale. */
-
- orgnrm = dlanst_("M", n, &d__[1], &e[1]);
- if (orgnrm == 0.) {
- return 0;
- }
- dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, &ierr);
- dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1, &
- ierr);
-
- eps = EPSILON;
-
- mlvl = (integer) (log((doublereal) (*n) / (doublereal) (smlsiz + 1)) /
- log(2.)) + 1;
- smlszp = smlsiz + 1;
-
- if (icompq == 1) {
- iu = 1;
- ivt = smlsiz + 1;
- difl = ivt + smlszp;
- difr = difl + mlvl;
- z__ = difr + ((mlvl) << (1));
- ic = z__ + mlvl;
- is = ic + 1;
- poles = is + 1;
- givnum = poles + ((mlvl) << (1));
-
- k = 1;
- givptr = 2;
- perm = 3;
- givcol = perm + mlvl;
- }
-
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if ((d__1 = d__[i__], abs(d__1)) < eps) {
- d__[i__] = d_sign(&eps, &d__[i__]);
- }
-/* L20: */
- }
-
- start = 1;
- sqre = 0;
-
- i__1 = nm1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) {
-
-/*
- Subproblem found. First determine its size and then
- apply divide and conquer on it.
-*/
-
- if (i__ < nm1) {
-
-/* A subproblem with E(I) small for I < NM1. */
-
- nsize = i__ - start + 1;
- } else if ((d__1 = e[i__], abs(d__1)) >= eps) {
-
-/* A subproblem with E(NM1) not too small but I = NM1. */
-
- nsize = *n - start + 1;
- } else {
-
-/*
- A subproblem with E(NM1) small. This implies an
- 1-by-1 subproblem at D(N). Solve this 1-by-1 problem
- first.
-*/
-
- nsize = i__ - start + 1;
- if (icompq == 2) {
- u[*n + *n * u_dim1] = d_sign(&c_b15, &d__[*n]);
- vt[*n + *n * vt_dim1] = 1.;
- } else if (icompq == 1) {
- q[*n + (qstart - 1) * *n] = d_sign(&c_b15, &d__[*n]);
- q[*n + (smlsiz + qstart - 1) * *n] = 1.;
- }
- d__[*n] = (d__1 = d__[*n], abs(d__1));
- }
- if (icompq == 2) {
- dlasd0_(&nsize, &sqre, &d__[start], &e[start], &u[start +
- start * u_dim1], ldu, &vt[start + start * vt_dim1],
- ldvt, &smlsiz, &iwork[1], &work[wstart], info);
- } else {
- dlasda_(&icompq, &smlsiz, &nsize, &sqre, &d__[start], &e[
- start], &q[start + (iu + qstart - 2) * *n], n, &q[
- start + (ivt + qstart - 2) * *n], &iq[start + k * *n],
- &q[start + (difl + qstart - 2) * *n], &q[start + (
- difr + qstart - 2) * *n], &q[start + (z__ + qstart -
- 2) * *n], &q[start + (poles + qstart - 2) * *n], &iq[
- start + givptr * *n], &iq[start + givcol * *n], n, &
- iq[start + perm * *n], &q[start + (givnum + qstart -
- 2) * *n], &q[start + (ic + qstart - 2) * *n], &q[
- start + (is + qstart - 2) * *n], &work[wstart], &
- iwork[1], info);
- if (*info != 0) {
- return 0;
- }
- }
- start = i__ + 1;
- }
-/* L30: */
- }
-
-/* Unscale */
-
- dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, &ierr);
-L40:
-
-/* Use Selection Sort to minimize swaps of singular vectors */
-
- i__1 = *n;
- for (ii = 2; ii <= i__1; ++ii) {
- i__ = ii - 1;
- kk = i__;
- p = d__[i__];
- i__2 = *n;
- for (j = ii; j <= i__2; ++j) {
- if (d__[j] > p) {
- kk = j;
- p = d__[j];
- }
-/* L50: */
- }
- if (kk != i__) {
- d__[kk] = d__[i__];
- d__[i__] = p;
- if (icompq == 1) {
- iq[i__] = kk;
- } else if (icompq == 2) {
- dswap_(n, &u[i__ * u_dim1 + 1], &c__1, &u[kk * u_dim1 + 1], &
- c__1);
- dswap_(n, &vt[i__ + vt_dim1], ldvt, &vt[kk + vt_dim1], ldvt);
- }
- } else if (icompq == 1) {
- iq[i__] = i__;
- }
-/* L60: */
- }
-
-/* If ICOMPQ = 1, use IQ(N,1) as the indicator for UPLO */
-
- if (icompq == 1) {
- if (iuplo == 1) {
- iq[*n] = 1;
- } else {
- iq[*n] = 0;
- }
- }
-
-/*
- If B is lower bidiagonal, update U by those Givens rotations
- which rotated B to be upper bidiagonal
-*/
-
- if ((iuplo == 2 && icompq == 2)) {
- dlasr_("L", "V", "B", n, n, &work[1], &work[*n], &u[u_offset], ldu);
- }
-
- return 0;
-
-/* End of DBDSDC */
-
-} /* dbdsdc_ */
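As a usage note for the routine above: a minimal sketch of calling dbdsdc_ with COMPQ = 'N' (singular values only), with workspace sized per the documentation (LWORK >= 4*N and an INTEGER workspace of 8*N); U, VT, Q and IQ are not referenced in this mode, so small dummies are passed. The typedefs are assumptions to be matched against the local f2c.h.

typedef int integer;
typedef double doublereal;

extern int dbdsdc_(char *uplo, char *compq, integer *n, doublereal *d,
                   doublereal *e, doublereal *u, integer *ldu,
                   doublereal *vt, integer *ldvt, doublereal *q,
                   integer *iq, doublereal *work, integer *iwork,
                   integer *info);

int main(void)
{
    /* Singular values of the 2x2 upper bidiagonal B = [ 3 1 ]
                                                       [ 0 2 ]. */
    integer n = 2, ldu = 1, ldvt = 1, info = 0;
    doublereal d[2] = { 3.0, 2.0 };
    doublereal e[2] = { 1.0, 0.0 };        /* offdiagonal, E(N) unused */
    doublereal u[1], vt[1], q[1], work[8]; /* 4*N workspace for 'N'    */
    integer iq[1], iwork[16];              /* 8*N integer workspace    */

    dbdsdc_("U", "N", &n, d, e, u, &ldu, vt, &ldvt, q, iq,
            work, iwork, &info);

    /* On success (info == 0), d holds the singular values in
       decreasing order. */
    return (int) info;
}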
-
-/* Subroutine */ int dbdsqr_(char *uplo, integer *n, integer *ncvt, integer *
- nru, integer *ncc, doublereal *d__, doublereal *e, doublereal *vt,
- integer *ldvt, doublereal *u, integer *ldu, doublereal *c__, integer *
- ldc, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
- i__2;
- doublereal d__1, d__2, d__3, d__4;
-
- /* Builtin functions */
- double pow_dd(doublereal *, doublereal *), sqrt(doublereal), d_sign(
- doublereal *, doublereal *);
-
- /* Local variables */
- static doublereal f, g, h__;
- static integer i__, j, m;
- static doublereal r__, cs;
- static integer ll;
- static doublereal sn, mu;
- static integer nm1, nm12, nm13, lll;
- static doublereal eps, sll, tol, abse;
- static integer idir;
- static doublereal abss;
- static integer oldm;
- static doublereal cosl;
- static integer isub, iter;
- static doublereal unfl, sinl, cosr, smin, smax, sinr;
- extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *), dlas2_(
- doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *), dscal_(integer *, doublereal *, doublereal *,
- integer *);
- extern logical lsame_(char *, char *);
- static doublereal oldcs;
- extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
- integer *, doublereal *, doublereal *, doublereal *, integer *);
- static integer oldll;
- static doublereal shift, sigmn, oldsn;
- extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static integer maxit;
- static doublereal sminl, sigmx;
- static logical lower;
- extern /* Subroutine */ int dlasq1_(integer *, doublereal *, doublereal *,
- doublereal *, integer *), dlasv2_(doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *);
-
- extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *), xerbla_(char *,
- integer *);
- static doublereal sminoa, thresh;
- static logical rotate;
- static doublereal sminlo, tolmul;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- DBDSQR computes the singular value decomposition (SVD) of a real
- N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P'
- denotes the transpose of P), where S is a diagonal matrix with
- non-negative diagonal elements (the singular values of B), and Q
- and P are orthogonal matrices.
-
- The routine computes S, and optionally computes U * Q, P' * VT,
- or Q' * C, for given real input matrices U, VT, and C.
-
- See "Computing Small Singular Values of Bidiagonal Matrices With
- Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan,
- LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11,
- no. 5, pp. 873-912, Sept 1990) and
- "Accurate singular values and differential qd algorithms," by
- B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics
- Department, University of California at Berkeley, July 1992
- for a detailed description of the algorithm.
-
- Arguments
- =========
-
- UPLO (input) CHARACTER*1
- = 'U': B is upper bidiagonal;
- = 'L': B is lower bidiagonal.
-
- N (input) INTEGER
- The order of the matrix B. N >= 0.
-
- NCVT (input) INTEGER
- The number of columns of the matrix VT. NCVT >= 0.
-
- NRU (input) INTEGER
- The number of rows of the matrix U. NRU >= 0.
-
- NCC (input) INTEGER
- The number of columns of the matrix C. NCC >= 0.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the n diagonal elements of the bidiagonal matrix B.
- On exit, if INFO=0, the singular values of B in decreasing
- order.
-
- E (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the elements of E contain the
- offdiagonal elements of the bidiagonal matrix whose SVD
- is desired. On normal exit (INFO = 0), E is destroyed.
- If the algorithm does not converge (INFO > 0), D and E
- will contain the diagonal and superdiagonal elements of a
- bidiagonal matrix orthogonally equivalent to the one given
- as input. E(N) is used for workspace.
-
- VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT)
- On entry, an N-by-NCVT matrix VT.
- On exit, VT is overwritten by P' * VT.
- VT is not referenced if NCVT = 0.
-
- LDVT (input) INTEGER
- The leading dimension of the array VT.
- LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0.
-
- U (input/output) DOUBLE PRECISION array, dimension (LDU, N)
- On entry, an NRU-by-N matrix U.
- On exit, U is overwritten by U * Q.
- U is not referenced if NRU = 0.
-
- LDU (input) INTEGER
- The leading dimension of the array U. LDU >= max(1,NRU).
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC)
- On entry, an N-by-NCC matrix C.
- On exit, C is overwritten by Q' * C.
- C is not referenced if NCC = 0.
-
- LDC (input) INTEGER
- The leading dimension of the array C.
-          LDC >= max(1,N) if NCC > 0; LDC >= 1 if NCC = 0.
-
- WORK (workspace) DOUBLE PRECISION array, dimension (4*N)
-
- INFO (output) INTEGER
- = 0: successful exit
-          < 0: if INFO = -i, the i-th argument had an illegal value
- > 0: the algorithm did not converge; D and E contain the
- elements of a bidiagonal matrix which is orthogonally
- similar to the input matrix B; if INFO = i, i
- elements of E have not converged to zero.
-
- Internal Parameters
- ===================
-
- TOLMUL DOUBLE PRECISION, default = max(10,min(100,EPS**(-1/8)))
- TOLMUL controls the convergence criterion of the QR loop.
- If it is positive, TOLMUL*EPS is the desired relative
- precision in the computed singular values.
- If it is negative, abs(TOLMUL*EPS*sigma_max) is the
- desired absolute accuracy in the computed singular
- values (corresponds to relative accuracy
-          abs(TOLMUL*EPS) in the largest singular value).
- abs(TOLMUL) should be between 1 and 1/EPS, and preferably
- between 10 (for fast convergence) and .1/EPS
- (for there to be some accuracy in the results).
-          The default is to lose either one eighth of the available
-          decimal digits or 2 decimal digits in each computed
-          singular value, whichever is smaller.
-
- MAXITR INTEGER, default = 6
- MAXITR controls the maximum number of passes of the
-          algorithm through its inner loop. The algorithm stops
- (and so fails to converge) if the number of passes
- through the inner loop exceeds MAXITR*N**2.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --e;
- vt_dim1 = *ldvt;
- vt_offset = 1 + vt_dim1 * 1;
- vt -= vt_offset;
- u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
- u -= u_offset;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- *info = 0;
- lower = lsame_(uplo, "L");
- if ((! lsame_(uplo, "U") && ! lower)) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*ncvt < 0) {
- *info = -3;
- } else if (*nru < 0) {
- *info = -4;
- } else if (*ncc < 0) {
- *info = -5;
- } else if ((*ncvt == 0 && *ldvt < 1) || (*ncvt > 0 && *ldvt < max(1,*n)))
- {
- *info = -9;
- } else if (*ldu < max(1,*nru)) {
- *info = -11;
- } else if ((*ncc == 0 && *ldc < 1) || (*ncc > 0 && *ldc < max(1,*n))) {
- *info = -13;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DBDSQR", &i__1);
- return 0;
- }
- if (*n == 0) {
- return 0;
- }
- if (*n == 1) {
- goto L160;
- }
-
-/* ROTATE is true if any singular vectors desired, false otherwise */
-
- rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;
-
-/* If no singular vectors desired, use qd algorithm */
-
- if (! rotate) {
- dlasq1_(n, &d__[1], &e[1], &work[1], info);
- return 0;
- }
-
- nm1 = *n - 1;
- nm12 = nm1 + nm1;
- nm13 = nm12 + nm1;
- idir = 0;
-
-/* Get machine constants */
-
- eps = EPSILON;
- unfl = SAFEMINIMUM;
-
-/*
- If matrix lower bidiagonal, rotate to be upper bidiagonal
- by applying Givens rotations on the left
-*/
-
- if (lower) {
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
- d__[i__] = r__;
- e[i__] = sn * d__[i__ + 1];
- d__[i__ + 1] = cs * d__[i__ + 1];
- work[i__] = cs;
- work[nm1 + i__] = sn;
-/* L10: */
- }
-
-/* Update singular vectors if desired */
-
- if (*nru > 0) {
- dlasr_("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset],
- ldu);
- }
- if (*ncc > 0) {
- dlasr_("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset],
- ldc);
- }
- }
-
-/*
- Compute singular values to relative accuracy TOL
-    (By setting TOL to be negative, the algorithm will compute
- singular values to absolute accuracy ABS(TOL)*norm(input matrix))
-
- Computing MAX
- Computing MIN
-*/
- d__3 = 100., d__4 = pow_dd(&eps, &c_b94);
- d__1 = 10., d__2 = min(d__3,d__4);
- tolmul = max(d__1,d__2);
- tol = tolmul * eps;
-
-/* Compute approximate maximum, minimum singular values */
-
- smax = 0.;
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
-/* Computing MAX */
- d__2 = smax, d__3 = (d__1 = d__[i__], abs(d__1));
- smax = max(d__2,d__3);
-/* L20: */
- }
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
-/* Computing MAX */
- d__2 = smax, d__3 = (d__1 = e[i__], abs(d__1));
- smax = max(d__2,d__3);
-/* L30: */
- }
- sminl = 0.;
- if (tol >= 0.) {
-
-/* Relative accuracy desired */
-
- sminoa = abs(d__[1]);
- if (sminoa == 0.) {
- goto L50;
- }
- mu = sminoa;
- i__1 = *n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- mu = (d__2 = d__[i__], abs(d__2)) * (mu / (mu + (d__1 = e[i__ - 1]
- , abs(d__1))));
- sminoa = min(sminoa,mu);
- if (sminoa == 0.) {
- goto L50;
- }
-/* L40: */
- }
-L50:
- sminoa /= sqrt((doublereal) (*n));
-/* Computing MAX */
- d__1 = tol * sminoa, d__2 = *n * 6 * *n * unfl;
- thresh = max(d__1,d__2);
- } else {
-
-/*
- Absolute accuracy desired
-
- Computing MAX
-*/
- d__1 = abs(tol) * smax, d__2 = *n * 6 * *n * unfl;
- thresh = max(d__1,d__2);
- }
-
-/*
- Prepare for main iteration loop for the singular values
- (MAXIT is the maximum number of passes through the inner
-    loop permitted before nonconvergence is signalled.)
-*/
-
- maxit = *n * 6 * *n;
- iter = 0;
- oldll = -1;
- oldm = -1;
-
-/* M points to last element of unconverged part of matrix */
-
- m = *n;
-
-/* Begin main iteration loop */
-
-L60:
-
-/* Check for convergence or exceeding iteration count */
-
- if (m <= 1) {
- goto L160;
- }
- if (iter > maxit) {
- goto L200;
- }
-
-/* Find diagonal block of matrix to work on */
-
- if ((tol < 0. && (d__1 = d__[m], abs(d__1)) <= thresh)) {
- d__[m] = 0.;
- }
- smax = (d__1 = d__[m], abs(d__1));
- smin = smax;
- i__1 = m - 1;
- for (lll = 1; lll <= i__1; ++lll) {
- ll = m - lll;
- abss = (d__1 = d__[ll], abs(d__1));
- abse = (d__1 = e[ll], abs(d__1));
- if ((tol < 0. && abss <= thresh)) {
- d__[ll] = 0.;
- }
- if (abse <= thresh) {
- goto L80;
- }
- smin = min(smin,abss);
-/* Computing MAX */
- d__1 = max(smax,abss);
- smax = max(d__1,abse);
-/* L70: */
- }
- ll = 0;
- goto L90;
-L80:
- e[ll] = 0.;
-
-/* Matrix splits since E(LL) = 0 */
-
- if (ll == m - 1) {
-
-/* Convergence of bottom singular value, return to top of loop */
-
- --m;
- goto L60;
- }
-L90:
- ++ll;
-
-/* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */
-
- if (ll == m - 1) {
-
-/* 2 by 2 block, handle separately */
-
- dlasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr,
- &sinl, &cosl);
- d__[m - 1] = sigmx;
- e[m - 1] = 0.;
- d__[m] = sigmn;
-
-/* Compute singular vectors, if desired */
-
- if (*ncvt > 0) {
- drot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, &
- cosr, &sinr);
- }
- if (*nru > 0) {
- drot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], &
- c__1, &cosl, &sinl);
- }
- if (*ncc > 0) {
- drot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, &
- cosl, &sinl);
- }
- m += -2;
- goto L60;
- }
-
-/*
- If working on new submatrix, choose shift direction
- (from larger end diagonal element towards smaller)
-*/
-
- if (ll > oldm || m < oldll) {
- if ((d__1 = d__[ll], abs(d__1)) >= (d__2 = d__[m], abs(d__2))) {
-
-/* Chase bulge from top (big end) to bottom (small end) */
-
- idir = 1;
- } else {
-
-/* Chase bulge from bottom (big end) to top (small end) */
-
- idir = 2;
- }
- }
-
-/* Apply convergence tests */
-
- if (idir == 1) {
-
-/*
- Run convergence test in forward direction
- First apply standard test to bottom of matrix
-*/
-
- if ((d__2 = e[m - 1], abs(d__2)) <= abs(tol) * (d__1 = d__[m], abs(
- d__1)) || (tol < 0. && (d__3 = e[m - 1], abs(d__3)) <= thresh)
- ) {
- e[m - 1] = 0.;
- goto L60;
- }
-
- if (tol >= 0.) {
-
-/*
- If relative accuracy desired,
- apply convergence criterion forward
-*/
-
- mu = (d__1 = d__[ll], abs(d__1));
- sminl = mu;
- i__1 = m - 1;
- for (lll = ll; lll <= i__1; ++lll) {
- if ((d__1 = e[lll], abs(d__1)) <= tol * mu) {
- e[lll] = 0.;
- goto L60;
- }
- sminlo = sminl;
- mu = (d__2 = d__[lll + 1], abs(d__2)) * (mu / (mu + (d__1 = e[
- lll], abs(d__1))));
- sminl = min(sminl,mu);
-/* L100: */
- }
- }
-
- } else {
-
-/*
- Run convergence test in backward direction
- First apply standard test to top of matrix
-*/
-
- if ((d__2 = e[ll], abs(d__2)) <= abs(tol) * (d__1 = d__[ll], abs(d__1)
- ) || (tol < 0. && (d__3 = e[ll], abs(d__3)) <= thresh)) {
- e[ll] = 0.;
- goto L60;
- }
-
- if (tol >= 0.) {
-
-/*
- If relative accuracy desired,
- apply convergence criterion backward
-*/
-
- mu = (d__1 = d__[m], abs(d__1));
- sminl = mu;
- i__1 = ll;
- for (lll = m - 1; lll >= i__1; --lll) {
- if ((d__1 = e[lll], abs(d__1)) <= tol * mu) {
- e[lll] = 0.;
- goto L60;
- }
- sminlo = sminl;
- mu = (d__2 = d__[lll], abs(d__2)) * (mu / (mu + (d__1 = e[lll]
- , abs(d__1))));
- sminl = min(sminl,mu);
-/* L110: */
- }
- }
- }
- oldll = ll;
- oldm = m;
-
-/*
- Compute shift. First, test if shifting would ruin relative
- accuracy, and if so set the shift to zero.
-
- Computing MAX
-*/
- d__1 = eps, d__2 = tol * .01;
- if ((tol >= 0. && *n * tol * (sminl / smax) <= max(d__1,d__2))) {
-
-/* Use a zero shift to avoid loss of relative accuracy */
-
- shift = 0.;
- } else {
-
-/* Compute the shift from 2-by-2 block at end of matrix */
-
- if (idir == 1) {
- sll = (d__1 = d__[ll], abs(d__1));
- dlas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
- } else {
- sll = (d__1 = d__[m], abs(d__1));
- dlas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
- }
-
-/* Test if shift negligible, and if so set to zero */
-
- if (sll > 0.) {
-/* Computing 2nd power */
- d__1 = shift / sll;
- if (d__1 * d__1 < eps) {
- shift = 0.;
- }
- }
- }
-
-/* Increment iteration count */
-
- iter = iter + m - ll;
-
-/* If SHIFT = 0, do simplified QR iteration */
-
- if (shift == 0.) {
- if (idir == 1) {
-
-/*
- Chase bulge from top to bottom
- Save cosines and sines for later singular vector updates
-*/
-
- cs = 1.;
- oldcs = 1.;
- i__1 = m - 1;
- for (i__ = ll; i__ <= i__1; ++i__) {
- d__1 = d__[i__] * cs;
- dlartg_(&d__1, &e[i__], &cs, &sn, &r__);
- if (i__ > ll) {
- e[i__ - 1] = oldsn * r__;
- }
- d__1 = oldcs * r__;
- d__2 = d__[i__ + 1] * sn;
- dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
- work[i__ - ll + 1] = cs;
- work[i__ - ll + 1 + nm1] = sn;
- work[i__ - ll + 1 + nm12] = oldcs;
- work[i__ - ll + 1 + nm13] = oldsn;
-/* L120: */
- }
- h__ = d__[m] * cs;
- d__[m] = h__ * oldcs;
- e[m - 1] = h__ * oldsn;
-
-/* Update singular vectors */
-
- if (*ncvt > 0) {
- i__1 = m - ll + 1;
- dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
- ll + vt_dim1], ldvt);
- }
- if (*nru > 0) {
- i__1 = m - ll + 1;
- dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
- + 1], &u[ll * u_dim1 + 1], ldu);
- }
- if (*ncc > 0) {
- i__1 = m - ll + 1;
- dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
- + 1], &c__[ll + c_dim1], ldc);
- }
-
-/* Test convergence */
-
- if ((d__1 = e[m - 1], abs(d__1)) <= thresh) {
- e[m - 1] = 0.;
- }
-
- } else {
-
-/*
- Chase bulge from bottom to top
- Save cosines and sines for later singular vector updates
-*/
-
- cs = 1.;
- oldcs = 1.;
- i__1 = ll + 1;
- for (i__ = m; i__ >= i__1; --i__) {
- d__1 = d__[i__] * cs;
- dlartg_(&d__1, &e[i__ - 1], &cs, &sn, &r__);
- if (i__ < m) {
- e[i__] = oldsn * r__;
- }
- d__1 = oldcs * r__;
- d__2 = d__[i__ - 1] * sn;
- dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
- work[i__ - ll] = cs;
- work[i__ - ll + nm1] = -sn;
- work[i__ - ll + nm12] = oldcs;
- work[i__ - ll + nm13] = -oldsn;
-/* L130: */
- }
- h__ = d__[ll] * cs;
- d__[ll] = h__ * oldcs;
- e[ll] = h__ * oldsn;
-
-/* Update singular vectors */
-
- if (*ncvt > 0) {
- i__1 = m - ll + 1;
- dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
- nm13 + 1], &vt[ll + vt_dim1], ldvt);
- }
- if (*nru > 0) {
- i__1 = m - ll + 1;
- dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
- u_dim1 + 1], ldu);
- }
- if (*ncc > 0) {
- i__1 = m - ll + 1;
- dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
- ll + c_dim1], ldc);
- }
-
-/* Test convergence */
-
- if ((d__1 = e[ll], abs(d__1)) <= thresh) {
- e[ll] = 0.;
- }
- }
- } else {
-
-/* Use nonzero shift */
-
- if (idir == 1) {
-
-/*
- Chase bulge from top to bottom
- Save cosines and sines for later singular vector updates
-*/
-
- f = ((d__1 = d__[ll], abs(d__1)) - shift) * (d_sign(&c_b15, &d__[
- ll]) + shift / d__[ll]);
- g = e[ll];
- i__1 = m - 1;
- for (i__ = ll; i__ <= i__1; ++i__) {
- dlartg_(&f, &g, &cosr, &sinr, &r__);
- if (i__ > ll) {
- e[i__ - 1] = r__;
- }
- f = cosr * d__[i__] + sinr * e[i__];
- e[i__] = cosr * e[i__] - sinr * d__[i__];
- g = sinr * d__[i__ + 1];
- d__[i__ + 1] = cosr * d__[i__ + 1];
- dlartg_(&f, &g, &cosl, &sinl, &r__);
- d__[i__] = r__;
- f = cosl * e[i__] + sinl * d__[i__ + 1];
- d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
- if (i__ < m - 1) {
- g = sinl * e[i__ + 1];
- e[i__ + 1] = cosl * e[i__ + 1];
- }
- work[i__ - ll + 1] = cosr;
- work[i__ - ll + 1 + nm1] = sinr;
- work[i__ - ll + 1 + nm12] = cosl;
- work[i__ - ll + 1 + nm13] = sinl;
-/* L140: */
- }
- e[m - 1] = f;
-
-/* Update singular vectors */
-
- if (*ncvt > 0) {
- i__1 = m - ll + 1;
- dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
- ll + vt_dim1], ldvt);
- }
- if (*nru > 0) {
- i__1 = m - ll + 1;
- dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
- + 1], &u[ll * u_dim1 + 1], ldu);
- }
- if (*ncc > 0) {
- i__1 = m - ll + 1;
- dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
- + 1], &c__[ll + c_dim1], ldc);
- }
-
-/* Test convergence */
-
- if ((d__1 = e[m - 1], abs(d__1)) <= thresh) {
- e[m - 1] = 0.;
- }
-
- } else {
-
-/*
- Chase bulge from bottom to top
- Save cosines and sines for later singular vector updates
-*/
-
- f = ((d__1 = d__[m], abs(d__1)) - shift) * (d_sign(&c_b15, &d__[m]
- ) + shift / d__[m]);
- g = e[m - 1];
- i__1 = ll + 1;
- for (i__ = m; i__ >= i__1; --i__) {
- dlartg_(&f, &g, &cosr, &sinr, &r__);
- if (i__ < m) {
- e[i__] = r__;
- }
- f = cosr * d__[i__] + sinr * e[i__ - 1];
- e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
- g = sinr * d__[i__ - 1];
- d__[i__ - 1] = cosr * d__[i__ - 1];
- dlartg_(&f, &g, &cosl, &sinl, &r__);
- d__[i__] = r__;
- f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
- d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
- if (i__ > ll + 1) {
- g = sinl * e[i__ - 2];
- e[i__ - 2] = cosl * e[i__ - 2];
- }
- work[i__ - ll] = cosr;
- work[i__ - ll + nm1] = -sinr;
- work[i__ - ll + nm12] = cosl;
- work[i__ - ll + nm13] = -sinl;
-/* L150: */
- }
- e[ll] = f;
-
-/* Test convergence */
-
- if ((d__1 = e[ll], abs(d__1)) <= thresh) {
- e[ll] = 0.;
- }
-
-/* Update singular vectors if desired */
-
- if (*ncvt > 0) {
- i__1 = m - ll + 1;
- dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
- nm13 + 1], &vt[ll + vt_dim1], ldvt);
- }
- if (*nru > 0) {
- i__1 = m - ll + 1;
- dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
- u_dim1 + 1], ldu);
- }
- if (*ncc > 0) {
- i__1 = m - ll + 1;
- dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
- ll + c_dim1], ldc);
- }
- }
- }
-
-/* QR iteration finished, go back and check convergence */
-
- goto L60;
-
-/* All singular values converged, so make them positive */
-
-L160:
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if (d__[i__] < 0.) {
- d__[i__] = -d__[i__];
-
-/* Change sign of singular vectors, if desired */
-
- if (*ncvt > 0) {
- dscal_(ncvt, &c_b151, &vt[i__ + vt_dim1], ldvt);
- }
- }
-/* L170: */
- }
-
-/*
- Sort the singular values into decreasing order (insertion sort on
- singular values, but only one transposition per singular vector)
-*/
-
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
-
-/* Scan for smallest D(I) */
-
- isub = 1;
- smin = d__[1];
- i__2 = *n + 1 - i__;
- for (j = 2; j <= i__2; ++j) {
- if (d__[j] <= smin) {
- isub = j;
- smin = d__[j];
- }
-/* L180: */
- }
- if (isub != *n + 1 - i__) {
-
-/* Swap singular values and vectors */
-
- d__[isub] = d__[*n + 1 - i__];
- d__[*n + 1 - i__] = smin;
- if (*ncvt > 0) {
- dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ +
- vt_dim1], ldvt);
- }
- if (*nru > 0) {
- dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) *
- u_dim1 + 1], &c__1);
- }
- if (*ncc > 0) {
- dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ +
- c_dim1], ldc);
- }
- }
-/* L190: */
- }
- goto L220;
-
-/* Maximum number of iterations exceeded, failure to converge */
-
-L200:
- *info = 0;
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if (e[i__] != 0.) {
- ++(*info);
- }
-/* L210: */
- }
-L220:
- return 0;
-
-/* End of DBDSQR */
-
-} /* dbdsqr_ */
-
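-/*
-   A minimal usage sketch (an illustration, not part of the original
-   f2c output; the call order is inferred from the Arguments list
-   above): computing the singular values of a small upper bidiagonal
-   matrix with DBDSQR.  With NCVT = NRU = NCC = 0 no singular vectors
-   are accumulated, so the routine dispatches to the qd algorithm
-   internally.  Assumes the f2c typedefs integer and doublereal from
-   f2c.h are in scope.
-*/
-static int dbdsqr_example_(void)
-{
-    integer n = 3, nvec = 0, ld1 = 1, info = 0;
-    doublereal d[3] = { 2., 1., .5 };   /* diagonal of B */
-    doublereal e[3] = { 1., .25, 0. };  /* superdiagonal; E(N) is workspace */
-    doublereal dummy[1];                /* VT, U, C are not referenced */
-    doublereal work[12];                /* dimension 4*N */
-
-    dbdsqr_("U", &n, &nvec, &nvec, &nvec, d, e, dummy, &ld1, dummy, &ld1,
-            dummy, &ld1, work, &info);
-    /* on return with info == 0, d[] holds the singular values of B
-       in decreasing order */
-    return (int) info;
-}
-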
-/* Subroutine */ int dgebak_(char *job, char *side, integer *n, integer *ilo,
- integer *ihi, doublereal *scale, integer *m, doublereal *v, integer *
- ldv, integer *info)
-{
- /* System generated locals */
- integer v_dim1, v_offset, i__1;
-
- /* Local variables */
- static integer i__, k;
- static doublereal s;
- static integer ii;
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static logical leftv;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical rightv;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- DGEBAK forms the right or left eigenvectors of a real general matrix
- by backward transformation on the computed eigenvectors of the
- balanced matrix output by DGEBAL.
-
- Arguments
- =========
-
- JOB (input) CHARACTER*1
- Specifies the type of backward transformation required:
- = 'N', do nothing, return immediately;
- = 'P', do backward transformation for permutation only;
- = 'S', do backward transformation for scaling only;
- = 'B', do backward transformations for both permutation and
- scaling.
- JOB must be the same as the argument JOB supplied to DGEBAL.
-
- SIDE (input) CHARACTER*1
- = 'R': V contains right eigenvectors;
- = 'L': V contains left eigenvectors.
-
- N (input) INTEGER
- The number of rows of the matrix V. N >= 0.
-
- ILO (input) INTEGER
- IHI (input) INTEGER
- The integers ILO and IHI determined by DGEBAL.
- 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
-
- SCALE (input) DOUBLE PRECISION array, dimension (N)
- Details of the permutation and scaling factors, as returned
- by DGEBAL.
-
- M (input) INTEGER
- The number of columns of the matrix V. M >= 0.
-
- V (input/output) DOUBLE PRECISION array, dimension (LDV,M)
- On entry, the matrix of right or left eigenvectors to be
- transformed, as returned by DHSEIN or DTREVC.
- On exit, V is overwritten by the transformed eigenvectors.
-
- LDV (input) INTEGER
- The leading dimension of the array V. LDV >= max(1,N).
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- =====================================================================
-
-
- Decode and Test the input parameters
-*/
-
- /* Parameter adjustments */
- --scale;
- v_dim1 = *ldv;
- v_offset = 1 + v_dim1 * 1;
- v -= v_offset;
-
- /* Function Body */
- rightv = lsame_(side, "R");
- leftv = lsame_(side, "L");
-
- *info = 0;
- if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S"))
- && ! lsame_(job, "B"))) {
- *info = -1;
- } else if ((! rightv && ! leftv)) {
- *info = -2;
- } else if (*n < 0) {
- *info = -3;
- } else if (*ilo < 1 || *ilo > max(1,*n)) {
- *info = -4;
- } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
- *info = -5;
- } else if (*m < 0) {
- *info = -7;
- } else if (*ldv < max(1,*n)) {
- *info = -9;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGEBAK", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
- if (*m == 0) {
- return 0;
- }
- if (lsame_(job, "N")) {
- return 0;
- }
-
- if (*ilo == *ihi) {
- goto L30;
- }
-
-/* Backward balance */
-
- if (lsame_(job, "S") || lsame_(job, "B")) {
-
- if (rightv) {
- i__1 = *ihi;
- for (i__ = *ilo; i__ <= i__1; ++i__) {
- s = scale[i__];
- dscal_(m, &s, &v[i__ + v_dim1], ldv);
-/* L10: */
- }
- }
-
- if (leftv) {
- i__1 = *ihi;
- for (i__ = *ilo; i__ <= i__1; ++i__) {
- s = 1. / scale[i__];
- dscal_(m, &s, &v[i__ + v_dim1], ldv);
-/* L20: */
- }
- }
-
- }
-
-/*
- Backward permutation
-
- For I = ILO-1 step -1 until 1,
- IHI+1 step 1 until N do --
-*/
-
-L30:
- if (lsame_(job, "P") || lsame_(job, "B")) {
- if (rightv) {
- i__1 = *n;
- for (ii = 1; ii <= i__1; ++ii) {
- i__ = ii;
- if ((i__ >= *ilo && i__ <= *ihi)) {
- goto L40;
- }
- if (i__ < *ilo) {
- i__ = *ilo - ii;
- }
- k = (integer) scale[i__];
- if (k == i__) {
- goto L40;
- }
- dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
-L40:
- ;
- }
- }
-
- if (leftv) {
- i__1 = *n;
- for (ii = 1; ii <= i__1; ++ii) {
- i__ = ii;
- if ((i__ >= *ilo && i__ <= *ihi)) {
- goto L50;
- }
- if (i__ < *ilo) {
- i__ = *ilo - ii;
- }
- k = (integer) scale[i__];
- if (k == i__) {
- goto L50;
- }
- dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
-L50:
- ;
- }
- }
- }
-
- return 0;
-
-/* End of DGEBAK */
-
-} /* dgebak_ */
-
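-/*
-   A minimal usage sketch (an illustration, not part of the original
-   f2c output): undoing the balancing on computed eigenvectors.  ILO,
-   IHI and SCALE are assumed to come from the DGEBAL call that
-   balanced the original matrix; V holds M right eigenvectors of the
-   balanced matrix in its columns (column-major, leading dimension LDV).
-*/
-static int dgebak_example_(integer n, integer ilo, integer ihi,
-                           doublereal *scale, integer m,
-                           doublereal *v, integer ldv)
-{
-    integer info;
-
-    /* "B" undoes both the permutation and the scaling; "R" says the
-       columns of V are right eigenvectors */
-    dgebak_("B", "R", &n, &ilo, &ihi, scale, &m, v, &ldv, &info);
-    return (int) info;
-}
-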
-/* Subroutine */ int dgebal_(char *job, integer *n, doublereal *a, integer *
- lda, integer *ilo, integer *ihi, doublereal *scale, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
- doublereal d__1, d__2;
-
- /* Local variables */
- static doublereal c__, f, g;
- static integer i__, j, k, l, m;
- static doublereal r__, s, ca, ra;
- static integer ica, ira, iexc;
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static doublereal sfmin1, sfmin2, sfmax1, sfmax2;
-
- extern integer idamax_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical noconv;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DGEBAL balances a general real matrix A. This involves, first,
- permuting A by a similarity transformation to isolate eigenvalues
- in the first 1 to ILO-1 and last IHI+1 to N elements on the
- diagonal; and second, applying a diagonal similarity transformation
- to rows and columns ILO to IHI to make the rows and columns as
- close in norm as possible. Both steps are optional.
-
- Balancing may reduce the 1-norm of the matrix, and improve the
- accuracy of the computed eigenvalues and/or eigenvectors.
-
- Arguments
- =========
-
- JOB (input) CHARACTER*1
- Specifies the operations to be performed on A:
- = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0
- for i = 1,...,N;
- = 'P': permute only;
- = 'S': scale only;
- = 'B': both permute and scale.
-
- N (input) INTEGER
- The order of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the input matrix A.
- On exit, A is overwritten by the balanced matrix.
- If JOB = 'N', A is not referenced.
- See Further Details.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- ILO (output) INTEGER
- IHI (output) INTEGER
- ILO and IHI are set to integers such that on exit
-          A(i,j) = 0 if i > j and j = 1,...,ILO-1 or i = IHI+1,...,N.
- If JOB = 'N' or 'S', ILO = 1 and IHI = N.
-
- SCALE (output) DOUBLE PRECISION array, dimension (N)
- Details of the permutations and scaling factors applied to
- A. If P(j) is the index of the row and column interchanged
- with row and column j and D(j) is the scaling factor
- applied to row and column j, then
- SCALE(j) = P(j) for j = 1,...,ILO-1
- = D(j) for j = ILO,...,IHI
- = P(j) for j = IHI+1,...,N.
- The order in which the interchanges are made is N to IHI+1,
- then 1 to ILO-1.
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- The permutations consist of row and column interchanges which put
- the matrix in the form
-
- ( T1 X Y )
- P A P = ( 0 B Z )
- ( 0 0 T2 )
-
- where T1 and T2 are upper triangular matrices whose eigenvalues lie
- along the diagonal. The column indices ILO and IHI mark the starting
- and ending columns of the submatrix B. Balancing consists of applying
- a diagonal similarity transformation inv(D) * B * D to make the
- 1-norms of each row of B and its corresponding column nearly equal.
- The output matrix is
-
- ( T1 X*D Y )
- ( 0 inv(D)*B*D inv(D)*Z ).
- ( 0 0 T2 )
-
- Information about the permutations P and the diagonal matrix D is
- returned in the vector SCALE.
-
- This subroutine is based on the EISPACK routine BALANC.
-
- Modified by Tzu-Yi Chen, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --scale;
-
- /* Function Body */
- *info = 0;
- if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S"))
- && ! lsame_(job, "B"))) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*n)) {
- *info = -4;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGEBAL", &i__1);
- return 0;
- }
-
- k = 1;
- l = *n;
-
- if (*n == 0) {
- goto L210;
- }
-
- if (lsame_(job, "N")) {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- scale[i__] = 1.;
-/* L10: */
- }
- goto L210;
- }
-
- if (lsame_(job, "S")) {
- goto L120;
- }
-
-/* Permutation to isolate eigenvalues if possible */
-
- goto L50;
-
-/* Row and column exchange. */
-
-L20:
- scale[m] = (doublereal) j;
- if (j == m) {
- goto L30;
- }
-
- dswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
- i__1 = *n - k + 1;
- dswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda);
-
-L30:
- switch (iexc) {
- case 1: goto L40;
- case 2: goto L80;
- }
-
-/* Search for rows isolating an eigenvalue and push them down. */
-
-L40:
- if (l == 1) {
- goto L210;
- }
- --l;
-
-L50:
- for (j = l; j >= 1; --j) {
-
- i__1 = l;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if (i__ == j) {
- goto L60;
- }
- if (a[j + i__ * a_dim1] != 0.) {
- goto L70;
- }
-L60:
- ;
- }
-
- m = l;
- iexc = 1;
- goto L20;
-L70:
- ;
- }
-
- goto L90;
-
-/* Search for columns isolating an eigenvalue and push them left. */
-
-L80:
- ++k;
-
-L90:
- i__1 = l;
- for (j = k; j <= i__1; ++j) {
-
- i__2 = l;
- for (i__ = k; i__ <= i__2; ++i__) {
- if (i__ == j) {
- goto L100;
- }
- if (a[i__ + j * a_dim1] != 0.) {
- goto L110;
- }
-L100:
- ;
- }
-
- m = k;
- iexc = 2;
- goto L20;
-L110:
- ;
- }
-
-L120:
- i__1 = l;
- for (i__ = k; i__ <= i__1; ++i__) {
- scale[i__] = 1.;
-/* L130: */
- }
-
- if (lsame_(job, "P")) {
- goto L210;
- }
-
-/*
- Balance the submatrix in rows K to L.
-
- Iterative loop for norm reduction
-*/
-
- sfmin1 = SAFEMINIMUM / PRECISION;
- sfmax1 = 1. / sfmin1;
- sfmin2 = sfmin1 * 8.;
- sfmax2 = 1. / sfmin2;
-L140:
- noconv = FALSE_;
-
- i__1 = l;
- for (i__ = k; i__ <= i__1; ++i__) {
- c__ = 0.;
- r__ = 0.;
-
- i__2 = l;
- for (j = k; j <= i__2; ++j) {
- if (j == i__) {
- goto L150;
- }
- c__ += (d__1 = a[j + i__ * a_dim1], abs(d__1));
- r__ += (d__1 = a[i__ + j * a_dim1], abs(d__1));
-L150:
- ;
- }
- ica = idamax_(&l, &a[i__ * a_dim1 + 1], &c__1);
- ca = (d__1 = a[ica + i__ * a_dim1], abs(d__1));
- i__2 = *n - k + 1;
- ira = idamax_(&i__2, &a[i__ + k * a_dim1], lda);
- ra = (d__1 = a[i__ + (ira + k - 1) * a_dim1], abs(d__1));
-
-/* Guard against zero C or R due to underflow. */
-
- if (c__ == 0. || r__ == 0.) {
- goto L200;
- }
- g = r__ / 8.;
- f = 1.;
- s = c__ + r__;
-L160:
-/* Computing MAX */
- d__1 = max(f,c__);
-/* Computing MIN */
- d__2 = min(r__,g);
- if (c__ >= g || max(d__1,ca) >= sfmax2 || min(d__2,ra) <= sfmin2) {
- goto L170;
- }
- f *= 8.;
- c__ *= 8.;
- ca *= 8.;
- r__ /= 8.;
- g /= 8.;
- ra /= 8.;
- goto L160;
-
-L170:
- g = c__ / 8.;
-L180:
-/* Computing MIN */
- d__1 = min(f,c__), d__1 = min(d__1,g);
- if (g < r__ || max(r__,ra) >= sfmax2 || min(d__1,ca) <= sfmin2) {
- goto L190;
- }
- f /= 8.;
- c__ /= 8.;
- g /= 8.;
- ca /= 8.;
- r__ *= 8.;
- ra *= 8.;
- goto L180;
-
-/* Now balance. */
-
-L190:
- if (c__ + r__ >= s * .95) {
- goto L200;
- }
- if ((f < 1. && scale[i__] < 1.)) {
- if (f * scale[i__] <= sfmin1) {
- goto L200;
- }
- }
- if ((f > 1. && scale[i__] > 1.)) {
- if (scale[i__] >= sfmax1 / f) {
- goto L200;
- }
- }
- g = 1. / f;
- scale[i__] *= f;
- noconv = TRUE_;
-
- i__2 = *n - k + 1;
- dscal_(&i__2, &g, &a[i__ + k * a_dim1], lda);
- dscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1);
-
-L200:
- ;
- }
-
- if (noconv) {
- goto L140;
- }
-
-L210:
- *ilo = k;
- *ihi = l;
-
- return 0;
-
-/* End of DGEBAL */
-
-} /* dgebal_ */
-
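-/*
-   A minimal usage sketch (an illustration, not part of the original
-   f2c output): balancing a badly scaled 2-by-2 matrix stored
-   column-major, as all the f2c-translated routines expect.  On return
-   ILO and IHI delimit the submatrix still needing an eigenvalue
-   computation, and SCALE records the permutation and scaling factors
-   for a later DGEBAK call.
-*/
-static int dgebal_example_(void)
-{
-    integer n = 2, lda = 2, ilo, ihi, info;
-    doublereal scale[2];
-    /* column-major: a[i + j*lda] is A(i+1,j+1), so this is
-       A = ( 1     1e6 )
-           ( 1e-6  1   )                                   */
-    doublereal a[4] = { 1., 1e-6, 1e6, 1. };
-
-    dgebal_("B", &n, a, &lda, &ilo, &ihi, scale, &info);
-    return (int) info;
-}
-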
-/* Subroutine */ int dgebd2_(integer *m, integer *n, doublereal *a, integer *
- lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal *
- taup, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
-
- /* Local variables */
- static integer i__;
- extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- doublereal *), dlarfg_(integer *, doublereal *,
- doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DGEBD2 reduces a real general m by n matrix A to upper or lower
- bidiagonal form B by an orthogonal transformation: Q' * A * P = B.
-
- If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows in the matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns in the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the m by n general matrix to be reduced.
- On exit,
- if m >= n, the diagonal and the first superdiagonal are
- overwritten with the upper bidiagonal matrix B; the
- elements below the diagonal, with the array TAUQ, represent
- the orthogonal matrix Q as a product of elementary
- reflectors, and the elements above the first superdiagonal,
- with the array TAUP, represent the orthogonal matrix P as
- a product of elementary reflectors;
- if m < n, the diagonal and the first subdiagonal are
- overwritten with the lower bidiagonal matrix B; the
- elements below the first subdiagonal, with the array TAUQ,
- represent the orthogonal matrix Q as a product of
- elementary reflectors, and the elements above the diagonal,
- with the array TAUP, represent the orthogonal matrix P as
- a product of elementary reflectors.
- See Further Details.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- D (output) DOUBLE PRECISION array, dimension (min(M,N))
- The diagonal elements of the bidiagonal matrix B:
- D(i) = A(i,i).
-
- E (output) DOUBLE PRECISION array, dimension (min(M,N)-1)
- The off-diagonal elements of the bidiagonal matrix B:
- if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
- if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
-
-  TAUQ    (output) DOUBLE PRECISION array, dimension (min(M,N))
- The scalar factors of the elementary reflectors which
- represent the orthogonal matrix Q. See Further Details.
-
- TAUP (output) DOUBLE PRECISION array, dimension (min(M,N))
- The scalar factors of the elementary reflectors which
- represent the orthogonal matrix P. See Further Details.
-
- WORK (workspace) DOUBLE PRECISION array, dimension (max(M,N))
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- The matrices Q and P are represented as products of elementary
- reflectors:
-
- If m >= n,
-
- Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1)
-
- Each H(i) and G(i) has the form:
-
- H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
-
- where tauq and taup are real scalars, and v and u are real vectors;
- v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i);
- u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n);
- tauq is stored in TAUQ(i) and taup in TAUP(i).
-
- If m < n,
-
- Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m)
-
- Each H(i) and G(i) has the form:
-
- H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
-
- where tauq and taup are real scalars, and v and u are real vectors;
- v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i);
- u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n);
- tauq is stored in TAUQ(i) and taup in TAUP(i).
-
- The contents of A on exit are illustrated by the following examples:
-
- m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
-
- ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
- ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
- ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
- ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
- ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
- ( v1 v2 v3 v4 v5 )
-
- where d and e denote diagonal and off-diagonal elements of B, vi
- denotes an element of the vector defining H(i), and ui an element of
- the vector defining G(i).
-
- =====================================================================
-
-
- Test the input parameters
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --d__;
- --e;
- --tauq;
- --taup;
- --work;
-
- /* Function Body */
- *info = 0;
- if (*m < 0) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*m)) {
- *info = -4;
- }
- if (*info < 0) {
- i__1 = -(*info);
- xerbla_("DGEBD2", &i__1);
- return 0;
- }
-
- if (*m >= *n) {
-
-/* Reduce to upper bidiagonal form */
-
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
-
-/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
-
- i__2 = *m - i__ + 1;
-/* Computing MIN */
- i__3 = i__ + 1;
- dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ *
- a_dim1], &c__1, &tauq[i__]);
- d__[i__] = a[i__ + i__ * a_dim1];
- a[i__ + i__ * a_dim1] = 1.;
-
-/* Apply H(i) to A(i:m,i+1:n) from the left */
-
- i__2 = *m - i__ + 1;
- i__3 = *n - i__;
- dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tauq[
- i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
- a[i__ + i__ * a_dim1] = d__[i__];
-
- if (i__ < *n) {
-
-/*
- Generate elementary reflector G(i) to annihilate
- A(i,i+2:n)
-*/
-
- i__2 = *n - i__;
-/* Computing MIN */
- i__3 = i__ + 2;
- dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
- i__3,*n) * a_dim1], lda, &taup[i__]);
- e[i__] = a[i__ + (i__ + 1) * a_dim1];
- a[i__ + (i__ + 1) * a_dim1] = 1.;
-
-/* Apply G(i) to A(i+1:m,i+1:n) from the right */
-
- i__2 = *m - i__;
- i__3 = *n - i__;
- dlarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1],
- lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
- lda, &work[1]);
- a[i__ + (i__ + 1) * a_dim1] = e[i__];
- } else {
- taup[i__] = 0.;
- }
-/* L10: */
- }
- } else {
-
-/* Reduce to lower bidiagonal form */
-
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
-
-/* Generate elementary reflector G(i) to annihilate A(i,i+1:n) */
-
- i__2 = *n - i__ + 1;
-/* Computing MIN */
- i__3 = i__ + 1;
- dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) *
- a_dim1], lda, &taup[i__]);
- d__[i__] = a[i__ + i__ * a_dim1];
- a[i__ + i__ * a_dim1] = 1.;
-
-/* Apply G(i) to A(i+1:m,i:n) from the right */
-
- i__2 = *m - i__;
- i__3 = *n - i__ + 1;
-/* Computing MIN */
- i__4 = i__ + 1;
- dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &taup[
- i__], &a[min(i__4,*m) + i__ * a_dim1], lda, &work[1]);
- a[i__ + i__ * a_dim1] = d__[i__];
-
- if (i__ < *m) {
-
-/*
- Generate elementary reflector H(i) to annihilate
- A(i+2:m,i)
-*/
-
- i__2 = *m - i__;
-/* Computing MIN */
- i__3 = i__ + 2;
- dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) +
- i__ * a_dim1], &c__1, &tauq[i__]);
- e[i__] = a[i__ + 1 + i__ * a_dim1];
- a[i__ + 1 + i__ * a_dim1] = 1.;
-
-/* Apply H(i) to A(i+1:m,i+1:n) from the left */
-
- i__2 = *m - i__;
- i__3 = *n - i__;
- dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &
- c__1, &tauq[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
- lda, &work[1]);
- a[i__ + 1 + i__ * a_dim1] = e[i__];
- } else {
- tauq[i__] = 0.;
- }
-/* L20: */
- }
- }
- return 0;
-
-/* End of DGEBD2 */
-
-} /* dgebd2_ */
-
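-/*
-   A minimal usage sketch (an illustration, not part of the original
-   f2c output): reducing a 4-by-3 column-major matrix to upper
-   bidiagonal form with the unblocked routine.  The arrays D and E can
-   then be handed to DBDSQR above, while TAUQ/TAUP plus the reflector
-   vectors left in A encode the orthogonal factors Q and P.
-*/
-static int dgebd2_example_(doublereal *a)   /* a is 4x3 with lda = 4 */
-{
-    integer m = 4, n = 3, lda = 4, info;
-    doublereal d[3], e[2];              /* min(M,N) and min(M,N)-1 */
-    doublereal tauq[3], taup[3];
-    doublereal work[4];                 /* dimension max(M,N) */
-
-    dgebd2_(&m, &n, a, &lda, d, e, tauq, taup, work, &info);
-    return (int) info;
-}
-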
-/* Subroutine */ int dgebrd_(integer *m, integer *n, doublereal *a, integer *
- lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal *
- taup, doublereal *work, integer *lwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
-
- /* Local variables */
- static integer i__, j, nb, nx;
- static doublereal ws;
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *);
- static integer nbmin, iinfo, minmn;
- extern /* Subroutine */ int dgebd2_(integer *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, integer *), dlabrd_(integer *, integer *, integer *
- , doublereal *, integer *, doublereal *, doublereal *, doublereal
- *, doublereal *, doublereal *, integer *, doublereal *, integer *)
- , xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static integer ldwrkx, ldwrky, lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DGEBRD reduces a general real M-by-N matrix A to upper or lower
- bidiagonal form B by an orthogonal transformation: Q**T * A * P = B.
-
- If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows in the matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns in the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the M-by-N general matrix to be reduced.
- On exit,
- if m >= n, the diagonal and the first superdiagonal are
- overwritten with the upper bidiagonal matrix B; the
- elements below the diagonal, with the array TAUQ, represent
- the orthogonal matrix Q as a product of elementary
- reflectors, and the elements above the first superdiagonal,
- with the array TAUP, represent the orthogonal matrix P as
- a product of elementary reflectors;
- if m < n, the diagonal and the first subdiagonal are
- overwritten with the lower bidiagonal matrix B; the
- elements below the first subdiagonal, with the array TAUQ,
- represent the orthogonal matrix Q as a product of
- elementary reflectors, and the elements above the diagonal,
- with the array TAUP, represent the orthogonal matrix P as
- a product of elementary reflectors.
- See Further Details.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- D (output) DOUBLE PRECISION array, dimension (min(M,N))
- The diagonal elements of the bidiagonal matrix B:
- D(i) = A(i,i).
-
- E (output) DOUBLE PRECISION array, dimension (min(M,N)-1)
- The off-diagonal elements of the bidiagonal matrix B:
- if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
- if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
-
-  TAUQ    (output) DOUBLE PRECISION array, dimension (min(M,N))
- The scalar factors of the elementary reflectors which
- represent the orthogonal matrix Q. See Further Details.
-
- TAUP (output) DOUBLE PRECISION array, dimension (min(M,N))
- The scalar factors of the elementary reflectors which
- represent the orthogonal matrix P. See Further Details.
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The length of the array WORK. LWORK >= max(1,M,N).
- For optimum performance LWORK >= (M+N)*NB, where NB
- is the optimal blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- The matrices Q and P are represented as products of elementary
- reflectors:
-
- If m >= n,
-
- Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1)
-
- Each H(i) and G(i) has the form:
-
- H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
-
- where tauq and taup are real scalars, and v and u are real vectors;
- v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i);
- u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n);
- tauq is stored in TAUQ(i) and taup in TAUP(i).
-
- If m < n,
-
- Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m)
-
- Each H(i) and G(i) has the form:
-
- H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
-
- where tauq and taup are real scalars, and v and u are real vectors;
- v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i);
- u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n);
- tauq is stored in TAUQ(i) and taup in TAUP(i).
-
- The contents of A on exit are illustrated by the following examples:
-
- m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
-
- ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
- ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
- ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
- ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
- ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
- ( v1 v2 v3 v4 v5 )
-
- where d and e denote diagonal and off-diagonal elements of B, vi
- denotes an element of the vector defining H(i), and ui an element of
- the vector defining G(i).
-
- =====================================================================
-
-
- Test the input parameters
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --d__;
- --e;
- --tauq;
- --taup;
- --work;
-
- /* Function Body */
- *info = 0;
-/* Computing MAX */
- i__1 = 1, i__2 = ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, (
- ftnlen)6, (ftnlen)1);
- nb = max(i__1,i__2);
- lwkopt = (*m + *n) * nb;
- work[1] = (doublereal) lwkopt;
- lquery = *lwork == -1;
- if (*m < 0) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*m)) {
- *info = -4;
- } else /* if(complicated condition) */ {
-/* Computing MAX */
- i__1 = max(1,*m);
- if ((*lwork < max(i__1,*n) && ! lquery)) {
- *info = -10;
- }
- }
- if (*info < 0) {
- i__1 = -(*info);
- xerbla_("DGEBRD", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- minmn = min(*m,*n);
- if (minmn == 0) {
- work[1] = 1.;
- return 0;
- }
-
- ws = (doublereal) max(*m,*n);
- ldwrkx = *m;
- ldwrky = *n;
-
- if ((nb > 1 && nb < minmn)) {
-
-/*
- Set the crossover point NX.
-
- Computing MAX
-*/
- i__1 = nb, i__2 = ilaenv_(&c__3, "DGEBRD", " ", m, n, &c_n1, &c_n1, (
- ftnlen)6, (ftnlen)1);
- nx = max(i__1,i__2);
-
-/* Determine when to switch from blocked to unblocked code. */
-
- if (nx < minmn) {
- ws = (doublereal) ((*m + *n) * nb);
- if ((doublereal) (*lwork) < ws) {
-
-/*
- Not enough work space for the optimal NB, consider using
- a smaller block size.
-*/
-
- nbmin = ilaenv_(&c__2, "DGEBRD", " ", m, n, &c_n1, &c_n1, (
- ftnlen)6, (ftnlen)1);
- if (*lwork >= (*m + *n) * nbmin) {
- nb = *lwork / (*m + *n);
- } else {
- nb = 1;
- nx = minmn;
- }
- }
- }
- } else {
- nx = minmn;
- }
-
- i__1 = minmn - nx;
- i__2 = nb;
- for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
-
-/*
- Reduce rows and columns i:i+nb-1 to bidiagonal form and return
- the matrices X and Y which are needed to update the unreduced
- part of the matrix
-*/
-
- i__3 = *m - i__ + 1;
- i__4 = *n - i__ + 1;
- dlabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[
- i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx
- * nb + 1], &ldwrky);
-
-/*
- Update the trailing submatrix A(i+nb:m,i+nb:n), using an update
- of the form A := A - V*Y' - X*U'
-*/
-
- i__3 = *m - i__ - nb + 1;
- i__4 = *n - i__ - nb + 1;
- dgemm_("No transpose", "Transpose", &i__3, &i__4, &nb, &c_b151, &a[
- i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb + nb + 1], &
- ldwrky, &c_b15, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
- i__3 = *m - i__ - nb + 1;
- i__4 = *n - i__ - nb + 1;
- dgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &c_b151, &
- work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, &
- c_b15, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
-
-/* Copy diagonal and off-diagonal elements of B back into A */
-
- if (*m >= *n) {
- i__3 = i__ + nb - 1;
- for (j = i__; j <= i__3; ++j) {
- a[j + j * a_dim1] = d__[j];
- a[j + (j + 1) * a_dim1] = e[j];
-/* L10: */
- }
- } else {
- i__3 = i__ + nb - 1;
- for (j = i__; j <= i__3; ++j) {
- a[j + j * a_dim1] = d__[j];
- a[j + 1 + j * a_dim1] = e[j];
-/* L20: */
- }
- }
-/* L30: */
- }
-
-/* Use unblocked code to reduce the remainder of the matrix */
-
- i__2 = *m - i__ + 1;
- i__1 = *n - i__ + 1;
- dgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &
- tauq[i__], &taup[i__], &work[1], &iinfo);
- work[1] = ws;
- return 0;
-
-/* End of DGEBRD */
-
-} /* dgebrd_ */
-
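-/*
-   A minimal usage sketch (an illustration, not part of the original
-   f2c output): the LWORK = -1 workspace-query idiom described in the
-   comments above.  The first call only writes the optimal LWORK into
-   its WORK argument; the second performs the blocked reduction.
-   Assumes <stdlib.h> is available for malloc/free.
-*/
-#include <stdlib.h>
-static int dgebrd_example_(integer m, integer n, doublereal *a, integer lda,
-                           doublereal *d, doublereal *e,
-                           doublereal *tauq, doublereal *taup)
-{
-    integer info, lwork = -1;
-    doublereal query;
-    doublereal *work;
-
-    /* workspace query: only the optimal size is written, into query */
-    dgebrd_(&m, &n, a, &lda, d, e, tauq, taup, &query, &lwork, &info);
-    lwork = (integer) query;
-    work = (doublereal *) malloc(lwork * sizeof(doublereal));
-    if (work == 0) {
-        return -1;                      /* allocation failed */
-    }
-    dgebrd_(&m, &n, a, &lda, d, e, tauq, taup, work, &lwork, &info);
-    free(work);
-    return (int) info;
-}
-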
-/* Subroutine */ int dgeev_(char *jobvl, char *jobvr, integer *n, doublereal *
- a, integer *lda, doublereal *wr, doublereal *wi, doublereal *vl,
- integer *ldvl, doublereal *vr, integer *ldvr, doublereal *work,
- integer *lwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
- i__2, i__3, i__4;
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static integer i__, k;
- static doublereal r__, cs, sn;
- static integer ihi;
- static doublereal scl;
- static integer ilo;
- static doublereal dum[1], eps;
- static integer ibal;
- static char side[1];
- static integer maxb;
- static doublereal anrm;
- static integer ierr, itau;
- extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *);
- static integer iwrk, nout;
- extern doublereal dnrm2_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *);
- extern logical lsame_(char *, char *);
- extern doublereal dlapy2_(doublereal *, doublereal *);
- extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebak_(
- char *, char *, integer *, integer *, integer *, doublereal *,
- integer *, doublereal *, integer *, integer *),
- dgebal_(char *, integer *, doublereal *, integer *, integer *,
- integer *, doublereal *, integer *);
- static logical scalea;
-
- static doublereal cscale;
- extern doublereal dlange_(char *, integer *, integer *, doublereal *,
- integer *, doublereal *);
- extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- integer *), dlascl_(char *, integer *, integer *, doublereal *,
- doublereal *, integer *, integer *, doublereal *, integer *,
- integer *);
- extern integer idamax_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
- doublereal *, integer *, doublereal *, integer *),
- dlartg_(doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *), xerbla_(char *, integer *);
- static logical select[1];
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static doublereal bignum;
- extern /* Subroutine */ int dorghr_(integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- integer *), dhseqr_(char *, char *, integer *, integer *, integer
- *, doublereal *, integer *, doublereal *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, integer *), dtrevc_(char *, char *, logical *, integer *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- integer *, integer *, integer *, doublereal *, integer *);
- static integer minwrk, maxwrk;
- static logical wantvl;
- static doublereal smlnum;
- static integer hswork;
- static logical lquery, wantvr;
-
-
-/*
- -- LAPACK driver routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- December 8, 1999
-
-
- Purpose
- =======
-
- DGEEV computes for an N-by-N real nonsymmetric matrix A, the
- eigenvalues and, optionally, the left and/or right eigenvectors.
-
- The right eigenvector v(j) of A satisfies
- A * v(j) = lambda(j) * v(j)
- where lambda(j) is its eigenvalue.
- The left eigenvector u(j) of A satisfies
- u(j)**H * A = lambda(j) * u(j)**H
- where u(j)**H denotes the conjugate transpose of u(j).
-
- The computed eigenvectors are normalized to have Euclidean norm
- equal to 1 and largest component real.
-
- Arguments
- =========
-
- JOBVL (input) CHARACTER*1
- = 'N': left eigenvectors of A are not computed;
- = 'V': left eigenvectors of A are computed.
-
- JOBVR (input) CHARACTER*1
- = 'N': right eigenvectors of A are not computed;
- = 'V': right eigenvectors of A are computed.
-
- N (input) INTEGER
- The order of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the N-by-N matrix A.
- On exit, A has been overwritten.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- WR (output) DOUBLE PRECISION array, dimension (N)
- WI (output) DOUBLE PRECISION array, dimension (N)
- WR and WI contain the real and imaginary parts,
- respectively, of the computed eigenvalues. Complex
- conjugate pairs of eigenvalues appear consecutively
- with the eigenvalue having the positive imaginary part
- first.
-
- VL (output) DOUBLE PRECISION array, dimension (LDVL,N)
- If JOBVL = 'V', the left eigenvectors u(j) are stored one
- after another in the columns of VL, in the same order
- as their eigenvalues.
- If JOBVL = 'N', VL is not referenced.
- If the j-th eigenvalue is real, then u(j) = VL(:,j),
- the j-th column of VL.
- If the j-th and (j+1)-st eigenvalues form a complex
- conjugate pair, then u(j) = VL(:,j) + i*VL(:,j+1) and
- u(j+1) = VL(:,j) - i*VL(:,j+1).
-
- LDVL (input) INTEGER
- The leading dimension of the array VL. LDVL >= 1; if
- JOBVL = 'V', LDVL >= N.
-
- VR (output) DOUBLE PRECISION array, dimension (LDVR,N)
- If JOBVR = 'V', the right eigenvectors v(j) are stored one
- after another in the columns of VR, in the same order
- as their eigenvalues.
- If JOBVR = 'N', VR is not referenced.
- If the j-th eigenvalue is real, then v(j) = VR(:,j),
- the j-th column of VR.
- If the j-th and (j+1)-st eigenvalues form a complex
- conjugate pair, then v(j) = VR(:,j) + i*VR(:,j+1) and
- v(j+1) = VR(:,j) - i*VR(:,j+1).
-
- LDVR (input) INTEGER
- The leading dimension of the array VR. LDVR >= 1; if
- JOBVR = 'V', LDVR >= N.
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= max(1,3*N), and
- if JOBVL = 'V' or JOBVR = 'V', LWORK >= 4*N. For good
- performance, LWORK must generally be larger.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
- > 0: if INFO = i, the QR algorithm failed to compute all the
- eigenvalues, and no eigenvectors have been computed;
- elements i+1:N of WR and WI contain eigenvalues which
- have converged.
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
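-/*
-   A minimal usage sketch (an illustration, not part of the original
-   f2c output), kept inside a comment since it sits in the body of
-   DGEEV:
-
-       integer n = 2, lda = 2, ldv = 2, lwork = 8, info;
-       doublereal a[4] = { 0., -1., 1., 0. };   column-major A = [0 1; -1 0]
-       doublereal wr[2], wi[2], vr[4], vdum[1], work[8];
-       dgeev_("N", "V", &n, a, &lda, wr, wi, vdum, &ldv,
-              vr, &ldv, work, &lwork, &info);
-
-   This computes the eigenvalue pair +/-i of a rotation generator and
-   its right eigenvectors; LWORK = 4*N meets the JOBVR = 'V'
-   requirement stated above, and VL is not referenced when JOBVL = 'N'.
-*/
-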
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --wr;
- --wi;
- vl_dim1 = *ldvl;
- vl_offset = 1 + vl_dim1 * 1;
- vl -= vl_offset;
- vr_dim1 = *ldvr;
- vr_offset = 1 + vr_dim1 * 1;
- vr -= vr_offset;
- --work;
-
- /* Function Body */
- *info = 0;
- lquery = *lwork == -1;
- wantvl = lsame_(jobvl, "V");
- wantvr = lsame_(jobvr, "V");
- if ((! wantvl && ! lsame_(jobvl, "N"))) {
- *info = -1;
- } else if ((! wantvr && ! lsame_(jobvr, "N"))) {
- *info = -2;
- } else if (*n < 0) {
- *info = -3;
- } else if (*lda < max(1,*n)) {
- *info = -5;
- } else if (*ldvl < 1 || (wantvl && *ldvl < *n)) {
- *info = -9;
- } else if (*ldvr < 1 || (wantvr && *ldvr < *n)) {
- *info = -11;
- }
-
-/*
- Compute workspace
- (Note: Comments in the code beginning "Workspace:" describe the
- minimal amount of workspace needed at that point in the code,
- as well as the preferred amount for good performance.
- NB refers to the optimal block size for the immediately
- following subroutine, as returned by ILAENV.
- HSWORK refers to the workspace preferred by DHSEQR, as
- calculated below. HSWORK is computed assuming ILO=1 and IHI=N,
- the worst case.)
-*/
-
- minwrk = 1;
- if ((*info == 0 && (*lwork >= 1 || lquery))) {
- maxwrk = ((*n) << (1)) + *n * ilaenv_(&c__1, "DGEHRD", " ", n, &c__1,
- n, &c__0, (ftnlen)6, (ftnlen)1);
- if ((! wantvl && ! wantvr)) {
-/* Computing MAX */
- i__1 = 1, i__2 = *n * 3;
- minwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = ilaenv_(&c__8, "DHSEQR", "EN", n, &c__1, n, &c_n1, (ftnlen)
- 6, (ftnlen)2);
- maxb = max(i__1,2);
-/*
- Computing MIN
- Computing MAX
-*/
- i__3 = 2, i__4 = ilaenv_(&c__4, "DHSEQR", "EN", n, &c__1, n, &
- c_n1, (ftnlen)6, (ftnlen)2);
- i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
- k = min(i__1,i__2);
-/* Computing MAX */
- i__1 = k * (k + 2), i__2 = (*n) << (1);
- hswork = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *n +
- hswork;
- maxwrk = max(i__1,i__2);
- } else {
-/* Computing MAX */
- i__1 = 1, i__2 = (*n) << (2);
- minwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = ((*n) << (1)) + (*n - 1) * ilaenv_(&c__1,
- "DORGHR", " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen)1);
- maxwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = ilaenv_(&c__8, "DHSEQR", "SV", n, &c__1, n, &c_n1, (ftnlen)
- 6, (ftnlen)2);
- maxb = max(i__1,2);
-/*
- Computing MIN
- Computing MAX
-*/
- i__3 = 2, i__4 = ilaenv_(&c__4, "DHSEQR", "SV", n, &c__1, n, &
- c_n1, (ftnlen)6, (ftnlen)2);
- i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
- k = min(i__1,i__2);
-/* Computing MAX */
- i__1 = k * (k + 2), i__2 = (*n) << (1);
- hswork = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *n +
- hswork;
- maxwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = (*n) << (2);
- maxwrk = max(i__1,i__2);
- }
- work[1] = (doublereal) maxwrk;
- }
- if ((*lwork < minwrk && ! lquery)) {
- *info = -13;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGEEV ", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
-
-/* Get machine constants */
-
- eps = PRECISION;
- smlnum = SAFEMINIMUM;
- bignum = 1. / smlnum;
- dlabad_(&smlnum, &bignum);
- smlnum = sqrt(smlnum) / eps;
- bignum = 1. / smlnum;
-
-/* Scale A if max element outside range [SMLNUM,BIGNUM] */
-
- anrm = dlange_("M", n, n, &a[a_offset], lda, dum);
- scalea = FALSE_;
- if ((anrm > 0. && anrm < smlnum)) {
- scalea = TRUE_;
- cscale = smlnum;
- } else if (anrm > bignum) {
- scalea = TRUE_;
- cscale = bignum;
- }
- if (scalea) {
- dlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
- ierr);
- }
-
-/*
- Balance the matrix
- (Workspace: need N)
-*/
-
- ibal = 1;
- dgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr);
-
-/*
- Reduce to upper Hessenberg form
- (Workspace: need 3*N, prefer 2*N+N*NB)
-*/
-
- itau = ibal + *n;
- iwrk = itau + *n;
- i__1 = *lwork - iwrk + 1;
- dgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
- &ierr);
-
- if (wantvl) {
-
-/*
- Want left eigenvectors
- Copy Householder vectors to VL
-*/
-
- *(unsigned char *)side = 'L';
-        dlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl);
-
-/*
- Generate orthogonal matrix in VL
- (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB)
-*/
-
- i__1 = *lwork - iwrk + 1;
- dorghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk],
- &i__1, &ierr);
-
-/*
- Perform QR iteration, accumulating Schur vectors in VL
- (Workspace: need N+1, prefer N+HSWORK (see comments) )
-*/
-
- iwrk = itau;
- i__1 = *lwork - iwrk + 1;
- dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
- vl[vl_offset], ldvl, &work[iwrk], &i__1, info);
-
- if (wantvr) {
-
-/*
- Want left and right eigenvectors
- Copy Schur vectors to VR
-*/
-
- *(unsigned char *)side = 'B';
- dlacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
- }
-
- } else if (wantvr) {
-
-/*
- Want right eigenvectors
- Copy Householder vectors to VR
-*/
-
- *(unsigned char *)side = 'R';
-        dlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr);
-
-/*
- Generate orthogonal matrix in VR
- (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB)
-*/
-
- i__1 = *lwork - iwrk + 1;
- dorghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk],
- &i__1, &ierr);
-
-/*
- Perform QR iteration, accumulating Schur vectors in VR
- (Workspace: need N+1, prefer N+HSWORK (see comments) )
-*/
-
- iwrk = itau;
- i__1 = *lwork - iwrk + 1;
- dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
- vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
-
- } else {
-
-/*
- Compute eigenvalues only
- (Workspace: need N+1, prefer N+HSWORK (see comments) )
-*/
-
- iwrk = itau;
- i__1 = *lwork - iwrk + 1;
- dhseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
- vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
- }
-
-/* If INFO > 0 from DHSEQR, then quit */
-
- if (*info > 0) {
- goto L50;
- }
-
- if (wantvl || wantvr) {
-
-/*
- Compute left and/or right eigenvectors
- (Workspace: need 4*N)
-*/
-
- dtrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
- &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &ierr);
- }
-
- if (wantvl) {
-
-/*
- Undo balancing of left eigenvectors
- (Workspace: need N)
-*/
-
- dgebak_("B", "L", n, &ilo, &ihi, &work[ibal], n, &vl[vl_offset], ldvl,
- &ierr);
-
-/* Normalize left eigenvectors and make largest component real */
-
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if (wi[i__] == 0.) {
- scl = 1. / dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
- dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
- } else if (wi[i__] > 0.) {
- d__1 = dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
- d__2 = dnrm2_(n, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
- scl = 1. / dlapy2_(&d__1, &d__2);
- dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
- dscal_(n, &scl, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
- i__2 = *n;
- for (k = 1; k <= i__2; ++k) {
-/* Computing 2nd power */
- d__1 = vl[k + i__ * vl_dim1];
-/* Computing 2nd power */
- d__2 = vl[k + (i__ + 1) * vl_dim1];
- work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2;
-/* L10: */
- }
- k = idamax_(n, &work[iwrk], &c__1);
- dlartg_(&vl[k + i__ * vl_dim1], &vl[k + (i__ + 1) * vl_dim1],
- &cs, &sn, &r__);
- drot_(n, &vl[i__ * vl_dim1 + 1], &c__1, &vl[(i__ + 1) *
- vl_dim1 + 1], &c__1, &cs, &sn);
- vl[k + (i__ + 1) * vl_dim1] = 0.;
- }
-/* L20: */
- }
- }
-
- if (wantvr) {
-
-/*
- Undo balancing of right eigenvectors
- (Workspace: need N)
-*/
-
- dgebak_("B", "R", n, &ilo, &ihi, &work[ibal], n, &vr[vr_offset], ldvr,
- &ierr);
-
-/* Normalize right eigenvectors and make largest component real */
-
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if (wi[i__] == 0.) {
- scl = 1. / dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
- dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
- } else if (wi[i__] > 0.) {
- d__1 = dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
- d__2 = dnrm2_(n, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
- scl = 1. / dlapy2_(&d__1, &d__2);
- dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
- dscal_(n, &scl, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
- i__2 = *n;
- for (k = 1; k <= i__2; ++k) {
-/* Computing 2nd power */
- d__1 = vr[k + i__ * vr_dim1];
-/* Computing 2nd power */
- d__2 = vr[k + (i__ + 1) * vr_dim1];
- work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2;
-/* L30: */
- }
- k = idamax_(n, &work[iwrk], &c__1);
- dlartg_(&vr[k + i__ * vr_dim1], &vr[k + (i__ + 1) * vr_dim1],
- &cs, &sn, &r__);
- drot_(n, &vr[i__ * vr_dim1 + 1], &c__1, &vr[(i__ + 1) *
- vr_dim1 + 1], &c__1, &cs, &sn);
- vr[k + (i__ + 1) * vr_dim1] = 0.;
- }
-/* L40: */
- }
- }
-
-/* Undo scaling if necessary */
-
-L50:
- if (scalea) {
- i__1 = *n - *info;
-/* Computing MAX */
- i__3 = *n - *info;
- i__2 = max(i__3,1);
- dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info +
- 1], &i__2, &ierr);
- i__1 = *n - *info;
-/* Computing MAX */
- i__3 = *n - *info;
- i__2 = max(i__3,1);
- dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info +
- 1], &i__2, &ierr);
- if (*info > 0) {
- i__1 = ilo - 1;
- dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1],
- n, &ierr);
- i__1 = ilo - 1;
- dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1],
- n, &ierr);
- }
- }
-
- work[1] = (doublereal) maxwrk;
- return 0;
-
-/* End of DGEEV */
-
-} /* dgeev_ */
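-
-/*
-    Editor's sketch, not part of the translated LAPACK source: a minimal
-    driver for dgeev_ showing the LWORK = -1 workspace query described
-    above.  The helper name demo_dgeev and the 3x3 test matrix are
-    illustrative assumptions; the f2c integer/doublereal types and the
-    column-major layout are taken from this file.
-*/
-#if 0
-#include <stdlib.h>
-
-static int demo_dgeev(void)
-{
-    /* Column-major 3x3 symmetric matrix, so every eigenvalue is real. */
-    doublereal a[9] = { 4., 1., 0.,   1., 3., 1.,   0., 1., 2. };
-    doublereal wr[3], wi[3], vl[9], vr[9], wkopt, *work;
-    integer n = 3, lda = 3, ldvl = 3, ldvr = 3, lwork, info;
-
-    /* Query call: LWORK = -1 makes dgeev_ store the optimal size in
-       WORK(1) (wkopt here) and return without computing anything. */
-    lwork = -1;
-    dgeev_("V", "V", &n, a, &lda, wr, wi, vl, &ldvl, vr, &ldvr,
-           &wkopt, &lwork, &info);
-
-    /* Real call with the recommended workspace. */
-    lwork = (integer) wkopt;
-    work = (doublereal *) malloc(lwork * sizeof(doublereal));
-    dgeev_("V", "V", &n, a, &lda, wr, wi, vl, &ldvl, vr, &ldvr,
-           work, &lwork, &info);
-    free(work);
-    return (int) info;          /* 0 on success */
-}
-#endif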
-
-/* Subroutine */ int dgehd2_(integer *n, integer *ilo, integer *ihi,
- doublereal *a, integer *lda, doublereal *tau, doublereal *work,
- integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__;
- static doublereal aii;
- extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- doublereal *), dlarfg_(integer *, doublereal *,
- doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DGEHD2 reduces a real general matrix A to upper Hessenberg form H by
-  an orthogonal similarity transformation:  Q' * A * Q = H.
-
- Arguments
- =========
-
- N (input) INTEGER
- The order of the matrix A. N >= 0.
-
- ILO (input) INTEGER
- IHI (input) INTEGER
- It is assumed that A is already upper triangular in rows
- and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
- set by a previous call to DGEBAL; otherwise they should be
- set to 1 and N respectively. See Further Details.
- 1 <= ILO <= IHI <= max(1,N).
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the n by n general matrix to be reduced.
- On exit, the upper triangle and the first subdiagonal of A
- are overwritten with the upper Hessenberg matrix H, and the
- elements below the first subdiagonal, with the array TAU,
- represent the orthogonal matrix Q as a product of elementary
- reflectors. See Further Details.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- TAU (output) DOUBLE PRECISION array, dimension (N-1)
- The scalar factors of the elementary reflectors (see Further
- Details).
-
- WORK (workspace) DOUBLE PRECISION array, dimension (N)
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- The matrix Q is represented as a product of (ihi-ilo) elementary
- reflectors
-
- Q = H(ilo) H(ilo+1) . . . H(ihi-1).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
- exit in A(i+2:ihi,i), and tau in TAU(i).
-
- The contents of A are illustrated by the following example, with
- n = 7, ilo = 2 and ihi = 6:
-
- on entry, on exit,
-
- ( a a a a a a a ) ( a a h h h h a )
- ( a a a a a a ) ( a h h h h a )
- ( a a a a a a ) ( h h h h h h )
- ( a a a a a a ) ( v2 h h h h h )
- ( a a a a a a ) ( v2 v3 h h h h )
- ( a a a a a a ) ( v2 v3 v4 h h h )
- ( a ) ( a )
-
- where a denotes an element of the original matrix A, h denotes a
- modified element of the upper Hessenberg matrix H, and vi denotes an
- element of the vector defining H(i).
-
- =====================================================================
-
-
- Test the input parameters
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- if (*n < 0) {
- *info = -1;
- } else if (*ilo < 1 || *ilo > max(1,*n)) {
- *info = -2;
- } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
- *info = -3;
- } else if (*lda < max(1,*n)) {
- *info = -5;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGEHD2", &i__1);
- return 0;
- }
-
- i__1 = *ihi - 1;
- for (i__ = *ilo; i__ <= i__1; ++i__) {
-
-/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */
-
- i__2 = *ihi - i__;
-/* Computing MIN */
- i__3 = i__ + 2;
- dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ *
- a_dim1], &c__1, &tau[i__]);
- aii = a[i__ + 1 + i__ * a_dim1];
- a[i__ + 1 + i__ * a_dim1] = 1.;
-
-/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */
-
- i__2 = *ihi - i__;
- dlarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
- i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]);
-
-/* Apply H(i) to A(i+1:ihi,i+1:n) from the left */
-
- i__2 = *ihi - i__;
- i__3 = *n - i__;
- dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
- i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]);
-
- a[i__ + 1 + i__ * a_dim1] = aii;
-/* L10: */
- }
-
- return 0;
-
-/* End of DGEHD2 */
-
-} /* dgehd2_ */
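-
-/*
-    Editor's sketch (assumed, not in the LAPACK source): how the A/TAU
-    output of dgehd2_ encodes Q.  apply_q_to_vector overwrites x with
-    Q*x by applying H(i) = I - tau*v*v' for i = ihi-1, ..., ilo, with
-    v(i+1) = 1 and v(i+2:ihi) read from A(i+2:ihi,i) as described in
-    Further Details above.  ilo/ihi keep their Fortran 1-based meaning;
-    the arrays use 0-based C indexing.
-*/
-#if 0
-static void apply_q_to_vector(integer n, integer ilo, integer ihi,
-                              const doublereal *a, integer lda,
-                              const doublereal *tau, doublereal *x)
-{
-    integer i, k;
-
-    /* Q*x = H(ilo) * ... * H(ihi-1) * x: apply the reflectors from
-       right to left.  i is the 1-based reflector index. */
-    for (i = ihi - 1; i >= ilo; --i) {
-        doublereal dot = x[i];                    /* v(i+1) = 1 */
-        for (k = i + 1; k < ihi; ++k)             /* v(i+2:ihi) from A */
-            dot += a[k + (i - 1) * lda] * x[k];
-        dot *= tau[i - 1];
-        x[i] -= dot;
-        for (k = i + 1; k < ihi; ++k)
-            x[k] -= a[k + (i - 1) * lda] * dot;
-    }
-    (void) n;  /* rows outside ilo..ihi are untouched: v is zero there */
-}
-#endif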
-
-/* Subroutine */ int dgehrd_(integer *n, integer *ilo, integer *ihi,
- doublereal *a, integer *lda, doublereal *tau, doublereal *work,
- integer *lwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
-
- /* Local variables */
- static integer i__;
- static doublereal t[4160] /* was [65][64] */;
- static integer ib;
- static doublereal ei;
- static integer nb, nh, nx, iws;
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *);
- static integer nbmin, iinfo;
- extern /* Subroutine */ int dgehd2_(integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *),
- dlarfb_(char *, char *, char *, char *, integer *, integer *,
- integer *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, integer *), dlahrd_(integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- doublereal *, integer *), xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static integer ldwork, lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DGEHRD reduces a real general matrix A to upper Hessenberg form H by
-  an orthogonal similarity transformation:  Q' * A * Q = H.
-
- Arguments
- =========
-
- N (input) INTEGER
- The order of the matrix A. N >= 0.
-
- ILO (input) INTEGER
- IHI (input) INTEGER
- It is assumed that A is already upper triangular in rows
- and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
- set by a previous call to DGEBAL; otherwise they should be
- set to 1 and N respectively. See Further Details.
- 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the N-by-N general matrix to be reduced.
- On exit, the upper triangle and the first subdiagonal of A
- are overwritten with the upper Hessenberg matrix H, and the
- elements below the first subdiagonal, with the array TAU,
- represent the orthogonal matrix Q as a product of elementary
- reflectors. See Further Details.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- TAU (output) DOUBLE PRECISION array, dimension (N-1)
- The scalar factors of the elementary reflectors (see Further
- Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to
- zero.
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The length of the array WORK. LWORK >= max(1,N).
- For optimum performance LWORK >= N*NB, where NB is the
- optimal blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- The matrix Q is represented as a product of (ihi-ilo) elementary
- reflectors
-
- Q = H(ilo) H(ilo+1) . . . H(ihi-1).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
- exit in A(i+2:ihi,i), and tau in TAU(i).
-
- The contents of A are illustrated by the following example, with
- n = 7, ilo = 2 and ihi = 6:
-
- on entry, on exit,
-
- ( a a a a a a a ) ( a a h h h h a )
- ( a a a a a a ) ( a h h h h a )
- ( a a a a a a ) ( h h h h h h )
- ( a a a a a a ) ( v2 h h h h h )
- ( a a a a a a ) ( v2 v3 h h h h )
- ( a a a a a a ) ( v2 v3 v4 h h h )
- ( a ) ( a )
-
- where a denotes an element of the original matrix A, h denotes a
- modified element of the upper Hessenberg matrix H, and vi denotes an
- element of the vector defining H(i).
-
- =====================================================================
-
-
- Test the input parameters
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
-/* Computing MIN */
- i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, (
- ftnlen)6, (ftnlen)1);
- nb = min(i__1,i__2);
- lwkopt = *n * nb;
- work[1] = (doublereal) lwkopt;
- lquery = *lwork == -1;
- if (*n < 0) {
- *info = -1;
- } else if (*ilo < 1 || *ilo > max(1,*n)) {
- *info = -2;
- } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
- *info = -3;
- } else if (*lda < max(1,*n)) {
- *info = -5;
- } else if ((*lwork < max(1,*n) && ! lquery)) {
- *info = -8;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGEHRD", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */
-
- i__1 = *ilo - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- tau[i__] = 0.;
-/* L10: */
- }
- i__1 = *n - 1;
- for (i__ = max(1,*ihi); i__ <= i__1; ++i__) {
- tau[i__] = 0.;
-/* L20: */
- }
-
-/* Quick return if possible */
-
- nh = *ihi - *ilo + 1;
- if (nh <= 1) {
- work[1] = 1.;
- return 0;
- }
-
-/*
- Determine the block size.
-
- Computing MIN
-*/
- i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, (
- ftnlen)6, (ftnlen)1);
- nb = min(i__1,i__2);
- nbmin = 2;
- iws = 1;
- if ((nb > 1 && nb < nh)) {
-
-/*
- Determine when to cross over from blocked to unblocked code
- (last block is always handled by unblocked code).
-
- Computing MAX
-*/
- i__1 = nb, i__2 = ilaenv_(&c__3, "DGEHRD", " ", n, ilo, ihi, &c_n1, (
- ftnlen)6, (ftnlen)1);
- nx = max(i__1,i__2);
- if (nx < nh) {
-
-/* Determine if workspace is large enough for blocked code. */
-
- iws = *n * nb;
- if (*lwork < iws) {
-
-/*
- Not enough workspace to use optimal NB: determine the
- minimum value of NB, and reduce NB or force use of
- unblocked code.
-
- Computing MAX
-*/
- i__1 = 2, i__2 = ilaenv_(&c__2, "DGEHRD", " ", n, ilo, ihi, &
- c_n1, (ftnlen)6, (ftnlen)1);
- nbmin = max(i__1,i__2);
- if (*lwork >= *n * nbmin) {
- nb = *lwork / *n;
- } else {
- nb = 1;
- }
- }
- }
- }
- ldwork = *n;
-
- if (nb < nbmin || nb >= nh) {
-
-/* Use unblocked code below */
-
- i__ = *ilo;
-
- } else {
-
-/* Use blocked code */
-
- i__1 = *ihi - 1 - nx;
- i__2 = nb;
- for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
-/* Computing MIN */
- i__3 = nb, i__4 = *ihi - i__;
- ib = min(i__3,i__4);
-
-/*
- Reduce columns i:i+ib-1 to Hessenberg form, returning the
- matrices V and T of the block reflector H = I - V*T*V'
- which performs the reduction, and also the matrix Y = A*V*T
-*/
-
- dlahrd_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, &
- c__65, &work[1], &ldwork);
-
-/*
- Apply the block reflector H to A(1:ihi,i+ib:ihi) from the
- right, computing A := A - Y * V'. V(i+ib,ib-1) must be set
- to 1.
-*/
-
- ei = a[i__ + ib + (i__ + ib - 1) * a_dim1];
- a[i__ + ib + (i__ + ib - 1) * a_dim1] = 1.;
- i__3 = *ihi - i__ - ib + 1;
- dgemm_("No transpose", "Transpose", ihi, &i__3, &ib, &c_b151, &
- work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, &
- c_b15, &a[(i__ + ib) * a_dim1 + 1], lda);
- a[i__ + ib + (i__ + ib - 1) * a_dim1] = ei;
-
-/*
- Apply the block reflector H to A(i+1:ihi,i+ib:n) from the
- left
-*/
-
- i__3 = *ihi - i__;
- i__4 = *n - i__ - ib + 1;
- dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
- i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &c__65, &a[
- i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &ldwork);
-/* L30: */
- }
- }
-
-/* Use unblocked code to reduce the rest of the matrix */
-
- dgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
- work[1] = (doublereal) iws;
-
- return 0;
-
-/* End of DGEHRD */
-
-} /* dgehrd_ */
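-
-/*
-    Hedged sketch, not part of the source: the dgebal_/dgehrd_/dorghr_
-    pipeline that dgeev_ uses above, reducing A to Hessenberg form and
-    materializing the orthogonal Q.  hessenberg_with_q is a made-up
-    name, and the caller is assumed to supply lwork >= n*nb workspace
-    (64*n is typically ample given the block sizes used in this file).
-*/
-#if 0
-static int hessenberg_with_q(integer n, doublereal *a, integer lda,
-                             doublereal *q, integer ldq, doublereal *tau,
-                             doublereal *scale, doublereal *work,
-                             integer lwork)
-{
-    integer ilo, ihi, info;
-
-    /* Balance first so ILO/IHI mark the non-trivial block. */
-    dgebal_("B", &n, a, &lda, &ilo, &ihi, scale, &info);
-    if (info != 0) return (int) info;
-    dgehrd_(&n, &ilo, &ihi, a, &lda, tau, work, &lwork, &info);
-    if (info != 0) return (int) info;
-    /* Copy the reflectors from the lower triangle of A into Q and
-       expand them into an explicit orthogonal matrix. */
-    dlacpy_("L", &n, &n, a, &lda, q, &ldq);
-    dorghr_(&n, &ilo, &ihi, q, &ldq, tau, work, &lwork, &info);
-    return (int) info;
-}
-#endif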
-
-/* Subroutine */ int dgelq2_(integer *m, integer *n, doublereal *a, integer *
- lda, doublereal *tau, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, k;
- static doublereal aii;
- extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- doublereal *), dlarfg_(integer *, doublereal *,
- doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DGELQ2 computes an LQ factorization of a real m by n matrix A:
- A = L * Q.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the m by n matrix A.
- On exit, the elements on and below the diagonal of the array
- contain the m by min(m,n) lower trapezoidal matrix L (L is
- lower triangular if m <= n); the elements above the diagonal,
- with the array TAU, represent the orthogonal matrix Q as a
- product of elementary reflectors (see Further Details).
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- TAU (output) DOUBLE PRECISION array, dimension (min(M,N))
- The scalar factors of the elementary reflectors (see Further
- Details).
-
- WORK (workspace) DOUBLE PRECISION array, dimension (M)
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- Further Details
- ===============
-
- The matrix Q is represented as a product of elementary reflectors
-
- Q = H(k) . . . H(2) H(1), where k = min(m,n).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n),
- and tau in TAU(i).
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- if (*m < 0) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*m)) {
- *info = -4;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGELQ2", &i__1);
- return 0;
- }
-
- k = min(*m,*n);
-
- i__1 = k;
- for (i__ = 1; i__ <= i__1; ++i__) {
-
-/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */
-
- i__2 = *n - i__ + 1;
-/* Computing MIN */
- i__3 = i__ + 1;
- dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * a_dim1]
- , lda, &tau[i__]);
- if (i__ < *m) {
-
-/* Apply H(i) to A(i+1:m,i:n) from the right */
-
- aii = a[i__ + i__ * a_dim1];
- a[i__ + i__ * a_dim1] = 1.;
- i__2 = *m - i__;
- i__3 = *n - i__ + 1;
- dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[
- i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
- a[i__ + i__ * a_dim1] = aii;
- }
-/* L10: */
- }
- return 0;
-
-/* End of DGELQ2 */
-
-} /* dgelq2_ */
-
-/* Subroutine */ int dgelqf_(integer *m, integer *n, doublereal *a, integer *
- lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
-
- /* Local variables */
- static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
- extern /* Subroutine */ int dgelq2_(integer *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *), dlarfb_(char *,
- char *, char *, char *, integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
- *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static integer ldwork, lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DGELQF computes an LQ factorization of a real M-by-N matrix A:
- A = L * Q.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the M-by-N matrix A.
- On exit, the elements on and below the diagonal of the array
- contain the m-by-min(m,n) lower trapezoidal matrix L (L is
- lower triangular if m <= n); the elements above the diagonal,
- with the array TAU, represent the orthogonal matrix Q as a
- product of elementary reflectors (see Further Details).
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- TAU (output) DOUBLE PRECISION array, dimension (min(M,N))
- The scalar factors of the elementary reflectors (see Further
- Details).
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= max(1,M).
- For optimum performance LWORK >= M*NB, where NB is the
- optimal blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- Further Details
- ===============
-
- The matrix Q is represented as a product of elementary reflectors
-
- Q = H(k) . . . H(2) H(1), where k = min(m,n).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n),
- and tau in TAU(i).
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- nb = ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
- 1);
- lwkopt = *m * nb;
- work[1] = (doublereal) lwkopt;
- lquery = *lwork == -1;
- if (*m < 0) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*m)) {
- *info = -4;
- } else if ((*lwork < max(1,*m) && ! lquery)) {
- *info = -7;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGELQF", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- k = min(*m,*n);
- if (k == 0) {
- work[1] = 1.;
- return 0;
- }
-
- nbmin = 2;
- nx = 0;
- iws = *m;
- if ((nb > 1 && nb < k)) {
-
-/*
- Determine when to cross over from blocked to unblocked code.
-
- Computing MAX
-*/
- i__1 = 0, i__2 = ilaenv_(&c__3, "DGELQF", " ", m, n, &c_n1, &c_n1, (
- ftnlen)6, (ftnlen)1);
- nx = max(i__1,i__2);
- if (nx < k) {
-
-/* Determine if workspace is large enough for blocked code. */
-
- ldwork = *m;
- iws = ldwork * nb;
- if (*lwork < iws) {
-
-/*
- Not enough workspace to use optimal NB: reduce NB and
- determine the minimum value of NB.
-*/
-
- nb = *lwork / ldwork;
-/* Computing MAX */
- i__1 = 2, i__2 = ilaenv_(&c__2, "DGELQF", " ", m, n, &c_n1, &
- c_n1, (ftnlen)6, (ftnlen)1);
- nbmin = max(i__1,i__2);
- }
- }
- }
-
- if (((nb >= nbmin && nb < k) && nx < k)) {
-
-/* Use blocked code initially */
-
- i__1 = k - nx;
- i__2 = nb;
- for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
-/* Computing MIN */
- i__3 = k - i__ + 1;
- ib = min(i__3,nb);
-
-/*
- Compute the LQ factorization of the current block
- A(i:i+ib-1,i:n)
-*/
-
- i__3 = *n - i__ + 1;
- dgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
- 1], &iinfo);
- if (i__ + ib <= *m) {
-
-/*
- Form the triangular factor of the block reflector
- H = H(i) H(i+1) . . . H(i+ib-1)
-*/
-
- i__3 = *n - i__ + 1;
- dlarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ *
- a_dim1], lda, &tau[i__], &work[1], &ldwork);
-
-/* Apply H to A(i+ib:m,i:n) from the right */
-
- i__3 = *m - i__ - ib + 1;
- i__4 = *n - i__ + 1;
- dlarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3,
- &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
- ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
- 1], &ldwork);
- }
-/* L10: */
- }
- } else {
- i__ = 1;
- }
-
-/* Use unblocked code to factor the last or only block. */
-
- if (i__ <= k) {
- i__2 = *m - i__ + 1;
- i__1 = *n - i__ + 1;
- dgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
- , &iinfo);
- }
-
- work[1] = (doublereal) iws;
- return 0;
-
-/* End of DGELQF */
-
-} /* dgelqf_ */
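-
-/*
-    Illustrative sketch only (helper names are made up): one way to use
-    dgelqf_ for an m <= n matrix, copying the m-by-m lower-triangular L
-    out of A with dlacpy_ so the reflectors in the strict upper triangle
-    stay available for later dormlq_ calls.  Assumes lwork >= m*nb and
-    that the caller has zeroed the strict upper triangle of l.
-*/
-#if 0
-static int demo_dgelqf(integer m, integer n, doublereal *a, integer lda,
-                       doublereal *tau, doublereal *l, integer ldl,
-                       doublereal *work, integer lwork)
-{
-    integer info;
-
-    dgelqf_(&m, &n, a, &lda, tau, work, &lwork, &info);
-    if (info == 0) {
-        /* L lives in the lower triangle of the leading m-by-m block. */
-        dlacpy_("L", &m, &m, a, &lda, l, &ldl);
-    }
-    return (int) info;
-}
-#endif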
-
-/* Subroutine */ int dgelsd_(integer *m, integer *n, integer *nrhs,
- doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *
- s, doublereal *rcond, integer *rank, doublereal *work, integer *lwork,
- integer *iwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
-
- /* Builtin functions */
- double log(doublereal);
-
- /* Local variables */
- static integer ie, il, mm;
- static doublereal eps, anrm, bnrm;
- static integer itau, nlvl, iascl, ibscl;
- static doublereal sfmin;
- static integer minmn, maxmn, itaup, itauq, mnthr, nwork;
- extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebrd_(
- integer *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *, integer *,
- integer *);
- extern doublereal dlamch_(char *), dlange_(char *, integer *,
- integer *, doublereal *, integer *, doublereal *);
- extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *, integer *),
- dlalsd_(char *, integer *, integer *, integer *, doublereal *,
- doublereal *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, integer *, integer *), dlascl_(char *,
- integer *, integer *, doublereal *, doublereal *, integer *,
- integer *, doublereal *, integer *, integer *), dgeqrf_(
- integer *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, integer *, integer *), dlacpy_(char *, integer *,
- integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *,
- doublereal *, doublereal *, integer *), xerbla_(char *,
- integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static doublereal bignum;
- extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *,
- integer *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, integer *);
- static integer wlalsd;
- extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *,
- integer *, doublereal *, integer *, doublereal *, doublereal *,
- integer *, doublereal *, integer *, integer *);
- static integer ldwork;
- extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
- integer *, doublereal *, integer *, doublereal *, doublereal *,
- integer *, doublereal *, integer *, integer *);
- static integer minwrk, maxwrk;
- static doublereal smlnum;
- static logical lquery;
- static integer smlsiz;
-
-
-/*
- -- LAPACK driver routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- DGELSD computes the minimum-norm solution to a real linear least
- squares problem:
- minimize 2-norm(| b - A*x |)
- using the singular value decomposition (SVD) of A. A is an M-by-N
- matrix which may be rank-deficient.
-
- Several right hand side vectors b and solution vectors x can be
- handled in a single call; they are stored as the columns of the
- M-by-NRHS right hand side matrix B and the N-by-NRHS solution
- matrix X.
-
- The problem is solved in three steps:
- (1) Reduce the coefficient matrix A to bidiagonal form with
- Householder transformations, reducing the original problem
- into a "bidiagonal least squares problem" (BLS)
- (2) Solve the BLS using a divide and conquer approach.
-   (3) Apply back all the Householder transformations to solve
- the original least squares problem.
-
- The effective rank of A is determined by treating as zero those
- singular values which are less than RCOND times the largest singular
- value.
-
- The divide and conquer algorithm makes very mild assumptions about
- floating point arithmetic. It will work on machines with a guard
- digit in add/subtract, or on those binary machines without guard
- digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
- Cray-2. It could conceivably fail on hexadecimal or decimal machines
- without guard digits, but we know of none.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows of A. M >= 0.
-
- N (input) INTEGER
- The number of columns of A. N >= 0.
-
- NRHS (input) INTEGER
- The number of right hand sides, i.e., the number of columns
- of the matrices B and X. NRHS >= 0.
-
- A (input) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the M-by-N matrix A.
- On exit, A has been destroyed.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
- On entry, the M-by-NRHS right hand side matrix B.
- On exit, B is overwritten by the N-by-NRHS solution
- matrix X. If m >= n and RANK = n, the residual
- sum-of-squares for the solution in the i-th column is given
- by the sum of squares of elements n+1:m in that column.
-
- LDB (input) INTEGER
- The leading dimension of the array B. LDB >= max(1,max(M,N)).
-
- S (output) DOUBLE PRECISION array, dimension (min(M,N))
- The singular values of A in decreasing order.
- The condition number of A in the 2-norm = S(1)/S(min(m,n)).
-
- RCOND (input) DOUBLE PRECISION
- RCOND is used to determine the effective rank of A.
- Singular values S(i) <= RCOND*S(1) are treated as zero.
- If RCOND < 0, machine precision is used instead.
-
- RANK (output) INTEGER
- The effective rank of A, i.e., the number of singular values
- which are greater than RCOND*S(1).
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK must be at least 1.
- The exact minimum amount of workspace needed depends on M,
- N and NRHS. As long as LWORK is at least
- 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2,
- if M is greater than or equal to N or
- 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2,
- if M is less than N, the code will execute correctly.
- SMLSIZ is returned by ILAENV and is equal to the maximum
- size of the subproblems at the bottom of the computation
- tree (usually about 25), and
- NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 )
- For good performance, LWORK should generally be larger.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- IWORK (workspace) INTEGER array, dimension (LIWORK)
- LIWORK >= 3 * MINMN * NLVL + 11 * MINMN,
- where MINMN = MIN( M,N ).
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
- > 0: the algorithm for computing the SVD failed to converge;
- if INFO = i, i off-diagonal elements of an intermediate
- bidiagonal form did not converge to zero.
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Ren-Cang Li, Computer Science Division, University of
- California at Berkeley, USA
- Osni Marques, LBNL/NERSC, USA
-
- =====================================================================
-
-
- Test the input arguments.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
- --s;
- --work;
- --iwork;
-
- /* Function Body */
- *info = 0;
- minmn = min(*m,*n);
- maxmn = max(*m,*n);
- mnthr = ilaenv_(&c__6, "DGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)6, (
- ftnlen)1);
- lquery = *lwork == -1;
- if (*m < 0) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*nrhs < 0) {
- *info = -3;
- } else if (*lda < max(1,*m)) {
- *info = -5;
- } else if (*ldb < max(1,maxmn)) {
- *info = -7;
- }
-
- smlsiz = ilaenv_(&c__9, "DGELSD", " ", &c__0, &c__0, &c__0, &c__0, (
- ftnlen)6, (ftnlen)1);
-
-/*
- Compute workspace.
- (Note: Comments in the code beginning "Workspace:" describe the
- minimal amount of workspace needed at that point in the code,
- as well as the preferred amount for good performance.
- NB refers to the optimal block size for the immediately
- following subroutine, as returned by ILAENV.)
-*/
-
- minwrk = 1;
- minmn = max(1,minmn);
-/* Computing MAX */
- i__1 = (integer) (log((doublereal) minmn / (doublereal) (smlsiz + 1)) /
- log(2.)) + 1;
- nlvl = max(i__1,0);
-
- if (*info == 0) {
- maxwrk = 0;
- mm = *m;
- if ((*m >= *n && *m >= mnthr)) {
-
-/* Path 1a - overdetermined, with many more rows than columns. */
-
- mm = *n;
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m,
- n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
- maxwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "DORMQR", "LT",
- m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2);
- maxwrk = max(i__1,i__2);
- }
- if (*m >= *n) {
-
-/*
- Path 1 - overdetermined or exactly determined.
-
- Computing MAX
-*/
- i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1, "DGEBRD"
- , " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
- maxwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "DORMBR",
- "QLT", &mm, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
- maxwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1, "DORMBR",
- "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
- maxwrk = max(i__1,i__2);
-/* Computing 2nd power */
- i__1 = smlsiz + 1;
- wlalsd = *n * 9 + ((*n) << (1)) * smlsiz + ((*n) << (3)) * nlvl +
- *n * *nrhs + i__1 * i__1;
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *n * 3 + wlalsd;
- maxwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,i__2),
- i__2 = *n * 3 + wlalsd;
- minwrk = max(i__1,i__2);
- }
- if (*n > *m) {
-/* Computing 2nd power */
- i__1 = smlsiz + 1;
- wlalsd = *m * 9 + ((*m) << (1)) * smlsiz + ((*m) << (3)) * nlvl +
- *m * *nrhs + i__1 * i__1;
- if (*n >= mnthr) {
-
-/*
- Path 2a - underdetermined, with many more columns
- than rows.
-*/
-
- maxwrk = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1,
- &c_n1, (ftnlen)6, (ftnlen)1);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + ((*m) << (1))
- * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (
- ftnlen)6, (ftnlen)1);
- maxwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + *nrhs *
- ilaenv_(&c__1, "DORMBR", "QLT", m, nrhs, m, &c_n1, (
- ftnlen)6, (ftnlen)3);
- maxwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + (*m - 1) *
- ilaenv_(&c__1, "DORMBR", "PLN", m, nrhs, m, &c_n1, (
- ftnlen)6, (ftnlen)3);
- maxwrk = max(i__1,i__2);
- if (*nrhs > 1) {
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
- maxwrk = max(i__1,i__2);
- } else {
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *m * *m + ((*m) << (1));
- maxwrk = max(i__1,i__2);
- }
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "DORMLQ",
- "LT", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)2);
- maxwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + wlalsd;
- maxwrk = max(i__1,i__2);
- } else {
-
-/* Path 2 - remaining underdetermined cases. */
-
- maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "DGEBRD", " ", m,
- n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1, "DORMBR"
- , "QLT", m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
- maxwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR",
- "PLN", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)3);
- maxwrk = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = *m * 3 + wlalsd;
- maxwrk = max(i__1,i__2);
- }
-/* Computing MAX */
- i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *m, i__1 = max(i__1,i__2),
- i__2 = *m * 3 + wlalsd;
- minwrk = max(i__1,i__2);
- }
- minwrk = min(minwrk,maxwrk);
- work[1] = (doublereal) maxwrk;
- if ((*lwork < minwrk && ! lquery)) {
- *info = -12;
- }
- }
-
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGELSD", &i__1);
- return 0;
- } else if (lquery) {
- goto L10;
- }
-
-/* Quick return if possible. */
-
- if (*m == 0 || *n == 0) {
- *rank = 0;
- return 0;
- }
-
-/* Get machine parameters. */
-
- eps = PRECISION;
- sfmin = SAFEMINIMUM;
- smlnum = sfmin / eps;
- bignum = 1. / smlnum;
- dlabad_(&smlnum, &bignum);
-
-/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
-
- anrm = dlange_("M", m, n, &a[a_offset], lda, &work[1]);
- iascl = 0;
- if ((anrm > 0. && anrm < smlnum)) {
-
-/* Scale matrix norm up to SMLNUM. */
-
- dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
- info);
- iascl = 1;
- } else if (anrm > bignum) {
-
-/* Scale matrix norm down to BIGNUM. */
-
- dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
- info);
- iascl = 2;
- } else if (anrm == 0.) {
-
-/* Matrix all zero. Return zero solution. */
-
- i__1 = max(*m,*n);
- dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb);
- dlaset_("F", &minmn, &c__1, &c_b29, &c_b29, &s[1], &c__1);
- *rank = 0;
- goto L10;
- }
-
-/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
-
- bnrm = dlange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
- ibscl = 0;
- if ((bnrm > 0. && bnrm < smlnum)) {
-
-/* Scale matrix norm up to SMLNUM. */
-
- dlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
- info);
- ibscl = 1;
- } else if (bnrm > bignum) {
-
-/* Scale matrix norm down to BIGNUM. */
-
- dlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
- info);
- ibscl = 2;
- }
-
-/* If M < N make sure certain entries of B are zero. */
-
- if (*m < *n) {
- i__1 = *n - *m;
- dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1], ldb);
- }
-
-/* Overdetermined case. */
-
- if (*m >= *n) {
-
-/* Path 1 - overdetermined or exactly determined. */
-
- mm = *m;
- if (*m >= mnthr) {
-
-/* Path 1a - overdetermined, with many more rows than columns. */
-
- mm = *n;
- itau = 1;
- nwork = itau + *n;
-
-/*
- Compute A=Q*R.
- (Workspace: need 2*N, prefer N+N*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
- info);
-
-/*
- Multiply B by transpose(Q).
- (Workspace: need N+NRHS, prefer N+NRHS*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
- b_offset], ldb, &work[nwork], &i__1, info);
-
-/* Zero out below R. */
-
- if (*n > 1) {
- i__1 = *n - 1;
- i__2 = *n - 1;
- dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &a[a_dim1 + 2],
- lda);
- }
- }
-
- ie = 1;
- itauq = ie + *n;
- itaup = itauq + *n;
- nwork = itaup + *n;
-
-/*
- Bidiagonalize R in A.
- (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
- work[itaup], &work[nwork], &i__1, info);
-
-/*
- Multiply B by transpose of left bidiagonalizing vectors of R.
- (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
- &b[b_offset], ldb, &work[nwork], &i__1, info);
-
-/* Solve the bidiagonal least squares problem. */
-
- dlalsd_("U", &smlsiz, n, nrhs, &s[1], &work[ie], &b[b_offset], ldb,
- rcond, rank, &work[nwork], &iwork[1], info);
- if (*info != 0) {
- goto L10;
- }
-
-/* Multiply B by right bidiagonalizing vectors of R. */
-
- i__1 = *lwork - nwork + 1;
- dormbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
- b[b_offset], ldb, &work[nwork], &i__1, info);
-
- } else /* if(complicated condition) */ {
-/* Computing MAX */
- i__1 = *m, i__2 = ((*m) << (1)) - 4, i__1 = max(i__1,i__2), i__1 =
- max(i__1,*nrhs), i__2 = *n - *m * 3;
- if ((*n >= mnthr && *lwork >= ((*m) << (2)) + *m * *m + max(i__1,i__2)
- )) {
-
-/*
- Path 2a - underdetermined, with many more columns than rows
- and sufficient workspace for an efficient algorithm.
-*/
-
- ldwork = *m;
-/*
- Computing MAX
- Computing MAX
-*/
- i__3 = *m, i__4 = ((*m) << (1)) - 4, i__3 = max(i__3,i__4), i__3 =
- max(i__3,*nrhs), i__4 = *n - *m * 3;
- i__1 = ((*m) << (2)) + *m * *lda + max(i__3,i__4), i__2 = *m * *
- lda + *m + *m * *nrhs;
- if (*lwork >= max(i__1,i__2)) {
- ldwork = *lda;
- }
- itau = 1;
- nwork = *m + 1;
-
-/*
- Compute A=L*Q.
- (Workspace: need 2*M, prefer M+M*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
- info);
- il = nwork;
-
-/* Copy L to WORK(IL), zeroing out above its diagonal. */
-
- dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
- i__1 = *m - 1;
- i__2 = *m - 1;
- dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &work[il + ldwork], &
- ldwork);
- ie = il + ldwork * *m;
- itauq = ie + *m;
- itaup = itauq + *m;
- nwork = itaup + *m;
-
-/*
- Bidiagonalize L in WORK(IL).
- (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq],
- &work[itaup], &work[nwork], &i__1, info);
-
-/*
- Multiply B by transpose of left bidiagonalizing vectors of L.
- (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[
- itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
-
-/* Solve the bidiagonal least squares problem. */
-
- dlalsd_("U", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
- ldb, rcond, rank, &work[nwork], &iwork[1], info);
- if (*info != 0) {
- goto L10;
- }
-
-/* Multiply B by right bidiagonalizing vectors of L. */
-
- i__1 = *lwork - nwork + 1;
- dormbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
- itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
-
-/* Zero out below first M rows of B. */
-
- i__1 = *n - *m;
- dlaset_("F", &i__1, nrhs, &c_b29, &c_b29, &b[*m + 1 + b_dim1],
- ldb);
- nwork = itau + *m;
-
-/*
- Multiply transpose(Q) by B.
- (Workspace: need M+NRHS, prefer M+NRHS*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
- b_offset], ldb, &work[nwork], &i__1, info);
-
- } else {
-
-/* Path 2 - remaining underdetermined cases. */
-
- ie = 1;
- itauq = ie + *m;
- itaup = itauq + *m;
- nwork = itaup + *m;
-
-/*
- Bidiagonalize A.
- (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
- work[itaup], &work[nwork], &i__1, info);
-
-/*
- Multiply B by transpose of left bidiagonalizing vectors.
- (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq]
- , &b[b_offset], ldb, &work[nwork], &i__1, info);
-
-/* Solve the bidiagonal least squares problem. */
-
- dlalsd_("L", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
- ldb, rcond, rank, &work[nwork], &iwork[1], info);
- if (*info != 0) {
- goto L10;
- }
-
-/* Multiply B by right bidiagonalizing vectors of A. */
-
- i__1 = *lwork - nwork + 1;
- dormbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
- , &b[b_offset], ldb, &work[nwork], &i__1, info);
-
- }
- }
-
-/* Undo scaling. */
-
- if (iascl == 1) {
- dlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
- info);
- dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
- minmn, info);
- } else if (iascl == 2) {
- dlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
- info);
- dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
- minmn, info);
- }
- if (ibscl == 1) {
- dlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
- info);
- } else if (ibscl == 2) {
- dlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
- info);
- }
-
-L10:
- work[1] = (doublereal) maxwrk;
- return 0;
-
-/* End of DGELSD */
-
-} /* dgelsd_ */
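-
-/*
-    Hedged sketch of driving dgelsd_ (illustrative, not in the source):
-    LWORK comes from the usual -1 query, while LIWORK follows the
-    formula quoted above, 3*MINMN*NLVL + 11*MINMN, with SMLSIZ assumed
-    to be 25 ("usually about 25" per the documentation).  demo_dgelsd
-    is a hypothetical helper and assumes m, n >= 1.
-*/
-#if 0
-#include <math.h>
-#include <stdlib.h>
-
-static int demo_dgelsd(integer m, integer n, integer nrhs,
-                       doublereal *a, integer lda,
-                       doublereal *b, integer ldb, doublereal *s)
-{
-    doublereal rcond = -1.;     /* negative: rank cutoff at machine eps */
-    doublereal wkopt, *work;
-    integer rank, info, lwork = -1;
-    integer minmn = m < n ? m : n, smlsiz = 25;
-    integer nlvl, liwork, *iwork;
-
-    /* NLVL = MAX(0, INT(LOG_2(MINMN/(SMLSIZ+1))) + 1), as above. */
-    nlvl = (integer) (log((doublereal) minmn / (doublereal) (smlsiz + 1))
-            / log(2.)) + 1;
-    if (nlvl < 0) nlvl = 0;
-    liwork = 3 * minmn * nlvl + 11 * minmn;
-    iwork = (integer *) malloc(liwork * sizeof(integer));
-
-    dgelsd_(&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank,
-            &wkopt, &lwork, iwork, &info);              /* query */
-    lwork = (integer) wkopt;
-    work = (doublereal *) malloc(lwork * sizeof(doublereal));
-    dgelsd_(&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank,
-            work, &lwork, iwork, &info);                /* solve */
-    free(work);
-    free(iwork);
-    return (int) info;
-}
-#endif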
-
-/* Subroutine */ int dgeqr2_(integer *m, integer *n, doublereal *a, integer *
- lda, doublereal *tau, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, k;
- static doublereal aii;
- extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- doublereal *), dlarfg_(integer *, doublereal *,
- doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DGEQR2 computes a QR factorization of a real m by n matrix A:
- A = Q * R.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the m by n matrix A.
- On exit, the elements on and above the diagonal of the array
- contain the min(m,n) by n upper trapezoidal matrix R (R is
- upper triangular if m >= n); the elements below the diagonal,
- with the array TAU, represent the orthogonal matrix Q as a
- product of elementary reflectors (see Further Details).
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- TAU (output) DOUBLE PRECISION array, dimension (min(M,N))
- The scalar factors of the elementary reflectors (see Further
- Details).
-
- WORK (workspace) DOUBLE PRECISION array, dimension (N)
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- Further Details
- ===============
-
- The matrix Q is represented as a product of elementary reflectors
-
- Q = H(1) H(2) . . . H(k), where k = min(m,n).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
- and tau in TAU(i).
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- if (*m < 0) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*m)) {
- *info = -4;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGEQR2", &i__1);
- return 0;
- }
-
- k = min(*m,*n);
-
- i__1 = k;
- for (i__ = 1; i__ <= i__1; ++i__) {
-
-/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
-
- i__2 = *m - i__ + 1;
-/* Computing MIN */
- i__3 = i__ + 1;
- dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1]
- , &c__1, &tau[i__]);
- if (i__ < *n) {
-
-/* Apply H(i) to A(i:m,i+1:n) from the left */
-
- aii = a[i__ + i__ * a_dim1];
- a[i__ + i__ * a_dim1] = 1.;
- i__2 = *m - i__ + 1;
- i__3 = *n - i__;
- dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tau[
- i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
- a[i__ + i__ * a_dim1] = aii;
- }
-/* L10: */
- }
- return 0;
-
-/* End of DGEQR2 */
-
-} /* dgeqr2_ */
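-
-/*
-    Editor's sketch (assumed, not in the LAPACK source): applying Q' to
-    a length-m vector straight from the A/TAU output of dgeqr2_, using
-    H(i) = I - tau*v*v' with v(i) = 1 and v(i+1:m) read from A(i+1:m,i).
-    Since Q = H(1)...H(k), Q'*b applies H(1) first, and each H(i) is its
-    own transpose.  0-based C indexing throughout; apply_qt_to_vector is
-    a made-up name for what dormqr_ does in general.
-*/
-#if 0
-static void apply_qt_to_vector(integer m, integer n, const doublereal *a,
-                               integer lda, const doublereal *tau,
-                               doublereal *b)
-{
-    integer k = m < n ? m : n, i, j;
-
-    for (i = 0; i < k; ++i) {           /* H(i+1) in 1-based terms */
-        doublereal dot = b[i];          /* v(i) = 1 */
-        for (j = i + 1; j < m; ++j)     /* v(i+1:m) from below diagonal */
-            dot += a[j + i * lda] * b[j];
-        dot *= tau[i];
-        b[i] -= dot;
-        for (j = i + 1; j < m; ++j)
-            b[j] -= a[j + i * lda] * dot;
-    }
-}
-#endif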
-
-/* Subroutine */ int dgeqrf_(integer *m, integer *n, doublereal *a, integer *
- lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
-
- /* Local variables */
- static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
- extern /* Subroutine */ int dgeqr2_(integer *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *), dlarfb_(char *,
- char *, char *, char *, integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
- *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static integer ldwork, lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DGEQRF computes a QR factorization of a real M-by-N matrix A:
- A = Q * R.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the M-by-N matrix A.
- On exit, the elements on and above the diagonal of the array
- contain the min(M,N)-by-N upper trapezoidal matrix R (R is
- upper triangular if m >= n); the elements below the diagonal,
- with the array TAU, represent the orthogonal matrix Q as a
- product of min(m,n) elementary reflectors (see Further
- Details).
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- TAU (output) DOUBLE PRECISION array, dimension (min(M,N))
- The scalar factors of the elementary reflectors (see Further
- Details).
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= max(1,N).
- For optimum performance LWORK >= N*NB, where NB is
- the optimal blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- Further Details
- ===============
-
- The matrix Q is represented as a product of elementary reflectors
-
- Q = H(1) H(2) . . . H(k), where k = min(m,n).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
- and tau in TAU(i).
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- nb = ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
- 1);
- lwkopt = *n * nb;
- work[1] = (doublereal) lwkopt;
- lquery = *lwork == -1;
- if (*m < 0) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*m)) {
- *info = -4;
- } else if ((*lwork < max(1,*n) && ! lquery)) {
- *info = -7;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGEQRF", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- k = min(*m,*n);
- if (k == 0) {
- work[1] = 1.;
- return 0;
- }
-
- nbmin = 2;
- nx = 0;
- iws = *n;
- if ((nb > 1 && nb < k)) {
-
-/*
- Determine when to cross over from blocked to unblocked code.
-
- Computing MAX
-*/
- i__1 = 0, i__2 = ilaenv_(&c__3, "DGEQRF", " ", m, n, &c_n1, &c_n1, (
- ftnlen)6, (ftnlen)1);
- nx = max(i__1,i__2);
- if (nx < k) {
-
-/* Determine if workspace is large enough for blocked code. */
-
- ldwork = *n;
- iws = ldwork * nb;
- if (*lwork < iws) {
-
-/*
- Not enough workspace to use optimal NB: reduce NB and
- determine the minimum value of NB.
-*/
-
- nb = *lwork / ldwork;
-/* Computing MAX */
- i__1 = 2, i__2 = ilaenv_(&c__2, "DGEQRF", " ", m, n, &c_n1, &
- c_n1, (ftnlen)6, (ftnlen)1);
- nbmin = max(i__1,i__2);
- }
- }
- }
-
- if (((nb >= nbmin && nb < k) && nx < k)) {
-
-/* Use blocked code initially */
-
- i__1 = k - nx;
- i__2 = nb;
- for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
-/* Computing MIN */
- i__3 = k - i__ + 1;
- ib = min(i__3,nb);
-
-/*
- Compute the QR factorization of the current block
- A(i:m,i:i+ib-1)
-*/
-
- i__3 = *m - i__ + 1;
- dgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
- 1], &iinfo);
- if (i__ + ib <= *n) {
-
-/*
- Form the triangular factor of the block reflector
- H = H(i) H(i+1) . . . H(i+ib-1)
-*/
-
- i__3 = *m - i__ + 1;
- dlarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ *
- a_dim1], lda, &tau[i__], &work[1], &ldwork);
-
-/* Apply H' to A(i:m,i+ib:n) from the left */
-
- i__3 = *m - i__ + 1;
- i__4 = *n - i__ - ib + 1;
- dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
- i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
- ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &work[ib
- + 1], &ldwork);
- }
-/* L10: */
- }
- } else {
- i__ = 1;
- }
-
-/* Use unblocked code to factor the last or only block. */
-
- if (i__ <= k) {
- i__2 = *m - i__ + 1;
- i__1 = *n - i__ + 1;
- dgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
- , &iinfo);
- }
-
- work[1] = (doublereal) iws;
- return 0;
-
-/* End of DGEQRF */
-
-} /* dgeqrf_ */
-
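The LWORK = -1 workspace-query convention documented for DGEQRF above is how callers size WORK before the real call. Below is a minimal caller sketch, assuming f2c-style typedefs (integer, doublereal) matching this build's f2c header, a column-major input matrix, and arbitrary illustrative data; it is not part of the diff itself:

    #include <stdio.h>
    #include <stdlib.h>

    typedef int integer;        /* assumed to match this build's f2c.h */
    typedef double doublereal;

    extern int dgeqrf_(integer *m, integer *n, doublereal *a, integer *lda,
                       doublereal *tau, doublereal *work, integer *lwork,
                       integer *info);

    int main(void)
    {
        integer m = 4, n = 3, lda = 4, lwork = -1, info = 0;
        doublereal a[12], tau[3], query, *work;
        int i;

        for (i = 0; i < 12; ++i)        /* arbitrary column-major 4x3 data */
            a[i] = (doublereal)(i + 1);

        /* Query call: LWORK = -1, so only the optimal workspace size is
           computed and returned in WORK(1); A is not factored. */
        dgeqrf_(&m, &n, a, &lda, tau, &query, &lwork, &info);
        if (info != 0)
            return 1;

        lwork = (integer)query;
        work = (doublereal *)malloc((size_t)lwork * sizeof(doublereal));

        /* Real call: R overwrites the upper triangle of A, and the
           elementary reflectors H(i) = I - tau*v*v' are stored below the
           diagonal and in TAU, as the Further Details above describe. */
        dgeqrf_(&m, &n, a, &lda, tau, work, &lwork, &info);
        printf("info = %d, optimal lwork = %d\n", (int)info, (int)lwork);

        free(work);
        return (int)info;
    }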
-/* Subroutine */ int dgesdd_(char *jobz, integer *m, integer *n, doublereal *
- a, integer *lda, doublereal *s, doublereal *u, integer *ldu,
- doublereal *vt, integer *ldvt, doublereal *work, integer *lwork,
- integer *iwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
- i__2, i__3;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static integer i__, ie, il, ir, iu, blk;
- static doublereal dum[1], eps;
- static integer ivt, iscl;
- static doublereal anrm;
- static integer idum[1], ierr, itau;
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *);
- extern logical lsame_(char *, char *);
- static integer chunk, minmn, wrkbl, itaup, itauq, mnthr;
- static logical wntqa;
- static integer nwork;
- static logical wntqn, wntqo, wntqs;
- extern /* Subroutine */ int dbdsdc_(char *, char *, integer *, doublereal
- *, doublereal *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, integer *, integer *), dgebrd_(integer *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, integer *, integer *);
- extern doublereal dlamch_(char *), dlange_(char *, integer *,
- integer *, doublereal *, integer *, doublereal *);
- static integer bdspac;
- extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *, integer *),
- dlascl_(char *, integer *, integer *, doublereal *, doublereal *,
- integer *, integer *, doublereal *, integer *, integer *),
- dgeqrf_(integer *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, integer *, integer *), dlacpy_(char *,
- integer *, integer *, doublereal *, integer *, doublereal *,
- integer *), dlaset_(char *, integer *, integer *,
- doublereal *, doublereal *, doublereal *, integer *),
- xerbla_(char *, integer *), dorgbr_(char *, integer *,
- integer *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, integer *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static doublereal bignum;
- extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *,
- integer *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, integer *), dorglq_(integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- integer *), dorgqr_(integer *, integer *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *, integer *);
- static integer ldwrkl, ldwrkr, minwrk, ldwrku, maxwrk, ldwkvt;
- static doublereal smlnum;
- static logical wntqas, lquery;
-
-
-/*
- -- LAPACK driver routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- DGESDD computes the singular value decomposition (SVD) of a real
- M-by-N matrix A, optionally computing the left and right singular
- vectors. If singular vectors are desired, it uses a
- divide-and-conquer algorithm.
-
- The SVD is written
-
- A = U * SIGMA * transpose(V)
-
- where SIGMA is an M-by-N matrix which is zero except for its
- min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and
- V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA
- are the singular values of A; they are real and non-negative, and
- are returned in descending order. The first min(m,n) columns of
- U and V are the left and right singular vectors of A.
-
- Note that the routine returns VT = V**T, not V.
-
- The divide and conquer algorithm makes very mild assumptions about
- floating point arithmetic. It will work on machines with a guard
- digit in add/subtract, or on those binary machines without guard
- digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
- Cray-2. It could conceivably fail on hexadecimal or decimal machines
- without guard digits, but we know of none.
-
- Arguments
- =========
-
- JOBZ (input) CHARACTER*1
- Specifies options for computing all or part of the matrix U:
- = 'A': all M columns of U and all N rows of V**T are
- returned in the arrays U and VT;
- = 'S': the first min(M,N) columns of U and the first
- min(M,N) rows of V**T are returned in the arrays U
- and VT;
- = 'O': If M >= N, the first N columns of U are overwritten
- on the array A and all rows of V**T are returned in
- the array VT;
- otherwise, all columns of U are returned in the
- array U and the first M rows of V**T are overwritten
- in the array VT;
- = 'N': no columns of U or rows of V**T are computed.
-
- M (input) INTEGER
- The number of rows of the input matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns of the input matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the M-by-N matrix A.
- On exit,
- if JOBZ = 'O', A is overwritten with the first N columns
- of U (the left singular vectors, stored
- columnwise) if M >= N;
- A is overwritten with the first M rows
- of V**T (the right singular vectors, stored
- rowwise) otherwise.
- if JOBZ .ne. 'O', the contents of A are destroyed.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- S (output) DOUBLE PRECISION array, dimension (min(M,N))
- The singular values of A, sorted so that S(i) >= S(i+1).
-
- U (output) DOUBLE PRECISION array, dimension (LDU,UCOL)
- UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N;
- UCOL = min(M,N) if JOBZ = 'S'.
- If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M
- orthogonal matrix U;
- if JOBZ = 'S', U contains the first min(M,N) columns of U
- (the left singular vectors, stored columnwise);
- if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced.
-
- LDU (input) INTEGER
- The leading dimension of the array U. LDU >= 1; if
- JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M.
-
- VT (output) DOUBLE PRECISION array, dimension (LDVT,N)
- If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the
- N-by-N orthogonal matrix V**T;
- if JOBZ = 'S', VT contains the first min(M,N) rows of
- V**T (the right singular vectors, stored rowwise);
- if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced.
-
- LDVT (input) INTEGER
- The leading dimension of the array VT. LDVT >= 1; if
- JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N;
- if JOBZ = 'S', LDVT >= min(M,N).
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK;
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= 1.
- If JOBZ = 'N',
- LWORK >= 3*min(M,N) + max(max(M,N),6*min(M,N)).
- If JOBZ = 'O',
- LWORK >= 3*min(M,N)*min(M,N) +
- max(max(M,N),5*min(M,N)*min(M,N)+4*min(M,N)).
- If JOBZ = 'S' or 'A'
- LWORK >= 3*min(M,N)*min(M,N) +
- max(max(M,N),4*min(M,N)*min(M,N)+4*min(M,N)).
- For good performance, LWORK should generally be larger.
-          If LWORK = -1 but other input arguments are legal, WORK(1)
-          returns the optimal LWORK.
-
- IWORK (workspace) INTEGER array, dimension (8*min(M,N))
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
- > 0: DBDSDC did not converge, updating process failed.
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --s;
- u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
- u -= u_offset;
- vt_dim1 = *ldvt;
- vt_offset = 1 + vt_dim1 * 1;
- vt -= vt_offset;
- --work;
- --iwork;
-
- /* Function Body */
- *info = 0;
- minmn = min(*m,*n);
- mnthr = (integer) (minmn * 11. / 6.);
- wntqa = lsame_(jobz, "A");
- wntqs = lsame_(jobz, "S");
- wntqas = wntqa || wntqs;
- wntqo = lsame_(jobz, "O");
- wntqn = lsame_(jobz, "N");
- minwrk = 1;
- maxwrk = 1;
- lquery = *lwork == -1;
-
- if (! (wntqa || wntqs || wntqo || wntqn)) {
- *info = -1;
- } else if (*m < 0) {
- *info = -2;
- } else if (*n < 0) {
- *info = -3;
- } else if (*lda < max(1,*m)) {
- *info = -5;
- } else if (*ldu < 1 || (wntqas && *ldu < *m) || ((wntqo && *m < *n) && *
- ldu < *m)) {
- *info = -8;
- } else if (*ldvt < 1 || (wntqa && *ldvt < *n) || (wntqs && *ldvt < minmn)
- || ((wntqo && *m >= *n) && *ldvt < *n)) {
- *info = -10;
- }
-
-/*
- Compute workspace
- (Note: Comments in the code beginning "Workspace:" describe the
- minimal amount of workspace needed at that point in the code,
- as well as the preferred amount for good performance.
- NB refers to the optimal block size for the immediately
- following subroutine, as returned by ILAENV.)
-*/
-
- if (((*info == 0 && *m > 0) && *n > 0)) {
- if (*m >= *n) {
-
-/* Compute space needed for DBDSDC */
-
- if (wntqn) {
- bdspac = *n * 7;
- } else {
- bdspac = *n * 3 * *n + ((*n) << (2));
- }
- if (*m >= mnthr) {
- if (wntqn) {
-
-/* Path 1 (M much larger than N, JOBZ='N') */
-
- wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
- c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
- c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
- 6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *n;
- maxwrk = max(i__1,i__2);
- minwrk = bdspac + *n;
- } else if (wntqo) {
-
-/* Path 2 (M much larger than N, JOBZ='O') */
-
- wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
- c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR",
- " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
- c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
- 6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
- , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
- , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *n * 3;
- wrkbl = max(i__1,i__2);
- maxwrk = wrkbl + ((*n) << (1)) * *n;
- minwrk = bdspac + ((*n) << (1)) * *n + *n * 3;
- } else if (wntqs) {
-
-/* Path 3 (M much larger than N, JOBZ='S') */
-
- wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
- c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR",
- " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
- c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
- 6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
- , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
- , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *n * 3;
- wrkbl = max(i__1,i__2);
- maxwrk = wrkbl + *n * *n;
- minwrk = bdspac + *n * *n + *n * 3;
- } else if (wntqa) {
-
-/* Path 4 (M much larger than N, JOBZ='A') */
-
- wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
- c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "DORGQR",
- " ", m, m, n, &c_n1, (ftnlen)6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
- c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
- 6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
- , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
- , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *n * 3;
- wrkbl = max(i__1,i__2);
- maxwrk = wrkbl + *n * *n;
- minwrk = bdspac + *n * *n + *n * 3;
- }
- } else {
-
-/* Path 5 (M at least N, but not much larger) */
-
- wrkbl = *n * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m,
- n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
- if (wntqn) {
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *n * 3;
- maxwrk = max(i__1,i__2);
- minwrk = *n * 3 + max(*m,bdspac);
- } else if (wntqo) {
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
- , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
- , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *n * 3;
- wrkbl = max(i__1,i__2);
- maxwrk = wrkbl + *m * *n;
-/* Computing MAX */
- i__1 = *m, i__2 = *n * *n + bdspac;
- minwrk = *n * 3 + max(i__1,i__2);
- } else if (wntqs) {
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
- , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
- , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *n * 3;
- maxwrk = max(i__1,i__2);
- minwrk = *n * 3 + max(*m,bdspac);
- } else if (wntqa) {
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
- , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = maxwrk, i__2 = bdspac + *n * 3;
- maxwrk = max(i__1,i__2);
- minwrk = *n * 3 + max(*m,bdspac);
- }
- }
- } else {
-
-/* Compute space needed for DBDSDC */
-
- if (wntqn) {
- bdspac = *m * 7;
- } else {
- bdspac = *m * 3 * *m + ((*m) << (2));
- }
- if (*n >= mnthr) {
- if (wntqn) {
-
-/* Path 1t (N much larger than M, JOBZ='N') */
-
- wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
- c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
- c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
- 6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *m;
- maxwrk = max(i__1,i__2);
- minwrk = bdspac + *m;
- } else if (wntqo) {
-
-/* Path 2t (N much larger than M, JOBZ='O') */
-
- wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
- c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ",
- " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
- c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
- 6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *m * 3;
- wrkbl = max(i__1,i__2);
- maxwrk = wrkbl + ((*m) << (1)) * *m;
- minwrk = bdspac + ((*m) << (1)) * *m + *m * 3;
- } else if (wntqs) {
-
-/* Path 3t (N much larger than M, JOBZ='S') */
-
- wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
- c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ",
- " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
- c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
- 6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *m * 3;
- wrkbl = max(i__1,i__2);
- maxwrk = wrkbl + *m * *m;
- minwrk = bdspac + *m * *m + *m * 3;
- } else if (wntqa) {
-
-/* Path 4t (N much larger than M, JOBZ='A') */
-
- wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
- c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "DORGLQ",
- " ", n, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
- c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
- 6, (ftnlen)1);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *m * 3;
- wrkbl = max(i__1,i__2);
- maxwrk = wrkbl + *m * *m;
- minwrk = bdspac + *m * *m + *m * 3;
- }
- } else {
-
-/* Path 5t (N greater than M, but not much larger) */
-
- wrkbl = *m * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m,
- n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
- if (wntqn) {
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *m * 3;
- maxwrk = max(i__1,i__2);
- minwrk = *m * 3 + max(*n,bdspac);
- } else if (wntqo) {
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *m * 3;
- wrkbl = max(i__1,i__2);
- maxwrk = wrkbl + *m * *n;
-/* Computing MAX */
- i__1 = *n, i__2 = *m * *m + bdspac;
- minwrk = *m * 3 + max(i__1,i__2);
- } else if (wntqs) {
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *m * 3;
- maxwrk = max(i__1,i__2);
- minwrk = *m * 3 + max(*n,bdspac);
- } else if (wntqa) {
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
- , "PRT", n, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
- wrkbl = max(i__1,i__2);
-/* Computing MAX */
- i__1 = wrkbl, i__2 = bdspac + *m * 3;
- maxwrk = max(i__1,i__2);
- minwrk = *m * 3 + max(*n,bdspac);
- }
- }
- }
- work[1] = (doublereal) maxwrk;
- }
-
- if ((*lwork < minwrk && ! lquery)) {
- *info = -12;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGESDD", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0) {
- if (*lwork >= 1) {
- work[1] = 1.;
- }
- return 0;
- }
-
-/* Get machine constants */
-
- eps = PRECISION;
- smlnum = sqrt(SAFEMINIMUM) / eps;
- bignum = 1. / smlnum;
-
-/* Scale A if max element outside range [SMLNUM,BIGNUM] */
-
- anrm = dlange_("M", m, n, &a[a_offset], lda, dum);
- iscl = 0;
- if ((anrm > 0. && anrm < smlnum)) {
- iscl = 1;
- dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
- ierr);
- } else if (anrm > bignum) {
- iscl = 1;
- dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
- ierr);
- }
-
- if (*m >= *n) {
-
-/*
- A has at least as many rows as columns. If A has sufficiently
- more rows than columns, first reduce using the QR
- decomposition (if sufficient workspace available)
-*/
-
- if (*m >= mnthr) {
-
- if (wntqn) {
-
-/*
- Path 1 (M much larger than N, JOBZ='N')
- No singular vectors to be computed
-*/
-
- itau = 1;
- nwork = itau + *n;
-
-/*
- Compute A=Q*R
- (Workspace: need 2*N, prefer N+N*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
- i__1, &ierr);
-
-/* Zero out below R */
-
- i__1 = *n - 1;
- i__2 = *n - 1;
- dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &a[a_dim1 + 2],
- lda);
- ie = 1;
- itauq = ie + *n;
- itaup = itauq + *n;
- nwork = itaup + *n;
-
-/*
- Bidiagonalize R in A
- (Workspace: need 4*N, prefer 3*N+2*N*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
- itauq], &work[itaup], &work[nwork], &i__1, &ierr);
- nwork = ie + *n;
-
-/*
- Perform bidiagonal SVD, computing singular values only
- (Workspace: need N+BDSPAC)
-*/
-
- dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
- dum, idum, &work[nwork], &iwork[1], info);
-
- } else if (wntqo) {
-
-/*
- Path 2 (M much larger than N, JOBZ = 'O')
- N left singular vectors to be overwritten on A and
- N right singular vectors to be computed in VT
-*/
-
- ir = 1;
-
-/* WORK(IR) is LDWRKR by N */
-
- if (*lwork >= *lda * *n + *n * *n + *n * 3 + bdspac) {
- ldwrkr = *lda;
- } else {
- ldwrkr = (*lwork - *n * *n - *n * 3 - bdspac) / *n;
- }
- itau = ir + ldwrkr * *n;
- nwork = itau + *n;
-
-/*
- Compute A=Q*R
- (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
- i__1, &ierr);
-
-/* Copy R to WORK(IR), zeroing out below it */
-
- dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
- i__1 = *n - 1;
- i__2 = *n - 1;
- dlaset_("L", &i__1, &i__2, &c_b29, &c_b29, &work[ir + 1], &
- ldwrkr);
-
-/*
- Generate Q in A
- (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
- &i__1, &ierr);
- ie = itau;
- itauq = ie + *n;
- itaup = itauq + *n;
- nwork = itaup + *n;
-
-/*
- Bidiagonalize R in VT, copying result to WORK(IR)
- (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
- itauq], &work[itaup], &work[nwork], &i__1, &ierr);
-
-/* WORK(IU) is N by N */
-
- iu = nwork;
- nwork = iu + *n * *n;
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
- of bidiagonal matrix in WORK(IU) and computing right
- singular vectors of bidiagonal matrix in VT
- (Workspace: need N+N*N+BDSPAC)
-*/
-
- dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
- vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
- info);
-
-/*
- Overwrite WORK(IU) by left singular vectors of R
- and VT by right singular vectors of R
- (Workspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
- itauq], &work[iu], n, &work[nwork], &i__1, &ierr);
- i__1 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
- itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
- ierr);
-
-/*
- Multiply Q in A by left singular vectors of R in
- WORK(IU), storing result in WORK(IR) and copying to A
- (Workspace: need 2*N*N, prefer N*N+M*N)
-*/
-
- i__1 = *m;
- i__2 = ldwrkr;
- for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
- i__2) {
-/* Computing MIN */
- i__3 = *m - i__ + 1;
- chunk = min(i__3,ldwrkr);
- dgemm_("N", "N", &chunk, n, n, &c_b15, &a[i__ + a_dim1],
- lda, &work[iu], n, &c_b29, &work[ir], &ldwrkr);
- dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
- a_dim1], lda);
-/* L10: */
- }
-
- } else if (wntqs) {
-
-/*
- Path 3 (M much larger than N, JOBZ='S')
- N left singular vectors to be computed in U and
- N right singular vectors to be computed in VT
-*/
-
- ir = 1;
-
-/* WORK(IR) is N by N */
-
- ldwrkr = *n;
- itau = ir + ldwrkr * *n;
- nwork = itau + *n;
-
-/*
- Compute A=Q*R
- (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
- i__2, &ierr);
-
-/* Copy R to WORK(IR), zeroing out below it */
-
- dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
- i__2 = *n - 1;
- i__1 = *n - 1;
- dlaset_("L", &i__2, &i__1, &c_b29, &c_b29, &work[ir + 1], &
- ldwrkr);
-
-/*
- Generate Q in A
- (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
- &i__2, &ierr);
- ie = itau;
- itauq = ie + *n;
- itaup = itauq + *n;
- nwork = itaup + *n;
-
-/*
- Bidiagonalize R in WORK(IR)
- (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
- itauq], &work[itaup], &work[nwork], &i__2, &ierr);
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
-              of bidiagonal matrix in U and computing right singular
- vectors of bidiagonal matrix in VT
- (Workspace: need N+BDSPAC)
-*/
-
- dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
- vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
- info);
-
-/*
- Overwrite U by left singular vectors of R and VT
- by right singular vectors of R
- (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
- itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
-
- i__2 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
- itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
- ierr);
-
-/*
- Multiply Q in A by left singular vectors of R in
- WORK(IR), storing result in U
- (Workspace: need N*N)
-*/
-
- dlacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr);
- dgemm_("N", "N", m, n, n, &c_b15, &a[a_offset], lda, &work[ir]
- , &ldwrkr, &c_b29, &u[u_offset], ldu);
-
- } else if (wntqa) {
-
-/*
- Path 4 (M much larger than N, JOBZ='A')
- M left singular vectors to be computed in U and
- N right singular vectors to be computed in VT
-*/
-
- iu = 1;
-
-/* WORK(IU) is N by N */
-
- ldwrku = *n;
- itau = iu + ldwrku * *n;
- nwork = itau + *n;
-
-/*
- Compute A=Q*R, copying result to U
- (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
- i__2, &ierr);
- dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
-
-/*
- Generate Q in U
- (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
-*/
- i__2 = *lwork - nwork + 1;
- dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork],
- &i__2, &ierr);
-
-/* Produce R in A, zeroing out other entries */
-
- i__2 = *n - 1;
- i__1 = *n - 1;
- dlaset_("L", &i__2, &i__1, &c_b29, &c_b29, &a[a_dim1 + 2],
- lda);
- ie = itau;
- itauq = ie + *n;
- itaup = itauq + *n;
- nwork = itaup + *n;
-
-/*
- Bidiagonalize R in A
- (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
- itauq], &work[itaup], &work[nwork], &i__2, &ierr);
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
- of bidiagonal matrix in WORK(IU) and computing right
- singular vectors of bidiagonal matrix in VT
- (Workspace: need N+N*N+BDSPAC)
-*/
-
- dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
- vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
- info);
-
-/*
- Overwrite WORK(IU) by left singular vectors of R and VT
- by right singular vectors of R
- (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[
- itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
- ierr);
- i__2 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
- itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
- ierr);
-
-/*
- Multiply Q in U by left singular vectors of R in
- WORK(IU), storing result in A
- (Workspace: need N*N)
-*/
-
- dgemm_("N", "N", m, n, n, &c_b15, &u[u_offset], ldu, &work[iu]
- , &ldwrku, &c_b29, &a[a_offset], lda);
-
-/* Copy left singular vectors of A from A to U */
-
- dlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
-
- }
-
- } else {
-
-/*
- M .LT. MNTHR
-
- Path 5 (M at least N, but not much larger)
- Reduce to bidiagonal form without QR decomposition
-*/
-
- ie = 1;
- itauq = ie + *n;
- itaup = itauq + *n;
- nwork = itaup + *n;
-
-/*
- Bidiagonalize A
- (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
- work[itaup], &work[nwork], &i__2, &ierr);
- if (wntqn) {
-
-/*
- Perform bidiagonal SVD, only computing singular values
- (Workspace: need N+BDSPAC)
-*/
-
- dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
- dum, idum, &work[nwork], &iwork[1], info);
- } else if (wntqo) {
- iu = nwork;
- if (*lwork >= *m * *n + *n * 3 + bdspac) {
-
-/* WORK( IU ) is M by N */
-
- ldwrku = *m;
- nwork = iu + ldwrku * *n;
- dlaset_("F", m, n, &c_b29, &c_b29, &work[iu], &ldwrku);
- } else {
-
-/* WORK( IU ) is N by N */
-
- ldwrku = *n;
- nwork = iu + ldwrku * *n;
-
-/* WORK(IR) is LDWRKR by N */
-
- ir = nwork;
- ldwrkr = (*lwork - *n * *n - *n * 3) / *n;
- }
- nwork = iu + ldwrku * *n;
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
- of bidiagonal matrix in WORK(IU) and computing right
- singular vectors of bidiagonal matrix in VT
- (Workspace: need N+N*N+BDSPAC)
-*/
-
- dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], &ldwrku, &
- vt[vt_offset], ldvt, dum, idum, &work[nwork], &iwork[
- 1], info);
-
-/*
- Overwrite VT by right singular vectors of A
- (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
- itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
- ierr);
-
- if (*lwork >= *m * *n + *n * 3 + bdspac) {
-
-/*
- Overwrite WORK(IU) by left singular vectors of A
- (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
- itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
- ierr);
-
-/* Copy left singular vectors of A from WORK(IU) to A */
-
- dlacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda);
- } else {
-
-/*
- Generate Q in A
- (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
- work[nwork], &i__2, &ierr);
-
-/*
- Multiply Q in A by left singular vectors of
- bidiagonal matrix in WORK(IU), storing result in
- WORK(IR) and copying to A
- (Workspace: need 2*N*N, prefer N*N+M*N)
-*/
-
- i__2 = *m;
- i__1 = ldwrkr;
- for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
- i__1) {
-/* Computing MIN */
- i__3 = *m - i__ + 1;
- chunk = min(i__3,ldwrkr);
- dgemm_("N", "N", &chunk, n, n, &c_b15, &a[i__ +
- a_dim1], lda, &work[iu], &ldwrku, &c_b29, &
- work[ir], &ldwrkr);
- dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
- a_dim1], lda);
-/* L20: */
- }
- }
-
- } else if (wntqs) {
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
- of bidiagonal matrix in U and computing right singular
- vectors of bidiagonal matrix in VT
- (Workspace: need N+BDSPAC)
-*/
-
- dlaset_("F", m, n, &c_b29, &c_b29, &u[u_offset], ldu);
- dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
- vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
- info);
-
-/*
- Overwrite U by left singular vectors of A and VT
- by right singular vectors of A
- (Workspace: need 3*N, prefer 2*N+N*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
- itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
- i__1 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
- itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
- ierr);
- } else if (wntqa) {
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
- of bidiagonal matrix in U and computing right singular
- vectors of bidiagonal matrix in VT
- (Workspace: need N+BDSPAC)
-*/
-
- dlaset_("F", m, m, &c_b29, &c_b29, &u[u_offset], ldu);
- dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
- vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
- info);
-
-/* Set the right corner of U to identity matrix */
-
- i__1 = *m - *n;
- i__2 = *m - *n;
- dlaset_("F", &i__1, &i__2, &c_b29, &c_b15, &u[*n + 1 + (*n +
- 1) * u_dim1], ldu);
-
-/*
- Overwrite U by left singular vectors of A and VT
- by right singular vectors of A
- (Workspace: need N*N+2*N+M, prefer N*N+2*N+M*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
- itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
- i__1 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
- itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
- ierr);
- }
-
- }
-
- } else {
-
-/*
- A has more columns than rows. If A has sufficiently more
- columns than rows, first reduce using the LQ decomposition (if
- sufficient workspace available)
-*/
-
- if (*n >= mnthr) {
-
- if (wntqn) {
-
-/*
- Path 1t (N much larger than M, JOBZ='N')
- No singular vectors to be computed
-*/
-
- itau = 1;
- nwork = itau + *m;
-
-/*
- Compute A=L*Q
- (Workspace: need 2*M, prefer M+M*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
- i__1, &ierr);
-
-/* Zero out above L */
-
- i__1 = *m - 1;
- i__2 = *m - 1;
- dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &a[((a_dim1) << (1)
- ) + 1], lda);
- ie = 1;
- itauq = ie + *m;
- itaup = itauq + *m;
- nwork = itaup + *m;
-
-/*
- Bidiagonalize L in A
- (Workspace: need 4*M, prefer 3*M+2*M*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
- itauq], &work[itaup], &work[nwork], &i__1, &ierr);
- nwork = ie + *m;
-
-/*
- Perform bidiagonal SVD, computing singular values only
- (Workspace: need M+BDSPAC)
-*/
-
- dbdsdc_("U", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
- dum, idum, &work[nwork], &iwork[1], info);
-
- } else if (wntqo) {
-
-/*
- Path 2t (N much larger than M, JOBZ='O')
- M right singular vectors to be overwritten on A and
- M left singular vectors to be computed in U
-*/
-
- ivt = 1;
-
-/* IVT is M by M */
-
- il = ivt + *m * *m;
- if (*lwork >= *m * *n + *m * *m + *m * 3 + bdspac) {
-
-/* WORK(IL) is M by N */
-
- ldwrkl = *m;
- chunk = *n;
- } else {
- ldwrkl = *m;
- chunk = (*lwork - *m * *m) / *m;
- }
- itau = il + ldwrkl * *m;
- nwork = itau + *m;
-
-/*
- Compute A=L*Q
- (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
- i__1, &ierr);
-
-/* Copy L to WORK(IL), zeroing out above it */
-
- dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
- i__1 = *m - 1;
- i__2 = *m - 1;
- dlaset_("U", &i__1, &i__2, &c_b29, &c_b29, &work[il + ldwrkl],
- &ldwrkl);
-
-/*
- Generate Q in A
- (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
- &i__1, &ierr);
- ie = itau;
- itauq = ie + *m;
- itaup = itauq + *m;
- nwork = itaup + *m;
-
-/*
- Bidiagonalize L in WORK(IL)
- (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
- itauq], &work[itaup], &work[nwork], &i__1, &ierr);
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
- of bidiagonal matrix in U, and computing right singular
- vectors of bidiagonal matrix in WORK(IVT)
- (Workspace: need M+M*M+BDSPAC)
-*/
-
- dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
- work[ivt], m, dum, idum, &work[nwork], &iwork[1],
- info);
-
-/*
- Overwrite U by left singular vectors of L and WORK(IVT)
- by right singular vectors of L
- (Workspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
- itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
- i__1 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
- itaup], &work[ivt], m, &work[nwork], &i__1, &ierr);
-
-/*
- Multiply right singular vectors of L in WORK(IVT) by Q
- in A, storing result in WORK(IL) and copying to A
- (Workspace: need 2*M*M, prefer M*M+M*N)
-*/
-
- i__1 = *n;
- i__2 = chunk;
- for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
- i__2) {
-/* Computing MIN */
- i__3 = *n - i__ + 1;
- blk = min(i__3,chunk);
- dgemm_("N", "N", m, &blk, m, &c_b15, &work[ivt], m, &a[
- i__ * a_dim1 + 1], lda, &c_b29, &work[il], &
- ldwrkl);
- dlacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1
- + 1], lda);
-/* L30: */
- }
-
- } else if (wntqs) {
-
-/*
- Path 3t (N much larger than M, JOBZ='S')
- M right singular vectors to be computed in VT and
- M left singular vectors to be computed in U
-*/
-
- il = 1;
-
-/* WORK(IL) is M by M */
-
- ldwrkl = *m;
- itau = il + ldwrkl * *m;
- nwork = itau + *m;
-
-/*
- Compute A=L*Q
- (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
- i__2, &ierr);
-
-/* Copy L to WORK(IL), zeroing out above it */
-
- dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
- i__2 = *m - 1;
- i__1 = *m - 1;
- dlaset_("U", &i__2, &i__1, &c_b29, &c_b29, &work[il + ldwrkl],
- &ldwrkl);
-
-/*
- Generate Q in A
- (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
- &i__2, &ierr);
- ie = itau;
- itauq = ie + *m;
- itaup = itauq + *m;
- nwork = itaup + *m;
-
-/*
-              Bidiagonalize L in WORK(IL)
- (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
- itauq], &work[itaup], &work[nwork], &i__2, &ierr);
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
- of bidiagonal matrix in U and computing right singular
- vectors of bidiagonal matrix in VT
- (Workspace: need M+BDSPAC)
-*/
-
- dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
- vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
- info);
-
-/*
- Overwrite U by left singular vectors of L and VT
- by right singular vectors of L
- (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
- itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
- i__2 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
- itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
- ierr);
-
-/*
- Multiply right singular vectors of L in WORK(IL) by
- Q in A, storing result in VT
- (Workspace: need M*M)
-*/
-
- dlacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl);
- dgemm_("N", "N", m, n, m, &c_b15, &work[il], &ldwrkl, &a[
- a_offset], lda, &c_b29, &vt[vt_offset], ldvt);
-
- } else if (wntqa) {
-
-/*
- Path 4t (N much larger than M, JOBZ='A')
- N right singular vectors to be computed in VT and
- M left singular vectors to be computed in U
-*/
-
- ivt = 1;
-
-/* WORK(IVT) is M by M */
-
- ldwkvt = *m;
- itau = ivt + ldwkvt * *m;
- nwork = itau + *m;
-
-/*
- Compute A=L*Q, copying result to VT
- (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
- i__2, &ierr);
- dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
-
-/*
- Generate Q in VT
- (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[
- nwork], &i__2, &ierr);
-
-/* Produce L in A, zeroing out other entries */
-
- i__2 = *m - 1;
- i__1 = *m - 1;
- dlaset_("U", &i__2, &i__1, &c_b29, &c_b29, &a[((a_dim1) << (1)
- ) + 1], lda);
- ie = itau;
- itauq = ie + *m;
- itaup = itauq + *m;
- nwork = itaup + *m;
-
-/*
- Bidiagonalize L in A
- (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
- itauq], &work[itaup], &work[nwork], &i__2, &ierr);
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
- of bidiagonal matrix in U and computing right singular
- vectors of bidiagonal matrix in WORK(IVT)
- (Workspace: need M+M*M+BDSPAC)
-*/
-
- dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
- work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
- , info);
-
-/*
- Overwrite U by left singular vectors of L and WORK(IVT)
- by right singular vectors of L
- (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[
- itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
- i__2 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", m, m, m, &a[a_offset], lda, &work[
- itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &
- ierr);
-
-/*
- Multiply right singular vectors of L in WORK(IVT) by
- Q in VT, storing result in A
- (Workspace: need M*M)
-*/
-
- dgemm_("N", "N", m, n, m, &c_b15, &work[ivt], &ldwkvt, &vt[
- vt_offset], ldvt, &c_b29, &a[a_offset], lda);
-
-/* Copy right singular vectors of A from A to VT */
-
- dlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
-
- }
-
- } else {
-
-/*
- N .LT. MNTHR
-
- Path 5t (N greater than M, but not much larger)
- Reduce to bidiagonal form without LQ decomposition
-*/
-
- ie = 1;
- itauq = ie + *m;
- itaup = itauq + *m;
- nwork = itaup + *m;
-
-/*
- Bidiagonalize A
- (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
- work[itaup], &work[nwork], &i__2, &ierr);
- if (wntqn) {
-
-/*
- Perform bidiagonal SVD, only computing singular values
- (Workspace: need M+BDSPAC)
-*/
-
- dbdsdc_("L", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
- dum, idum, &work[nwork], &iwork[1], info);
- } else if (wntqo) {
- ldwkvt = *m;
- ivt = nwork;
- if (*lwork >= *m * *n + *m * 3 + bdspac) {
-
-/* WORK( IVT ) is M by N */
-
- dlaset_("F", m, n, &c_b29, &c_b29, &work[ivt], &ldwkvt);
- nwork = ivt + ldwkvt * *n;
- } else {
-
-/* WORK( IVT ) is M by M */
-
- nwork = ivt + ldwkvt * *m;
- il = nwork;
-
-/* WORK(IL) is M by CHUNK */
-
- chunk = (*lwork - *m * *m - *m * 3) / *m;
- }
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
- of bidiagonal matrix in U and computing right singular
- vectors of bidiagonal matrix in WORK(IVT)
- (Workspace: need M*M+BDSPAC)
-*/
-
- dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
- work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
- , info);
-
-/*
- Overwrite U by left singular vectors of A
- (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
- itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
-
- if (*lwork >= *m * *n + *m * 3 + bdspac) {
-
-/*
-                  Overwrite WORK(IVT) by right singular vectors of A
- (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
- itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2,
- &ierr);
-
-/* Copy right singular vectors of A from WORK(IVT) to A */
-
- dlacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda);
- } else {
-
-/*
- Generate P**T in A
- (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
-*/
-
- i__2 = *lwork - nwork + 1;
- dorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
- work[nwork], &i__2, &ierr);
-
-/*
- Multiply Q in A by right singular vectors of
- bidiagonal matrix in WORK(IVT), storing result in
- WORK(IL) and copying to A
- (Workspace: need 2*M*M, prefer M*M+M*N)
-*/
-
- i__2 = *n;
- i__1 = chunk;
- for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
- i__1) {
-/* Computing MIN */
- i__3 = *n - i__ + 1;
- blk = min(i__3,chunk);
- dgemm_("N", "N", m, &blk, m, &c_b15, &work[ivt], &
- ldwkvt, &a[i__ * a_dim1 + 1], lda, &c_b29, &
- work[il], m);
- dlacpy_("F", m, &blk, &work[il], m, &a[i__ * a_dim1 +
- 1], lda);
-/* L40: */
- }
- }
- } else if (wntqs) {
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
- of bidiagonal matrix in U and computing right singular
- vectors of bidiagonal matrix in VT
- (Workspace: need M+BDSPAC)
-*/
-
- dlaset_("F", m, n, &c_b29, &c_b29, &vt[vt_offset], ldvt);
- dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
- vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
- info);
-
-/*
- Overwrite U by left singular vectors of A and VT
- by right singular vectors of A
- (Workspace: need 3*M, prefer 2*M+M*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
- itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
- i__1 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
- itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
- ierr);
- } else if (wntqa) {
-
-/*
- Perform bidiagonal SVD, computing left singular vectors
- of bidiagonal matrix in U and computing right singular
- vectors of bidiagonal matrix in VT
- (Workspace: need M+BDSPAC)
-*/
-
- dlaset_("F", n, n, &c_b29, &c_b29, &vt[vt_offset], ldvt);
- dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
- vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
- info);
-
-/* Set the right corner of VT to identity matrix */
-
- i__1 = *n - *m;
- i__2 = *n - *m;
- dlaset_("F", &i__1, &i__2, &c_b29, &c_b15, &vt[*m + 1 + (*m +
- 1) * vt_dim1], ldvt);
-
-/*
- Overwrite U by left singular vectors of A and VT
- by right singular vectors of A
- (Workspace: need 2*M+N, prefer 2*M+N*NB)
-*/
-
- i__1 = *lwork - nwork + 1;
- dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
- itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
- i__1 = *lwork - nwork + 1;
- dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
- itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
- ierr);
- }
-
- }
-
- }
-
-/* Undo scaling if necessary */
-
- if (iscl == 1) {
- if (anrm > bignum) {
- dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
- minmn, &ierr);
- }
- if (anrm < smlnum) {
- dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
- minmn, &ierr);
- }
- }
-
-/* Return optimal workspace in WORK(1) */
-
- work[1] = (doublereal) maxwrk;
-
- return 0;
-
-/* End of DGESDD */
-
-} /* dgesdd_ */
-
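The JOBZ/LDU/LDVT rules documented for DGESDD above are easiest to see in a concrete call. The following minimal sketch computes the full SVD of a 3-by-2 column-major matrix with JOBZ = 'A', sizing WORK by the same LWORK = -1 query convention; the typedefs and data values are illustrative assumptions, not part of the diff:

    #include <stdlib.h>

    typedef int integer;        /* assumed to match this build's f2c.h */
    typedef double doublereal;

    extern int dgesdd_(char *jobz, integer *m, integer *n, doublereal *a,
                       integer *lda, doublereal *s, doublereal *u, integer *ldu,
                       doublereal *vt, integer *ldvt, doublereal *work,
                       integer *lwork, integer *iwork, integer *info);

    int main(void)
    {
        integer m = 3, n = 2, lda = 3, ldu = 3, ldvt = 2, lwork = -1, info = 0;
        doublereal a[6] = {1., 2., 3., 4., 5., 6.};   /* column-major 3x2 */
        doublereal s[2], u[9], vt[4], query, *work;
        integer iwork[16];                            /* 8*min(M,N) */

        /* Workspace query, then the decomposition A = U * SIGMA * VT.
           JOBZ = 'A': U is M-by-M, VT is N-by-N. */
        dgesdd_("A", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, &query, &lwork,
                iwork, &info);
        lwork = (integer)query;
        work = (doublereal *)malloc((size_t)lwork * sizeof(doublereal));
        dgesdd_("A", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork,
                iwork, &info);

        /* s[0] >= s[1] >= 0 now hold the singular values; per the
           documentation above, the contents of a have been destroyed. */
        free(work);
        return (int)info;
    }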
-/* Subroutine */ int dgesv_(integer *n, integer *nrhs, doublereal *a, integer
- *lda, integer *ipiv, doublereal *b, integer *ldb, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, i__1;
-
- /* Local variables */
- extern /* Subroutine */ int dgetrf_(integer *, integer *, doublereal *,
- integer *, integer *, integer *), xerbla_(char *, integer *), dgetrs_(char *, integer *, integer *, doublereal *,
- integer *, integer *, doublereal *, integer *, integer *);
-
-
-/*
- -- LAPACK driver routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- March 31, 1993
-
-
- Purpose
- =======
-
- DGESV computes the solution to a real system of linear equations
- A * X = B,
- where A is an N-by-N matrix and X and B are N-by-NRHS matrices.
-
- The LU decomposition with partial pivoting and row interchanges is
- used to factor A as
- A = P * L * U,
- where P is a permutation matrix, L is unit lower triangular, and U is
- upper triangular. The factored form of A is then used to solve the
- system of equations A * X = B.
-
- Arguments
- =========
-
- N (input) INTEGER
- The number of linear equations, i.e., the order of the
- matrix A. N >= 0.
-
- NRHS (input) INTEGER
- The number of right hand sides, i.e., the number of columns
- of the matrix B. NRHS >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the N-by-N coefficient matrix A.
- On exit, the factors L and U from the factorization
- A = P*L*U; the unit diagonal elements of L are not stored.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- IPIV (output) INTEGER array, dimension (N)
- The pivot indices that define the permutation matrix P;
- row i of the matrix was interchanged with row IPIV(i).
-
- B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
-          On entry, the N-by-NRHS right hand side matrix B.
- On exit, if INFO = 0, the N-by-NRHS solution matrix X.
-
- LDB (input) INTEGER
- The leading dimension of the array B. LDB >= max(1,N).
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
- has been completed, but the factor U is exactly
- singular, so the solution could not be computed.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --ipiv;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
-
- /* Function Body */
- *info = 0;
- if (*n < 0) {
- *info = -1;
- } else if (*nrhs < 0) {
- *info = -2;
- } else if (*lda < max(1,*n)) {
- *info = -4;
- } else if (*ldb < max(1,*n)) {
- *info = -7;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGESV ", &i__1);
- return 0;
- }
-
-/* Compute the LU factorization of A. */
-
- dgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info);
- if (*info == 0) {
-
-/* Solve the system A*X = B, overwriting B with X. */
-
- dgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[
- b_offset], ldb, info);
- }
- return 0;
-
-/* End of DGESV */
-
-} /* dgesv_ */
-
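DGESV bundles the factor-and-solve sequence shown in its body (DGETRF followed by DGETRS) into one driver call. A minimal sketch solving a 2-by-2 system follows, under the same f2c-typedef assumptions as before; note that B is overwritten by the solution X:

    typedef int integer;        /* assumed to match this build's f2c.h */
    typedef double doublereal;

    extern int dgesv_(integer *n, integer *nrhs, doublereal *a, integer *lda,
                      integer *ipiv, doublereal *b, integer *ldb, integer *info);

    int main(void)
    {
        integer n = 2, nrhs = 1, lda = 2, ldb = 2, info = 0;
        integer ipiv[2];
        doublereal a[4] = {2., 1., 1., 3.};  /* column-major: [[2,1],[1,3]] */
        doublereal b[2] = {3., 5.};          /* right-hand side */

        dgesv_(&n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
        /* On success b = {0.8, 1.4}: 2*0.8 + 1.4 = 3 and 0.8 + 3*1.4 = 5.
           a now holds the L and U factors, ipiv the row interchanges. */
        return (int)info;
    }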
-/* Subroutine */ int dgetf2_(integer *m, integer *n, doublereal *a, integer *
- lda, integer *ipiv, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
- doublereal d__1;
-
- /* Local variables */
- static integer j, jp;
- extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- integer *), dscal_(integer *, doublereal *, doublereal *, integer
- *), dswap_(integer *, doublereal *, integer *, doublereal *,
- integer *);
- extern integer idamax_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1992
-
-
- Purpose
- =======
-
- DGETF2 computes an LU factorization of a general m-by-n matrix A
- using partial pivoting with row interchanges.
-
- The factorization has the form
- A = P * L * U
- where P is a permutation matrix, L is lower triangular with unit
- diagonal elements (lower trapezoidal if m > n), and U is upper
- triangular (upper trapezoidal if m < n).
-
- This is the right-looking Level 2 BLAS version of the algorithm.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the m by n matrix to be factored.
- On exit, the factors L and U from the factorization
- A = P*L*U; the unit diagonal elements of L are not stored.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- IPIV (output) INTEGER array, dimension (min(M,N))
- The pivot indices; for 1 <= i <= min(M,N), row i of the
- matrix was interchanged with row IPIV(i).
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
- has been completed, but the factor U is exactly
- singular, and division by zero will occur if it is used
- to solve a system of equations.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --ipiv;
-
- /* Function Body */
- *info = 0;
- if (*m < 0) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*m)) {
- *info = -4;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGETF2", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0) {
- return 0;
- }
-
- i__1 = min(*m,*n);
- for (j = 1; j <= i__1; ++j) {
-
-/* Find pivot and test for singularity. */
-
- i__2 = *m - j + 1;
- jp = j - 1 + idamax_(&i__2, &a[j + j * a_dim1], &c__1);
- ipiv[j] = jp;
- if (a[jp + j * a_dim1] != 0.) {
-
-/* Apply the interchange to columns 1:N. */
-
- if (jp != j) {
- dswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
- }
-
-/* Compute elements J+1:M of J-th column. */
-
- if (j < *m) {
- i__2 = *m - j;
- d__1 = 1. / a[j + j * a_dim1];
- dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
- }
-
- } else if (*info == 0) {
-
- *info = j;
- }
-
- if (j < min(*m,*n)) {
-
-/* Update trailing submatrix. */
-
- i__2 = *m - j;
- i__3 = *n - j;
- dger_(&i__2, &i__3, &c_b151, &a[j + 1 + j * a_dim1], &c__1, &a[j
- + (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1],
- lda);
- }
-/* L10: */
- }
- return 0;
-
-/* End of DGETF2 */
-
-} /* dgetf2_ */
-
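The pivot search (IDAMAX), row interchange (DSWAP), column scaling (DSCAL), and rank-1 trailing update (DGER) that make up DGETF2's loop translate directly into plain C. The sketch below restates the same right-looking algorithm with 0-based indexing on a column-major matrix, for illustration only; the real routine works through the BLAS and reports errors via XERBLA:

    #include <math.h>

    /* Unblocked LU with partial pivoting, mirroring dgetf2_ above.
       Returns 0 on success, or j+1 if U(j,j) is exactly zero. */
    static int lu_unblocked(int m, int n, double *a, int lda, int *ipiv)
    {
        int info = 0, minmn = (m < n) ? m : n;
        int i, j, k;

        for (j = 0; j < minmn; ++j) {
            /* Find the pivot: largest |a(i,j)| for i >= j (IDAMAX). */
            int jp = j;
            for (i = j + 1; i < m; ++i)
                if (fabs(a[i + j * lda]) > fabs(a[jp + j * lda]))
                    jp = i;
            ipiv[j] = jp;

            if (a[jp + j * lda] != 0.0) {
                if (jp != j)                 /* swap rows j and jp (DSWAP) */
                    for (k = 0; k < n; ++k) {
                        double t = a[j + k * lda];
                        a[j + k * lda] = a[jp + k * lda];
                        a[jp + k * lda] = t;
                    }
                for (i = j + 1; i < m; ++i)  /* multipliers below the pivot (DSCAL) */
                    a[i + j * lda] /= a[j + j * lda];
            } else if (info == 0) {
                info = j + 1;                /* U(j,j) is exactly zero */
            }

            /* Rank-1 update of the trailing submatrix (DGER). */
            for (k = j + 1; k < n; ++k)
                for (i = j + 1; i < m; ++i)
                    a[i + k * lda] -= a[i + j * lda] * a[j + k * lda];
        }
        return info;
    }

    int main(void)
    {
        double a[4] = {4., 1., 2., 3.};  /* column-major [[4,2],[1,3]] */
        int ipiv[2];
        return lu_unblocked(2, 2, a, 2, ipiv);
    }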
-/* Subroutine */ int dgetrf_(integer *m, integer *n, doublereal *a, integer *
- lda, integer *ipiv, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
-
- /* Local variables */
- static integer i__, j, jb, nb;
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *);
- static integer iinfo;
- extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
- integer *, integer *, doublereal *, doublereal *, integer *,
- doublereal *, integer *), dgetf2_(
- integer *, integer *, doublereal *, integer *, integer *, integer
- *), xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- extern /* Subroutine */ int dlaswp_(integer *, doublereal *, integer *,
- integer *, integer *, integer *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- March 31, 1993
-
-
- Purpose
- =======
-
- DGETRF computes an LU factorization of a general M-by-N matrix A
- using partial pivoting with row interchanges.
-
- The factorization has the form
- A = P * L * U
- where P is a permutation matrix, L is lower triangular with unit
- diagonal elements (lower trapezoidal if m > n), and U is upper
- triangular (upper trapezoidal if m < n).
-
- This is the right-looking Level 3 BLAS version of the algorithm.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the M-by-N matrix to be factored.
- On exit, the factors L and U from the factorization
- A = P*L*U; the unit diagonal elements of L are not stored.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- IPIV (output) INTEGER array, dimension (min(M,N))
- The pivot indices; for 1 <= i <= min(M,N), row i of the
- matrix was interchanged with row IPIV(i).
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
- has been completed, but the factor U is exactly
- singular, and division by zero will occur if it is used
- to solve a system of equations.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --ipiv;
-
- /* Function Body */
- *info = 0;
- if (*m < 0) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*m)) {
- *info = -4;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGETRF", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0) {
- return 0;
- }
-
-/* Determine the block size for this environment. */
-
- nb = ilaenv_(&c__1, "DGETRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
- 1);
- if (nb <= 1 || nb >= min(*m,*n)) {
-
-/* Use unblocked code. */
-
- dgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info);
- } else {
-
-/* Use blocked code. */
-
- i__1 = min(*m,*n);
- i__2 = nb;
- for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
-/* Computing MIN */
- i__3 = min(*m,*n) - j + 1;
- jb = min(i__3,nb);
-
-/*
- Factor diagonal and subdiagonal blocks and test for exact
- singularity.
-*/
-
- i__3 = *m - j + 1;
- dgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo);
-
-/* Adjust INFO and the pivot indices. */
-
- if ((*info == 0 && iinfo > 0)) {
- *info = iinfo + j - 1;
- }
-/* Computing MIN */
- i__4 = *m, i__5 = j + jb - 1;
- i__3 = min(i__4,i__5);
- for (i__ = j; i__ <= i__3; ++i__) {
- ipiv[i__] = j - 1 + ipiv[i__];
-/* L10: */
- }
-
-/* Apply interchanges to columns 1:J-1. */
-
- i__3 = j - 1;
- i__4 = j + jb - 1;
- dlaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1);
-
- if (j + jb <= *n) {
-
-/* Apply interchanges to columns J+JB:N. */
-
- i__3 = *n - j - jb + 1;
- i__4 = j + jb - 1;
- dlaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, &
- ipiv[1], &c__1);
-
-/* Compute block row of U. */
-
- i__3 = *n - j - jb + 1;
- dtrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, &
- c_b15, &a[j + j * a_dim1], lda, &a[j + (j + jb) *
- a_dim1], lda);
- if (j + jb <= *m) {
-
-/* Update trailing submatrix. */
-
- i__3 = *m - j - jb + 1;
- i__4 = *n - j - jb + 1;
- dgemm_("No transpose", "No transpose", &i__3, &i__4, &jb,
- &c_b151, &a[j + jb + j * a_dim1], lda, &a[j + (j
- + jb) * a_dim1], lda, &c_b15, &a[j + jb + (j + jb)
- * a_dim1], lda);
- }
- }
-/* L20: */
- }
- }
- return 0;
-
-/* End of DGETRF */
-
-} /* dgetrf_ */
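
/*
 * Editorial sketch, not part of the original translated source: a
 * minimal caller for dgetrf_ above. It assumes the f2c typedefs
 * (integer, doublereal) from f2c.h are in scope and that matrices are
 * stored column-major, as everywhere in this file. IPIV uses 1-based
 * Fortran indexing.
 */
static void example_dgetrf(void)
{
    /* A = [ 0 2 ; 1 3 ] stored column by column */
    doublereal a[4] = { 0., 1., 2., 3. };
    integer m = 2, n = 2, lda = 2, ipiv[2], info;

    dgetrf_(&m, &n, a, &lda, ipiv, &info);
    /* info == 0 on success; info == i > 0 means U(i,i) is exactly 0.
       Here row 1 is swapped with row 2, so ipiv[0] == 2. */
}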
-
-/* Subroutine */ int dgetrs_(char *trans, integer *n, integer *nrhs,
- doublereal *a, integer *lda, integer *ipiv, doublereal *b, integer *
- ldb, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, i__1;
-
- /* Local variables */
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
- integer *, integer *, doublereal *, doublereal *, integer *,
- doublereal *, integer *), xerbla_(
- char *, integer *), dlaswp_(integer *, doublereal *,
- integer *, integer *, integer *, integer *, integer *);
- static logical notran;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- March 31, 1993
-
-
- Purpose
- =======
-
- DGETRS solves a system of linear equations
- A * X = B or A' * X = B
- with a general N-by-N matrix A using the LU factorization computed
- by DGETRF.
-
- Arguments
- =========
-
- TRANS (input) CHARACTER*1
- Specifies the form of the system of equations:
- = 'N': A * X = B (No transpose)
- = 'T': A'* X = B (Transpose)
- = 'C': A'* X = B (Conjugate transpose = Transpose)
-
- N (input) INTEGER
- The order of the matrix A. N >= 0.
-
- NRHS (input) INTEGER
- The number of right hand sides, i.e., the number of columns
- of the matrix B. NRHS >= 0.
-
- A (input) DOUBLE PRECISION array, dimension (LDA,N)
- The factors L and U from the factorization A = P*L*U
- as computed by DGETRF.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- IPIV (input) INTEGER array, dimension (N)
- The pivot indices from DGETRF; for 1<=i<=N, row i of the
- matrix was interchanged with row IPIV(i).
-
- B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
- On entry, the right hand side matrix B.
- On exit, the solution matrix X.
-
- LDB (input) INTEGER
- The leading dimension of the array B. LDB >= max(1,N).
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --ipiv;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
-
- /* Function Body */
- *info = 0;
- notran = lsame_(trans, "N");
- if (((! notran && ! lsame_(trans, "T")) && ! lsame_(
- trans, "C"))) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*nrhs < 0) {
- *info = -3;
- } else if (*lda < max(1,*n)) {
- *info = -5;
- } else if (*ldb < max(1,*n)) {
- *info = -8;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DGETRS", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0 || *nrhs == 0) {
- return 0;
- }
-
- if (notran) {
-
-/*
- Solve A * X = B.
-
- Apply row interchanges to the right hand sides.
-*/
-
- dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1);
-
-/* Solve L*X = B, overwriting B with X. */
-
- dtrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b15, &a[
- a_offset], lda, &b[b_offset], ldb);
-
-/* Solve U*X = B, overwriting B with X. */
-
- dtrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b15, &
- a[a_offset], lda, &b[b_offset], ldb);
- } else {
-
-/*
- Solve A' * X = B.
-
- Solve U'*X = B, overwriting B with X.
-*/
-
- dtrsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b15, &a[
- a_offset], lda, &b[b_offset], ldb);
-
-/* Solve L'*X = B, overwriting B with X. */
-
- dtrsm_("Left", "Lower", "Transpose", "Unit", n, nrhs, &c_b15, &a[
- a_offset], lda, &b[b_offset], ldb);
-
-/* Apply row interchanges to the solution vectors. */
-
- dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1);
- }
-
- return 0;
-
-/* End of DGETRS */
-
-} /* dgetrs_ */
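
/*
 * Editorial sketch, not part of the original translated source:
 * factor once with dgetrf_, then solve A*x = b with dgetrs_, the
 * usual pairing of the two routines above. Assumes the f2c typedefs
 * from f2c.h.
 */
static void example_dgetrs(void)
{
    /* A = [ 4 1 ; 1 3 ] (column-major), b = (1, 2)'; x = (1/11, 7/11)' */
    doublereal a[4] = { 4., 1., 1., 3. };
    doublereal b[2] = { 1., 2. };
    integer n = 2, nrhs = 1, lda = 2, ldb = 2, ipiv[2], info;

    dgetrf_(&n, &n, a, &lda, ipiv, &info);
    if (info == 0) {
        dgetrs_("N", &n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
    }
    /* on success b holds x; pass "T" instead of "N" to solve A'*x = b */
}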
-
-/* Subroutine */ int dhseqr_(char *job, char *compz, integer *n, integer *ilo,
- integer *ihi, doublereal *h__, integer *ldh, doublereal *wr,
- doublereal *wi, doublereal *z__, integer *ldz, doublereal *work,
- integer *lwork, integer *info)
-{
- /* System generated locals */
- address a__1[2];
- integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3[2], i__4,
- i__5;
- doublereal d__1, d__2;
- char ch__1[2];
-
- /* Builtin functions */
- /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
-
- /* Local variables */
- static integer i__, j, k, l;
- static doublereal s[225] /* was [15][15] */, v[16];
- static integer i1, i2, ii, nh, nr, ns, nv;
- static doublereal vv[16];
- static integer itn;
- static doublereal tau;
- static integer its;
- static doublereal ulp, tst1;
- static integer maxb;
- static doublereal absw;
- static integer ierr;
- static doublereal unfl, temp, ovfl;
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, integer *);
- static integer itemp;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static logical initz, wantt, wantz;
- extern doublereal dlapy2_(doublereal *, doublereal *);
- extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
-
- extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
- integer *, doublereal *);
- extern integer idamax_(integer *, doublereal *, integer *);
- extern doublereal dlanhs_(char *, integer *, doublereal *, integer *,
- doublereal *);
- extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *,
- integer *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, integer *, integer *, doublereal *, integer *,
- integer *), dlacpy_(char *, integer *, integer *, doublereal *,
- integer *, doublereal *, integer *), dlaset_(char *,
- integer *, integer *, doublereal *, doublereal *, doublereal *,
- integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- extern /* Subroutine */ int xerbla_(char *, integer *), dlarfx_(
- char *, integer *, integer *, doublereal *, doublereal *,
- doublereal *, integer *, doublereal *);
- static doublereal smlnum;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DHSEQR computes the eigenvalues of a real upper Hessenberg matrix H
- and, optionally, the matrices T and Z from the Schur decomposition
- H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur
- form), and Z is the orthogonal matrix of Schur vectors.
-
- Optionally Z may be postmultiplied into an input orthogonal matrix Q,
- so that this routine can give the Schur factorization of a matrix A
- which has been reduced to the Hessenberg form H by the orthogonal
- matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T.
-
- Arguments
- =========
-
- JOB (input) CHARACTER*1
- = 'E': compute eigenvalues only;
- = 'S': compute eigenvalues and the Schur form T.
-
- COMPZ (input) CHARACTER*1
- = 'N': no Schur vectors are computed;
- = 'I': Z is initialized to the unit matrix and the matrix Z
- of Schur vectors of H is returned;
- = 'V': Z must contain an orthogonal matrix Q on entry, and
- the product Q*Z is returned.
-
- N (input) INTEGER
- The order of the matrix H. N >= 0.
-
- ILO (input) INTEGER
- IHI (input) INTEGER
- It is assumed that H is already upper triangular in rows
- and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
- set by a previous call to DGEBAL, and then passed to DGEHRD
- when the matrix output by DGEBAL is reduced to Hessenberg
- form. Otherwise ILO and IHI should be set to 1 and N
- respectively.
- 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
-
- H (input/output) DOUBLE PRECISION array, dimension (LDH,N)
- On entry, the upper Hessenberg matrix H.
- On exit, if JOB = 'S', H contains the upper quasi-triangular
- matrix T from the Schur decomposition (the Schur form);
- 2-by-2 diagonal blocks (corresponding to complex conjugate
- pairs of eigenvalues) are returned in standard form, with
- H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. If JOB = 'E',
- the contents of H are unspecified on exit.
-
- LDH (input) INTEGER
- The leading dimension of the array H. LDH >= max(1,N).
-
- WR (output) DOUBLE PRECISION array, dimension (N)
- WI (output) DOUBLE PRECISION array, dimension (N)
- The real and imaginary parts, respectively, of the computed
- eigenvalues. If two eigenvalues are computed as a complex
- conjugate pair, they are stored in consecutive elements of
- WR and WI, say the i-th and (i+1)th, with WI(i) > 0 and
- WI(i+1) < 0. If JOB = 'S', the eigenvalues are stored in the
- same order as on the diagonal of the Schur form returned in
- H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2
- diagonal block, WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and
- WI(i+1) = -WI(i).
-
- Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N)
- If COMPZ = 'N': Z is not referenced.
- If COMPZ = 'I': on entry, Z need not be set, and on exit, Z
- contains the orthogonal matrix Z of the Schur vectors of H.
- If COMPZ = 'V': on entry Z must contain an N-by-N matrix Q,
- which is assumed to be equal to the unit matrix except for
- the submatrix Z(ILO:IHI,ILO:IHI); on exit Z contains Q*Z.
- Normally Q is the orthogonal matrix generated by DORGHR after
- the call to DGEHRD which formed the Hessenberg matrix H.
-
- LDZ (input) INTEGER
- The leading dimension of the array Z.
- LDZ >= max(1,N) if COMPZ = 'I' or 'V'; LDZ >= 1 otherwise.
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= max(1,N).
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
- > 0: if INFO = i, DHSEQR failed to compute all of the
- eigenvalues in a total of 30*(IHI-ILO+1) iterations;
- elements 1:ilo-1 and i+1:n of WR and WI contain those
- eigenvalues which have been successfully computed.
-
- =====================================================================
-
-
- Decode and test the input parameters
-*/
-
- /* Parameter adjustments */
- h_dim1 = *ldh;
- h_offset = 1 + h_dim1 * 1;
- h__ -= h_offset;
- --wr;
- --wi;
- z_dim1 = *ldz;
- z_offset = 1 + z_dim1 * 1;
- z__ -= z_offset;
- --work;
-
- /* Function Body */
- wantt = lsame_(job, "S");
- initz = lsame_(compz, "I");
- wantz = initz || lsame_(compz, "V");
-
- *info = 0;
- work[1] = (doublereal) max(1,*n);
- lquery = *lwork == -1;
- if ((! lsame_(job, "E") && ! wantt)) {
- *info = -1;
- } else if ((! lsame_(compz, "N") && ! wantz)) {
- *info = -2;
- } else if (*n < 0) {
- *info = -3;
- } else if (*ilo < 1 || *ilo > max(1,*n)) {
- *info = -4;
- } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
- *info = -5;
- } else if (*ldh < max(1,*n)) {
- *info = -7;
- } else if (*ldz < 1 || (wantz && *ldz < max(1,*n))) {
- *info = -11;
- } else if ((*lwork < max(1,*n) && ! lquery)) {
- *info = -13;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DHSEQR", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Initialize Z, if necessary */
-
- if (initz) {
- dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz);
- }
-
-/* Store the eigenvalues isolated by DGEBAL. */
-
- i__1 = *ilo - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- wr[i__] = h__[i__ + i__ * h_dim1];
- wi[i__] = 0.;
-/* L10: */
- }
- i__1 = *n;
- for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
- wr[i__] = h__[i__ + i__ * h_dim1];
- wi[i__] = 0.;
-/* L20: */
- }
-
-/* Quick return if possible. */
-
- if (*n == 0) {
- return 0;
- }
- if (*ilo == *ihi) {
- wr[*ilo] = h__[*ilo + *ilo * h_dim1];
- wi[*ilo] = 0.;
- return 0;
- }
-
-/*
- Set rows and columns ILO to IHI to zero below the first
- subdiagonal.
-*/
-
- i__1 = *ihi - 2;
- for (j = *ilo; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = j + 2; i__ <= i__2; ++i__) {
- h__[i__ + j * h_dim1] = 0.;
-/* L30: */
- }
-/* L40: */
- }
- nh = *ihi - *ilo + 1;
-
-/*
- Determine the order of the multi-shift QR algorithm to be used.
-
- Writing concatenation
-*/
- i__3[0] = 1, a__1[0] = job;
- i__3[1] = 1, a__1[1] = compz;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- ns = ilaenv_(&c__4, "DHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
- ftnlen)2);
-/* Writing concatenation */
- i__3[0] = 1, a__1[0] = job;
- i__3[1] = 1, a__1[1] = compz;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- maxb = ilaenv_(&c__8, "DHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
- ftnlen)2);
- if (ns <= 2 || ns > nh || maxb >= nh) {
-
-/* Use the standard double-shift algorithm */
-
- dlahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[
- 1], ilo, ihi, &z__[z_offset], ldz, info);
- return 0;
- }
- maxb = max(3,maxb);
-/* Computing MIN */
- i__1 = min(ns,maxb);
- ns = min(i__1,15);
-
-/*
- Now 2 < NS <= MAXB < NH.
-
- Set machine-dependent constants for the stopping criterion.
- If norm(H) <= sqrt(OVFL), overflow should not occur.
-*/
-
- unfl = SAFEMINIMUM;
- ovfl = 1. / unfl;
- dlabad_(&unfl, &ovfl);
- ulp = PRECISION;
- smlnum = unfl * (nh / ulp);
-
-/*
- I1 and I2 are the indices of the first row and last column of H
- to which transformations must be applied. If eigenvalues only are
- being computed, I1 and I2 are set inside the main loop.
-*/
-
- if (wantt) {
- i1 = 1;
- i2 = *n;
- }
-
-/* ITN is the total number of multiple-shift QR iterations allowed. */
-
- itn = nh * 30;
-
-/*
- The main loop begins here. I is the loop index and decreases from
- IHI to ILO in steps of at most MAXB. Each iteration of the loop
- works with the active submatrix in rows and columns L to I.
- Eigenvalues I+1 to IHI have already converged. Either L = ILO or
- H(L,L-1) is negligible so that the matrix splits.
-*/
-
- i__ = *ihi;
-L50:
- l = *ilo;
- if (i__ < *ilo) {
- goto L170;
- }
-
-/*
- Perform multiple-shift QR iterations on rows and columns ILO to I
- until a submatrix of order at most MAXB splits off at the bottom
- because a subdiagonal element has become negligible.
-*/
-
- i__1 = itn;
- for (its = 0; its <= i__1; ++its) {
-
-/* Look for a single small subdiagonal element. */
-
- i__2 = l + 1;
- for (k = i__; k >= i__2; --k) {
- tst1 = (d__1 = h__[k - 1 + (k - 1) * h_dim1], abs(d__1)) + (d__2 =
- h__[k + k * h_dim1], abs(d__2));
- if (tst1 == 0.) {
- i__4 = i__ - l + 1;
- tst1 = dlanhs_("1", &i__4, &h__[l + l * h_dim1], ldh, &work[1]
- );
- }
-/* Computing MAX */
- d__2 = ulp * tst1;
- if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= max(d__2,
- smlnum)) {
- goto L70;
- }
-/* L60: */
- }
-L70:
- l = k;
- if (l > *ilo) {
-
-/* H(L,L-1) is negligible. */
-
- h__[l + (l - 1) * h_dim1] = 0.;
- }
-
-/* Exit from loop if a submatrix of order <= MAXB has split off. */
-
- if (l >= i__ - maxb + 1) {
- goto L160;
- }
-
-/*
- Now the active submatrix is in rows and columns L to I. If
- eigenvalues only are being computed, only the active submatrix
- need be transformed.
-*/
-
- if (! wantt) {
- i1 = l;
- i2 = i__;
- }
-
- if (its == 20 || its == 30) {
-
-/* Exceptional shifts. */
-
- i__2 = i__;
- for (ii = i__ - ns + 1; ii <= i__2; ++ii) {
- wr[ii] = ((d__1 = h__[ii + (ii - 1) * h_dim1], abs(d__1)) + (
- d__2 = h__[ii + ii * h_dim1], abs(d__2))) * 1.5;
- wi[ii] = 0.;
-/* L80: */
- }
- } else {
-
-/* Use eigenvalues of trailing submatrix of order NS as shifts. */
-
- dlacpy_("Full", &ns, &ns, &h__[i__ - ns + 1 + (i__ - ns + 1) *
- h_dim1], ldh, s, &c__15);
- dlahqr_(&c_false, &c_false, &ns, &c__1, &ns, s, &c__15, &wr[i__ -
- ns + 1], &wi[i__ - ns + 1], &c__1, &ns, &z__[z_offset],
- ldz, &ierr);
- if (ierr > 0) {
-
-/*
- If DLAHQR failed to compute all NS eigenvalues, use the
- unconverged diagonal elements as the remaining shifts.
-*/
-
- i__2 = ierr;
- for (ii = 1; ii <= i__2; ++ii) {
- wr[i__ - ns + ii] = s[ii + ii * 15 - 16];
- wi[i__ - ns + ii] = 0.;
-/* L90: */
- }
- }
- }
-
-/*
- Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns))
- where G is the Hessenberg submatrix H(L:I,L:I) and w is
- the vector of shifts (stored in WR and WI). The result is
- stored in the local array V.
-*/
-
- v[0] = 1.;
- i__2 = ns + 1;
- for (ii = 2; ii <= i__2; ++ii) {
- v[ii - 1] = 0.;
-/* L100: */
- }
- nv = 1;
- i__2 = i__;
- for (j = i__ - ns + 1; j <= i__2; ++j) {
- if (wi[j] >= 0.) {
- if (wi[j] == 0.) {
-
-/* real shift */
-
- i__4 = nv + 1;
- dcopy_(&i__4, v, &c__1, vv, &c__1);
- i__4 = nv + 1;
- d__1 = -wr[j];
- dgemv_("No transpose", &i__4, &nv, &c_b15, &h__[l + l *
- h_dim1], ldh, vv, &c__1, &d__1, v, &c__1);
- ++nv;
- } else if (wi[j] > 0.) {
-
-/* complex conjugate pair of shifts */
-
- i__4 = nv + 1;
- dcopy_(&i__4, v, &c__1, vv, &c__1);
- i__4 = nv + 1;
- d__1 = wr[j] * -2.;
- dgemv_("No transpose", &i__4, &nv, &c_b15, &h__[l + l *
- h_dim1], ldh, v, &c__1, &d__1, vv, &c__1);
- i__4 = nv + 1;
- itemp = idamax_(&i__4, vv, &c__1);
-/* Computing MAX */
- d__2 = (d__1 = vv[itemp - 1], abs(d__1));
- temp = 1. / max(d__2,smlnum);
- i__4 = nv + 1;
- dscal_(&i__4, &temp, vv, &c__1);
- absw = dlapy2_(&wr[j], &wi[j]);
- temp = temp * absw * absw;
- i__4 = nv + 2;
- i__5 = nv + 1;
- dgemv_("No transpose", &i__4, &i__5, &c_b15, &h__[l + l *
- h_dim1], ldh, vv, &c__1, &temp, v, &c__1);
- nv += 2;
- }
-
-/*
- Scale V(1:NV) so that max(abs(V(i))) = 1. If V is zero,
- reset it to the unit vector.
-*/
-
- itemp = idamax_(&nv, v, &c__1);
- temp = (d__1 = v[itemp - 1], abs(d__1));
- if (temp == 0.) {
- v[0] = 1.;
- i__4 = nv;
- for (ii = 2; ii <= i__4; ++ii) {
- v[ii - 1] = 0.;
-/* L110: */
- }
- } else {
- temp = max(temp,smlnum);
- d__1 = 1. / temp;
- dscal_(&nv, &d__1, v, &c__1);
- }
- }
-/* L120: */
- }
-
-/* Multiple-shift QR step */
-
- i__2 = i__ - 1;
- for (k = l; k <= i__2; ++k) {
-
-/*
- The first iteration of this loop determines a reflection G
- from the vector V and applies it from left and right to H,
- thus creating a nonzero bulge below the subdiagonal.
-
- Each subsequent iteration determines a reflection G to
- restore the Hessenberg form in the (K-1)th column, and thus
- chases the bulge one step toward the bottom of the active
- submatrix. NR is the order of G.
-
- Computing MIN
-*/
- i__4 = ns + 1, i__5 = i__ - k + 1;
- nr = min(i__4,i__5);
- if (k > l) {
- dcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
- }
- dlarfg_(&nr, v, &v[1], &c__1, &tau);
- if (k > l) {
- h__[k + (k - 1) * h_dim1] = v[0];
- i__4 = i__;
- for (ii = k + 1; ii <= i__4; ++ii) {
- h__[ii + (k - 1) * h_dim1] = 0.;
-/* L130: */
- }
- }
- v[0] = 1.;
-
-/*
- Apply G from the left to transform the rows of the matrix in
- columns K to I2.
-*/
-
- i__4 = i2 - k + 1;
- dlarfx_("Left", &nr, &i__4, v, &tau, &h__[k + k * h_dim1], ldh, &
- work[1]);
-
-/*
- Apply G from the right to transform the columns of the
- matrix in rows I1 to min(K+NR,I).
-
- Computing MIN
-*/
- i__5 = k + nr;
- i__4 = min(i__5,i__) - i1 + 1;
- dlarfx_("Right", &i__4, &nr, v, &tau, &h__[i1 + k * h_dim1], ldh,
- &work[1]);
-
- if (wantz) {
-
-/* Accumulate transformations in the matrix Z */
-
- dlarfx_("Right", &nh, &nr, v, &tau, &z__[*ilo + k * z_dim1],
- ldz, &work[1]);
- }
-/* L140: */
- }
-
-/* L150: */
- }
-
-/* Failure to converge in remaining number of iterations */
-
- *info = i__;
- return 0;
-
-L160:
-
-/*
- A submatrix of order <= MAXB in rows and columns L to I has split
- off. Use the double-shift QR algorithm to handle it.
-*/
-
- dlahqr_(&wantt, &wantz, n, &l, &i__, &h__[h_offset], ldh, &wr[1], &wi[1],
- ilo, ihi, &z__[z_offset], ldz, info);
- if (*info > 0) {
- return 0;
- }
-
-/*
- Decrement number of remaining iterations, and return to start of
- the main loop with a new value of I.
-*/
-
- itn -= its;
- i__ = l - 1;
- goto L50;
-
-L170:
- work[1] = (doublereal) max(1,*n);
- return 0;
-
-/* End of DHSEQR */
-
-} /* dhseqr_ */
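
/*
 * Editorial sketch, not part of the original translated source:
 * eigenvalues only (JOB = 'E', COMPZ = 'N') of an n-by-n upper
 * Hessenberg matrix h, using the LWORK = -1 workspace query described
 * above. Assumes the f2c typedefs from f2c.h and <stdlib.h>.
 */
static void example_dhseqr(doublereal *h, integer n, integer ldh,
                           doublereal *wr, doublereal *wi)
{
    integer ilo = 1, ihi = n, ldz = 1, lwork = -1, info;
    doublereal zdum, wk, *work;

    /* workspace query: the optimal LWORK comes back in wk */
    dhseqr_("E", "N", &n, &ilo, &ihi, h, &ldh, wr, wi, &zdum, &ldz,
            &wk, &lwork, &info);
    lwork = (integer) wk;
    work = (doublereal *) malloc(lwork * sizeof *work);
    dhseqr_("E", "N", &n, &ilo, &ihi, h, &ldh, wr, wi, &zdum, &ldz,
            work, &lwork, &info);
    /* if info > 0, only elements info+1:n (and 1:ilo-1) of wr/wi
       hold converged eigenvalues */
    free(work);
}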
-
-/* Subroutine */ int dlabad_(doublereal *small, doublereal *large)
-{
- /* Builtin functions */
- double d_lg10(doublereal *), sqrt(doublereal);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLABAD takes as input the values computed by DLAMCH for underflow and
- overflow, and returns the square root of each of these values if the
- log of LARGE is sufficiently large. This subroutine is intended to
- identify machines with a large exponent range, such as the Crays, and
- redefine the underflow and overflow limits to be the square roots of
- the values computed by DLAMCH. This subroutine is needed because
- DLAMCH does not compensate for poor arithmetic in the upper half of
- the exponent range, as is found on a Cray.
-
- Arguments
- =========
-
- SMALL (input/output) DOUBLE PRECISION
- On entry, the underflow threshold as computed by DLAMCH.
- On exit, if LOG10(LARGE) is sufficiently large, the square
- root of SMALL, otherwise unchanged.
-
- LARGE (input/output) DOUBLE PRECISION
- On entry, the overflow threshold as computed by DLAMCH.
- On exit, if LOG10(LARGE) is sufficiently large, the square
- root of LARGE, otherwise unchanged.
-
- =====================================================================
-
-
- If it looks like we're on a Cray, take the square root of
- SMALL and LARGE to avoid overflow and underflow problems.
-*/
-
- if (d_lg10(large) > 2e3) {
- *small = sqrt(*small);
- *large = sqrt(*large);
- }
-
- return 0;
-
-/* End of DLABAD */
-
-} /* dlabad_ */
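
/*
 * Editorial sketch, not part of the original translated source: on an
 * IEEE machine log10(overflow) is about 308, far below the 2e3
 * threshold tested above, so dlabad_ leaves the DLAMCH values alone;
 * only on a machine with a huge exponent range (the historic Cray
 * case) would it replace them by their square roots.
 */
static void example_dlabad(void)
{
    doublereal small = 2.2250738585072014e-308;  /* ~ DLAMCH('S') */
    doublereal large = 1. / small;

    dlabad_(&small, &large);
    /* small and large are unchanged here: log10(large) ~ 308 */
}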
-
-/* Subroutine */ int dlabrd_(integer *m, integer *n, integer *nb, doublereal *
- a, integer *lda, doublereal *d__, doublereal *e, doublereal *tauq,
- doublereal *taup, doublereal *x, integer *ldx, doublereal *y, integer
- *ldy)
-{
- /* System generated locals */
- integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2,
- i__3;
-
- /* Local variables */
- static integer i__;
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *), dgemv_(char *, integer *, integer *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, integer *), dlarfg_(integer *, doublereal *,
- doublereal *, integer *, doublereal *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DLABRD reduces the first NB rows and columns of a real general
- m by n matrix A to upper or lower bidiagonal form by an orthogonal
- transformation Q' * A * P, and returns the matrices X and Y which
- are needed to apply the transformation to the unreduced part of A.
-
- If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower
- bidiagonal form.
-
- This is an auxiliary routine called by DGEBRD.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows in the matrix A.
-
- N (input) INTEGER
- The number of columns in the matrix A.
-
- NB (input) INTEGER
- The number of leading rows and columns of A to be reduced.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the m by n general matrix to be reduced.
- On exit, the first NB rows and columns of the matrix are
- overwritten; the rest of the array is unchanged.
- If m >= n, elements on and below the diagonal in the first NB
- columns, with the array TAUQ, represent the orthogonal
- matrix Q as a product of elementary reflectors; and
- elements above the diagonal in the first NB rows, with the
- array TAUP, represent the orthogonal matrix P as a product
- of elementary reflectors.
- If m < n, elements below the diagonal in the first NB
- columns, with the array TAUQ, represent the orthogonal
- matrix Q as a product of elementary reflectors, and
- elements on and above the diagonal in the first NB rows,
- with the array TAUP, represent the orthogonal matrix P as
- a product of elementary reflectors.
- See Further Details.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- D (output) DOUBLE PRECISION array, dimension (NB)
- The diagonal elements of the first NB rows and columns of
- the reduced matrix. D(i) = A(i,i).
-
- E (output) DOUBLE PRECISION array, dimension (NB)
- The off-diagonal elements of the first NB rows and columns of
- the reduced matrix.
-
- TAUQ (output) DOUBLE PRECISION array dimension (NB)
- The scalar factors of the elementary reflectors which
- represent the orthogonal matrix Q. See Further Details.
-
- TAUP (output) DOUBLE PRECISION array, dimension (NB)
- The scalar factors of the elementary reflectors which
- represent the orthogonal matrix P. See Further Details.
-
- X (output) DOUBLE PRECISION array, dimension (LDX,NB)
- The m-by-nb matrix X required to update the unreduced part
- of A.
-
- LDX (input) INTEGER
- The leading dimension of the array X. LDX >= M.
-
- Y (output) DOUBLE PRECISION array, dimension (LDY,NB)
- The n-by-nb matrix Y required to update the unreduced part
- of A.
-
- LDY (input) INTEGER
- The leading dimension of the array Y. LDY >= N.
-
- Further Details
- ===============
-
- The matrices Q and P are represented as products of elementary
- reflectors:
-
- Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . G(nb)
-
- Each H(i) and G(i) has the form:
-
- H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
-
- where tauq and taup are real scalars, and v and u are real vectors.
-
- If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in
- A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in
- A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
-
- If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in
- A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in
- A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
-
- The elements of the vectors v and u together form the m-by-nb matrix
- V and the nb-by-n matrix U' which are needed, with X and Y, to apply
- the transformation to the unreduced part of the matrix, using a block
- update of the form: A := A - V*Y' - X*U'.
-
- The contents of A on exit are illustrated by the following examples
- with nb = 2:
-
- m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
-
- ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 )
- ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 )
- ( v1 v2 a a a ) ( v1 1 a a a a )
- ( v1 v2 a a a ) ( v1 v2 a a a a )
- ( v1 v2 a a a ) ( v1 v2 a a a a )
- ( v1 v2 a a a )
-
- where a denotes an element of the original matrix which is unchanged,
- vi denotes an element of the vector defining H(i), and ui an element
- of the vector defining G(i).
-
- =====================================================================
-
-
- Quick return if possible
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --d__;
- --e;
- --tauq;
- --taup;
- x_dim1 = *ldx;
- x_offset = 1 + x_dim1 * 1;
- x -= x_offset;
- y_dim1 = *ldy;
- y_offset = 1 + y_dim1 * 1;
- y -= y_offset;
-
- /* Function Body */
- if (*m <= 0 || *n <= 0) {
- return 0;
- }
-
- if (*m >= *n) {
-
-/* Reduce to upper bidiagonal form */
-
- i__1 = *nb;
- for (i__ = 1; i__ <= i__1; ++i__) {
-
-/* Update A(i:m,i) */
-
- i__2 = *m - i__ + 1;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + a_dim1],
- lda, &y[i__ + y_dim1], ldy, &c_b15, &a[i__ + i__ * a_dim1]
- , &c__1);
- i__2 = *m - i__ + 1;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + x_dim1],
- ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b15, &a[i__ + i__ *
- a_dim1], &c__1);
-
-/* Generate reflection Q(i) to annihilate A(i+1:m,i) */
-
- i__2 = *m - i__ + 1;
-/* Computing MIN */
- i__3 = i__ + 1;
- dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ *
- a_dim1], &c__1, &tauq[i__]);
- d__[i__] = a[i__ + i__ * a_dim1];
- if (i__ < *n) {
- a[i__ + i__ * a_dim1] = 1.;
-
-/* Compute Y(i+1:n,i) */
-
- i__2 = *m - i__ + 1;
- i__3 = *n - i__;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + (i__ + 1) *
- a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b29,
- &y[i__ + 1 + i__ * y_dim1], &c__1);
- i__2 = *m - i__ + 1;
- i__3 = i__ - 1;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + a_dim1],
- lda, &a[i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ *
- y_dim1 + 1], &c__1);
- i__2 = *n - i__;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + 1 +
- y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[
- i__ + 1 + i__ * y_dim1], &c__1);
- i__2 = *m - i__ + 1;
- i__3 = i__ - 1;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &x[i__ + x_dim1],
- ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ *
- y_dim1 + 1], &c__1);
- i__2 = i__ - 1;
- i__3 = *n - i__;
- dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) *
- a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b15,
- &y[i__ + 1 + i__ * y_dim1], &c__1);
- i__2 = *n - i__;
- dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
-
-/* Update A(i,i+1:n) */
-
- i__2 = *n - i__;
- dgemv_("No transpose", &i__2, &i__, &c_b151, &y[i__ + 1 +
- y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b15, &a[i__ +
- (i__ + 1) * a_dim1], lda);
- i__2 = i__ - 1;
- i__3 = *n - i__;
- dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1) *
- a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b15, &a[
- i__ + (i__ + 1) * a_dim1], lda);
-
-/* Generate reflection P(i) to annihilate A(i,i+2:n) */
-
- i__2 = *n - i__;
-/* Computing MIN */
- i__3 = i__ + 2;
- dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
- i__3,*n) * a_dim1], lda, &taup[i__]);
- e[i__] = a[i__ + (i__ + 1) * a_dim1];
- a[i__ + (i__ + 1) * a_dim1] = 1.;
-
-/* Compute X(i+1:m,i) */
-
- i__2 = *m - i__;
- i__3 = *n - i__;
- dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + (
- i__ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1],
- lda, &c_b29, &x[i__ + 1 + i__ * x_dim1], &c__1);
- i__2 = *n - i__;
- dgemv_("Transpose", &i__2, &i__, &c_b15, &y[i__ + 1 + y_dim1],
- ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &c_b29, &x[
- i__ * x_dim1 + 1], &c__1);
- i__2 = *m - i__;
- dgemv_("No transpose", &i__2, &i__, &c_b151, &a[i__ + 1 +
- a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[
- i__ + 1 + i__ * x_dim1], &c__1);
- i__2 = i__ - 1;
- i__3 = *n - i__;
- dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[(i__ + 1) *
- a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
- c_b29, &x[i__ * x_dim1 + 1], &c__1);
- i__2 = *m - i__;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + 1 +
- x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[
- i__ + 1 + i__ * x_dim1], &c__1);
- i__2 = *m - i__;
- dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
- }
-/* L10: */
- }
- } else {
-
-/* Reduce to lower bidiagonal form */
-
- i__1 = *nb;
- for (i__ = 1; i__ <= i__1; ++i__) {
-
-/* Update A(i,i:n) */
-
- i__2 = *n - i__ + 1;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + y_dim1],
- ldy, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + i__ * a_dim1]
- , lda);
- i__2 = i__ - 1;
- i__3 = *n - i__ + 1;
- dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[i__ * a_dim1 + 1],
- lda, &x[i__ + x_dim1], ldx, &c_b15, &a[i__ + i__ * a_dim1]
- , lda);
-
-/* Generate reflection P(i) to annihilate A(i,i+1:n) */
-
- i__2 = *n - i__ + 1;
-/* Computing MIN */
- i__3 = i__ + 1;
- dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) *
- a_dim1], lda, &taup[i__]);
- d__[i__] = a[i__ + i__ * a_dim1];
- if (i__ < *m) {
- a[i__ + i__ * a_dim1] = 1.;
-
-/* Compute X(i+1:m,i) */
-
- i__2 = *m - i__;
- i__3 = *n - i__ + 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + i__
- * a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b29, &
- x[i__ + 1 + i__ * x_dim1], &c__1);
- i__2 = *n - i__ + 1;
- i__3 = i__ - 1;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &y[i__ + y_dim1],
- ldy, &a[i__ + i__ * a_dim1], lda, &c_b29, &x[i__ *
- x_dim1 + 1], &c__1);
- i__2 = *m - i__;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 +
- a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[
- i__ + 1 + i__ * x_dim1], &c__1);
- i__2 = i__ - 1;
- i__3 = *n - i__ + 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b15, &a[i__ * a_dim1
- + 1], lda, &a[i__ + i__ * a_dim1], lda, &c_b29, &x[
- i__ * x_dim1 + 1], &c__1);
- i__2 = *m - i__;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &x[i__ + 1 +
- x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b15, &x[
- i__ + 1 + i__ * x_dim1], &c__1);
- i__2 = *m - i__;
- dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
-
-/* Update A(i+1:m,i) */
-
- i__2 = *m - i__;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 +
- a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b15, &a[i__ +
- 1 + i__ * a_dim1], &c__1);
- i__2 = *m - i__;
- dgemv_("No transpose", &i__2, &i__, &c_b151, &x[i__ + 1 +
- x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b15, &a[
- i__ + 1 + i__ * a_dim1], &c__1);
-
-/* Generate reflection Q(i) to annihilate A(i+2:m,i) */
-
- i__2 = *m - i__;
-/* Computing MIN */
- i__3 = i__ + 2;
- dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) +
- i__ * a_dim1], &c__1, &tauq[i__]);
- e[i__] = a[i__ + 1 + i__ * a_dim1];
- a[i__ + 1 + i__ * a_dim1] = 1.;
-
-/* Compute Y(i+1:n,i) */
-
- i__2 = *m - i__;
- i__3 = *n - i__;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + (i__ +
- 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1,
- &c_b29, &y[i__ + 1 + i__ * y_dim1], &c__1);
- i__2 = *m - i__;
- i__3 = i__ - 1;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + a_dim1]
- , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[
- i__ * y_dim1 + 1], &c__1);
- i__2 = *n - i__;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &y[i__ + 1 +
- y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b15, &y[
- i__ + 1 + i__ * y_dim1], &c__1);
- i__2 = *m - i__;
- dgemv_("Transpose", &i__2, &i__, &c_b15, &x[i__ + 1 + x_dim1],
- ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &y[
- i__ * y_dim1 + 1], &c__1);
- i__2 = *n - i__;
- dgemv_("Transpose", &i__, &i__2, &c_b151, &a[(i__ + 1) *
- a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &c_b15,
- &y[i__ + 1 + i__ * y_dim1], &c__1);
- i__2 = *n - i__;
- dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
- }
-/* L20: */
- }
- }
- return 0;
-
-/* End of DLABRD */
-
-} /* dlabrd_ */
-
-/* Subroutine */ int dlacpy_(char *uplo, integer *m, integer *n, doublereal *
- a, integer *lda, doublereal *b, integer *ldb)
-{
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, j;
- extern logical lsame_(char *, char *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DLACPY copies all or part of a two-dimensional matrix A to another
- matrix B.
-
- Arguments
- =========
-
- UPLO (input) CHARACTER*1
- Specifies the part of the matrix A to be copied to B.
- = 'U': Upper triangular part
- = 'L': Lower triangular part
- Otherwise: All of the matrix A
-
- M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix A. N >= 0.
-
- A (input) DOUBLE PRECISION array, dimension (LDA,N)
- The m by n matrix A. If UPLO = 'U', only the upper triangle
- or trapezoid is accessed; if UPLO = 'L', only the lower
- triangle or trapezoid is accessed.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- B (output) DOUBLE PRECISION array, dimension (LDB,N)
- On exit, B = A in the locations specified by UPLO.
-
- LDB (input) INTEGER
- The leading dimension of the array B. LDB >= max(1,M).
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
-
- /* Function Body */
- if (lsame_(uplo, "U")) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = min(j,*m);
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
-/* L10: */
- }
-/* L20: */
- }
- } else if (lsame_(uplo, "L")) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = j; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
-/* L30: */
- }
-/* L40: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
-/* L50: */
- }
-/* L60: */
- }
- }
- return 0;
-
-/* End of DLACPY */
-
-} /* dlacpy_ */
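
/*
 * Editorial sketch, not part of the original translated source:
 * copying only the upper triangle with dlacpy_ above. Assumes the
 * f2c typedefs from f2c.h; matrices are column-major.
 */
static void example_dlacpy(void)
{
    doublereal a[9] = { 1., 2., 3.,  4., 5., 6.,  7., 8., 9. };
    doublereal b[9] = { 0. };            /* zero-initialized target */
    integer m = 3, n = 3, lda = 3, ldb = 3;

    dlacpy_("U", &m, &n, a, &lda, b, &ldb);
    /* b(i,j) == a(i,j) for i <= j; the strictly lower part of b
       is left untouched (still zero here) */
}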
-
-/* Subroutine */ int dladiv_(doublereal *a, doublereal *b, doublereal *c__,
- doublereal *d__, doublereal *p, doublereal *q)
-{
- static doublereal e, f;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLADIV performs complex division in real arithmetic
-
- a + i*b
- p + i*q = ---------
- c + i*d
-
- The algorithm is due to Robert L. Smith and can be found
- in D. Knuth, The Art of Computer Programming, Vol. 2, p. 195
-
- Arguments
- =========
-
- A (input) DOUBLE PRECISION
- B (input) DOUBLE PRECISION
- C (input) DOUBLE PRECISION
- D (input) DOUBLE PRECISION
- The scalars a, b, c, and d in the above expression.
-
- P (output) DOUBLE PRECISION
- Q (output) DOUBLE PRECISION
- The scalars p and q in the above expression.
-
- =====================================================================
-*/
-
-
- if (abs(*d__) < abs(*c__)) {
- e = *d__ / *c__;
- f = *c__ + *d__ * e;
- *p = (*a + *b * e) / f;
- *q = (*b - *a * e) / f;
- } else {
- e = *c__ / *d__;
- f = *d__ + *c__ * e;
- *p = (*b + *a * e) / f;
- *q = (-(*a) + *b * e) / f;
- }
-
- return 0;
-
-/* End of DLADIV */
-
-} /* dladiv_ */
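
/*
 * Editorial sketch, not part of the original translated source: a
 * worked instance of Smith's algorithm above,
 * (1 + 2i) / (3 + 4i) = (11 + 2i) / 25 = 0.44 + 0.08i.
 * The |d| < |c| test picks whichever scaling avoids overflow in the
 * intermediate products.
 */
static void example_dladiv(void)
{
    doublereal a = 1., b = 2., c = 3., d = 4., p, q;

    dladiv_(&a, &b, &c, &d, &p, &q);
    /* p == 0.44 and q == 0.08, up to rounding */
}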
-
-/* Subroutine */ int dlae2_(doublereal *a, doublereal *b, doublereal *c__,
- doublereal *rt1, doublereal *rt2)
-{
- /* System generated locals */
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal ab, df, tb, sm, rt, adf, acmn, acmx;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLAE2 computes the eigenvalues of a 2-by-2 symmetric matrix
- [ A B ]
- [ B C ].
- On return, RT1 is the eigenvalue of larger absolute value, and RT2
- is the eigenvalue of smaller absolute value.
-
- Arguments
- =========
-
- A (input) DOUBLE PRECISION
- The (1,1) element of the 2-by-2 matrix.
-
- B (input) DOUBLE PRECISION
- The (1,2) and (2,1) elements of the 2-by-2 matrix.
-
- C (input) DOUBLE PRECISION
- The (2,2) element of the 2-by-2 matrix.
-
- RT1 (output) DOUBLE PRECISION
- The eigenvalue of larger absolute value.
-
- RT2 (output) DOUBLE PRECISION
- The eigenvalue of smaller absolute value.
-
- Further Details
- ===============
-
- RT1 is accurate to a few ulps barring over/underflow.
-
- RT2 may be inaccurate if there is massive cancellation in the
- determinant A*C-B*B; higher precision or correctly rounded or
- correctly truncated arithmetic would be needed to compute RT2
- accurately in all cases.
-
- Overflow is possible only if RT1 is within a factor of 5 of overflow.
- Underflow is harmless if the input data is 0 or exceeds
- underflow_threshold / macheps.
-
- =====================================================================
-
-
- Compute the eigenvalues
-*/
-
- sm = *a + *c__;
- df = *a - *c__;
- adf = abs(df);
- tb = *b + *b;
- ab = abs(tb);
- if (abs(*a) > abs(*c__)) {
- acmx = *a;
- acmn = *c__;
- } else {
- acmx = *c__;
- acmn = *a;
- }
- if (adf > ab) {
-/* Computing 2nd power */
- d__1 = ab / adf;
- rt = adf * sqrt(d__1 * d__1 + 1.);
- } else if (adf < ab) {
-/* Computing 2nd power */
- d__1 = adf / ab;
- rt = ab * sqrt(d__1 * d__1 + 1.);
- } else {
-
-/* Includes case AB=ADF=0 */
-
- rt = ab * sqrt(2.);
- }
- if (sm < 0.) {
- *rt1 = (sm - rt) * .5;
-
-/*
- Order of execution important.
- To get fully accurate smaller eigenvalue,
- next line needs to be executed in higher precision.
-*/
-
- *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
- } else if (sm > 0.) {
- *rt1 = (sm + rt) * .5;
-
-/*
- Order of execution important.
- To get fully accurate smaller eigenvalue,
- next line needs to be executed in higher precision.
-*/
-
- *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
- } else {
-
-/* Includes case RT1 = RT2 = 0 */
-
- *rt1 = rt * .5;
- *rt2 = rt * -.5;
- }
- return 0;
-
-/* End of DLAE2 */
-
-} /* dlae2_ */
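
/*
 * Editorial sketch, not part of the original translated source: the
 * symmetric 2-by-2 matrix [ 2 1 ; 1 2 ] has eigenvalues 3 and 1, and
 * dlae2_ above returns them with the larger magnitude in RT1.
 */
static void example_dlae2(void)
{
    doublereal a = 2., b = 1., c = 2., rt1, rt2;

    dlae2_(&a, &b, &c, &rt1, &rt2);
    /* rt1 == 3. and rt2 == 1., up to rounding */
}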
-
-/* Subroutine */ int dlaed0_(integer *icompq, integer *qsiz, integer *n,
- doublereal *d__, doublereal *e, doublereal *q, integer *ldq,
- doublereal *qstore, integer *ldqs, doublereal *work, integer *iwork,
- integer *info)
-{
- /* System generated locals */
- integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2;
- doublereal d__1;
-
- /* Builtin functions */
- double log(doublereal);
- integer pow_ii(integer *, integer *);
-
- /* Local variables */
- static integer i__, j, k, iq, lgn, msd2, smm1, spm1, spm2;
- static doublereal temp;
- static integer curr;
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *);
- static integer iperm;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static integer indxq, iwrem;
- extern /* Subroutine */ int dlaed1_(integer *, doublereal *, doublereal *,
- integer *, integer *, doublereal *, integer *, doublereal *,
- integer *, integer *);
- static integer iqptr;
- extern /* Subroutine */ int dlaed7_(integer *, integer *, integer *,
- integer *, integer *, integer *, doublereal *, doublereal *,
- integer *, integer *, doublereal *, integer *, doublereal *,
- integer *, integer *, integer *, integer *, integer *, doublereal
- *, doublereal *, integer *, integer *);
- static integer tlvls;
- extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
- doublereal *, integer *, doublereal *, integer *);
- static integer igivcl;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static integer igivnm, submat, curprb, subpbs, igivpt;
- extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
- doublereal *, doublereal *, integer *, doublereal *, integer *);
- static integer curlvl, matsiz, iprmpt, smlsiz;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLAED0 computes all eigenvalues and corresponding eigenvectors of a
- symmetric tridiagonal matrix using the divide and conquer method.
-
- Arguments
- =========
-
- ICOMPQ (input) INTEGER
- = 0: Compute eigenvalues only.
- = 1: Compute eigenvectors of original dense symmetric matrix
- also. On entry, Q contains the orthogonal matrix used
- to reduce the original matrix to tridiagonal form.
- = 2: Compute eigenvalues and eigenvectors of tridiagonal
- matrix.
-
- QSIZ (input) INTEGER
- The dimension of the orthogonal matrix used to reduce
- the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1.
-
- N (input) INTEGER
- The dimension of the symmetric tridiagonal matrix. N >= 0.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the main diagonal of the tridiagonal matrix.
- On exit, its eigenvalues.
-
- E (input) DOUBLE PRECISION array, dimension (N-1)
- The off-diagonal elements of the tridiagonal matrix.
- On exit, E has been destroyed.
-
- Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N)
- On entry, Q must contain an N-by-N orthogonal matrix.
- If ICOMPQ = 0 Q is not referenced.
- If ICOMPQ = 1 On entry, Q is a subset of the columns of the
- orthogonal matrix used to reduce the full
- matrix to tridiagonal form corresponding to
- the subset of the full matrix which is being
- decomposed at this time.
- If ICOMPQ = 2 On entry, Q will be the identity matrix.
- On exit, Q contains the eigenvectors of the
- tridiagonal matrix.
-
- LDQ (input) INTEGER
- The leading dimension of the array Q. If eigenvectors are
- desired, then LDQ >= max(1,N). In any case, LDQ >= 1.
-
- QSTORE (workspace) DOUBLE PRECISION array, dimension (LDQS, N)
- Referenced only when ICOMPQ = 1. Used to store parts of
- the eigenvector matrix when the updating matrix multiplies
- take place.
-
- LDQS (input) INTEGER
- The leading dimension of the array QSTORE. If ICOMPQ = 1,
- then LDQS >= max(1,N). In any case, LDQS >= 1.
-
- WORK (workspace) DOUBLE PRECISION array,
- If ICOMPQ = 0 or 1, the dimension of WORK must be at least
- 1 + 3*N + 2*N*lg N + 2*N**2
- ( lg( N ) = smallest integer k
- such that 2^k >= N )
- If ICOMPQ = 2, the dimension of WORK must be at least
- 4*N + N**2.
-
- IWORK (workspace) INTEGER array,
- If ICOMPQ = 0 or 1, the dimension of IWORK must be at least
- 6 + 6*N + 5*N*lg N.
- ( lg( N ) = smallest integer k
- such that 2^k >= N )
- If ICOMPQ = 2, the dimension of IWORK must be at least
- 3 + 5*N.
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
- > 0: The algorithm failed to compute an eigenvalue while
- working on the submatrix lying in rows and columns
- INFO/(N+1) through mod(INFO,N+1).
-
- Further Details
- ===============
-
- Based on contributions by
- Jeff Rutter, Computer Science Division, University of California
- at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --e;
- q_dim1 = *ldq;
- q_offset = 1 + q_dim1 * 1;
- q -= q_offset;
- qstore_dim1 = *ldqs;
- qstore_offset = 1 + qstore_dim1 * 1;
- qstore -= qstore_offset;
- --work;
- --iwork;
-
- /* Function Body */
- *info = 0;
-
- if (*icompq < 0 || *icompq > 2) {
- *info = -1;
- } else if ((*icompq == 1 && *qsiz < max(0,*n))) {
- *info = -2;
- } else if (*n < 0) {
- *info = -3;
- } else if (*ldq < max(1,*n)) {
- *info = -7;
- } else if (*ldqs < max(1,*n)) {
- *info = -9;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLAED0", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
-
- smlsiz = ilaenv_(&c__9, "DLAED0", " ", &c__0, &c__0, &c__0, &c__0, (
- ftnlen)6, (ftnlen)1);
-
-/*
- Determine the size and placement of the submatrices, and save in
- the leading elements of IWORK.
-*/
-
- iwork[1] = *n;
- subpbs = 1;
- tlvls = 0;
-L10:
- if (iwork[subpbs] > smlsiz) {
- for (j = subpbs; j >= 1; --j) {
- iwork[j * 2] = (iwork[j] + 1) / 2;
- iwork[((j) << (1)) - 1] = iwork[j] / 2;
-/* L20: */
- }
- ++tlvls;
- subpbs <<= 1;
- goto L10;
- }
- i__1 = subpbs;
- for (j = 2; j <= i__1; ++j) {
- iwork[j] += iwork[j - 1];
-/* L30: */
- }
-
-/*
- Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1
- using rank-1 modifications (cuts).
-*/
-
- spm1 = subpbs - 1;
- i__1 = spm1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- submat = iwork[i__] + 1;
- smm1 = submat - 1;
- d__[smm1] -= (d__1 = e[smm1], abs(d__1));
- d__[submat] -= (d__1 = e[smm1], abs(d__1));
-/* L40: */
- }
-
- indxq = ((*n) << (2)) + 3;
- if (*icompq != 2) {
-
-/*
- Set up workspaces for eigenvalues only/accumulate new vectors
- routine
-*/
-
- temp = log((doublereal) (*n)) / log(2.);
- lgn = (integer) temp;
- if (pow_ii(&c__2, &lgn) < *n) {
- ++lgn;
- }
- if (pow_ii(&c__2, &lgn) < *n) {
- ++lgn;
- }
- iprmpt = indxq + *n + 1;
- iperm = iprmpt + *n * lgn;
- iqptr = iperm + *n * lgn;
- igivpt = iqptr + *n + 2;
- igivcl = igivpt + *n * lgn;
-
- igivnm = 1;
- iq = igivnm + ((*n) << (1)) * lgn;
-/* Computing 2nd power */
- i__1 = *n;
- iwrem = iq + i__1 * i__1 + 1;
-
-/* Initialize pointers */
-
- i__1 = subpbs;
- for (i__ = 0; i__ <= i__1; ++i__) {
- iwork[iprmpt + i__] = 1;
- iwork[igivpt + i__] = 1;
-/* L50: */
- }
- iwork[iqptr] = 1;
- }
-
-/*
- Solve each submatrix eigenproblem at the bottom of the divide and
- conquer tree.
-*/
-
- curr = 0;
- i__1 = spm1;
- for (i__ = 0; i__ <= i__1; ++i__) {
- if (i__ == 0) {
- submat = 1;
- matsiz = iwork[1];
- } else {
- submat = iwork[i__] + 1;
- matsiz = iwork[i__ + 1] - iwork[i__];
- }
- if (*icompq == 2) {
- dsteqr_("I", &matsiz, &d__[submat], &e[submat], &q[submat +
- submat * q_dim1], ldq, &work[1], info);
- if (*info != 0) {
- goto L130;
- }
- } else {
- dsteqr_("I", &matsiz, &d__[submat], &e[submat], &work[iq - 1 +
- iwork[iqptr + curr]], &matsiz, &work[1], info);
- if (*info != 0) {
- goto L130;
- }
- if (*icompq == 1) {
- dgemm_("N", "N", qsiz, &matsiz, &matsiz, &c_b15, &q[submat *
- q_dim1 + 1], ldq, &work[iq - 1 + iwork[iqptr + curr]],
- &matsiz, &c_b29, &qstore[submat * qstore_dim1 + 1],
- ldqs);
- }
-/* Computing 2nd power */
- i__2 = matsiz;
- iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2;
- ++curr;
- }
- k = 1;
- i__2 = iwork[i__ + 1];
- for (j = submat; j <= i__2; ++j) {
- iwork[indxq + j] = k;
- ++k;
-/* L60: */
- }
-/* L70: */
- }
-
-/*
- Successively merge eigensystems of adjacent submatrices
- into eigensystem for the corresponding larger matrix.
-
- while ( SUBPBS > 1 )
-*/
-
- curlvl = 1;
-L80:
- if (subpbs > 1) {
- spm2 = subpbs - 2;
- i__1 = spm2;
- for (i__ = 0; i__ <= i__1; i__ += 2) {
- if (i__ == 0) {
- submat = 1;
- matsiz = iwork[2];
- msd2 = iwork[1];
- curprb = 0;
- } else {
- submat = iwork[i__] + 1;
- matsiz = iwork[i__ + 2] - iwork[i__];
- msd2 = matsiz / 2;
- ++curprb;
- }
-
-/*
- Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2)
- into an eigensystem of size MATSIZ.
- DLAED1 is used only for the full eigensystem of a tridiagonal
- matrix.
- DLAED7 handles the cases in which eigenvalues only or eigenvalues
- and eigenvectors of a full symmetric matrix (which was reduced to
- tridiagonal form) are desired.
-*/
-
- if (*icompq == 2) {
- dlaed1_(&matsiz, &d__[submat], &q[submat + submat * q_dim1],
- ldq, &iwork[indxq + submat], &e[submat + msd2 - 1], &
- msd2, &work[1], &iwork[subpbs + 1], info);
- } else {
- dlaed7_(icompq, &matsiz, qsiz, &tlvls, &curlvl, &curprb, &d__[
- submat], &qstore[submat * qstore_dim1 + 1], ldqs, &
- iwork[indxq + submat], &e[submat + msd2 - 1], &msd2, &
- work[iq], &iwork[iqptr], &iwork[iprmpt], &iwork[iperm]
- , &iwork[igivpt], &iwork[igivcl], &work[igivnm], &
- work[iwrem], &iwork[subpbs + 1], info);
- }
- if (*info != 0) {
- goto L130;
- }
- iwork[i__ / 2 + 1] = iwork[i__ + 2];
-/* L90: */
- }
- subpbs /= 2;
- ++curlvl;
- goto L80;
- }
-
-/*
- end while
-
- Re-merge the eigenvalues/vectors which were deflated at the final
- merge step.
-*/
-
- if (*icompq == 1) {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- j = iwork[indxq + i__];
- work[i__] = d__[j];
- dcopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1
- + 1], &c__1);
-/* L100: */
- }
- dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
- } else if (*icompq == 2) {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- j = iwork[indxq + i__];
- work[i__] = d__[j];
- dcopy_(n, &q[j * q_dim1 + 1], &c__1, &work[*n * i__ + 1], &c__1);
-/* L110: */
- }
- dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
- dlacpy_("A", n, n, &work[*n + 1], n, &q[q_offset], ldq);
- } else {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- j = iwork[indxq + i__];
- work[i__] = d__[j];
-/* L120: */
- }
- dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
- }
- goto L140;
-
-L130:
- *info = submat * (*n + 1) + submat + matsiz - 1;
-
-L140:
- return 0;
-
-/* End of DLAED0 */
-
-} /* dlaed0_ */
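
/*
 * Editorial sketch, not part of the original translated source: a
 * caller for the ICOMPQ = 2 case of dlaed0_ above (eigenvalues and
 * eigenvectors of the tridiagonal matrix itself). The workspace sizes
 * follow the WORK/IWORK requirements documented above; Q must start
 * as the identity. Assumes the f2c typedefs from f2c.h and <stdlib.h>.
 */
static void example_dlaed0(integer n, doublereal *d, doublereal *e)
{
    integer icompq = 2, qsiz = n, ldq = n, ldqs = 1, info, i, j;
    doublereal *q = (doublereal *) malloc(n * n * sizeof *q);
    doublereal *work = (doublereal *) malloc((4 * n + n * n) * sizeof *work);
    integer *iwork = (integer *) malloc((3 + 5 * n) * sizeof *iwork);
    doublereal qsdum;                 /* QSTORE unused when ICOMPQ = 2 */

    for (j = 0; j < n; ++j)
        for (i = 0; i < n; ++i)
            q[i + j * n] = (i == j) ? 1. : 0.;
    dlaed0_(&icompq, &qsiz, &n, d, e, q, &ldq, &qsdum, &ldqs,
            work, iwork, &info);
    /* on success d holds the eigenvalues and q the eigenvectors */
    free(q); free(work); free(iwork);
}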
-
-/* Subroutine */ int dlaed1_(integer *n, doublereal *d__, doublereal *q,
- integer *ldq, integer *indxq, doublereal *rho, integer *cutpnt,
- doublereal *work, integer *iwork, integer *info)
-{
- /* System generated locals */
- integer q_dim1, q_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, k, n1, n2, is, iw, iz, iq2, zpp1, indx, indxc;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static integer indxp;
- extern /* Subroutine */ int dlaed2_(integer *, integer *, integer *,
- doublereal *, doublereal *, integer *, integer *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *, integer *,
- integer *, integer *, integer *, integer *), dlaed3_(integer *,
- integer *, integer *, doublereal *, doublereal *, integer *,
- doublereal *, doublereal *, doublereal *, integer *, integer *,
- doublereal *, doublereal *, integer *);
- static integer idlmda;
- extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
- integer *, integer *, integer *), xerbla_(char *, integer *);
- static integer coltyp;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLAED1 computes the updated eigensystem of a diagonal
- matrix after modification by a rank-one symmetric matrix. This
- routine is used only for the eigenproblem which requires all
- eigenvalues and eigenvectors of a tridiagonal matrix. DLAED7 handles
- the case in which eigenvalues only or eigenvalues and eigenvectors
- of a full symmetric matrix (which was reduced to tridiagonal form)
- are desired.
-
- T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out)
-
- where Z = Q'u, u is a vector of length N with ones in the
- CUTPNT-th and (CUTPNT + 1)-th elements and zeros elsewhere.
-
- The eigenvectors of the original matrix are stored in Q, and the
- eigenvalues are in D. The algorithm consists of three stages:
-
- The first stage consists of deflating the size of the problem
- when there are multiple eigenvalues or if there is a zero in
-    the Z vector. For each such occurrence the dimension of the
- secular equation problem is reduced by one. This stage is
- performed by the routine DLAED2.
-
- The second stage consists of calculating the updated
- eigenvalues. This is done by finding the roots of the secular
- equation via the routine DLAED4 (as called by DLAED3).
- This routine also calculates the eigenvectors of the current
- problem.
-
- The final stage consists of computing the updated eigenvectors
- directly using the updated eigenvalues. The eigenvectors for
- the current problem are multiplied with the eigenvectors from
- the overall problem.
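-
-    A minimal calling sketch (illustration only, not part of this
-    file; it assumes the f2c typedefs integer = long int and
-    doublereal = double).  The 2-by-2 matrix T = [ 1 .5 ; .5 2 ] is
-    torn into two 1-by-1 blocks, T = diag(.5, 1.5) + .5*u*u' with
-    u = (1,1)', so on entry D = (.5, 1.5), Q = I (column major),
-    RHO = .5 (the subdiagonal entry), CUTPNT = 1, INDXQ = (1, 1),
-    WORK of length 4*N + N**2 = 12 and IWORK of length 4*N = 8:
-
-        integer n = 2, ldq = 2, cutpnt = 1, info = 0;
-        integer indxq[2] = {1, 1}, iwork[8];
-        doublereal d[2] = {0.5, 1.5};
-        doublereal q[4] = {1., 0., 0., 1.};
-        doublereal rho = 0.5, work[12];
-
-        dlaed1_(&n, d, q, &ldq, indxq, &rho, &cutpnt, work, iwork, &info);
-
-    On return with INFO = 0, D holds the eigenvalues (3 -/+ sqrt(2))/2
-    of T and Q the corresponding eigenvectors.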
-
- Arguments
- =========
-
- N (input) INTEGER
- The dimension of the symmetric tridiagonal matrix. N >= 0.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the eigenvalues of the rank-1-perturbed matrix.
- On exit, the eigenvalues of the repaired matrix.
-
- Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N)
- On entry, the eigenvectors of the rank-1-perturbed matrix.
- On exit, the eigenvectors of the repaired tridiagonal matrix.
-
- LDQ (input) INTEGER
- The leading dimension of the array Q. LDQ >= max(1,N).
-
- INDXQ (input/output) INTEGER array, dimension (N)
- On entry, the permutation which separately sorts the two
- subproblems in D into ascending order.
- On exit, the permutation which will reintegrate the
- subproblems back into sorted order,
- i.e. D( INDXQ( I = 1, N ) ) will be in ascending order.
-
- RHO (input) DOUBLE PRECISION
- The subdiagonal entry used to create the rank-1 modification.
-
- CUTPNT (input) INTEGER
- The location of the last eigenvalue in the leading sub-matrix.
- min(1,N) <= CUTPNT <= N/2.
-
- WORK (workspace) DOUBLE PRECISION array, dimension (4*N + N**2)
-
- IWORK (workspace) INTEGER array, dimension (4*N)
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
- > 0: if INFO = 1, an eigenvalue did not converge
-
- Further Details
- ===============
-
- Based on contributions by
- Jeff Rutter, Computer Science Division, University of California
- at Berkeley, USA
- Modified by Francoise Tisseur, University of Tennessee.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- q_dim1 = *ldq;
- q_offset = 1 + q_dim1 * 1;
- q -= q_offset;
- --indxq;
- --work;
- --iwork;
-
- /* Function Body */
- *info = 0;
-
- if (*n < 0) {
- *info = -1;
- } else if (*ldq < max(1,*n)) {
- *info = -4;
- } else /* if(complicated condition) */ {
-/* Computing MIN */
- i__1 = 1, i__2 = *n / 2;
- if (min(i__1,i__2) > *cutpnt || *n / 2 < *cutpnt) {
- *info = -7;
- }
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLAED1", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
-
-/*
- The following values are integer pointers which indicate
- the portion of the workspace
- used by a particular array in DLAED2 and DLAED3.
-*/
-
- iz = 1;
- idlmda = iz + *n;
- iw = idlmda + *n;
- iq2 = iw + *n;
-
- indx = 1;
- indxc = indx + *n;
- coltyp = indxc + *n;
- indxp = coltyp + *n;
-
-
-/*
- Form the z-vector which consists of the last row of Q_1 and the
- first row of Q_2.
-*/
-
- dcopy_(cutpnt, &q[*cutpnt + q_dim1], ldq, &work[iz], &c__1);
- zpp1 = *cutpnt + 1;
- i__1 = *n - *cutpnt;
- dcopy_(&i__1, &q[zpp1 + zpp1 * q_dim1], ldq, &work[iz + *cutpnt], &c__1);
-
-/* Deflate eigenvalues. */
-
- dlaed2_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, &indxq[1], rho, &work[
- iz], &work[idlmda], &work[iw], &work[iq2], &iwork[indx], &iwork[
- indxc], &iwork[indxp], &iwork[coltyp], info);
-
- if (*info != 0) {
- goto L20;
- }
-
-/* Solve Secular Equation. */
-
- if (k != 0) {
- is = (iwork[coltyp] + iwork[coltyp + 1]) * *cutpnt + (iwork[coltyp +
- 1] + iwork[coltyp + 2]) * (*n - *cutpnt) + iq2;
- dlaed3_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, rho, &work[idlmda],
- &work[iq2], &iwork[indxc], &iwork[coltyp], &work[iw], &work[
- is], info);
- if (*info != 0) {
- goto L20;
- }
-
-/* Prepare the INDXQ sorting permutation. */
-
- n1 = k;
- n2 = *n - k;
- dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
- } else {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- indxq[i__] = i__;
-/* L10: */
- }
- }
-
-L20:
- return 0;
-
-/* End of DLAED1 */
-
-} /* dlaed1_ */
-
-/* Subroutine */ int dlaed2_(integer *k, integer *n, integer *n1, doublereal *
- d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho,
- doublereal *z__, doublereal *dlamda, doublereal *w, doublereal *q2,
- integer *indx, integer *indxc, integer *indxp, integer *coltyp,
- integer *info)
-{
- /* System generated locals */
- integer q_dim1, q_offset, i__1, i__2;
- doublereal d__1, d__2, d__3, d__4;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal c__;
- static integer i__, j;
- static doublereal s, t;
- static integer k2, n2, ct, nj, pj, js, iq1, iq2, n1p1;
- static doublereal eps, tau, tol;
- static integer psm[4], imax, jmax;
- extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *);
- static integer ctot[4];
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *), dcopy_(integer *, doublereal *, integer *, doublereal
- *, integer *);
-
- extern integer idamax_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
- integer *, integer *, integer *), dlacpy_(char *, integer *,
- integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- DLAED2 merges the two sets of eigenvalues together into a single
- sorted set. Then it tries to deflate the size of the problem.
- There are two ways in which deflation can occur: when two or more
- eigenvalues are close together or if there is a tiny entry in the
- Z vector. For each such occurrence the order of the related secular
- equation problem is reduced by one.
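-
-    In outline (a sketch of the close-eigenvalue mechanism): if two
-    surviving eigenvalues D(PJ) and D(NJ) are nearly equal, a plane
-    rotation with C = Z(NJ)/TAU, S = -Z(PJ)/TAU and
-    TAU = sqrt(Z(PJ)**2 + Z(NJ)**2) maps (Z(PJ), Z(NJ)) to (0, TAU);
-    the rotation is applied to columns PJ and NJ of Q, and the PJ-th
-    eigenvalue is deflated, whenever the off-diagonal element the
-    rotation would create, (D(NJ)-D(PJ))*C*S, is within the
-    tolerance TOL.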
-
- Arguments
- =========
-
- K (output) INTEGER
- The number of non-deflated eigenvalues, and the order of the
-           related secular equation.  0 <= K <= N.
-
- N (input) INTEGER
- The dimension of the symmetric tridiagonal matrix. N >= 0.
-
- N1 (input) INTEGER
- The location of the last eigenvalue in the leading sub-matrix.
- min(1,N) <= N1 <= N/2.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, D contains the eigenvalues of the two submatrices to
- be combined.
- On exit, D contains the trailing (N-K) updated eigenvalues
- (those which were deflated) sorted into increasing order.
-
- Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N)
- On entry, Q contains the eigenvectors of two submatrices in
- the two square blocks with corners at (1,1), (N1,N1)
- and (N1+1, N1+1), (N,N).
- On exit, Q contains the trailing (N-K) updated eigenvectors
- (those which were deflated) in its last N-K columns.
-
- LDQ (input) INTEGER
- The leading dimension of the array Q. LDQ >= max(1,N).
-
- INDXQ (input/output) INTEGER array, dimension (N)
- The permutation which separately sorts the two sub-problems
- in D into ascending order. Note that elements in the second
- half of this permutation must first have N1 added to their
- values. Destroyed on exit.
-
- RHO (input/output) DOUBLE PRECISION
- On entry, the off-diagonal element associated with the rank-1
- cut which originally split the two submatrices which are now
- being recombined.
- On exit, RHO has been modified to the value required by
- DLAED3.
-
- Z (input) DOUBLE PRECISION array, dimension (N)
- On entry, Z contains the updating vector (the last
- row of the first sub-eigenvector matrix and the first row of
- the second sub-eigenvector matrix).
- On exit, the contents of Z have been destroyed by the updating
- process.
-
- DLAMDA (output) DOUBLE PRECISION array, dimension (N)
- A copy of the first K eigenvalues which will be used by
- DLAED3 to form the secular equation.
-
- W (output) DOUBLE PRECISION array, dimension (N)
- The first k values of the final deflation-altered z-vector
- which will be passed to DLAED3.
-
- Q2 (output) DOUBLE PRECISION array, dimension (N1**2+(N-N1)**2)
- A copy of the first K eigenvectors which will be used by
- DLAED3 in a matrix multiply (DGEMM) to solve for the new
- eigenvectors.
-
- INDX (workspace) INTEGER array, dimension (N)
- The permutation used to sort the contents of DLAMDA into
- ascending order.
-
- INDXC (output) INTEGER array, dimension (N)
- The permutation used to arrange the columns of the deflated
- Q matrix into three groups: the first group contains non-zero
- elements only at and above N1, the second contains
- non-zero elements only below N1, and the third is dense.
-
- INDXP (workspace) INTEGER array, dimension (N)
- The permutation used to place deflated values of D at the end
- of the array. INDXP(1:K) points to the nondeflated D-values
- and INDXP(K+1:N) points to the deflated eigenvalues.
-
- COLTYP (workspace/output) INTEGER array, dimension (N)
- During execution, a label which will indicate which of the
- following types a column in the Q2 matrix is:
- 1 : non-zero in the upper half only;
- 2 : dense;
- 3 : non-zero in the lower half only;
- 4 : deflated.
- On exit, COLTYP(i) is the number of columns of type i,
- for i=1 to 4 only.
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- Based on contributions by
- Jeff Rutter, Computer Science Division, University of California
- at Berkeley, USA
- Modified by Francoise Tisseur, University of Tennessee.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- q_dim1 = *ldq;
- q_offset = 1 + q_dim1 * 1;
- q -= q_offset;
- --indxq;
- --z__;
- --dlamda;
- --w;
- --q2;
- --indx;
- --indxc;
- --indxp;
- --coltyp;
-
- /* Function Body */
- *info = 0;
-
- if (*n < 0) {
- *info = -2;
- } else if (*ldq < max(1,*n)) {
- *info = -6;
- } else /* if(complicated condition) */ {
-/* Computing MIN */
- i__1 = 1, i__2 = *n / 2;
- if (min(i__1,i__2) > *n1 || *n / 2 < *n1) {
- *info = -3;
- }
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLAED2", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
-
- n2 = *n - *n1;
- n1p1 = *n1 + 1;
-
- if (*rho < 0.) {
- dscal_(&n2, &c_b151, &z__[n1p1], &c__1);
- }
-
-/*
- Normalize z so that norm(z) = 1. Since z is the concatenation of
- two normalized vectors, norm2(z) = sqrt(2).
-*/
-
- t = 1. / sqrt(2.);
- dscal_(n, &t, &z__[1], &c__1);
-
-/* RHO = ABS( norm(z)**2 * RHO ) */
-
- *rho = (d__1 = *rho * 2., abs(d__1));
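-
-/*
-   (Each half of z is a row of an orthogonal eigenvector matrix and
-   hence has unit norm, so norm2(z)**2 = 2; rescaling z by 1/sqrt(2)
-   requires scaling RHO by 2 to keep RHO * z*z' unchanged, whence the
-   |2*RHO| above.)
-*/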
-
-/* Sort the eigenvalues into increasing order */
-
- i__1 = *n;
- for (i__ = n1p1; i__ <= i__1; ++i__) {
- indxq[i__] += *n1;
-/* L10: */
- }
-
-/* re-integrate the deflated parts from the last pass */
-
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dlamda[i__] = d__[indxq[i__]];
-/* L20: */
- }
- dlamrg_(n1, &n2, &dlamda[1], &c__1, &c__1, &indxc[1]);
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- indx[i__] = indxq[indxc[i__]];
-/* L30: */
- }
-
-/* Calculate the allowable deflation tolerance */
-
- imax = idamax_(n, &z__[1], &c__1);
- jmax = idamax_(n, &d__[1], &c__1);
- eps = EPSILON;
-/* Computing MAX */
- d__3 = (d__1 = d__[jmax], abs(d__1)), d__4 = (d__2 = z__[imax], abs(d__2))
- ;
- tol = eps * 8. * max(d__3,d__4);
-
-/*
- If the rank-1 modifier is small enough, no more needs to be done
- except to reorganize Q so that its columns correspond with the
- elements in D.
-*/
-
- if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) {
- *k = 0;
- iq2 = 1;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__ = indx[j];
- dcopy_(n, &q[i__ * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
- dlamda[j] = d__[i__];
- iq2 += *n;
-/* L40: */
- }
- dlacpy_("A", n, n, &q2[1], n, &q[q_offset], ldq);
- dcopy_(n, &dlamda[1], &c__1, &d__[1], &c__1);
- goto L190;
- }
-
-/*
- If there are multiple eigenvalues then the problem deflates. Here
-    the number of equal eigenvalues is found. As each equal
-    eigenvalue is found, a plane rotation is computed to rotate
-    the corresponding eigensubspace so that the corresponding
-    components of Z are zero in this new basis.
-*/
-
- i__1 = *n1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- coltyp[i__] = 1;
-/* L50: */
- }
- i__1 = *n;
- for (i__ = n1p1; i__ <= i__1; ++i__) {
- coltyp[i__] = 3;
-/* L60: */
- }
-
-
- *k = 0;
- k2 = *n + 1;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- nj = indx[j];
- if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) {
-
-/* Deflate due to small z component. */
-
- --k2;
- coltyp[nj] = 4;
- indxp[k2] = nj;
- if (j == *n) {
- goto L100;
- }
- } else {
- pj = nj;
- goto L80;
- }
-/* L70: */
- }
-L80:
- ++j;
- nj = indx[j];
- if (j > *n) {
- goto L100;
- }
- if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) {
-
-/* Deflate due to small z component. */
-
- --k2;
- coltyp[nj] = 4;
- indxp[k2] = nj;
- } else {
-
-/* Check if eigenvalues are close enough to allow deflation. */
-
- s = z__[pj];
- c__ = z__[nj];
-
-/*
- Find sqrt(a**2+b**2) without overflow or
- destructive underflow.
-*/
-
- tau = dlapy2_(&c__, &s);
- t = d__[nj] - d__[pj];
- c__ /= tau;
- s = -s / tau;
- if ((d__1 = t * c__ * s, abs(d__1)) <= tol) {
-
-/* Deflation is possible. */
-
- z__[nj] = tau;
- z__[pj] = 0.;
- if (coltyp[nj] != coltyp[pj]) {
- coltyp[nj] = 2;
- }
- coltyp[pj] = 4;
- drot_(n, &q[pj * q_dim1 + 1], &c__1, &q[nj * q_dim1 + 1], &c__1, &
- c__, &s);
-/* Computing 2nd power */
- d__1 = c__;
-/* Computing 2nd power */
- d__2 = s;
- t = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2);
-/* Computing 2nd power */
- d__1 = s;
-/* Computing 2nd power */
- d__2 = c__;
- d__[nj] = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2);
- d__[pj] = t;
- --k2;
- i__ = 1;
-L90:
- if (k2 + i__ <= *n) {
- if (d__[pj] < d__[indxp[k2 + i__]]) {
- indxp[k2 + i__ - 1] = indxp[k2 + i__];
- indxp[k2 + i__] = pj;
- ++i__;
- goto L90;
- } else {
- indxp[k2 + i__ - 1] = pj;
- }
- } else {
- indxp[k2 + i__ - 1] = pj;
- }
- pj = nj;
- } else {
- ++(*k);
- dlamda[*k] = d__[pj];
- w[*k] = z__[pj];
- indxp[*k] = pj;
- pj = nj;
- }
- }
- goto L80;
-L100:
-
-/* Record the last eigenvalue. */
-
- ++(*k);
- dlamda[*k] = d__[pj];
- w[*k] = z__[pj];
- indxp[*k] = pj;
-
-/*
- Count up the total number of the various types of columns, then
- form a permutation which positions the four column types into
- four uniform groups (although one or more of these groups may be
- empty).
-*/
-
- for (j = 1; j <= 4; ++j) {
- ctot[j - 1] = 0;
-/* L110: */
- }
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- ct = coltyp[j];
- ++ctot[ct - 1];
-/* L120: */
- }
-
-/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
-
- psm[0] = 1;
- psm[1] = ctot[0] + 1;
- psm[2] = psm[1] + ctot[1];
- psm[3] = psm[2] + ctot[2];
- *k = *n - ctot[3];
-
-/*
- Fill out the INDXC array so that the permutation which it induces
- will place all type-1 columns first, all type-2 columns next,
- then all type-3's, and finally all type-4's.
-*/
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- js = indxp[j];
- ct = coltyp[js];
- indx[psm[ct - 1]] = js;
- indxc[psm[ct - 1]] = j;
- ++psm[ct - 1];
-/* L130: */
- }
-
-/*
- Sort the eigenvalues and corresponding eigenvectors into DLAMDA
- and Q2 respectively. The eigenvalues/vectors which were not
- deflated go into the first K slots of DLAMDA and Q2 respectively,
- while those which were deflated go into the last N - K slots.
-*/
-
- i__ = 1;
- iq1 = 1;
- iq2 = (ctot[0] + ctot[1]) * *n1 + 1;
- i__1 = ctot[0];
- for (j = 1; j <= i__1; ++j) {
- js = indx[i__];
- dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
- z__[i__] = d__[js];
- ++i__;
- iq1 += *n1;
-/* L140: */
- }
-
- i__1 = ctot[1];
- for (j = 1; j <= i__1; ++j) {
- js = indx[i__];
- dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
- dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
- z__[i__] = d__[js];
- ++i__;
- iq1 += *n1;
- iq2 += n2;
-/* L150: */
- }
-
- i__1 = ctot[2];
- for (j = 1; j <= i__1; ++j) {
- js = indx[i__];
- dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
- z__[i__] = d__[js];
- ++i__;
- iq2 += n2;
-/* L160: */
- }
-
- iq1 = iq2;
- i__1 = ctot[3];
- for (j = 1; j <= i__1; ++j) {
- js = indx[i__];
- dcopy_(n, &q[js * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
- iq2 += *n;
- z__[i__] = d__[js];
- ++i__;
-/* L170: */
- }
-
-/*
- The deflated eigenvalues and their corresponding vectors go back
- into the last N - K slots of D and Q respectively.
-*/
-
- dlacpy_("A", n, &ctot[3], &q2[iq1], n, &q[(*k + 1) * q_dim1 + 1], ldq);
- i__1 = *n - *k;
- dcopy_(&i__1, &z__[*k + 1], &c__1, &d__[*k + 1], &c__1);
-
-/* Copy CTOT into COLTYP for referencing in DLAED3. */
-
- for (j = 1; j <= 4; ++j) {
- coltyp[j] = ctot[j - 1];
-/* L180: */
- }
-
-L190:
- return 0;
-
-/* End of DLAED2 */
-
-} /* dlaed2_ */
-
-/* Subroutine */ int dlaed3_(integer *k, integer *n, integer *n1, doublereal *
- d__, doublereal *q, integer *ldq, doublereal *rho, doublereal *dlamda,
- doublereal *q2, integer *indx, integer *ctot, doublereal *w,
- doublereal *s, integer *info)
-{
- /* System generated locals */
- integer q_dim1, q_offset, i__1, i__2;
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal), d_sign(doublereal *, doublereal *);
-
- /* Local variables */
- static integer i__, j, n2, n12, ii, n23, iq2;
- static doublereal temp;
- extern doublereal dnrm2_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *),
- dcopy_(integer *, doublereal *, integer *, doublereal *, integer
- *), dlaed4_(integer *, integer *, doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *, integer *);
- extern doublereal dlamc3_(doublereal *, doublereal *);
- extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
- doublereal *, integer *, doublereal *, integer *),
- dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
- doublereal *, integer *), xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLAED3 finds the roots of the secular equation, as defined by the
- values in D, W, and RHO, between 1 and K. It makes the
- appropriate calls to DLAED4 and then updates the eigenvectors by
- multiplying the matrix of eigenvectors of the pair of eigensystems
- being combined by the matrix of eigenvectors of the K-by-K system
- which is solved here.
-
- This code makes very mild assumptions about floating point
- arithmetic. It will work on machines with a guard digit in
- add/subtract, or on those binary machines without guard digits
- which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
- It could conceivably fail on hexadecimal or decimal machines
- without guard digits, but we know of none.
-
- Arguments
- =========
-
- K (input) INTEGER
- The number of terms in the rational function to be solved by
- DLAED4. K >= 0.
-
- N (input) INTEGER
- The number of rows and columns in the Q matrix.
-           N >= K (deflation may result in N > K).
-
- N1 (input) INTEGER
- The location of the last eigenvalue in the leading submatrix.
- min(1,N) <= N1 <= N/2.
-
- D (output) DOUBLE PRECISION array, dimension (N)
- D(I) contains the updated eigenvalues for
- 1 <= I <= K.
-
- Q (output) DOUBLE PRECISION array, dimension (LDQ,N)
- Initially the first K columns are used as workspace.
- On output the columns 1 to K contain
- the updated eigenvectors.
-
- LDQ (input) INTEGER
- The leading dimension of the array Q. LDQ >= max(1,N).
-
- RHO (input) DOUBLE PRECISION
- The value of the parameter in the rank one update equation.
- RHO >= 0 required.
-
- DLAMDA (input/output) DOUBLE PRECISION array, dimension (K)
- The first K elements of this array contain the old roots
- of the deflated updating problem. These are the poles
- of the secular equation. May be changed on output by
- having lowest order bit set to zero on Cray X-MP, Cray Y-MP,
- Cray-2, or Cray C-90, as described above.
-
- Q2 (input) DOUBLE PRECISION array, dimension (LDQ2, N)
- The first K columns of this matrix contain the non-deflated
- eigenvectors for the split problem.
-
- INDX (input) INTEGER array, dimension (N)
- The permutation used to arrange the columns of the deflated
- Q matrix into three groups (see DLAED2).
- The rows of the eigenvectors found by DLAED4 must be likewise
- permuted before the matrix multiply can take place.
-
- CTOT (input) INTEGER array, dimension (4)
- A count of the total number of the various types of columns
- in Q, as described in INDX. The fourth column type is any
- column which has been deflated.
-
- W (input/output) DOUBLE PRECISION array, dimension (K)
- The first K elements of this array contain the components
- of the deflation-adjusted updating vector. Destroyed on
- output.
-
-    S      (workspace) DOUBLE PRECISION array, dimension ( (N1 + 1)*K )
-           Will contain the eigenvectors of the repaired matrix which
-           will be multiplied by the previously accumulated eigenvectors
-           to update the system. (There is no separate LDS argument;
-           internally S is addressed with leading dimensions N23 and
-           N12 in the dlacpy_/dgemm_ calls below.)
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
- > 0: if INFO = 1, an eigenvalue did not converge
-
- Further Details
- ===============
-
- Based on contributions by
- Jeff Rutter, Computer Science Division, University of California
- at Berkeley, USA
- Modified by Francoise Tisseur, University of Tennessee.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- q_dim1 = *ldq;
- q_offset = 1 + q_dim1 * 1;
- q -= q_offset;
- --dlamda;
- --q2;
- --indx;
- --ctot;
- --w;
- --s;
-
- /* Function Body */
- *info = 0;
-
- if (*k < 0) {
- *info = -1;
- } else if (*n < *k) {
- *info = -2;
- } else if (*ldq < max(1,*n)) {
- *info = -6;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLAED3", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*k == 0) {
- return 0;
- }
-
-/*
- Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can
- be computed with high relative accuracy (barring over/underflow).
- This is a problem on machines without a guard digit in
- add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
- The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I),
- which on any of these machines zeros out the bottommost
- bit of DLAMDA(I) if it is 1; this makes the subsequent
- subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation
- occurs. On binary machines with a guard digit (almost all
- machines) it does not change DLAMDA(I) at all. On hexadecimal
- and decimal machines with a guard digit, it slightly
- changes the bottommost bits of DLAMDA(I). It does not account
- for hexadecimal or decimal machines without guard digits
- (we know of none). We use a subroutine call to compute
-    2*DLAMDA(I) to prevent optimizing compilers from eliminating
- this code.
-*/
-
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
-/* L10: */
- }
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
- info);
-
-/* If the zero finder fails, the computation is terminated. */
-
- if (*info != 0) {
- goto L120;
- }
-/* L20: */
- }
-
- if (*k == 1) {
- goto L110;
- }
- if (*k == 2) {
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- w[1] = q[j * q_dim1 + 1];
- w[2] = q[j * q_dim1 + 2];
- ii = indx[1];
- q[j * q_dim1 + 1] = w[ii];
- ii = indx[2];
- q[j * q_dim1 + 2] = w[ii];
-/* L30: */
- }
- goto L110;
- }
-
-/* Compute updated W. */
-
- dcopy_(k, &w[1], &c__1, &s[1], &c__1);
-
-/* Initialize W(I) = Q(I,I) */
-
- i__1 = *ldq + 1;
- dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
-/* L40: */
- }
- i__2 = *k;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
-/* L50: */
- }
-/* L60: */
- }
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- d__1 = sqrt(-w[i__]);
- w[i__] = d_sign(&d__1, &s[i__]);
-/* L70: */
- }
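-
-/*
-   A sketch of what was just built (the Gu/Eisenstat approach): after
-   the DLAED4 calls, Q(I,J) holds DLAMDA(I) - D(J), with D(J) the
-   updated eigenvalue, so the loops above accumulate
-
-      W(I) = prod_j ( DLAMDA(I) - D(j) ) / prod_{j.ne.I} ( DLAMDA(I) - DLAMDA(j) ).
-
-   In exact arithmetic -W(I) equals RHO times the square of the I-th
-   entry of the z-vector for which the computed D(1..K) are exact
-   roots of the secular equation. Taking sqrt(-W(I)) with the sign of
-   the original W recovers that vector up to a constant factor, which
-   cancels when each eigenvector column is normalized below; this is
-   what keeps the computed eigenvectors numerically orthogonal.
-*/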
-
-/* Compute eigenvectors of the modified rank-1 modification. */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *k;
- for (i__ = 1; i__ <= i__2; ++i__) {
- s[i__] = w[i__] / q[i__ + j * q_dim1];
-/* L80: */
- }
- temp = dnrm2_(k, &s[1], &c__1);
- i__2 = *k;
- for (i__ = 1; i__ <= i__2; ++i__) {
- ii = indx[i__];
- q[i__ + j * q_dim1] = s[ii] / temp;
-/* L90: */
- }
-/* L100: */
- }
-
-/* Compute the updated eigenvectors. */
-
-L110:
-
- n2 = *n - *n1;
- n12 = ctot[1] + ctot[2];
- n23 = ctot[2] + ctot[3];
-
- dlacpy_("A", &n23, k, &q[ctot[1] + 1 + q_dim1], ldq, &s[1], &n23);
- iq2 = *n1 * n12 + 1;
- if (n23 != 0) {
- dgemm_("N", "N", &n2, k, &n23, &c_b15, &q2[iq2], &n2, &s[1], &n23, &
- c_b29, &q[*n1 + 1 + q_dim1], ldq);
- } else {
- dlaset_("A", &n2, k, &c_b29, &c_b29, &q[*n1 + 1 + q_dim1], ldq);
- }
-
- dlacpy_("A", &n12, k, &q[q_offset], ldq, &s[1], &n12);
- if (n12 != 0) {
- dgemm_("N", "N", n1, k, &n12, &c_b15, &q2[1], n1, &s[1], &n12, &c_b29,
- &q[q_offset], ldq);
- } else {
- dlaset_("A", n1, k, &c_b29, &c_b29, &q[q_dim1 + 1], ldq);
- }
-
-
-L120:
- return 0;
-
-/* End of DLAED3 */
-
-} /* dlaed3_ */
-
-/* Subroutine */ int dlaed4_(integer *n, integer *i__, doublereal *d__,
- doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dlam,
- integer *info)
-{
- /* System generated locals */
- integer i__1;
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal a, b, c__;
- static integer j;
- static doublereal w;
- static integer ii;
- static doublereal dw, zz[3];
- static integer ip1;
- static doublereal del, eta, phi, eps, tau, psi;
- static integer iim1, iip1;
- static doublereal dphi, dpsi;
- static integer iter;
- static doublereal temp, prew, temp1, dltlb, dltub, midpt;
- static integer niter;
- static logical swtch;
- extern /* Subroutine */ int dlaed5_(integer *, doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *), dlaed6_(integer *,
- logical *, doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, integer *);
- static logical swtch3;
-
- static logical orgati;
- static doublereal erretm, rhoinv;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- December 23, 1999
-
-
- Purpose
- =======
-
-    This subroutine computes the I-th updated eigenvalue of a symmetric
-    rank-one modification to a diagonal matrix whose elements are
-    given in the array d. It is assumed that
-
-           D(i) < D(j) for i < j
-
-    and that RHO > 0. This is arranged by the calling routine, and is
-    no loss of generality. The rank-one modified system is thus
-
-           diag( D ) + RHO * Z * Z_transpose,
-
-    where we assume the Euclidean norm of Z is 1.
-
- The method consists of approximating the rational functions in the
- secular equation by simpler interpolating rational functions.
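-
-    Concretely, with norm(Z) = 1 and RHO > 0, and with deflation having
-    ensured distinct poles and nonzero Z, the updated eigenvalues are
-    the N roots of the secular function
-
-       f(lambda) = 1/RHO + sum_{j=1}^{N} Z(j)**2 / ( D(j) - lambda ),
-
-    one in each interval ( D(i), D(i+1) ) and one in ( D(N), D(N)+RHO ).
-    The code below evaluates f by splitting the sum into PSI (poles
-    below the chosen origin) and PHI (poles above it).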
-
- Arguments
- =========
-
- N (input) INTEGER
- The length of all arrays.
-
- I (input) INTEGER
- The index of the eigenvalue to be computed. 1 <= I <= N.
-
- D (input) DOUBLE PRECISION array, dimension (N)
- The original eigenvalues. It is assumed that they are in
- order, D(I) < D(J) for I < J.
-
- Z (input) DOUBLE PRECISION array, dimension (N)
- The components of the updating vector.
-
- DELTA (output) DOUBLE PRECISION array, dimension (N)
- If N .ne. 1, DELTA contains (D(j) - lambda_I) in its j-th
- component. If N = 1, then DELTA(1) = 1. The vector DELTA
- contains the information necessary to construct the
- eigenvectors.
-
- RHO (input) DOUBLE PRECISION
- The scalar in the symmetric updating formula.
-
- DLAM (output) DOUBLE PRECISION
- The computed lambda_I, the I-th updated eigenvalue.
-
- INFO (output) INTEGER
- = 0: successful exit
- > 0: if INFO = 1, the updating process failed.
-
- Internal Parameters
- ===================
-
- Logical variable ORGATI (origin-at-i?) is used for distinguishing
- whether D(i) or D(i+1) is treated as the origin.
-
- ORGATI = .true. origin at i
- ORGATI = .false. origin at i+1
-
- Logical variable SWTCH3 (switch-for-3-poles?) is for noting
- if we are working with THREE poles!
-
- MAXIT is the maximum number of iterations allowed for each
- eigenvalue.
-
- Further Details
- ===============
-
- Based on contributions by
- Ren-Cang Li, Computer Science Division, University of California
- at Berkeley, USA
-
- =====================================================================
-
-
- Since this routine is called in an inner loop, we do no argument
- checking.
-
- Quick return for N=1 and 2.
-*/
-
- /* Parameter adjustments */
- --delta;
- --z__;
- --d__;
-
- /* Function Body */
- *info = 0;
- if (*n == 1) {
-
-/* Presumably, I=1 upon entry */
-
- *dlam = d__[1] + *rho * z__[1] * z__[1];
- delta[1] = 1.;
- return 0;
- }
- if (*n == 2) {
- dlaed5_(i__, &d__[1], &z__[1], &delta[1], rho, dlam);
- return 0;
- }
-
-/* Compute machine epsilon */
-
- eps = EPSILON;
- rhoinv = 1. / *rho;
-
-/* The case I = N */
-
- if (*i__ == *n) {
-
-/* Initialize some basic variables */
-
- ii = *n - 1;
- niter = 1;
-
-/* Calculate initial guess */
-
- midpt = *rho / 2.;
-
-/*
- If ||Z||_2 is not one, then TEMP should be set to
- RHO * ||Z||_2^2 / TWO
-*/
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] = d__[j] - d__[*i__] - midpt;
-/* L10: */
- }
-
- psi = 0.;
- i__1 = *n - 2;
- for (j = 1; j <= i__1; ++j) {
- psi += z__[j] * z__[j] / delta[j];
-/* L20: */
- }
-
- c__ = rhoinv + psi;
- w = c__ + z__[ii] * z__[ii] / delta[ii] + z__[*n] * z__[*n] / delta[*
- n];
-
- if (w <= 0.) {
- temp = z__[*n - 1] * z__[*n - 1] / (d__[*n] - d__[*n - 1] + *rho)
- + z__[*n] * z__[*n] / *rho;
- if (c__ <= temp) {
- tau = *rho;
- } else {
- del = d__[*n] - d__[*n - 1];
- a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]
- ;
- b = z__[*n] * z__[*n] * del;
- if (a < 0.) {
- tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
- } else {
- tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
- }
- }
-
-/*
- It can be proved that
- D(N)+RHO/2 <= LAMBDA(N) < D(N)+TAU <= D(N)+RHO
-*/
-
- dltlb = midpt;
- dltub = *rho;
- } else {
- del = d__[*n] - d__[*n - 1];
- a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
- b = z__[*n] * z__[*n] * del;
- if (a < 0.) {
- tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
- } else {
- tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
- }
-
-/*
- It can be proved that
- D(N) < D(N)+TAU < LAMBDA(N) < D(N)+RHO/2
-*/
-
- dltlb = 0.;
- dltub = midpt;
- }
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] = d__[j] - d__[*i__] - tau;
-/* L30: */
- }
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = ii;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / delta[j];
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L40: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- temp = z__[*n] / delta[*n];
- phi = z__[*n] * temp;
- dphi = temp * temp;
- erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
- + dphi);
-
- w = rhoinv + phi + psi;
-
-/* Test for convergence */
-
- if (abs(w) <= eps * erretm) {
- *dlam = d__[*i__] + tau;
- goto L250;
- }
-
- if (w <= 0.) {
- dltlb = max(dltlb,tau);
- } else {
- dltub = min(dltub,tau);
- }
-
-/* Calculate the new step */
-
- ++niter;
- c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
- a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * (
- dpsi + dphi);
- b = delta[*n - 1] * delta[*n] * w;
- if (c__ < 0.) {
- c__ = abs(c__);
- }
- if (c__ == 0.) {
-/*
- ETA = B/A
- ETA = RHO - TAU
-*/
- eta = dltub - tau;
- } else if (a >= 0.) {
- eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__
- * 2.);
- } else {
- eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
- );
- }
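-
-/*
-   The two non-degenerate branches above are algebraically the same
-   root of the quadratic  C*ETA**2 - A*ETA + B = 0:
-
-      ETA = ( A + SQRT(A**2 - 4*B*C) ) / ( 2*C )
-          = 2*B / ( A - SQRT(A**2 - 4*B*C) );
-
-   the form is chosen by the sign of A so that A and the square root
-   are never subtracted when they are close, avoiding cancellation
-   (the standard stable quadratic formula). The same pattern recurs
-   in the iterations below and in DLAED6.
-*/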
-
-/*
- Note, eta should be positive if w is negative, and
- eta should be negative otherwise. However,
- if for some reason caused by roundoff, eta*w > 0,
- we simply use one Newton step instead. This way
- will guarantee eta*w < 0.
-*/
-
- if (w * eta > 0.) {
- eta = -w / (dpsi + dphi);
- }
- temp = tau + eta;
- if (temp > dltub || temp < dltlb) {
- if (w < 0.) {
- eta = (dltub - tau) / 2.;
- } else {
- eta = (dltlb - tau) / 2.;
- }
- }
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] -= eta;
-/* L50: */
- }
-
- tau += eta;
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = ii;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / delta[j];
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L60: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- temp = z__[*n] / delta[*n];
- phi = z__[*n] * temp;
- dphi = temp * temp;
- erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
- + dphi);
-
- w = rhoinv + phi + psi;
-
-/* Main loop to update the values of the array DELTA */
-
- iter = niter + 1;
-
- for (niter = iter; niter <= MAXITERLOOPS; ++niter) {
-
-/* Test for convergence */
-
- if (abs(w) <= eps * erretm) {
- *dlam = d__[*i__] + tau;
- goto L250;
- }
-
- if (w <= 0.) {
- dltlb = max(dltlb,tau);
- } else {
- dltub = min(dltub,tau);
- }
-
-/* Calculate the new step */
-
- c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
- a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] *
- (dpsi + dphi);
- b = delta[*n - 1] * delta[*n] * w;
- if (a >= 0.) {
- eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
- c__ * 2.);
- } else {
- eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(
- d__1))));
- }
-
-/*
- Note, eta should be positive if w is negative, and
- eta should be negative otherwise. However,
- if for some reason caused by roundoff, eta*w > 0,
- we simply use one Newton step instead. This way
- will guarantee eta*w < 0.
-*/
-
- if (w * eta > 0.) {
- eta = -w / (dpsi + dphi);
- }
- temp = tau + eta;
- if (temp > dltub || temp < dltlb) {
- if (w < 0.) {
- eta = (dltub - tau) / 2.;
- } else {
- eta = (dltlb - tau) / 2.;
- }
- }
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] -= eta;
-/* L70: */
- }
-
- tau += eta;
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = ii;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / delta[j];
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L80: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- temp = z__[*n] / delta[*n];
- phi = z__[*n] * temp;
- dphi = temp * temp;
- erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (
- dpsi + dphi);
-
- w = rhoinv + phi + psi;
-/* L90: */
- }
-
-/* Return with INFO = 1, NITER = MAXIT and not converged */
-
- *info = 1;
- *dlam = d__[*i__] + tau;
- goto L250;
-
-/* End for the case I = N */
-
- } else {
-
-/* The case for I < N */
-
- niter = 1;
- ip1 = *i__ + 1;
-
-/* Calculate initial guess */
-
- del = d__[ip1] - d__[*i__];
- midpt = del / 2.;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] = d__[j] - d__[*i__] - midpt;
-/* L100: */
- }
-
- psi = 0.;
- i__1 = *i__ - 1;
- for (j = 1; j <= i__1; ++j) {
- psi += z__[j] * z__[j] / delta[j];
-/* L110: */
- }
-
- phi = 0.;
- i__1 = *i__ + 2;
- for (j = *n; j >= i__1; --j) {
- phi += z__[j] * z__[j] / delta[j];
-/* L120: */
- }
- c__ = rhoinv + psi + phi;
- w = c__ + z__[*i__] * z__[*i__] / delta[*i__] + z__[ip1] * z__[ip1] /
- delta[ip1];
-
- if (w > 0.) {
-
-/*
- d(i)< the ith eigenvalue < (d(i)+d(i+1))/2
-
- We choose d(i) as origin.
-*/
-
- orgati = TRUE_;
- a = c__ * del + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
- b = z__[*i__] * z__[*i__] * del;
- if (a > 0.) {
- tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
- d__1))));
- } else {
- tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
- c__ * 2.);
- }
- dltlb = 0.;
- dltub = midpt;
- } else {
-
-/*
- (d(i)+d(i+1))/2 <= the ith eigenvalue < d(i+1)
-
- We choose d(i+1) as origin.
-*/
-
- orgati = FALSE_;
- a = c__ * del - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
- b = z__[ip1] * z__[ip1] * del;
- if (a < 0.) {
- tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs(
- d__1))));
- } else {
- tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) /
- (c__ * 2.);
- }
- dltlb = -midpt;
- dltub = 0.;
- }
-
- if (orgati) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] = d__[j] - d__[*i__] - tau;
-/* L130: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] = d__[j] - d__[ip1] - tau;
-/* L140: */
- }
- }
- if (orgati) {
- ii = *i__;
- } else {
- ii = *i__ + 1;
- }
- iim1 = ii - 1;
- iip1 = ii + 1;
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = iim1;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / delta[j];
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L150: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- dphi = 0.;
- phi = 0.;
- i__1 = iip1;
- for (j = *n; j >= i__1; --j) {
- temp = z__[j] / delta[j];
- phi += z__[j] * temp;
- dphi += temp * temp;
- erretm += phi;
-/* L160: */
- }
-
- w = rhoinv + phi + psi;
-
-/*
- W is the value of the secular function with
- its ii-th element removed.
-*/
-
- swtch3 = FALSE_;
- if (orgati) {
- if (w < 0.) {
- swtch3 = TRUE_;
- }
- } else {
- if (w > 0.) {
- swtch3 = TRUE_;
- }
- }
- if (ii == 1 || ii == *n) {
- swtch3 = FALSE_;
- }
-
- temp = z__[ii] / delta[ii];
- dw = dpsi + dphi + temp * temp;
- temp = z__[ii] * temp;
- w += temp;
- erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. +
- abs(tau) * dw;
-
-/* Test for convergence */
-
- if (abs(w) <= eps * erretm) {
- if (orgati) {
- *dlam = d__[*i__] + tau;
- } else {
- *dlam = d__[ip1] + tau;
- }
- goto L250;
- }
-
- if (w <= 0.) {
- dltlb = max(dltlb,tau);
- } else {
- dltub = min(dltub,tau);
- }
-
-/* Calculate the new step */
-
- ++niter;
- if (! swtch3) {
- if (orgati) {
-/* Computing 2nd power */
- d__1 = z__[*i__] / delta[*i__];
- c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (d__1 *
- d__1);
- } else {
-/* Computing 2nd power */
- d__1 = z__[ip1] / delta[ip1];
- c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * (d__1 *
- d__1);
- }
- a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] *
- dw;
- b = delta[*i__] * delta[ip1] * w;
- if (c__ == 0.) {
- if (a == 0.) {
- if (orgati) {
- a = z__[*i__] * z__[*i__] + delta[ip1] * delta[ip1] *
- (dpsi + dphi);
- } else {
- a = z__[ip1] * z__[ip1] + delta[*i__] * delta[*i__] *
- (dpsi + dphi);
- }
- }
- eta = b / a;
- } else if (a <= 0.) {
- eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
- c__ * 2.);
- } else {
- eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
- d__1))));
- }
- } else {
-
-/* Interpolation using THREE most relevant poles */
-
- temp = rhoinv + psi + phi;
- if (orgati) {
- temp1 = z__[iim1] / delta[iim1];
- temp1 *= temp1;
- c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - d__[
- iip1]) * temp1;
- zz[0] = z__[iim1] * z__[iim1];
- zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + dphi);
- } else {
- temp1 = z__[iip1] / delta[iip1];
- temp1 *= temp1;
- c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - d__[
- iim1]) * temp1;
- zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - temp1));
- zz[2] = z__[iip1] * z__[iip1];
- }
- zz[1] = z__[ii] * z__[ii];
- dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, info);
- if (*info != 0) {
- goto L250;
- }
- }
-
-/*
- Note, eta should be positive if w is negative, and
- eta should be negative otherwise. However,
- if for some reason caused by roundoff, eta*w > 0,
- we simply use one Newton step instead. This way
- will guarantee eta*w < 0.
-*/
-
- if (w * eta >= 0.) {
- eta = -w / dw;
- }
- temp = tau + eta;
- if (temp > dltub || temp < dltlb) {
- if (w < 0.) {
- eta = (dltub - tau) / 2.;
- } else {
- eta = (dltlb - tau) / 2.;
- }
- }
-
- prew = w;
-
-/* L170: */
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] -= eta;
-/* L180: */
- }
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = iim1;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / delta[j];
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L190: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- dphi = 0.;
- phi = 0.;
- i__1 = iip1;
- for (j = *n; j >= i__1; --j) {
- temp = z__[j] / delta[j];
- phi += z__[j] * temp;
- dphi += temp * temp;
- erretm += phi;
-/* L200: */
- }
-
- temp = z__[ii] / delta[ii];
- dw = dpsi + dphi + temp * temp;
- temp = z__[ii] * temp;
- w = rhoinv + phi + psi + temp;
- erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + (
- d__1 = tau + eta, abs(d__1)) * dw;
-
- swtch = FALSE_;
- if (orgati) {
- if (-w > abs(prew) / 10.) {
- swtch = TRUE_;
- }
- } else {
- if (w > abs(prew) / 10.) {
- swtch = TRUE_;
- }
- }
-
- tau += eta;
-
-/* Main loop to update the values of the array DELTA */
-
- iter = niter + 1;
-
- for (niter = iter; niter <= MAXITERLOOPS; ++niter) {
-
-/* Test for convergence */
-
- if (abs(w) <= eps * erretm) {
- if (orgati) {
- *dlam = d__[*i__] + tau;
- } else {
- *dlam = d__[ip1] + tau;
- }
- goto L250;
- }
-
- if (w <= 0.) {
- dltlb = max(dltlb,tau);
- } else {
- dltub = min(dltub,tau);
- }
-
-/* Calculate the new step */
-
- if (! swtch3) {
- if (! swtch) {
- if (orgati) {
-/* Computing 2nd power */
- d__1 = z__[*i__] / delta[*i__];
- c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (
- d__1 * d__1);
- } else {
-/* Computing 2nd power */
- d__1 = z__[ip1] / delta[ip1];
- c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) *
- (d__1 * d__1);
- }
- } else {
- temp = z__[ii] / delta[ii];
- if (orgati) {
- dpsi += temp * temp;
- } else {
- dphi += temp * temp;
- }
- c__ = w - delta[*i__] * dpsi - delta[ip1] * dphi;
- }
- a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1]
- * dw;
- b = delta[*i__] * delta[ip1] * w;
- if (c__ == 0.) {
- if (a == 0.) {
- if (! swtch) {
- if (orgati) {
- a = z__[*i__] * z__[*i__] + delta[ip1] *
- delta[ip1] * (dpsi + dphi);
- } else {
- a = z__[ip1] * z__[ip1] + delta[*i__] * delta[
- *i__] * (dpsi + dphi);
- }
- } else {
- a = delta[*i__] * delta[*i__] * dpsi + delta[ip1]
- * delta[ip1] * dphi;
- }
- }
- eta = b / a;
- } else if (a <= 0.) {
- eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))))
- / (c__ * 2.);
- } else {
- eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__,
- abs(d__1))));
- }
- } else {
-
-/* Interpolation using THREE most relevant poles */
-
- temp = rhoinv + psi + phi;
- if (swtch) {
- c__ = temp - delta[iim1] * dpsi - delta[iip1] * dphi;
- zz[0] = delta[iim1] * delta[iim1] * dpsi;
- zz[2] = delta[iip1] * delta[iip1] * dphi;
- } else {
- if (orgati) {
- temp1 = z__[iim1] / delta[iim1];
- temp1 *= temp1;
- c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1]
- - d__[iip1]) * temp1;
- zz[0] = z__[iim1] * z__[iim1];
- zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 +
- dphi);
- } else {
- temp1 = z__[iip1] / delta[iip1];
- temp1 *= temp1;
- c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1]
- - d__[iim1]) * temp1;
- zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi -
- temp1));
- zz[2] = z__[iip1] * z__[iip1];
- }
- }
- dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta,
- info);
- if (*info != 0) {
- goto L250;
- }
- }
-
-/*
- Note, eta should be positive if w is negative, and
- eta should be negative otherwise. However,
- if for some reason caused by roundoff, eta*w > 0,
- we simply use one Newton step instead. This way
- will guarantee eta*w < 0.
-*/
-
- if (w * eta >= 0.) {
- eta = -w / dw;
- }
- temp = tau + eta;
- if (temp > dltub || temp < dltlb) {
- if (w < 0.) {
- eta = (dltub - tau) / 2.;
- } else {
- eta = (dltlb - tau) / 2.;
- }
- }
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] -= eta;
-/* L210: */
- }
-
- tau += eta;
- prew = w;
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = iim1;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / delta[j];
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L220: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- dphi = 0.;
- phi = 0.;
- i__1 = iip1;
- for (j = *n; j >= i__1; --j) {
- temp = z__[j] / delta[j];
- phi += z__[j] * temp;
- dphi += temp * temp;
- erretm += phi;
-/* L230: */
- }
-
- temp = z__[ii] / delta[ii];
- dw = dpsi + dphi + temp * temp;
- temp = z__[ii] * temp;
- w = rhoinv + phi + psi + temp;
- erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3.
- + abs(tau) * dw;
- if ((w * prew > 0. && abs(w) > abs(prew) / 10.)) {
- swtch = ! swtch;
- }
-
-/* L240: */
- }
-
-/* Return with INFO = 1, NITER = MAXIT and not converged */
-
- *info = 1;
- if (orgati) {
- *dlam = d__[*i__] + tau;
- } else {
- *dlam = d__[ip1] + tau;
- }
-
- }
-
-L250:
-
- return 0;
-
-/* End of DLAED4 */
-
-} /* dlaed4_ */
-
-/* Subroutine */ int dlaed5_(integer *i__, doublereal *d__, doublereal *z__,
- doublereal *delta, doublereal *rho, doublereal *dlam)
-{
- /* System generated locals */
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal b, c__, w, del, tau, temp;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- This subroutine computes the I-th eigenvalue of a symmetric rank-one
- modification of a 2-by-2 diagonal matrix
-
- diag( D ) + RHO * Z * transpose(Z) .
-
- The diagonal elements in the array D are assumed to satisfy
-
- D(i) < D(j) for i < j .
-
- We also assume RHO > 0 and that the Euclidean norm of the vector
- Z is one.
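-
-    For I = 1 the closed form used below can be derived directly
-    (sketch): with TAU = lambda - D(1) and DEL = D(2) - D(1), the
-    secular equation
-
-       1 + RHO*( Z(1)**2/(D(1)-lambda) + Z(2)**2/(D(2)-lambda) ) = 0
-
-    clears to the quadratic
-
-       TAU**2 - ( DEL + RHO*(Z(1)**2 + Z(2)**2) )*TAU + RHO*Z(1)**2*DEL = 0.
-
-    The code first evaluates the secular function at the interval
-    midpoint (the variable W) to decide which diagonal entry to use as
-    origin; the branch taken when the root lies nearer D(1) extracts
-    the smaller root through the cancellation-free form
-    TAU = 2*C / ( B + SQRT(B**2 - 4*C) ), with B and C as in the code.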
-
- Arguments
- =========
-
- I (input) INTEGER
- The index of the eigenvalue to be computed. I = 1 or I = 2.
-
- D (input) DOUBLE PRECISION array, dimension (2)
- The original eigenvalues. We assume D(1) < D(2).
-
- Z (input) DOUBLE PRECISION array, dimension (2)
- The components of the updating vector.
-
- DELTA (output) DOUBLE PRECISION array, dimension (2)
- The vector DELTA contains the information necessary
- to construct the eigenvectors.
-
- RHO (input) DOUBLE PRECISION
- The scalar in the symmetric updating formula.
-
- DLAM (output) DOUBLE PRECISION
- The computed lambda_I, the I-th updated eigenvalue.
-
- Further Details
- ===============
-
- Based on contributions by
- Ren-Cang Li, Computer Science Division, University of California
- at Berkeley, USA
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- --delta;
- --z__;
- --d__;
-
- /* Function Body */
- del = d__[2] - d__[1];
- if (*i__ == 1) {
- w = *rho * 2. * (z__[2] * z__[2] - z__[1] * z__[1]) / del + 1.;
- if (w > 0.) {
- b = del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
- c__ = *rho * z__[1] * z__[1] * del;
-
-/* B > ZERO, always */
-
- tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1))));
- *dlam = d__[1] + tau;
- delta[1] = -z__[1] / tau;
- delta[2] = z__[2] / (del - tau);
- } else {
- b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
- c__ = *rho * z__[2] * z__[2] * del;
- if (b > 0.) {
- tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.));
- } else {
- tau = (b - sqrt(b * b + c__ * 4.)) / 2.;
- }
- *dlam = d__[2] + tau;
- delta[1] = -z__[1] / (del + tau);
- delta[2] = -z__[2] / tau;
- }
- temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
- delta[1] /= temp;
- delta[2] /= temp;
- } else {
-
-/* Now I=2 */
-
- b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
- c__ = *rho * z__[2] * z__[2] * del;
- if (b > 0.) {
- tau = (b + sqrt(b * b + c__ * 4.)) / 2.;
- } else {
- tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.));
- }
- *dlam = d__[2] + tau;
- delta[1] = -z__[1] / (del + tau);
- delta[2] = -z__[2] / tau;
- temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
- delta[1] /= temp;
- delta[2] /= temp;
- }
- return 0;
-
-/* End of DLAED5 */
-
-} /* dlaed5_ */
-
-/* Subroutine */ int dlaed6_(integer *kniter, logical *orgati, doublereal *
- rho, doublereal *d__, doublereal *z__, doublereal *finit, doublereal *
- tau, integer *info)
-{
- /* Initialized data */
-
- static logical first = TRUE_;
-
- /* System generated locals */
- integer i__1;
- doublereal d__1, d__2, d__3, d__4;
-
- /* Builtin functions */
- double sqrt(doublereal), log(doublereal), pow_di(doublereal *, integer *);
-
- /* Local variables */
- static doublereal a, b, c__, f;
- static integer i__;
- static doublereal fc, df, ddf, eta, eps, base;
- static integer iter;
- static doublereal temp, temp1, temp2, temp3, temp4;
- static logical scale;
- static integer niter;
- static doublereal small1, small2, sminv1, sminv2;
-
- static doublereal dscale[3], sclfac, zscale[3], erretm, sclinv;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLAED6 computes the positive or negative root (closest to the origin)
- of
- z(1) z(2) z(3)
- f(x) = rho + --------- + ---------- + ---------
- d(1)-x d(2)-x d(3)-x
-
- It is assumed that
-
- if ORGATI = .true. the root is between d(2) and d(3);
- otherwise it is between d(1) and d(2)
-
-    This routine will be called by DLAED4 when necessary. In most
-    cases the root sought is the smallest in magnitude, though in
-    some extremely rare situations it might not be.
-
- Arguments
- =========
-
- KNITER (input) INTEGER
- Refer to DLAED4 for its significance.
-
- ORGATI (input) LOGICAL
- If ORGATI is true, the needed root is between d(2) and
- d(3); otherwise it is between d(1) and d(2). See
- DLAED4 for further details.
-
- RHO (input) DOUBLE PRECISION
- Refer to the equation f(x) above.
-
- D (input) DOUBLE PRECISION array, dimension (3)
- D satisfies d(1) < d(2) < d(3).
-
- Z (input) DOUBLE PRECISION array, dimension (3)
- Each of the elements in z must be positive.
-
- FINIT (input) DOUBLE PRECISION
-           The value of f at 0, supplied by the caller because it is
-           more accurate than any value this routine could evaluate
-           internally.
-
- TAU (output) DOUBLE PRECISION
- The root of the equation f(x).
-
- INFO (output) INTEGER
- = 0: successful exit
- > 0: if INFO = 1, failure to converge
-
- Further Details
- ===============
-
- Based on contributions by
- Ren-Cang Li, Computer Science Division, University of California
- at Berkeley, USA
-
- =====================================================================
-*/
-
- /* Parameter adjustments */
- --z__;
- --d__;
-
- /* Function Body */
-
- *info = 0;
-
- niter = 1;
- *tau = 0.;
- if (*kniter == 2) {
- if (*orgati) {
- temp = (d__[3] - d__[2]) / 2.;
- c__ = *rho + z__[1] / (d__[1] - d__[2] - temp);
- a = c__ * (d__[2] + d__[3]) + z__[2] + z__[3];
- b = c__ * d__[2] * d__[3] + z__[2] * d__[3] + z__[3] * d__[2];
- } else {
- temp = (d__[1] - d__[2]) / 2.;
- c__ = *rho + z__[3] / (d__[3] - d__[2] - temp);
- a = c__ * (d__[1] + d__[2]) + z__[1] + z__[2];
- b = c__ * d__[1] * d__[2] + z__[1] * d__[2] + z__[2] * d__[1];
- }
-/* Computing MAX */
- d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__);
- temp = max(d__1,d__2);
- a /= temp;
- b /= temp;
- c__ /= temp;
- if (c__ == 0.) {
- *tau = b / a;
- } else if (a <= 0.) {
- *tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
- c__ * 2.);
- } else {
- *tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))
- ));
- }
- temp = *rho + z__[1] / (d__[1] - *tau) + z__[2] / (d__[2] - *tau) +
- z__[3] / (d__[3] - *tau);
- if (abs(*finit) <= abs(temp)) {
- *tau = 0.;
- }
- }
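-
-/*
-   Sketch of the KNITER = 2 guess above: the pole on the far side of
-   the bracketing interval is frozen at the interval midpoint and
-   absorbed into the constant C (e.g. C = RHO + Z(1)/(D(1)-D(2)-TEMP)
-   in the ORGATI case), and the remaining two-pole equation
-
-      C + Z(2)/(D(2)-x) + Z(3)/(D(3)-x) = 0
-
-   is cleared of denominators, giving  C*x**2 - A*x + B = 0  with the
-   A and B computed above. The resulting root is kept as the starting
-   TAU only if it reduces |f| below |FINIT|.
-*/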
-
-/*
- On first call to routine, get machine parameters for
- possible scaling to avoid overflow
-*/
-
- if (first) {
- eps = EPSILON;
- base = BASE;
- i__1 = (integer) (log(SAFEMINIMUM) / log(base) / 3.);
- small1 = pow_di(&base, &i__1);
- sminv1 = 1. / small1;
- small2 = small1 * small1;
- sminv2 = sminv1 * sminv1;
- first = FALSE_;
- }
-
-/*
- Determine if scaling of inputs necessary to avoid overflow
- when computing 1/TEMP**3
-*/
-
- if (*orgati) {
-/* Computing MIN */
- d__3 = (d__1 = d__[2] - *tau, abs(d__1)), d__4 = (d__2 = d__[3] - *
- tau, abs(d__2));
- temp = min(d__3,d__4);
- } else {
-/* Computing MIN */
- d__3 = (d__1 = d__[1] - *tau, abs(d__1)), d__4 = (d__2 = d__[2] - *
- tau, abs(d__2));
- temp = min(d__3,d__4);
- }
- scale = FALSE_;
- if (temp <= small1) {
- scale = TRUE_;
- if (temp <= small2) {
-
-/* Scale up by power of radix nearest 1/SAFMIN**(2/3) */
-
- sclfac = sminv2;
- sclinv = small2;
- } else {
-
-/* Scale up by power of radix nearest 1/SAFMIN**(1/3) */
-
- sclfac = sminv1;
- sclinv = small1;
- }
-
-/* Scaling up safe because D, Z, TAU scaled elsewhere to be O(1) */
-
- for (i__ = 1; i__ <= 3; ++i__) {
- dscale[i__ - 1] = d__[i__] * sclfac;
- zscale[i__ - 1] = z__[i__] * sclfac;
-/* L10: */
- }
- *tau *= sclfac;
- } else {
-
-/* Copy D and Z to DSCALE and ZSCALE */
-
- for (i__ = 1; i__ <= 3; ++i__) {
- dscale[i__ - 1] = d__[i__];
- zscale[i__ - 1] = z__[i__];
-/* L20: */
- }
- }
-
- fc = 0.;
- df = 0.;
- ddf = 0.;
- for (i__ = 1; i__ <= 3; ++i__) {
- temp = 1. / (dscale[i__ - 1] - *tau);
- temp1 = zscale[i__ - 1] * temp;
- temp2 = temp1 * temp;
- temp3 = temp2 * temp;
- fc += temp1 / dscale[i__ - 1];
- df += temp2;
- ddf += temp3;
-/* L30: */
- }
- f = *finit + *tau * fc;
-
- if (abs(f) <= 0.) {
- goto L60;
- }
-
-/*
- Iteration begins
-
- It is not hard to see that
-
- 1) Iterations will go up monotonically
- if FINIT < 0;
-
- 2) Iterations will go down monotonically
- if FINIT > 0.
-*/
-
- iter = niter + 1;
-
- for (niter = iter; niter <= MAXITERLOOPS; ++niter) {
-
- if (*orgati) {
- temp1 = dscale[1] - *tau;
- temp2 = dscale[2] - *tau;
- } else {
- temp1 = dscale[0] - *tau;
- temp2 = dscale[1] - *tau;
- }
- a = (temp1 + temp2) * f - temp1 * temp2 * df;
- b = temp1 * temp2 * f;
- c__ = f - (temp1 + temp2) * df + temp1 * temp2 * ddf;
-/* Computing MAX */
- d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__);
- temp = max(d__1,d__2);
- a /= temp;
- b /= temp;
- c__ /= temp;
- if (c__ == 0.) {
- eta = b / a;
- } else if (a <= 0.) {
- eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__
- * 2.);
- } else {
- eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
- );
- }
- if (f * eta >= 0.) {
- eta = -f / df;
- }
-
- temp = eta + *tau;
- if (*orgati) {
- if ((eta > 0. && temp >= dscale[2])) {
- eta = (dscale[2] - *tau) / 2.;
- }
- if ((eta < 0. && temp <= dscale[1])) {
- eta = (dscale[1] - *tau) / 2.;
- }
- } else {
- if ((eta > 0. && temp >= dscale[1])) {
- eta = (dscale[1] - *tau) / 2.;
- }
- if ((eta < 0. && temp <= dscale[0])) {
- eta = (dscale[0] - *tau) / 2.;
- }
- }
- *tau += eta;
-
- fc = 0.;
- erretm = 0.;
- df = 0.;
- ddf = 0.;
- for (i__ = 1; i__ <= 3; ++i__) {
- temp = 1. / (dscale[i__ - 1] - *tau);
- temp1 = zscale[i__ - 1] * temp;
- temp2 = temp1 * temp;
- temp3 = temp2 * temp;
- temp4 = temp1 / dscale[i__ - 1];
- fc += temp4;
- erretm += abs(temp4);
- df += temp2;
- ddf += temp3;
-/* L40: */
- }
- f = *finit + *tau * fc;
- erretm = (abs(*finit) + abs(*tau) * erretm) * 8. + abs(*tau) * df;
- if (abs(f) <= eps * erretm) {
- goto L60;
- }
-/* L50: */
- }
- *info = 1;
-L60:
-
-/* Undo scaling */
-
- if (scale) {
- *tau *= sclinv;
- }
- return 0;
-
-/* End of DLAED6 */
-
-} /* dlaed6_ */
-
-/* Subroutine */ int dlaed7_(integer *icompq, integer *n, integer *qsiz,
- integer *tlvls, integer *curlvl, integer *curpbm, doublereal *d__,
- doublereal *q, integer *ldq, integer *indxq, doublereal *rho, integer
- *cutpnt, doublereal *qstore, integer *qptr, integer *prmptr, integer *
- perm, integer *givptr, integer *givcol, doublereal *givnum,
- doublereal *work, integer *iwork, integer *info)
-{
- /* System generated locals */
- integer q_dim1, q_offset, i__1, i__2;
-
- /* Builtin functions */
- integer pow_ii(integer *, integer *);
-
- /* Local variables */
- static integer i__, k, n1, n2, is, iw, iz, iq2, ptr, ldq2, indx, curr;
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *);
- static integer indxc, indxp;
- extern /* Subroutine */ int dlaed8_(integer *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, doublereal *,
- integer *, doublereal *, integer *, integer *, integer *,
- doublereal *, integer *, integer *, integer *), dlaed9_(integer *,
- integer *, integer *, integer *, doublereal *, doublereal *,
- integer *, doublereal *, doublereal *, doublereal *, doublereal *,
- integer *, integer *), dlaeda_(integer *, integer *, integer *,
- integer *, integer *, integer *, integer *, integer *, doublereal
- *, doublereal *, integer *, doublereal *, doublereal *, integer *)
- ;
- static integer idlmda;
- extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
- integer *, integer *, integer *), xerbla_(char *, integer *);
- static integer coltyp;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- DLAED7 computes the updated eigensystem of a diagonal
- matrix after modification by a rank-one symmetric matrix. This
- routine is used only for the eigenproblem which requires all
- eigenvalues and optionally eigenvectors of a dense symmetric matrix
- that has been reduced to tridiagonal form. DLAED1 handles
- the case in which all eigenvalues and eigenvectors of a symmetric
- tridiagonal matrix are desired.
-
- T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out)
-
- where Z = Q'u, u is a vector of length N with ones in the
-    CUTPNT-th and (CUTPNT + 1)-th elements and zeros elsewhere.
-
- The eigenvectors of the original matrix are stored in Q, and the
- eigenvalues are in D. The algorithm consists of three stages:
-
- The first stage consists of deflating the size of the problem
- when there are multiple eigenvalues or if there is a zero in
-    the Z vector. For each such occurrence the dimension of the
- secular equation problem is reduced by one. This stage is
- performed by the routine DLAED8.
-
- The second stage consists of calculating the updated
- eigenvalues. This is done by finding the roots of the secular
- equation via the routine DLAED4 (as called by DLAED9).
- This routine also calculates the eigenvectors of the current
- problem.
-
- The final stage consists of computing the updated eigenvectors
- directly using the updated eigenvalues. The eigenvectors for
- the current problem are multiplied with the eigenvectors from
- the overall problem.
-
- Arguments
- =========
-
- ICOMPQ (input) INTEGER
- = 0: Compute eigenvalues only.
- = 1: Compute eigenvectors of original dense symmetric matrix
- also. On entry, Q contains the orthogonal matrix used
- to reduce the original matrix to tridiagonal form.
-
- N (input) INTEGER
- The dimension of the symmetric tridiagonal matrix. N >= 0.
-
- QSIZ (input) INTEGER
- The dimension of the orthogonal matrix used to reduce
- the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1.
-
- TLVLS (input) INTEGER
- The total number of merging levels in the overall divide and
- conquer tree.
-
- CURLVL (input) INTEGER
- The current level in the overall merge routine,
- 0 <= CURLVL <= TLVLS.
-
- CURPBM (input) INTEGER
- The current problem in the current level in the overall
- merge routine (counting from upper left to lower right).
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the eigenvalues of the rank-1-perturbed matrix.
- On exit, the eigenvalues of the repaired matrix.
-
- Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N)
- On entry, the eigenvectors of the rank-1-perturbed matrix.
- On exit, the eigenvectors of the repaired tridiagonal matrix.
-
- LDQ (input) INTEGER
- The leading dimension of the array Q. LDQ >= max(1,N).
-
- INDXQ (output) INTEGER array, dimension (N)
- The permutation which will reintegrate the subproblem just
- solved back into sorted order, i.e., D( INDXQ( I = 1, N ) )
- will be in ascending order.
-
- RHO (input) DOUBLE PRECISION
- The subdiagonal element used to create the rank-1
- modification.
-
- CUTPNT (input) INTEGER
- Contains the location of the last eigenvalue in the leading
- sub-matrix. min(1,N) <= CUTPNT <= N.
-
- QSTORE (input/output) DOUBLE PRECISION array, dimension (N**2+1)
- Stores eigenvectors of submatrices encountered during
- divide and conquer, packed together. QPTR points to
- beginning of the submatrices.
-
- QPTR (input/output) INTEGER array, dimension (N+2)
- List of indices pointing to beginning of submatrices stored
- in QSTORE. The submatrices are numbered starting at the
- bottom left of the divide and conquer tree, from left to
- right and bottom to top.
-
- PRMPTR (input) INTEGER array, dimension (N lg N)
- Contains a list of pointers which indicate where in PERM a
- level's permutation is stored. PRMPTR(i+1) - PRMPTR(i)
- indicates the size of the permutation and also the size of
- the full, non-deflated problem.
-
- PERM (input) INTEGER array, dimension (N lg N)
- Contains the permutations (from deflation and sorting) to be
- applied to each eigenblock.
-
- GIVPTR (input) INTEGER array, dimension (N lg N)
- Contains a list of pointers which indicate where in GIVCOL a
- level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i)
- indicates the number of Givens rotations.
-
- GIVCOL (input) INTEGER array, dimension (2, N lg N)
-           Each pair of numbers indicates a pair of columns involved in
-           a Givens rotation.
-
- GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N)
- Each number indicates the S value to be used in the
- corresponding Givens rotation.
-
- WORK (workspace) DOUBLE PRECISION array, dimension (3*N+QSIZ*N)
-
- IWORK (workspace) INTEGER array, dimension (4*N)
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
- > 0: if INFO = 1, an eigenvalue did not converge
-
- Further Details
- ===============
-
- Based on contributions by
- Jeff Rutter, Computer Science Division, University of California
- at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- q_dim1 = *ldq;
- q_offset = 1 + q_dim1 * 1;
- q -= q_offset;
- --indxq;
- --qstore;
- --qptr;
- --prmptr;
- --perm;
- --givptr;
- givcol -= 3;
- givnum -= 3;
- --work;
- --iwork;
-
- /* Function Body */
- *info = 0;
-
- if (*icompq < 0 || *icompq > 1) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if ((*icompq == 1 && *qsiz < *n)) {
- *info = -4;
- } else if (*ldq < max(1,*n)) {
- *info = -9;
- } else if (min(1,*n) > *cutpnt || *n < *cutpnt) {
- *info = -12;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLAED7", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
-
-/*
- The following values are for bookkeeping purposes only. They are
- integer pointers which indicate the portion of the workspace
- used by a particular array in DLAED8 and DLAED9.
-*/
-
- if (*icompq == 1) {
- ldq2 = *qsiz;
- } else {
- ldq2 = *n;
- }
-
- iz = 1;
- idlmda = iz + *n;
- iw = idlmda + *n;
- iq2 = iw + *n;
- is = iq2 + *n * ldq2;
-
- indx = 1;
- indxc = indx + *n;
- coltyp = indxc + *n;
- indxp = coltyp + *n;
-
-/*
- Form the z-vector which consists of the last row of Q_1 and the
- first row of Q_2.
-*/
-
- ptr = pow_ii(&c__2, tlvls) + 1;
- i__1 = *curlvl - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = *tlvls - i__;
- ptr += pow_ii(&c__2, &i__2);
-/* L10: */
- }
- curr = ptr + *curpbm;
- dlaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], &
- givcol[3], &givnum[3], &qstore[1], &qptr[1], &work[iz], &work[iz
- + *n], info);
-
-/*
- When solving the final problem, we no longer need the stored data,
- so we will overwrite the data from this level onto the previously
- used storage space.
-*/
-
- if (*curlvl == *tlvls) {
- qptr[curr] = 1;
- prmptr[curr] = 1;
- givptr[curr] = 1;
- }
-
-/* Sort and Deflate eigenvalues. */
-
- dlaed8_(icompq, &k, n, qsiz, &d__[1], &q[q_offset], ldq, &indxq[1], rho,
- cutpnt, &work[iz], &work[idlmda], &work[iq2], &ldq2, &work[iw], &
- perm[prmptr[curr]], &givptr[curr + 1], &givcol[((givptr[curr]) <<
- (1)) + 1], &givnum[((givptr[curr]) << (1)) + 1], &iwork[indxp], &
- iwork[indx], info);
- prmptr[curr + 1] = prmptr[curr] + *n;
- givptr[curr + 1] += givptr[curr];
-
-/* Solve Secular Equation. */
-
- if (k != 0) {
- dlaed9_(&k, &c__1, &k, n, &d__[1], &work[is], &k, rho, &work[idlmda],
- &work[iw], &qstore[qptr[curr]], &k, info);
- if (*info != 0) {
- goto L30;
- }
- if (*icompq == 1) {
- dgemm_("N", "N", qsiz, &k, &k, &c_b15, &work[iq2], &ldq2, &qstore[
- qptr[curr]], &k, &c_b29, &q[q_offset], ldq);
- }
-/* Computing 2nd power */
- i__1 = k;
- qptr[curr + 1] = qptr[curr] + i__1 * i__1;
-
-/* Prepare the INDXQ sorting permutation. */
-
- n1 = k;
- n2 = *n - k;
- dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
- } else {
- qptr[curr + 1] = qptr[curr];
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- indxq[i__] = i__;
-/* L20: */
- }
- }
-
-L30:
- return 0;
-
-/* End of DLAED7 */
-
-} /* dlaed7_ */
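
The workspace bookkeeping in DLAED7 is plain pointer arithmetic into one
flat WORK array.  The sketch below (hypothetical sizes; the layout mirrors
the assignments above, with 1-based offsets as in the f2c translation)
prints where each named section starts:

    #include <stdio.h>

    int main(void)
    {
        int n = 4, ldq2 = 4;          /* hypothetical N, and LDQ2 = N */
        int iz     = 1;               /* Z vector, n entries     */
        int idlmda = iz + n;          /* DLAMDA, n entries       */
        int iw     = idlmda + n;      /* W, n entries            */
        int iq2    = iw + n;          /* Q2, n*ldq2 entries      */
        int is     = iq2 + n * ldq2;  /* S                       */
        printf("iz=%d idlmda=%d iw=%d iq2=%d is=%d\n",
               iz, idlmda, iw, iq2, is);
        return 0;
    }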
-
-/* Subroutine */ int dlaed8_(integer *icompq, integer *k, integer *n, integer
- *qsiz, doublereal *d__, doublereal *q, integer *ldq, integer *indxq,
- doublereal *rho, integer *cutpnt, doublereal *z__, doublereal *dlamda,
- doublereal *q2, integer *ldq2, doublereal *w, integer *perm, integer
- *givptr, integer *givcol, doublereal *givnum, integer *indxp, integer
- *indx, integer *info)
-{
- /* System generated locals */
- integer q_dim1, q_offset, q2_dim1, q2_offset, i__1;
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal c__;
- static integer i__, j;
- static doublereal s, t;
- static integer k2, n1, n2, jp, n1p1;
- static doublereal eps, tau, tol;
- static integer jlam, imax, jmax;
- extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *), dscal_(
- integer *, doublereal *, doublereal *, integer *), dcopy_(integer
- *, doublereal *, integer *, doublereal *, integer *);
-
- extern integer idamax_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
- integer *, integer *, integer *), dlacpy_(char *, integer *,
- integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- DLAED8 merges the two sets of eigenvalues together into a single
- sorted set. Then it tries to deflate the size of the problem.
- There are two ways in which deflation can occur: when two or more
- eigenvalues are close together or if there is a tiny element in the
- Z vector. For each such occurrence the order of the related secular
- equation problem is reduced by one.
-
- Arguments
- =========
-
- ICOMPQ (input) INTEGER
- = 0: Compute eigenvalues only.
- = 1: Compute eigenvectors of original dense symmetric matrix
- also. On entry, Q contains the orthogonal matrix used
- to reduce the original matrix to tridiagonal form.
-
- K (output) INTEGER
- The number of non-deflated eigenvalues, and the order of the
- related secular equation.
-
- N (input) INTEGER
- The dimension of the symmetric tridiagonal matrix. N >= 0.
-
- QSIZ (input) INTEGER
- The dimension of the orthogonal matrix used to reduce
- the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the eigenvalues of the two submatrices to be
- combined. On exit, the trailing (N-K) updated eigenvalues
- (those which were deflated) sorted into increasing order.
-
- Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N)
- If ICOMPQ = 0, Q is not referenced. Otherwise,
- on entry, Q contains the eigenvectors of the partially solved
- system which has been previously updated in matrix
- multiplies with other partially solved eigensystems.
- On exit, Q contains the trailing (N-K) updated eigenvectors
- (those which were deflated) in its last N-K columns.
-
- LDQ (input) INTEGER
- The leading dimension of the array Q. LDQ >= max(1,N).
-
- INDXQ (input) INTEGER array, dimension (N)
- The permutation which separately sorts the two sub-problems
- in D into ascending order. Note that elements in the second
- half of this permutation must first have CUTPNT added to
- their values in order to be accurate.
-
- RHO (input/output) DOUBLE PRECISION
- On entry, the off-diagonal element associated with the rank-1
- cut which originally split the two submatrices which are now
- being recombined.
- On exit, RHO has been modified to the value required by
- DLAED3.
-
- CUTPNT (input) INTEGER
- The location of the last eigenvalue in the leading
- sub-matrix. min(1,N) <= CUTPNT <= N.
-
- Z (input) DOUBLE PRECISION array, dimension (N)
- On entry, Z contains the updating vector (the last row of
- the first sub-eigenvector matrix and the first row of the
- second sub-eigenvector matrix).
- On exit, the contents of Z are destroyed by the updating
- process.
-
- DLAMDA (output) DOUBLE PRECISION array, dimension (N)
- A copy of the first K eigenvalues which will be used by
- DLAED3 to form the secular equation.
-
- Q2 (output) DOUBLE PRECISION array, dimension (LDQ2,N)
- If ICOMPQ = 0, Q2 is not referenced. Otherwise,
- a copy of the first K eigenvectors which will be used by
- DLAED7 in a matrix multiply (DGEMM) to update the new
- eigenvectors.
-
- LDQ2 (input) INTEGER
- The leading dimension of the array Q2. LDQ2 >= max(1,N).
-
- W (output) DOUBLE PRECISION array, dimension (N)
-            The first K values of the final deflation-altered z-vector;
-            these will be passed to DLAED3.
-
- PERM (output) INTEGER array, dimension (N)
- The permutations (from deflation and sorting) to be applied
- to each eigenblock.
-
- GIVPTR (output) INTEGER
- The number of Givens rotations which took place in this
- subproblem.
-
- GIVCOL (output) INTEGER array, dimension (2, N)
-            Each pair of numbers indicates a pair of columns involved in
-            a Givens rotation.
-
- GIVNUM (output) DOUBLE PRECISION array, dimension (2, N)
- Each number indicates the S value to be used in the
- corresponding Givens rotation.
-
- INDXP (workspace) INTEGER array, dimension (N)
- The permutation used to place deflated values of D at the end
- of the array. INDXP(1:K) points to the nondeflated D-values
- and INDXP(K+1:N) points to the deflated eigenvalues.
-
- INDX (workspace) INTEGER array, dimension (N)
- The permutation used to sort the contents of D into ascending
- order.
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- Based on contributions by
- Jeff Rutter, Computer Science Division, University of California
- at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- q_dim1 = *ldq;
- q_offset = 1 + q_dim1 * 1;
- q -= q_offset;
- --indxq;
- --z__;
- --dlamda;
- q2_dim1 = *ldq2;
- q2_offset = 1 + q2_dim1 * 1;
- q2 -= q2_offset;
- --w;
- --perm;
- givcol -= 3;
- givnum -= 3;
- --indxp;
- --indx;
-
- /* Function Body */
- *info = 0;
-
- if (*icompq < 0 || *icompq > 1) {
- *info = -1;
- } else if (*n < 0) {
- *info = -3;
- } else if ((*icompq == 1 && *qsiz < *n)) {
- *info = -4;
- } else if (*ldq < max(1,*n)) {
- *info = -7;
- } else if (*cutpnt < min(1,*n) || *cutpnt > *n) {
- *info = -10;
- } else if (*ldq2 < max(1,*n)) {
- *info = -14;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLAED8", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
-
- n1 = *cutpnt;
- n2 = *n - n1;
- n1p1 = n1 + 1;
-
- if (*rho < 0.) {
- dscal_(&n2, &c_b151, &z__[n1p1], &c__1);
- }
-
-/* Normalize z so that norm(z) = 1 */
-
- t = 1. / sqrt(2.);
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- indx[j] = j;
-/* L10: */
- }
- dscal_(n, &t, &z__[1], &c__1);
- *rho = (d__1 = *rho * 2., abs(d__1));
-
-/* Sort the eigenvalues into increasing order */
-
- i__1 = *n;
- for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) {
- indxq[i__] += *cutpnt;
-/* L20: */
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dlamda[i__] = d__[indxq[i__]];
- w[i__] = z__[indxq[i__]];
-/* L30: */
- }
- i__ = 1;
- j = *cutpnt + 1;
- dlamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]);
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- d__[i__] = dlamda[indx[i__]];
- z__[i__] = w[indx[i__]];
-/* L40: */
- }
-
-/* Calculate the allowable deflation tolerance */
-
- imax = idamax_(n, &z__[1], &c__1);
- jmax = idamax_(n, &d__[1], &c__1);
- eps = EPSILON;
- tol = eps * 8. * (d__1 = d__[jmax], abs(d__1));
-
-/*
- If the rank-1 modifier is small enough, no more needs to be done
- except to reorganize Q so that its columns correspond with the
- elements in D.
-*/
-
- if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) {
- *k = 0;
- if (*icompq == 0) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- perm[j] = indxq[indx[j]];
-/* L50: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- perm[j] = indxq[indx[j]];
- dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1
- + 1], &c__1);
-/* L60: */
- }
- dlacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq);
- }
- return 0;
- }
-
-/*
- If there are multiple eigenvalues then the problem deflates. Here
-    the number of equal eigenvalues is found. As each equal
- eigenvalue is found, an elementary reflector is computed to rotate
- the corresponding eigensubspace so that the corresponding
- components of Z are zero in this new basis.
-*/
-
- *k = 0;
- *givptr = 0;
- k2 = *n + 1;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
-
-/* Deflate due to small z component. */
-
- --k2;
- indxp[k2] = j;
- if (j == *n) {
- goto L110;
- }
- } else {
- jlam = j;
- goto L80;
- }
-/* L70: */
- }
-L80:
- ++j;
- if (j > *n) {
- goto L100;
- }
- if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
-
-/* Deflate due to small z component. */
-
- --k2;
- indxp[k2] = j;
- } else {
-
-/* Check if eigenvalues are close enough to allow deflation. */
-
- s = z__[jlam];
- c__ = z__[j];
-
-/*
- Find sqrt(a**2+b**2) without overflow or
- destructive underflow.
-*/
-
- tau = dlapy2_(&c__, &s);
- t = d__[j] - d__[jlam];
- c__ /= tau;
- s = -s / tau;
- if ((d__1 = t * c__ * s, abs(d__1)) <= tol) {
-
-/* Deflation is possible. */
-
- z__[j] = tau;
- z__[jlam] = 0.;
-
-/* Record the appropriate Givens rotation */
-
- ++(*givptr);
- givcol[((*givptr) << (1)) + 1] = indxq[indx[jlam]];
- givcol[((*givptr) << (1)) + 2] = indxq[indx[j]];
- givnum[((*givptr) << (1)) + 1] = c__;
- givnum[((*givptr) << (1)) + 2] = s;
- if (*icompq == 1) {
- drot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[
- indxq[indx[j]] * q_dim1 + 1], &c__1, &c__, &s);
- }
- t = d__[jlam] * c__ * c__ + d__[j] * s * s;
- d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__;
- d__[jlam] = t;
- --k2;
- i__ = 1;
-L90:
- if (k2 + i__ <= *n) {
- if (d__[jlam] < d__[indxp[k2 + i__]]) {
- indxp[k2 + i__ - 1] = indxp[k2 + i__];
- indxp[k2 + i__] = jlam;
- ++i__;
- goto L90;
- } else {
- indxp[k2 + i__ - 1] = jlam;
- }
- } else {
- indxp[k2 + i__ - 1] = jlam;
- }
- jlam = j;
- } else {
- ++(*k);
- w[*k] = z__[jlam];
- dlamda[*k] = d__[jlam];
- indxp[*k] = jlam;
- jlam = j;
- }
- }
- goto L80;
-L100:
-
-/* Record the last eigenvalue. */
-
- ++(*k);
- w[*k] = z__[jlam];
- dlamda[*k] = d__[jlam];
- indxp[*k] = jlam;
-
-L110:
-
-/*
- Sort the eigenvalues and corresponding eigenvectors into DLAMDA
- and Q2 respectively. The eigenvalues/vectors which were not
- deflated go into the first K slots of DLAMDA and Q2 respectively,
- while those which were deflated go into the last N - K slots.
-*/
-
- if (*icompq == 0) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- jp = indxp[j];
- dlamda[j] = d__[jp];
- perm[j] = indxq[indx[jp]];
-/* L120: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- jp = indxp[j];
- dlamda[j] = d__[jp];
- perm[j] = indxq[indx[jp]];
- dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1]
- , &c__1);
-/* L130: */
- }
- }
-
-/*
- The deflated eigenvalues and their corresponding vectors go back
- into the last N - K slots of D and Q respectively.
-*/
-
- if (*k < *n) {
- if (*icompq == 0) {
- i__1 = *n - *k;
- dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
- } else {
- i__1 = *n - *k;
- dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
- i__1 = *n - *k;
- dlacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*
- k + 1) * q_dim1 + 1], ldq);
- }
- }
-
- return 0;
-
-/* End of DLAED8 */
-
-} /* dlaed8_ */
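
The deflation criterion in DLAED8 reduces to a single comparison: component
j deflates when RHO*|Z(j)| falls below TOL = 8*EPS*|D(jmax)|.  A
self-contained sketch of that test (the function name is illustrative, and
DBL_EPSILON stands in for the EPSILON macro used by the translated code):

    #include <math.h>
    #include <float.h>

    /* Nonzero when the j-th z component is negligible. */
    static int deflates_small_z(double rho, double zj, double dmax)
    {
        double tol = 8. * DBL_EPSILON * fabs(dmax);
        return rho * fabs(zj) <= tol;
    }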
-
-/* Subroutine */ int dlaed9_(integer *k, integer *kstart, integer *kstop,
- integer *n, doublereal *d__, doublereal *q, integer *ldq, doublereal *
- rho, doublereal *dlamda, doublereal *w, doublereal *s, integer *lds,
- integer *info)
-{
- /* System generated locals */
- integer q_dim1, q_offset, s_dim1, s_offset, i__1, i__2;
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal), d_sign(doublereal *, doublereal *);
-
- /* Local variables */
- static integer i__, j;
- static doublereal temp;
- extern doublereal dnrm2_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *), dlaed4_(integer *, integer *,
- doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, integer *);
- extern doublereal dlamc3_(doublereal *, doublereal *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- DLAED9 finds the roots of the secular equation, as defined by the
- values in D, Z, and RHO, between KSTART and KSTOP. It makes the
- appropriate calls to DLAED4 and then stores the new matrix of
- eigenvectors for use in calculating the next level of Z vectors.
-
- Arguments
- =========
-
- K (input) INTEGER
- The number of terms in the rational function to be solved by
- DLAED4. K >= 0.
-
- KSTART (input) INTEGER
- KSTOP (input) INTEGER
- The updated eigenvalues Lambda(I), KSTART <= I <= KSTOP
- are to be computed. 1 <= KSTART <= KSTOP <= K.
-
- N (input) INTEGER
- The number of rows and columns in the Q matrix.
-           N >= K (deflation may result in N > K).
-
- D (output) DOUBLE PRECISION array, dimension (N)
- D(I) contains the updated eigenvalues
- for KSTART <= I <= KSTOP.
-
- Q (workspace) DOUBLE PRECISION array, dimension (LDQ,N)
-
- LDQ (input) INTEGER
- The leading dimension of the array Q. LDQ >= max( 1, N ).
-
- RHO (input) DOUBLE PRECISION
- The value of the parameter in the rank one update equation.
- RHO >= 0 required.
-
- DLAMDA (input) DOUBLE PRECISION array, dimension (K)
- The first K elements of this array contain the old roots
- of the deflated updating problem. These are the poles
- of the secular equation.
-
- W (input) DOUBLE PRECISION array, dimension (K)
- The first K elements of this array contain the components
- of the deflation-adjusted updating vector.
-
- S (output) DOUBLE PRECISION array, dimension (LDS, K)
- Will contain the eigenvectors of the repaired matrix which
- will be stored for subsequent Z vector calculation and
- multiplied by the previously accumulated eigenvectors
- to update the system.
-
- LDS (input) INTEGER
- The leading dimension of S. LDS >= max( 1, K ).
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
- > 0: if INFO = 1, an eigenvalue did not converge
-
- Further Details
- ===============
-
- Based on contributions by
- Jeff Rutter, Computer Science Division, University of California
- at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- q_dim1 = *ldq;
- q_offset = 1 + q_dim1 * 1;
- q -= q_offset;
- --dlamda;
- --w;
- s_dim1 = *lds;
- s_offset = 1 + s_dim1 * 1;
- s -= s_offset;
-
- /* Function Body */
- *info = 0;
-
- if (*k < 0) {
- *info = -1;
- } else if (*kstart < 1 || *kstart > max(1,*k)) {
- *info = -2;
- } else if (max(1,*kstop) < *kstart || *kstop > max(1,*k)) {
- *info = -3;
- } else if (*n < *k) {
- *info = -4;
- } else if (*ldq < max(1,*k)) {
- *info = -7;
- } else if (*lds < max(1,*k)) {
- *info = -12;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLAED9", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*k == 0) {
- return 0;
- }
-
-/*
- Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can
- be computed with high relative accuracy (barring over/underflow).
- This is a problem on machines without a guard digit in
- add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
- The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I),
- which on any of these machines zeros out the bottommost
- bit of DLAMDA(I) if it is 1; this makes the subsequent
- subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation
- occurs. On binary machines with a guard digit (almost all
- machines) it does not change DLAMDA(I) at all. On hexadecimal
- and decimal machines with a guard digit, it slightly
- changes the bottommost bits of DLAMDA(I). It does not account
- for hexadecimal or decimal machines without guard digits
- (we know of none). We use a subroutine call to compute
-    2*DLAMDA(I) to prevent optimizing compilers from eliminating
- this code.
-*/
-
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
-/* L10: */
- }
-
- i__1 = *kstop;
- for (j = *kstart; j <= i__1; ++j) {
- dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
- info);
-
-/* If the zero finder fails, the computation is terminated. */
-
- if (*info != 0) {
- goto L120;
- }
-/* L20: */
- }
-
- if (*k == 1 || *k == 2) {
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = *k;
- for (j = 1; j <= i__2; ++j) {
- s[j + i__ * s_dim1] = q[j + i__ * q_dim1];
-/* L30: */
- }
-/* L40: */
- }
- goto L120;
- }
-
-/* Compute updated W. */
-
- dcopy_(k, &w[1], &c__1, &s[s_offset], &c__1);
-
-/* Initialize W(I) = Q(I,I) */
-
- i__1 = *ldq + 1;
- dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
-/* L50: */
- }
- i__2 = *k;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
-/* L60: */
- }
-/* L70: */
- }
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- d__1 = sqrt(-w[i__]);
- w[i__] = d_sign(&d__1, &s[i__ + s_dim1]);
-/* L80: */
- }
-
-/* Compute eigenvectors of the modified rank-1 modification. */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *k;
- for (i__ = 1; i__ <= i__2; ++i__) {
- q[i__ + j * q_dim1] = w[i__] / q[i__ + j * q_dim1];
-/* L90: */
- }
- temp = dnrm2_(k, &q[j * q_dim1 + 1], &c__1);
- i__2 = *k;
- for (i__ = 1; i__ <= i__2; ++i__) {
- s[i__ + j * s_dim1] = q[i__ + j * q_dim1] / temp;
-/* L100: */
- }
-/* L110: */
- }
-
-L120:
- return 0;
-
-/* End of DLAED9 */
-
-} /* dlaed9_ */
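
The DLAMC3 call above exists only to force 2*DLAMDA(I) through a real,
rounded addition that an optimizing compiler cannot fold away.  A modern C
approximation of the same trick uses a volatile temporary in place of the
out-of-line subroutine call; this is a sketch of the idea, not the LAPACK
routine:

    /* Force a + b to be computed and rounded at runtime, so that
     * 2*x - x reliably strips the bottommost bit on machines without
     * a guard digit. */
    static double forced_add(double a, double b)
    {
        volatile double s = a + b;
        return s;
    }
    /* usage: x = forced_add(x, x) - x; */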
-
-/* Subroutine */ int dlaeda_(integer *n, integer *tlvls, integer *curlvl,
- integer *curpbm, integer *prmptr, integer *perm, integer *givptr,
- integer *givcol, doublereal *givnum, doublereal *q, integer *qptr,
- doublereal *z__, doublereal *ztemp, integer *info)
-{
- /* System generated locals */
- integer i__1, i__2, i__3;
-
- /* Builtin functions */
- integer pow_ii(integer *, integer *);
- double sqrt(doublereal);
-
- /* Local variables */
- static integer i__, k, mid, ptr;
- extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *);
- static integer curr, bsiz1, bsiz2, psiz1, psiz2, zptr1;
- extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, integer *), dcopy_(integer *,
- doublereal *, integer *, doublereal *, integer *), xerbla_(char *,
- integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- DLAEDA computes the Z vector corresponding to the merge step in the
- CURLVLth step of the merge process with TLVLS steps for the CURPBMth
- problem.
-
- Arguments
- =========
-
- N (input) INTEGER
- The dimension of the symmetric tridiagonal matrix. N >= 0.
-
- TLVLS (input) INTEGER
- The total number of merging levels in the overall divide and
- conquer tree.
-
- CURLVL (input) INTEGER
- The current level in the overall merge routine,
- 0 <= curlvl <= tlvls.
-
- CURPBM (input) INTEGER
- The current problem in the current level in the overall
- merge routine (counting from upper left to lower right).
-
- PRMPTR (input) INTEGER array, dimension (N lg N)
- Contains a list of pointers which indicate where in PERM a
- level's permutation is stored. PRMPTR(i+1) - PRMPTR(i)
- indicates the size of the permutation and incidentally the
- size of the full, non-deflated problem.
-
- PERM (input) INTEGER array, dimension (N lg N)
- Contains the permutations (from deflation and sorting) to be
- applied to each eigenblock.
-
- GIVPTR (input) INTEGER array, dimension (N lg N)
- Contains a list of pointers which indicate where in GIVCOL a
- level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i)
- indicates the number of Givens rotations.
-
- GIVCOL (input) INTEGER array, dimension (2, N lg N)
-           Each pair of numbers indicates a pair of columns involved in
-           a Givens rotation.
-
- GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N)
- Each number indicates the S value to be used in the
- corresponding Givens rotation.
-
- Q (input) DOUBLE PRECISION array, dimension (N**2)
- Contains the square eigenblocks from previous levels, the
- starting positions for blocks are given by QPTR.
-
- QPTR (input) INTEGER array, dimension (N+2)
- Contains a list of pointers which indicate where in Q an
- eigenblock is stored. SQRT( QPTR(i+1) - QPTR(i) ) indicates
- the size of the block.
-
- Z (output) DOUBLE PRECISION array, dimension (N)
- On output this vector contains the updating vector (the last
- row of the first sub-eigenvector matrix and the first row of
- the second sub-eigenvector matrix).
-
- ZTEMP (workspace) DOUBLE PRECISION array, dimension (N)
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- Based on contributions by
- Jeff Rutter, Computer Science Division, University of California
- at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --ztemp;
- --z__;
- --qptr;
- --q;
- givnum -= 3;
- givcol -= 3;
- --givptr;
- --perm;
- --prmptr;
-
- /* Function Body */
- *info = 0;
-
- if (*n < 0) {
- *info = -1;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLAEDA", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
-
-/* Determine location of first number in second half. */
-
- mid = *n / 2 + 1;
-
-/* Gather last/first rows of appropriate eigenblocks into center of Z */
-
- ptr = 1;
-
-/*
- Determine location of lowest level subproblem in the full storage
- scheme
-*/
-
- i__1 = *curlvl - 1;
- curr = ptr + *curpbm * pow_ii(&c__2, curlvl) + pow_ii(&c__2, &i__1) - 1;
-
-/*
- Determine size of these matrices. We add HALF to the value of
- the SQRT in case the machine underestimates one of these square
- roots.
-*/
-
- bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) + .5);
- bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1])) +
- .5);
- i__1 = mid - bsiz1 - 1;
- for (k = 1; k <= i__1; ++k) {
- z__[k] = 0.;
-/* L10: */
- }
- dcopy_(&bsiz1, &q[qptr[curr] + bsiz1 - 1], &bsiz1, &z__[mid - bsiz1], &
- c__1);
- dcopy_(&bsiz2, &q[qptr[curr + 1]], &bsiz2, &z__[mid], &c__1);
- i__1 = *n;
- for (k = mid + bsiz2; k <= i__1; ++k) {
- z__[k] = 0.;
-/* L20: */
- }
-
-/*
-    Loop through remaining levels 1 -> CURLVL, applying the Givens
- rotations and permutation and then multiplying the center matrices
- against the current Z.
-*/
-
- ptr = pow_ii(&c__2, tlvls) + 1;
- i__1 = *curlvl - 1;
- for (k = 1; k <= i__1; ++k) {
- i__2 = *curlvl - k;
- i__3 = *curlvl - k - 1;
- curr = ptr + *curpbm * pow_ii(&c__2, &i__2) + pow_ii(&c__2, &i__3) -
- 1;
- psiz1 = prmptr[curr + 1] - prmptr[curr];
- psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
- zptr1 = mid - psiz1;
-
-/* Apply Givens at CURR and CURR+1 */
-
- i__2 = givptr[curr + 1] - 1;
- for (i__ = givptr[curr]; i__ <= i__2; ++i__) {
- drot_(&c__1, &z__[zptr1 + givcol[((i__) << (1)) + 1] - 1], &c__1,
- &z__[zptr1 + givcol[((i__) << (1)) + 2] - 1], &c__1, &
- givnum[((i__) << (1)) + 1], &givnum[((i__) << (1)) + 2]);
-/* L30: */
- }
- i__2 = givptr[curr + 2] - 1;
- for (i__ = givptr[curr + 1]; i__ <= i__2; ++i__) {
- drot_(&c__1, &z__[mid - 1 + givcol[((i__) << (1)) + 1]], &c__1, &
- z__[mid - 1 + givcol[((i__) << (1)) + 2]], &c__1, &givnum[
- ((i__) << (1)) + 1], &givnum[((i__) << (1)) + 2]);
-/* L40: */
- }
- psiz1 = prmptr[curr + 1] - prmptr[curr];
- psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
- i__2 = psiz1 - 1;
- for (i__ = 0; i__ <= i__2; ++i__) {
- ztemp[i__ + 1] = z__[zptr1 + perm[prmptr[curr] + i__] - 1];
-/* L50: */
- }
- i__2 = psiz2 - 1;
- for (i__ = 0; i__ <= i__2; ++i__) {
- ztemp[psiz1 + i__ + 1] = z__[mid + perm[prmptr[curr + 1] + i__] -
- 1];
-/* L60: */
- }
-
-/*
- Multiply Blocks at CURR and CURR+1
-
- Determine size of these matrices. We add HALF to the value of
- the SQRT in case the machine underestimates one of these
- square roots.
-*/
-
- bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) +
- .5);
- bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1])
- ) + .5);
- if (bsiz1 > 0) {
- dgemv_("T", &bsiz1, &bsiz1, &c_b15, &q[qptr[curr]], &bsiz1, &
- ztemp[1], &c__1, &c_b29, &z__[zptr1], &c__1);
- }
- i__2 = psiz1 - bsiz1;
- dcopy_(&i__2, &ztemp[bsiz1 + 1], &c__1, &z__[zptr1 + bsiz1], &c__1);
- if (bsiz2 > 0) {
- dgemv_("T", &bsiz2, &bsiz2, &c_b15, &q[qptr[curr + 1]], &bsiz2, &
- ztemp[psiz1 + 1], &c__1, &c_b29, &z__[mid], &c__1);
- }
- i__2 = psiz2 - bsiz2;
- dcopy_(&i__2, &ztemp[psiz1 + bsiz2 + 1], &c__1, &z__[mid + bsiz2], &
- c__1);
-
- i__2 = *tlvls - k;
- ptr += pow_ii(&c__2, &i__2);
-/* L70: */
- }
-
- return 0;
-
-/* End of DLAEDA */
-
-} /* dlaeda_ */
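
Block sizes in DLAEDA are recovered from consecutive QPTR offsets: each
eigenblock is square, so its order is the square root of the number of
entries between the pointers, with .5 added before truncation in case the
machine underestimates the root.  Sketch (illustrative name, 1-based
pointers as above):

    #include <math.h>

    /* Order of the square eigenblock stored between qptr[i] and
     * qptr[i+1]; adding .5 turns the truncation into a round. */
    static int block_order(const int *qptr, int i)
    {
        return (int) (sqrt((double) (qptr[i + 1] - qptr[i])) + .5);
    }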
-
-/* Subroutine */ int dlaev2_(doublereal *a, doublereal *b, doublereal *c__,
- doublereal *rt1, doublereal *rt2, doublereal *cs1, doublereal *sn1)
-{
- /* System generated locals */
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal ab, df, cs, ct, tb, sm, tn, rt, adf, acs;
- static integer sgn1, sgn2;
- static doublereal acmn, acmx;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLAEV2 computes the eigendecomposition of a 2-by-2 symmetric matrix
- [ A B ]
- [ B C ].
- On return, RT1 is the eigenvalue of larger absolute value, RT2 is the
- eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right
- eigenvector for RT1, giving the decomposition
-
- [ CS1 SN1 ] [ A B ] [ CS1 -SN1 ] = [ RT1 0 ]
- [-SN1 CS1 ] [ B C ] [ SN1 CS1 ] [ 0 RT2 ].
-
- Arguments
- =========
-
- A (input) DOUBLE PRECISION
- The (1,1) element of the 2-by-2 matrix.
-
- B (input) DOUBLE PRECISION
- The (1,2) element and the conjugate of the (2,1) element of
- the 2-by-2 matrix.
-
- C (input) DOUBLE PRECISION
- The (2,2) element of the 2-by-2 matrix.
-
- RT1 (output) DOUBLE PRECISION
- The eigenvalue of larger absolute value.
-
- RT2 (output) DOUBLE PRECISION
- The eigenvalue of smaller absolute value.
-
- CS1 (output) DOUBLE PRECISION
- SN1 (output) DOUBLE PRECISION
- The vector (CS1, SN1) is a unit right eigenvector for RT1.
-
- Further Details
- ===============
-
- RT1 is accurate to a few ulps barring over/underflow.
-
- RT2 may be inaccurate if there is massive cancellation in the
- determinant A*C-B*B; higher precision or correctly rounded or
- correctly truncated arithmetic would be needed to compute RT2
- accurately in all cases.
-
- CS1 and SN1 are accurate to a few ulps barring over/underflow.
-
- Overflow is possible only if RT1 is within a factor of 5 of overflow.
- Underflow is harmless if the input data is 0 or exceeds
- underflow_threshold / macheps.
-
- =====================================================================
-
-
- Compute the eigenvalues
-*/
-
- sm = *a + *c__;
- df = *a - *c__;
- adf = abs(df);
- tb = *b + *b;
- ab = abs(tb);
- if (abs(*a) > abs(*c__)) {
- acmx = *a;
- acmn = *c__;
- } else {
- acmx = *c__;
- acmn = *a;
- }
- if (adf > ab) {
-/* Computing 2nd power */
- d__1 = ab / adf;
- rt = adf * sqrt(d__1 * d__1 + 1.);
- } else if (adf < ab) {
-/* Computing 2nd power */
- d__1 = adf / ab;
- rt = ab * sqrt(d__1 * d__1 + 1.);
- } else {
-
-/* Includes case AB=ADF=0 */
-
- rt = ab * sqrt(2.);
- }
- if (sm < 0.) {
- *rt1 = (sm - rt) * .5;
- sgn1 = -1;
-
-/*
- Order of execution important.
- To get fully accurate smaller eigenvalue,
- next line needs to be executed in higher precision.
-*/
-
- *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
- } else if (sm > 0.) {
- *rt1 = (sm + rt) * .5;
- sgn1 = 1;
-
-/*
- Order of execution important.
- To get fully accurate smaller eigenvalue,
- next line needs to be executed in higher precision.
-*/
-
- *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
- } else {
-
-/* Includes case RT1 = RT2 = 0 */
-
- *rt1 = rt * .5;
- *rt2 = rt * -.5;
- sgn1 = 1;
- }
-
-/* Compute the eigenvector */
-
- if (df >= 0.) {
- cs = df + rt;
- sgn2 = 1;
- } else {
- cs = df - rt;
- sgn2 = -1;
- }
- acs = abs(cs);
- if (acs > ab) {
- ct = -tb / cs;
- *sn1 = 1. / sqrt(ct * ct + 1.);
- *cs1 = ct * *sn1;
- } else {
- if (ab == 0.) {
- *cs1 = 1.;
- *sn1 = 0.;
- } else {
- tn = -cs / tb;
- *cs1 = 1. / sqrt(tn * tn + 1.);
- *sn1 = tn * *cs1;
- }
- }
- if (sgn1 == sgn2) {
- tn = *cs1;
- *cs1 = -(*sn1);
- *sn1 = tn;
- }
- return 0;
-
-/* End of DLAEV2 */
-
-} /* dlaev2_ */
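
The values DLAEV2 returns can be checked against the closed form for a
symmetric 2-by-2 matrix: (A+C)/2 +/- sqrt(((A-C)/2)**2 + B**2).  The
standalone check below uses the textbook formula, not the overflow-guarded
path taken above:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double a = 2., b = 1., c = 2.;   /* sample matrix [a b; b c] */
        double mid = .5 * (a + c);
        double rad = sqrt(.25 * (a - c) * (a - c) + b * b);
        /* descending eigenvalues: 3 and 1 for this sample */
        printf("%g %g\n", mid + rad, mid - rad);
        return 0;
    }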
-
-/* Subroutine */ int dlahqr_(logical *wantt, logical *wantz, integer *n,
- integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal
- *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__,
- integer *ldz, integer *info)
-{
- /* System generated locals */
- integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double sqrt(doublereal), d_sign(doublereal *, doublereal *);
-
- /* Local variables */
- static integer i__, j, k, l, m;
- static doublereal s, v[3];
- static integer i1, i2;
- static doublereal t1, t2, t3, v1, v2, v3, h00, h10, h11, h12, h21, h22,
- h33, h44;
- static integer nh;
- static doublereal cs;
- static integer nr;
- static doublereal sn;
- static integer nz;
- static doublereal ave, h33s, h44s;
- static integer itn, its;
- static doublereal ulp, sum, tst1, h43h34, disc, unfl, ovfl;
- extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *);
- static doublereal work[1];
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *), dlanv2_(doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *), dlabad_(
- doublereal *, doublereal *);
-
- extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
- integer *, doublereal *);
- extern doublereal dlanhs_(char *, integer *, doublereal *, integer *,
- doublereal *);
- static doublereal smlnum;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLAHQR is an auxiliary routine called by DHSEQR to update the
- eigenvalues and Schur decomposition already computed by DHSEQR, by
- dealing with the Hessenberg submatrix in rows and columns ILO to IHI.
-
- Arguments
- =========
-
- WANTT (input) LOGICAL
- = .TRUE. : the full Schur form T is required;
- = .FALSE.: only eigenvalues are required.
-
- WANTZ (input) LOGICAL
- = .TRUE. : the matrix of Schur vectors Z is required;
- = .FALSE.: Schur vectors are not required.
-
- N (input) INTEGER
- The order of the matrix H. N >= 0.
-
- ILO (input) INTEGER
- IHI (input) INTEGER
- It is assumed that H is already upper quasi-triangular in
- rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless
- ILO = 1). DLAHQR works primarily with the Hessenberg
- submatrix in rows and columns ILO to IHI, but applies
- transformations to all of H if WANTT is .TRUE..
- 1 <= ILO <= max(1,IHI); IHI <= N.
-
- H (input/output) DOUBLE PRECISION array, dimension (LDH,N)
- On entry, the upper Hessenberg matrix H.
- On exit, if WANTT is .TRUE., H is upper quasi-triangular in
- rows and columns ILO:IHI, with any 2-by-2 diagonal blocks in
- standard form. If WANTT is .FALSE., the contents of H are
- unspecified on exit.
-
- LDH (input) INTEGER
- The leading dimension of the array H. LDH >= max(1,N).
-
- WR (output) DOUBLE PRECISION array, dimension (N)
- WI (output) DOUBLE PRECISION array, dimension (N)
- The real and imaginary parts, respectively, of the computed
- eigenvalues ILO to IHI are stored in the corresponding
- elements of WR and WI. If two eigenvalues are computed as a
- complex conjugate pair, they are stored in consecutive
- elements of WR and WI, say the i-th and (i+1)th, with
- WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the
- eigenvalues are stored in the same order as on the diagonal
- of the Schur form returned in H, with WR(i) = H(i,i), and, if
- H(i:i+1,i:i+1) is a 2-by-2 diagonal block,
- WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i).
-
- ILOZ (input) INTEGER
- IHIZ (input) INTEGER
- Specify the rows of Z to which transformations must be
- applied if WANTZ is .TRUE..
- 1 <= ILOZ <= ILO; IHI <= IHIZ <= N.
-
- Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N)
- If WANTZ is .TRUE., on entry Z must contain the current
- matrix Z of transformations accumulated by DHSEQR, and on
- exit Z has been updated; transformations are applied only to
- the submatrix Z(ILOZ:IHIZ,ILO:IHI).
- If WANTZ is .FALSE., Z is not referenced.
-
- LDZ (input) INTEGER
- The leading dimension of the array Z. LDZ >= max(1,N).
-
- INFO (output) INTEGER
- = 0: successful exit
- > 0: DLAHQR failed to compute all the eigenvalues ILO to IHI
- in a total of 30*(IHI-ILO+1) iterations; if INFO = i,
- elements i+1:ihi of WR and WI contain those eigenvalues
- which have been successfully computed.
-
- Further Details
- ===============
-
- 2-96 Based on modifications by
- David Day, Sandia National Laboratory, USA
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- h_dim1 = *ldh;
- h_offset = 1 + h_dim1 * 1;
- h__ -= h_offset;
- --wr;
- --wi;
- z_dim1 = *ldz;
- z_offset = 1 + z_dim1 * 1;
- z__ -= z_offset;
-
- /* Function Body */
- *info = 0;
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
- if (*ilo == *ihi) {
- wr[*ilo] = h__[*ilo + *ilo * h_dim1];
- wi[*ilo] = 0.;
- return 0;
- }
-
- nh = *ihi - *ilo + 1;
- nz = *ihiz - *iloz + 1;
-
-/*
- Set machine-dependent constants for the stopping criterion.
- If norm(H) <= sqrt(OVFL), overflow should not occur.
-*/
-
- unfl = SAFEMINIMUM;
- ovfl = 1. / unfl;
- dlabad_(&unfl, &ovfl);
- ulp = PRECISION;
- smlnum = unfl * (nh / ulp);
-
-/*
- I1 and I2 are the indices of the first row and last column of H
- to which transformations must be applied. If eigenvalues only are
- being computed, I1 and I2 are set inside the main loop.
-*/
-
- if (*wantt) {
- i1 = 1;
- i2 = *n;
- }
-
-/* ITN is the total number of QR iterations allowed. */
-
- itn = nh * 30;
-
-/*
- The main loop begins here. I is the loop index and decreases from
- IHI to ILO in steps of 1 or 2. Each iteration of the loop works
- with the active submatrix in rows and columns L to I.
- Eigenvalues I+1 to IHI have already converged. Either L = ILO or
- H(L,L-1) is negligible so that the matrix splits.
-*/
-
- i__ = *ihi;
-L10:
- l = *ilo;
- if (i__ < *ilo) {
- goto L150;
- }
-
-/*
- Perform QR iterations on rows and columns ILO to I until a
- submatrix of order 1 or 2 splits off at the bottom because a
- subdiagonal element has become negligible.
-*/
-
- i__1 = itn;
- for (its = 0; its <= i__1; ++its) {
-
-/* Look for a single small subdiagonal element. */
-
- i__2 = l + 1;
- for (k = i__; k >= i__2; --k) {
- tst1 = (d__1 = h__[k - 1 + (k - 1) * h_dim1], abs(d__1)) + (d__2 =
- h__[k + k * h_dim1], abs(d__2));
- if (tst1 == 0.) {
- i__3 = i__ - l + 1;
- tst1 = dlanhs_("1", &i__3, &h__[l + l * h_dim1], ldh, work);
- }
-/* Computing MAX */
- d__2 = ulp * tst1;
- if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= max(d__2,
- smlnum)) {
- goto L30;
- }
-/* L20: */
- }
-L30:
- l = k;
- if (l > *ilo) {
-
-/* H(L,L-1) is negligible */
-
- h__[l + (l - 1) * h_dim1] = 0.;
- }
-
-/* Exit from loop if a submatrix of order 1 or 2 has split off. */
-
- if (l >= i__ - 1) {
- goto L140;
- }
-
-/*
- Now the active submatrix is in rows and columns L to I. If
- eigenvalues only are being computed, only the active submatrix
- need be transformed.
-*/
-
- if (! (*wantt)) {
- i1 = l;
- i2 = i__;
- }
-
- if (its == 10 || its == 20) {
-
-/* Exceptional shift. */
-
- s = (d__1 = h__[i__ + (i__ - 1) * h_dim1], abs(d__1)) + (d__2 =
- h__[i__ - 1 + (i__ - 2) * h_dim1], abs(d__2));
- h44 = s * .75 + h__[i__ + i__ * h_dim1];
- h33 = h44;
- h43h34 = s * -.4375 * s;
- } else {
-
-/*
- Prepare to use Francis' double shift
- (i.e. 2nd degree generalized Rayleigh quotient)
-*/
-
- h44 = h__[i__ + i__ * h_dim1];
- h33 = h__[i__ - 1 + (i__ - 1) * h_dim1];
- h43h34 = h__[i__ + (i__ - 1) * h_dim1] * h__[i__ - 1 + i__ *
- h_dim1];
- s = h__[i__ - 1 + (i__ - 2) * h_dim1] * h__[i__ - 1 + (i__ - 2) *
- h_dim1];
- disc = (h33 - h44) * .5;
- disc = disc * disc + h43h34;
- if (disc > 0.) {
-
-/* Real roots: use Wilkinson's shift twice */
-
- disc = sqrt(disc);
- ave = (h33 + h44) * .5;
- if (abs(h33) - abs(h44) > 0.) {
- h33 = h33 * h44 - h43h34;
- h44 = h33 / (d_sign(&disc, &ave) + ave);
- } else {
- h44 = d_sign(&disc, &ave) + ave;
- }
- h33 = h44;
- h43h34 = 0.;
- }
- }
-
-/* Look for two consecutive small subdiagonal elements. */
-
- i__2 = l;
- for (m = i__ - 2; m >= i__2; --m) {
-/*
- Determine the effect of starting the double-shift QR
- iteration at row M, and see if this would make H(M,M-1)
- negligible.
-*/
-
- h11 = h__[m + m * h_dim1];
- h22 = h__[m + 1 + (m + 1) * h_dim1];
- h21 = h__[m + 1 + m * h_dim1];
- h12 = h__[m + (m + 1) * h_dim1];
- h44s = h44 - h11;
- h33s = h33 - h11;
- v1 = (h33s * h44s - h43h34) / h21 + h12;
- v2 = h22 - h11 - h33s - h44s;
- v3 = h__[m + 2 + (m + 1) * h_dim1];
- s = abs(v1) + abs(v2) + abs(v3);
- v1 /= s;
- v2 /= s;
- v3 /= s;
- v[0] = v1;
- v[1] = v2;
- v[2] = v3;
- if (m == l) {
- goto L50;
- }
- h00 = h__[m - 1 + (m - 1) * h_dim1];
- h10 = h__[m + (m - 1) * h_dim1];
- tst1 = abs(v1) * (abs(h00) + abs(h11) + abs(h22));
- if (abs(h10) * (abs(v2) + abs(v3)) <= ulp * tst1) {
- goto L50;
- }
-/* L40: */
- }
-L50:
-
-/* Double-shift QR step */
-
- i__2 = i__ - 1;
- for (k = m; k <= i__2; ++k) {
-
-/*
- The first iteration of this loop determines a reflection G
- from the vector V and applies it from left and right to H,
- thus creating a nonzero bulge below the subdiagonal.
-
- Each subsequent iteration determines a reflection G to
- restore the Hessenberg form in the (K-1)th column, and thus
- chases the bulge one step toward the bottom of the active
- submatrix. NR is the order of G.
-
- Computing MIN
-*/
- i__3 = 3, i__4 = i__ - k + 1;
- nr = min(i__3,i__4);
- if (k > m) {
- dcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
- }
- dlarfg_(&nr, v, &v[1], &c__1, &t1);
- if (k > m) {
- h__[k + (k - 1) * h_dim1] = v[0];
- h__[k + 1 + (k - 1) * h_dim1] = 0.;
- if (k < i__ - 1) {
- h__[k + 2 + (k - 1) * h_dim1] = 0.;
- }
- } else if (m > l) {
- h__[k + (k - 1) * h_dim1] = -h__[k + (k - 1) * h_dim1];
- }
- v2 = v[1];
- t2 = t1 * v2;
- if (nr == 3) {
- v3 = v[2];
- t3 = t1 * v3;
-
-/*
- Apply G from the left to transform the rows of the matrix
- in columns K to I2.
-*/
-
- i__3 = i2;
- for (j = k; j <= i__3; ++j) {
- sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1]
- + v3 * h__[k + 2 + j * h_dim1];
- h__[k + j * h_dim1] -= sum * t1;
- h__[k + 1 + j * h_dim1] -= sum * t2;
- h__[k + 2 + j * h_dim1] -= sum * t3;
-/* L60: */
- }
-
-/*
- Apply G from the right to transform the columns of the
- matrix in rows I1 to min(K+3,I).
-
- Computing MIN
-*/
- i__4 = k + 3;
- i__3 = min(i__4,i__);
- for (j = i1; j <= i__3; ++j) {
- sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
- + v3 * h__[j + (k + 2) * h_dim1];
- h__[j + k * h_dim1] -= sum * t1;
- h__[j + (k + 1) * h_dim1] -= sum * t2;
- h__[j + (k + 2) * h_dim1] -= sum * t3;
-/* L70: */
- }
-
- if (*wantz) {
-
-/* Accumulate transformations in the matrix Z */
-
- i__3 = *ihiz;
- for (j = *iloz; j <= i__3; ++j) {
- sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
- z_dim1] + v3 * z__[j + (k + 2) * z_dim1];
- z__[j + k * z_dim1] -= sum * t1;
- z__[j + (k + 1) * z_dim1] -= sum * t2;
- z__[j + (k + 2) * z_dim1] -= sum * t3;
-/* L80: */
- }
- }
- } else if (nr == 2) {
-
-/*
- Apply G from the left to transform the rows of the matrix
- in columns K to I2.
-*/
-
- i__3 = i2;
- for (j = k; j <= i__3; ++j) {
- sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1];
- h__[k + j * h_dim1] -= sum * t1;
- h__[k + 1 + j * h_dim1] -= sum * t2;
-/* L90: */
- }
-
-/*
- Apply G from the right to transform the columns of the
- matrix in rows I1 to min(K+3,I).
-*/
-
- i__3 = i__;
- for (j = i1; j <= i__3; ++j) {
- sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
- ;
- h__[j + k * h_dim1] -= sum * t1;
- h__[j + (k + 1) * h_dim1] -= sum * t2;
-/* L100: */
- }
-
- if (*wantz) {
-
-/* Accumulate transformations in the matrix Z */
-
- i__3 = *ihiz;
- for (j = *iloz; j <= i__3; ++j) {
- sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
- z_dim1];
- z__[j + k * z_dim1] -= sum * t1;
- z__[j + (k + 1) * z_dim1] -= sum * t2;
-/* L110: */
- }
- }
- }
-/* L120: */
- }
-
-/* L130: */
- }
-
-/* Failure to converge in remaining number of iterations */
-
- *info = i__;
- return 0;
-
-L140:
-
- if (l == i__) {
-
-/* H(I,I-1) is negligible: one eigenvalue has converged. */
-
- wr[i__] = h__[i__ + i__ * h_dim1];
- wi[i__] = 0.;
- } else if (l == i__ - 1) {
-
-/*
- H(I-1,I-2) is negligible: a pair of eigenvalues have converged.
-
- Transform the 2-by-2 submatrix to standard Schur form,
- and compute and store the eigenvalues.
-*/
-
- dlanv2_(&h__[i__ - 1 + (i__ - 1) * h_dim1], &h__[i__ - 1 + i__ *
- h_dim1], &h__[i__ + (i__ - 1) * h_dim1], &h__[i__ + i__ *
- h_dim1], &wr[i__ - 1], &wi[i__ - 1], &wr[i__], &wi[i__], &cs,
- &sn);
-
- if (*wantt) {
-
-/* Apply the transformation to the rest of H. */
-
- if (i2 > i__) {
- i__1 = i2 - i__;
- drot_(&i__1, &h__[i__ - 1 + (i__ + 1) * h_dim1], ldh, &h__[
- i__ + (i__ + 1) * h_dim1], ldh, &cs, &sn);
- }
- i__1 = i__ - i1 - 1;
- drot_(&i__1, &h__[i1 + (i__ - 1) * h_dim1], &c__1, &h__[i1 + i__ *
- h_dim1], &c__1, &cs, &sn);
- }
- if (*wantz) {
-
-/* Apply the transformation to Z. */
-
- drot_(&nz, &z__[*iloz + (i__ - 1) * z_dim1], &c__1, &z__[*iloz +
- i__ * z_dim1], &c__1, &cs, &sn);
- }
- }
-
-/*
- Decrement number of remaining iterations, and return to start of
- the main loop with new value of I.
-*/
-
- itn -= its;
- i__ = l - 1;
- goto L10;
-
-L150:
- return 0;
-
-/* End of DLAHQR */
-
-} /* dlahqr_ */
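
When the discriminant above is positive, DLAHQR applies Wilkinson's shift
twice.  A sketch of the single-shift form of that formula, assuming the
trailing 2-by-2 block is [h33 h34; h43 h44] (names illustrative; the
degenerate fallbacks here are simpler than what the code above does):

    #include <math.h>

    /* Eigenvalue of the trailing 2-by-2 block closer to h44. */
    static double wilkinson_shift(double h33, double h34,
                                  double h43, double h44)
    {
        double d = .5 * (h33 - h44);
        double p = h34 * h43;
        double disc = d * d + p;
        double r, denom;
        if (disc <= 0.) {            /* complex pair: no real shift */
            return h44;
        }
        r = sqrt(disc);
        denom = d + (d >= 0. ? r : -r);
        return denom != 0. ? h44 - p / denom : h44;
    }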
-
-/* Subroutine */ int dlahrd_(integer *n, integer *k, integer *nb, doublereal *
- a, integer *lda, doublereal *tau, doublereal *t, integer *ldt,
- doublereal *y, integer *ldy)
-{
- /* System generated locals */
- integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
- i__3;
- doublereal d__1;
-
- /* Local variables */
- static integer i__;
- static doublereal ei;
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *), dgemv_(char *, integer *, integer *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, integer *), dcopy_(integer *, doublereal *,
- integer *, doublereal *, integer *), daxpy_(integer *, doublereal
- *, doublereal *, integer *, doublereal *, integer *), dtrmv_(char
- *, char *, char *, integer *, doublereal *, integer *, doublereal
- *, integer *), dlarfg_(integer *,
- doublereal *, doublereal *, integer *, doublereal *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLAHRD reduces the first NB columns of a real general n-by-(n-k+1)
- matrix A so that elements below the k-th subdiagonal are zero. The
- reduction is performed by an orthogonal similarity transformation
- Q' * A * Q. The routine returns the matrices V and T which determine
- Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T.
-
- This is an auxiliary routine called by DGEHRD.
-
- Arguments
- =========
-
- N (input) INTEGER
- The order of the matrix A.
-
- K (input) INTEGER
- The offset for the reduction. Elements below the k-th
- subdiagonal in the first NB columns are reduced to zero.
-
- NB (input) INTEGER
- The number of columns to be reduced.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N-K+1)
- On entry, the n-by-(n-k+1) general matrix A.
- On exit, the elements on and above the k-th subdiagonal in
- the first NB columns are overwritten with the corresponding
- elements of the reduced matrix; the elements below the k-th
- subdiagonal, with the array TAU, represent the matrix Q as a
- product of elementary reflectors. The other columns of A are
- unchanged. See Further Details.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- TAU (output) DOUBLE PRECISION array, dimension (NB)
- The scalar factors of the elementary reflectors. See Further
- Details.
-
- T (output) DOUBLE PRECISION array, dimension (LDT,NB)
- The upper triangular matrix T.
-
- LDT (input) INTEGER
- The leading dimension of the array T. LDT >= NB.
-
- Y (output) DOUBLE PRECISION array, dimension (LDY,NB)
- The n-by-nb matrix Y.
-
- LDY (input) INTEGER
- The leading dimension of the array Y. LDY >= N.
-
- Further Details
- ===============
-
- The matrix Q is represented as a product of nb elementary reflectors
-
- Q = H(1) H(2) . . . H(nb).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in
- A(i+k+1:n,i), and tau in TAU(i).
-
- The elements of the vectors v together form the (n-k+1)-by-nb matrix
- V which is needed, with T and Y, to apply the transformation to the
- unreduced part of the matrix, using an update of the form:
- A := (I - V*T*V') * (A - Y*V').
-
- The contents of A on exit are illustrated by the following example
- with n = 7, k = 3 and nb = 2:
-
- ( a h a a a )
- ( a h a a a )
- ( a h a a a )
- ( h h a a a )
- ( v1 h a a a )
- ( v1 v2 a a a )
- ( v1 v2 a a a )
-
- where a denotes an element of the original matrix A, h denotes a
- modified element of the upper Hessenberg matrix H, and vi denotes an
- element of the vector defining H(i).
-
- =====================================================================
-
-
- Quick return if possible
-*/
-
- /* Parameter adjustments */
- --tau;
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- t_dim1 = *ldt;
- t_offset = 1 + t_dim1 * 1;
- t -= t_offset;
- y_dim1 = *ldy;
- y_offset = 1 + y_dim1 * 1;
- y -= y_offset;
-
- /* Function Body */
- if (*n <= 1) {
- return 0;
- }
-
- i__1 = *nb;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if (i__ > 1) {
-
-/*
- Update A(1:n,i)
-
- Compute i-th column of A - Y * V'
-*/
-
- i__2 = i__ - 1;
- dgemv_("No transpose", n, &i__2, &c_b151, &y[y_offset], ldy, &a[*
- k + i__ - 1 + a_dim1], lda, &c_b15, &a[i__ * a_dim1 + 1],
- &c__1);
-
-/*
- Apply I - V * T' * V' to this column (call it b) from the
- left, using the last column of T as workspace
-
- Let V = ( V1 ) and b = ( b1 ) (first I-1 rows)
- ( V2 ) ( b2 )
-
- where V1 is unit lower triangular
-
- w := V1' * b1
-*/
-
- i__2 = i__ - 1;
- dcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
- 1], &c__1);
- i__2 = i__ - 1;
- dtrmv_("Lower", "Transpose", "Unit", &i__2, &a[*k + 1 + a_dim1],
- lda, &t[*nb * t_dim1 + 1], &c__1);
-
-/* w := w + V2'*b2 */
-
- i__2 = *n - *k - i__ + 1;
- i__3 = i__ - 1;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[*k + i__ + a_dim1],
- lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b15, &t[*nb *
- t_dim1 + 1], &c__1);
-
-/* w := T'*w */
-
- i__2 = i__ - 1;
- dtrmv_("Upper", "Transpose", "Non-unit", &i__2, &t[t_offset], ldt,
- &t[*nb * t_dim1 + 1], &c__1);
-
-/* b2 := b2 - V2*w */
-
- i__2 = *n - *k - i__ + 1;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[*k + i__ +
- a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1, &c_b15, &a[*k
- + i__ + i__ * a_dim1], &c__1);
-
-/* b1 := b1 - V1*w */
-
- i__2 = i__ - 1;
- dtrmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1]
- , lda, &t[*nb * t_dim1 + 1], &c__1);
- i__2 = i__ - 1;
- daxpy_(&i__2, &c_b151, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 +
- i__ * a_dim1], &c__1);
-
- a[*k + i__ - 1 + (i__ - 1) * a_dim1] = ei;
- }
-
-/*
- Generate the elementary reflector H(i) to annihilate
- A(k+i+1:n,i)
-*/
-
- i__2 = *n - *k - i__ + 1;
-/* Computing MIN */
- i__3 = *k + i__ + 1;
- dlarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3,*n) + i__ *
- a_dim1], &c__1, &tau[i__]);
- ei = a[*k + i__ + i__ * a_dim1];
- a[*k + i__ + i__ * a_dim1] = 1.;
-
-/* Compute Y(1:n,i) */
-
- i__2 = *n - *k - i__ + 1;
- dgemv_("No transpose", n, &i__2, &c_b15, &a[(i__ + 1) * a_dim1 + 1],
- lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b29, &y[i__ *
- y_dim1 + 1], &c__1);
- i__2 = *n - *k - i__ + 1;
- i__3 = i__ - 1;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[*k + i__ + a_dim1], lda,
- &a[*k + i__ + i__ * a_dim1], &c__1, &c_b29, &t[i__ * t_dim1 +
- 1], &c__1);
- i__2 = i__ - 1;
- dgemv_("No transpose", n, &i__2, &c_b151, &y[y_offset], ldy, &t[i__ *
- t_dim1 + 1], &c__1, &c_b15, &y[i__ * y_dim1 + 1], &c__1);
- dscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1);
-
-/* Compute T(1:i,i) */
-
- i__2 = i__ - 1;
- d__1 = -tau[i__];
- dscal_(&i__2, &d__1, &t[i__ * t_dim1 + 1], &c__1);
- i__2 = i__ - 1;
- dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt,
- &t[i__ * t_dim1 + 1], &c__1)
- ;
- t[i__ + i__ * t_dim1] = tau[i__];
-
-/* L10: */
- }
- a[*k + *nb + *nb * a_dim1] = ei;
-
- return 0;
-
-/* End of DLAHRD */
-
-} /* dlahrd_ */
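-
-/*
-    Illustrative sketch, not part of the original translation: DLAHRD
-    represents Q as a product of elementary reflectors H(i) = I - tau*v*v'.
-    The plain-C helper below (hypothetical names, 0-based indexing rather
-    than the Fortran 1-based indexing used above) applies one such
-    reflector to a vector, which is all any single H(i) ever does.
-*/
-#if 0
-#include <stdio.h>
-
-static void apply_reflector(int n, double tau, const double *v, double *x)
-{
-    int i;
-    double dot = 0.;
-    for (i = 0; i < n; ++i)          /* dot = v' * x */
-        dot += v[i] * x[i];
-    for (i = 0; i < n; ++i)          /* x := x - tau * (v' * x) * v */
-        x[i] -= tau * dot * v[i];
-}
-
-int main(void)
-{
-    double v[3] = { 1., .5, .25 };   /* v(1) = 1, as DLAHRD stores it */
-    double x[3] = { 2., 1., 1. };
-    apply_reflector(3, 1.2, v, x);
-    printf("%g %g %g\n", x[0], x[1], x[2]);
-    return 0;
-}
-#endif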
-
-/* Subroutine */ int dlaln2_(logical *ltrans, integer *na, integer *nw,
- doublereal *smin, doublereal *ca, doublereal *a, integer *lda,
- doublereal *d1, doublereal *d2, doublereal *b, integer *ldb,
- doublereal *wr, doublereal *wi, doublereal *x, integer *ldx,
- doublereal *scale, doublereal *xnorm, integer *info)
-{
- /* Initialized data */
-
- static logical zswap[4] = { FALSE_,FALSE_,TRUE_,TRUE_ };
- static logical rswap[4] = { FALSE_,TRUE_,FALSE_,TRUE_ };
- static integer ipivot[16] /* was [4][4] */ = { 1,2,3,4,2,1,4,3,3,4,1,2,
- 4,3,2,1 };
-
- /* System generated locals */
- integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset;
- doublereal d__1, d__2, d__3, d__4, d__5, d__6;
- static doublereal equiv_0[4], equiv_1[4];
-
- /* Local variables */
- static integer j;
-#define ci (equiv_0)
-#define cr (equiv_1)
- static doublereal bi1, bi2, br1, br2, xi1, xi2, xr1, xr2, ci21, ci22,
- cr21, cr22, li21, csi, ui11, lr21, ui12, ui22;
-#define civ (equiv_0)
- static doublereal csr, ur11, ur12, ur22;
-#define crv (equiv_1)
- static doublereal bbnd, cmax, ui11r, ui12s, temp, ur11r, ur12s, u22abs;
- static integer icmax;
- static doublereal bnorm, cnorm, smini;
-
- extern /* Subroutine */ int dladiv_(doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *);
- static doublereal bignum, smlnum;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLALN2 solves a system of the form (ca A - w D ) X = s B
- or (ca A' - w D) X = s B with possible scaling ("s") and
- perturbation of A. (A' means A-transpose.)
-
- A is an NA x NA real matrix, ca is a real scalar, D is an NA x NA
- real diagonal matrix, w is a real or complex value, and X and B are
- NA x 1 matrices -- real if w is real, complex if w is complex. NA
- may be 1 or 2.
-
- If w is complex, X and B are represented as NA x 2 matrices,
- the first column of each being the real part and the second
- being the imaginary part.
-
- "s" is a scaling factor (.LE. 1), computed by DLALN2, which is
- so chosen that X can be computed without overflow. X is further
- scaled if necessary to assure that norm(ca A - w D)*norm(X) is less
- than overflow.
-
- If both singular values of (ca A - w D) are less than SMIN,
- SMIN*identity will be used instead of (ca A - w D). If only one
- singular value is less than SMIN, one element of (ca A - w D) will be
- perturbed enough to make the smallest singular value roughly SMIN.
- If both singular values are at least SMIN, (ca A - w D) will not be
- perturbed. In any case, the perturbation will be at most some small
- multiple of max( SMIN, ulp*norm(ca A - w D) ). The singular values
- are computed by infinity-norm approximations, and thus will only be
- correct to a factor of 2 or so.
-
- Note: all input quantities are assumed to be smaller than overflow
- by a reasonable factor. (See BIGNUM.)
-
- Arguments
- ==========
-
- LTRANS (input) LOGICAL
- =.TRUE.: A-transpose will be used.
- =.FALSE.: A will be used (not transposed.)
-
- NA (input) INTEGER
- The size of the matrix A. It may (only) be 1 or 2.
-
- NW (input) INTEGER
- 1 if "w" is real, 2 if "w" is complex. It may only be 1
- or 2.
-
- SMIN (input) DOUBLE PRECISION
- The desired lower bound on the singular values of A. This
- should be a safe distance away from underflow or overflow,
- say, between (underflow/machine precision) and (machine
- precision * overflow ). (See BIGNUM and ULP.)
-
- CA (input) DOUBLE PRECISION
- The coefficient c, which A is multiplied by.
-
- A (input) DOUBLE PRECISION array, dimension (LDA,NA)
- The NA x NA matrix A.
-
- LDA (input) INTEGER
- The leading dimension of A. It must be at least NA.
-
- D1 (input) DOUBLE PRECISION
- The 1,1 element in the diagonal matrix D.
-
- D2 (input) DOUBLE PRECISION
- The 2,2 element in the diagonal matrix D. Not used if NW=1.
-
- B (input) DOUBLE PRECISION array, dimension (LDB,NW)
- The NA x NW matrix B (right-hand side). If NW=2 ("w" is
- complex), column 1 contains the real part of B and column 2
- contains the imaginary part.
-
- LDB (input) INTEGER
- The leading dimension of B. It must be at least NA.
-
- WR (input) DOUBLE PRECISION
- The real part of the scalar "w".
-
- WI (input) DOUBLE PRECISION
- The imaginary part of the scalar "w". Not used if NW=1.
-
- X (output) DOUBLE PRECISION array, dimension (LDX,NW)
- The NA x NW matrix X (unknowns), as computed by DLALN2.
- If NW=2 ("w" is complex), on exit, column 1 will contain
- the real part of X and column 2 will contain the imaginary
- part.
-
- LDX (input) INTEGER
- The leading dimension of X. It must be at least NA.
-
- SCALE (output) DOUBLE PRECISION
-          The scale factor that B must be multiplied by to ensure
- that overflow does not occur when computing X. Thus,
- (ca A - w D) X will be SCALE*B, not B (ignoring
- perturbations of A.) It will be at most 1.
-
- XNORM (output) DOUBLE PRECISION
- The infinity-norm of X, when X is regarded as an NA x NW
- real matrix.
-
- INFO (output) INTEGER
- An error flag. It will be set to zero if no error occurs,
- a negative number if an argument is in error, or a positive
- number if ca A - w D had to be perturbed.
- The possible values are:
- = 0: No error occurred, and (ca A - w D) did not have to be
- perturbed.
- = 1: (ca A - w D) had to be perturbed to make its smallest
- (or only) singular value greater than SMIN.
- NOTE: In the interests of speed, this routine does not
- check the inputs for errors.
-
- =====================================================================
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
- x_dim1 = *ldx;
- x_offset = 1 + x_dim1 * 1;
- x -= x_offset;
-
- /* Function Body */
-
-/* Compute BIGNUM */
-
- smlnum = 2. * SAFEMINIMUM;
- bignum = 1. / smlnum;
- smini = max(*smin,smlnum);
-
-/* Don't check for input errors */
-
- *info = 0;
-
-/* Standard Initializations */
-
- *scale = 1.;
-
- if (*na == 1) {
-
-/* 1 x 1 (i.e., scalar) system C X = B */
-
- if (*nw == 1) {
-
-/*
- Real 1x1 system.
-
- C = ca A - w D
-*/
-
- csr = *ca * a[a_dim1 + 1] - *wr * *d1;
- cnorm = abs(csr);
-
-/* If | C | < SMINI, use C = SMINI */
-
- if (cnorm < smini) {
- csr = smini;
- cnorm = smini;
- *info = 1;
- }
-
-/* Check scaling for X = B / C */
-
- bnorm = (d__1 = b[b_dim1 + 1], abs(d__1));
- if ((cnorm < 1. && bnorm > 1.)) {
- if (bnorm > bignum * cnorm) {
- *scale = 1. / bnorm;
- }
- }
-
-/* Compute X */
-
- x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / csr;
- *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1));
- } else {
-
-/*
- Complex 1x1 system (w is complex)
-
- C = ca A - w D
-*/
-
- csr = *ca * a[a_dim1 + 1] - *wr * *d1;
- csi = -(*wi) * *d1;
- cnorm = abs(csr) + abs(csi);
-
-/* If | C | < SMINI, use C = SMINI */
-
- if (cnorm < smini) {
- csr = smini;
- csi = 0.;
- cnorm = smini;
- *info = 1;
- }
-
-/* Check scaling for X = B / C */
-
- bnorm = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[((b_dim1) <<
- (1)) + 1], abs(d__2));
- if ((cnorm < 1. && bnorm > 1.)) {
- if (bnorm > bignum * cnorm) {
- *scale = 1. / bnorm;
- }
- }
-
-/* Compute X */
-
- d__1 = *scale * b[b_dim1 + 1];
- d__2 = *scale * b[((b_dim1) << (1)) + 1];
- dladiv_(&d__1, &d__2, &csr, &csi, &x[x_dim1 + 1], &x[((x_dim1) <<
- (1)) + 1]);
- *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[((x_dim1)
- << (1)) + 1], abs(d__2));
- }
-
- } else {
-
-/*
- 2x2 System
-
- Compute the real part of C = ca A - w D (or ca A' - w D )
-*/
-
- cr[0] = *ca * a[a_dim1 + 1] - *wr * *d1;
- cr[3] = *ca * a[((a_dim1) << (1)) + 2] - *wr * *d2;
- if (*ltrans) {
- cr[2] = *ca * a[a_dim1 + 2];
- cr[1] = *ca * a[((a_dim1) << (1)) + 1];
- } else {
- cr[1] = *ca * a[a_dim1 + 2];
- cr[2] = *ca * a[((a_dim1) << (1)) + 1];
- }
-
- if (*nw == 1) {
-
-/*
- Real 2x2 system (w is real)
-
- Find the largest element in C
-*/
-
- cmax = 0.;
- icmax = 0;
-
- for (j = 1; j <= 4; ++j) {
- if ((d__1 = crv[j - 1], abs(d__1)) > cmax) {
- cmax = (d__1 = crv[j - 1], abs(d__1));
- icmax = j;
- }
-/* L10: */
- }
-
-/* If norm(C) < SMINI, use SMINI*identity. */
-
- if (cmax < smini) {
-/* Computing MAX */
- d__3 = (d__1 = b[b_dim1 + 1], abs(d__1)), d__4 = (d__2 = b[
- b_dim1 + 2], abs(d__2));
- bnorm = max(d__3,d__4);
- if ((smini < 1. && bnorm > 1.)) {
- if (bnorm > bignum * smini) {
- *scale = 1. / bnorm;
- }
- }
- temp = *scale / smini;
- x[x_dim1 + 1] = temp * b[b_dim1 + 1];
- x[x_dim1 + 2] = temp * b[b_dim1 + 2];
- *xnorm = temp * bnorm;
- *info = 1;
- return 0;
- }
-
-/* Gaussian elimination with complete pivoting. */
-
- ur11 = crv[icmax - 1];
- cr21 = crv[ipivot[((icmax) << (2)) - 3] - 1];
- ur12 = crv[ipivot[((icmax) << (2)) - 2] - 1];
- cr22 = crv[ipivot[((icmax) << (2)) - 1] - 1];
- ur11r = 1. / ur11;
- lr21 = ur11r * cr21;
- ur22 = cr22 - ur12 * lr21;
-
-/* If smaller pivot < SMINI, use SMINI */
-
- if (abs(ur22) < smini) {
- ur22 = smini;
- *info = 1;
- }
- if (rswap[icmax - 1]) {
- br1 = b[b_dim1 + 2];
- br2 = b[b_dim1 + 1];
- } else {
- br1 = b[b_dim1 + 1];
- br2 = b[b_dim1 + 2];
- }
- br2 -= lr21 * br1;
-/* Computing MAX */
- d__2 = (d__1 = br1 * (ur22 * ur11r), abs(d__1)), d__3 = abs(br2);
- bbnd = max(d__2,d__3);
- if ((bbnd > 1. && abs(ur22) < 1.)) {
- if (bbnd >= bignum * abs(ur22)) {
- *scale = 1. / bbnd;
- }
- }
-
- xr2 = br2 * *scale / ur22;
- xr1 = *scale * br1 * ur11r - xr2 * (ur11r * ur12);
- if (zswap[icmax - 1]) {
- x[x_dim1 + 1] = xr2;
- x[x_dim1 + 2] = xr1;
- } else {
- x[x_dim1 + 1] = xr1;
- x[x_dim1 + 2] = xr2;
- }
-/* Computing MAX */
- d__1 = abs(xr1), d__2 = abs(xr2);
- *xnorm = max(d__1,d__2);
-
-/* Further scaling if norm(A) norm(X) > overflow */
-
- if ((*xnorm > 1. && cmax > 1.)) {
- if (*xnorm > bignum / cmax) {
- temp = cmax / bignum;
- x[x_dim1 + 1] = temp * x[x_dim1 + 1];
- x[x_dim1 + 2] = temp * x[x_dim1 + 2];
- *xnorm = temp * *xnorm;
- *scale = temp * *scale;
- }
- }
- } else {
-
-/*
- Complex 2x2 system (w is complex)
-
- Find the largest element in C
-*/
-
- ci[0] = -(*wi) * *d1;
- ci[1] = 0.;
- ci[2] = 0.;
- ci[3] = -(*wi) * *d2;
- cmax = 0.;
- icmax = 0;
-
- for (j = 1; j <= 4; ++j) {
- if ((d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1], abs(
- d__2)) > cmax) {
- cmax = (d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1]
- , abs(d__2));
- icmax = j;
- }
-/* L20: */
- }
-
-/* If norm(C) < SMINI, use SMINI*identity. */
-
- if (cmax < smini) {
-/* Computing MAX */
- d__5 = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[((b_dim1)
- << (1)) + 1], abs(d__2)), d__6 = (d__3 = b[b_dim1 +
- 2], abs(d__3)) + (d__4 = b[((b_dim1) << (1)) + 2],
- abs(d__4));
- bnorm = max(d__5,d__6);
- if ((smini < 1. && bnorm > 1.)) {
- if (bnorm > bignum * smini) {
- *scale = 1. / bnorm;
- }
- }
- temp = *scale / smini;
- x[x_dim1 + 1] = temp * b[b_dim1 + 1];
- x[x_dim1 + 2] = temp * b[b_dim1 + 2];
- x[((x_dim1) << (1)) + 1] = temp * b[((b_dim1) << (1)) + 1];
- x[((x_dim1) << (1)) + 2] = temp * b[((b_dim1) << (1)) + 2];
- *xnorm = temp * bnorm;
- *info = 1;
- return 0;
- }
-
-/* Gaussian elimination with complete pivoting. */
-
- ur11 = crv[icmax - 1];
- ui11 = civ[icmax - 1];
- cr21 = crv[ipivot[((icmax) << (2)) - 3] - 1];
- ci21 = civ[ipivot[((icmax) << (2)) - 3] - 1];
- ur12 = crv[ipivot[((icmax) << (2)) - 2] - 1];
- ui12 = civ[ipivot[((icmax) << (2)) - 2] - 1];
- cr22 = crv[ipivot[((icmax) << (2)) - 1] - 1];
- ci22 = civ[ipivot[((icmax) << (2)) - 1] - 1];
- if (icmax == 1 || icmax == 4) {
-
-/* Code when off-diagonals of pivoted C are real */
-
- if (abs(ur11) > abs(ui11)) {
- temp = ui11 / ur11;
-/* Computing 2nd power */
- d__1 = temp;
- ur11r = 1. / (ur11 * (d__1 * d__1 + 1.));
- ui11r = -temp * ur11r;
- } else {
- temp = ur11 / ui11;
-/* Computing 2nd power */
- d__1 = temp;
- ui11r = -1. / (ui11 * (d__1 * d__1 + 1.));
- ur11r = -temp * ui11r;
- }
- lr21 = cr21 * ur11r;
- li21 = cr21 * ui11r;
- ur12s = ur12 * ur11r;
- ui12s = ur12 * ui11r;
- ur22 = cr22 - ur12 * lr21;
- ui22 = ci22 - ur12 * li21;
- } else {
-
-/* Code when diagonals of pivoted C are real */
-
- ur11r = 1. / ur11;
- ui11r = 0.;
- lr21 = cr21 * ur11r;
- li21 = ci21 * ur11r;
- ur12s = ur12 * ur11r;
- ui12s = ui12 * ur11r;
- ur22 = cr22 - ur12 * lr21 + ui12 * li21;
- ui22 = -ur12 * li21 - ui12 * lr21;
- }
- u22abs = abs(ur22) + abs(ui22);
-
-/* If smaller pivot < SMINI, use SMINI */
-
- if (u22abs < smini) {
- ur22 = smini;
- ui22 = 0.;
- *info = 1;
- }
- if (rswap[icmax - 1]) {
- br2 = b[b_dim1 + 1];
- br1 = b[b_dim1 + 2];
- bi2 = b[((b_dim1) << (1)) + 1];
- bi1 = b[((b_dim1) << (1)) + 2];
- } else {
- br1 = b[b_dim1 + 1];
- br2 = b[b_dim1 + 2];
- bi1 = b[((b_dim1) << (1)) + 1];
- bi2 = b[((b_dim1) << (1)) + 2];
- }
- br2 = br2 - lr21 * br1 + li21 * bi1;
- bi2 = bi2 - li21 * br1 - lr21 * bi1;
-/* Computing MAX */
- d__1 = (abs(br1) + abs(bi1)) * (u22abs * (abs(ur11r) + abs(ui11r))
- ), d__2 = abs(br2) + abs(bi2);
- bbnd = max(d__1,d__2);
- if ((bbnd > 1. && u22abs < 1.)) {
- if (bbnd >= bignum * u22abs) {
- *scale = 1. / bbnd;
- br1 = *scale * br1;
- bi1 = *scale * bi1;
- br2 = *scale * br2;
- bi2 = *scale * bi2;
- }
- }
-
- dladiv_(&br2, &bi2, &ur22, &ui22, &xr2, &xi2);
- xr1 = ur11r * br1 - ui11r * bi1 - ur12s * xr2 + ui12s * xi2;
- xi1 = ui11r * br1 + ur11r * bi1 - ui12s * xr2 - ur12s * xi2;
- if (zswap[icmax - 1]) {
- x[x_dim1 + 1] = xr2;
- x[x_dim1 + 2] = xr1;
- x[((x_dim1) << (1)) + 1] = xi2;
- x[((x_dim1) << (1)) + 2] = xi1;
- } else {
- x[x_dim1 + 1] = xr1;
- x[x_dim1 + 2] = xr2;
- x[((x_dim1) << (1)) + 1] = xi1;
- x[((x_dim1) << (1)) + 2] = xi2;
- }
-/* Computing MAX */
- d__1 = abs(xr1) + abs(xi1), d__2 = abs(xr2) + abs(xi2);
- *xnorm = max(d__1,d__2);
-
-/* Further scaling if norm(A) norm(X) > overflow */
-
- if ((*xnorm > 1. && cmax > 1.)) {
- if (*xnorm > bignum / cmax) {
- temp = cmax / bignum;
- x[x_dim1 + 1] = temp * x[x_dim1 + 1];
- x[x_dim1 + 2] = temp * x[x_dim1 + 2];
- x[((x_dim1) << (1)) + 1] = temp * x[((x_dim1) << (1)) + 1]
- ;
- x[((x_dim1) << (1)) + 2] = temp * x[((x_dim1) << (1)) + 2]
- ;
- *xnorm = temp * *xnorm;
- *scale = temp * *scale;
- }
- }
- }
- }
-
- return 0;
-
-/* End of DLALN2 */
-
-} /* dlaln2_ */
-
-#undef crv
-#undef civ
-#undef cr
-#undef ci
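-
-/*
-    Illustrative sketch, not part of the original translation: the real
-    1x1 branch of DLALN2 above, rewritten as a stand-alone C function
-    (hypothetical names).  It solves (ca*A - wr*D1)*x = scale*b, replacing
-    a nearly singular coefficient by smini and choosing scale <= 1 so that
-    the final division cannot overflow.
-*/
-#if 0
-#include <math.h>
-#include <stdio.h>
-
-static double solve_real_1x1(double ca, double a, double wr, double d1,
-                             double b, double smini, double bignum,
-                             double *scale, int *info)
-{
-    double c = ca * a - wr * d1;         /* C = ca A - w D */
-    double cnorm = fabs(c);
-
-    *scale = 1.;
-    *info = 0;
-    if (cnorm < smini) {                 /* perturb a tiny pivot */
-        c = smini;
-        cnorm = smini;
-        *info = 1;
-    }
-    if (cnorm < 1. && fabs(b) > 1. && fabs(b) > bignum * cnorm)
-        *scale = 1. / fabs(b);           /* shrink b instead of overflowing */
-    return b * *scale / c;
-}
-
-int main(void)
-{
-    double scale;
-    int info;
-    double x = solve_real_1x1(1., 2., .5, 1., 3., 1e-300, 1e300,
-                              &scale, &info);
-    printf("x = %g  scale = %g  info = %d\n", x, scale, info);
-    return 0;
-}
-#endif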
-
-
-/* Subroutine */ int dlals0_(integer *icompq, integer *nl, integer *nr,
- integer *sqre, integer *nrhs, doublereal *b, integer *ldb, doublereal
- *bx, integer *ldbx, integer *perm, integer *givptr, integer *givcol,
- integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *
- poles, doublereal *difl, doublereal *difr, doublereal *z__, integer *
- k, doublereal *c__, doublereal *s, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer givcol_dim1, givcol_offset, b_dim1, b_offset, bx_dim1, bx_offset,
- difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1,
- poles_offset, i__1, i__2;
- doublereal d__1;
-
- /* Local variables */
- static integer i__, j, m, n;
- static doublereal dj;
- static integer nlp1;
- static doublereal temp;
- extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *);
- extern doublereal dnrm2_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *);
- static doublereal diflj, difrj, dsigj;
- extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, integer *), dcopy_(integer *,
- doublereal *, integer *, doublereal *, integer *);
- extern doublereal dlamc3_(doublereal *, doublereal *);
- extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, integer *, doublereal *,
- integer *, integer *), dlacpy_(char *, integer *, integer
- *, doublereal *, integer *, doublereal *, integer *),
- xerbla_(char *, integer *);
- static doublereal dsigjp;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- December 1, 1999
-
-
- Purpose
- =======
-
- DLALS0 applies back the multiplying factors of either the left or the
- right singular vector matrix of a diagonal matrix appended by a row
- to the right hand side matrix B in solving the least squares problem
- using the divide-and-conquer SVD approach.
-
- For the left singular vector matrix, three types of orthogonal
- matrices are involved:
-
- (1L) Givens rotations: the number of such rotations is GIVPTR; the
- pairs of columns/rows they were applied to are stored in GIVCOL;
- and the C- and S-values of these rotations are stored in GIVNUM.
-
- (2L) Permutation. The (NL+1)-st row of B is to be moved to the first
- row, and for J=2:N, PERM(J)-th row of B is to be moved to the
- J-th row.
-
- (3L) The left singular vector matrix of the remaining matrix.
-
- For the right singular vector matrix, four types of orthogonal
- matrices are involved:
-
- (1R) The right singular vector matrix of the remaining matrix.
-
- (2R) If SQRE = 1, one extra Givens rotation to generate the right
- null space.
-
- (3R) The inverse transformation of (2L).
-
- (4R) The inverse transformation of (1L).
-
- Arguments
- =========
-
- ICOMPQ (input) INTEGER
- Specifies whether singular vectors are to be computed in
- factored form:
- = 0: Left singular vector matrix.
- = 1: Right singular vector matrix.
-
- NL (input) INTEGER
- The row dimension of the upper block. NL >= 1.
-
- NR (input) INTEGER
- The row dimension of the lower block. NR >= 1.
-
- SQRE (input) INTEGER
- = 0: the lower block is an NR-by-NR square matrix.
- = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
-
- The bidiagonal matrix has row dimension N = NL + NR + 1,
- and column dimension M = N + SQRE.
-
- NRHS (input) INTEGER
- The number of columns of B and BX. NRHS must be at least 1.
-
- B (input/output) DOUBLE PRECISION array, dimension ( LDB, NRHS )
- On input, B contains the right hand sides of the least
- squares problem in rows 1 through M. On output, B contains
- the solution X in rows 1 through N.
-
- LDB (input) INTEGER
- The leading dimension of B. LDB must be at least
- max(1,MAX( M, N ) ).
-
- BX (workspace) DOUBLE PRECISION array, dimension ( LDBX, NRHS )
-
- LDBX (input) INTEGER
- The leading dimension of BX.
-
- PERM (input) INTEGER array, dimension ( N )
- The permutations (from deflation and sorting) applied
- to the two blocks.
-
- GIVPTR (input) INTEGER
- The number of Givens rotations which took place in this
- subproblem.
-
- GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 )
- Each pair of numbers indicates a pair of rows/columns
- involved in a Givens rotation.
-
- LDGCOL (input) INTEGER
- The leading dimension of GIVCOL, must be at least N.
-
- GIVNUM (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
- Each number indicates the C or S value used in the
- corresponding Givens rotation.
-
- LDGNUM (input) INTEGER
- The leading dimension of arrays DIFR, POLES and
- GIVNUM, must be at least K.
-
- POLES (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
- On entry, POLES(1:K, 1) contains the new singular
- values obtained from solving the secular equation, and
- POLES(1:K, 2) is an array containing the poles in the secular
- equation.
-
- DIFL (input) DOUBLE PRECISION array, dimension ( K ).
- On entry, DIFL(I) is the distance between I-th updated
- (undeflated) singular value and the I-th (undeflated) old
- singular value.
-
- DIFR (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ).
- On entry, DIFR(I, 1) contains the distances between I-th
- updated (undeflated) singular value and the I+1-th
- (undeflated) old singular value. And DIFR(I, 2) is the
- normalizing factor for the I-th right singular vector.
-
- Z (input) DOUBLE PRECISION array, dimension ( K )
-          Contains the components of the deflation-adjusted updating row
- vector.
-
- K (input) INTEGER
-          Contains the dimension of the non-deflated matrix.
-          This is the order of the related secular equation. 1 <= K <= N.
-
- C (input) DOUBLE PRECISION
- C contains garbage if SQRE =0 and the C-value of a Givens
- rotation related to the right null space if SQRE = 1.
-
- S (input) DOUBLE PRECISION
- S contains garbage if SQRE =0 and the S-value of a Givens
- rotation related to the right null space if SQRE = 1.
-
- WORK (workspace) DOUBLE PRECISION array, dimension ( K )
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Ren-Cang Li, Computer Science Division, University of
- California at Berkeley, USA
- Osni Marques, LBNL/NERSC, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
- bx_dim1 = *ldbx;
- bx_offset = 1 + bx_dim1 * 1;
- bx -= bx_offset;
- --perm;
- givcol_dim1 = *ldgcol;
- givcol_offset = 1 + givcol_dim1 * 1;
- givcol -= givcol_offset;
- difr_dim1 = *ldgnum;
- difr_offset = 1 + difr_dim1 * 1;
- difr -= difr_offset;
- poles_dim1 = *ldgnum;
- poles_offset = 1 + poles_dim1 * 1;
- poles -= poles_offset;
- givnum_dim1 = *ldgnum;
- givnum_offset = 1 + givnum_dim1 * 1;
- givnum -= givnum_offset;
- --difl;
- --z__;
- --work;
-
- /* Function Body */
- *info = 0;
-
- if (*icompq < 0 || *icompq > 1) {
- *info = -1;
- } else if (*nl < 1) {
- *info = -2;
- } else if (*nr < 1) {
- *info = -3;
- } else if (*sqre < 0 || *sqre > 1) {
- *info = -4;
- }
-
- n = *nl + *nr + 1;
-
- if (*nrhs < 1) {
- *info = -5;
- } else if (*ldb < n) {
- *info = -7;
- } else if (*ldbx < n) {
- *info = -9;
- } else if (*givptr < 0) {
- *info = -11;
- } else if (*ldgcol < n) {
- *info = -13;
- } else if (*ldgnum < n) {
- *info = -15;
- } else if (*k < 1) {
- *info = -20;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLALS0", &i__1);
- return 0;
- }
-
- m = n + *sqre;
- nlp1 = *nl + 1;
-
- if (*icompq == 0) {
-
-/*
- Apply back orthogonal transformations from the left.
-
- Step (1L): apply back the Givens rotations performed.
-*/
-
- i__1 = *givptr;
- for (i__ = 1; i__ <= i__1; ++i__) {
- drot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
- ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
- i__ + ((givnum_dim1) << (1))], &givnum[i__ + givnum_dim1])
- ;
-/* L10: */
- }
-
-/* Step (2L): permute rows of B. */
-
- dcopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
- i__1 = n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- dcopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1],
- ldbx);
-/* L20: */
- }
-
-/*
- Step (3L): apply the inverse of the left singular vector
- matrix to BX.
-*/
-
- if (*k == 1) {
- dcopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
- if (z__[1] < 0.) {
- dscal_(nrhs, &c_b151, &b[b_offset], ldb);
- }
- } else {
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- diflj = difl[j];
- dj = poles[j + poles_dim1];
- dsigj = -poles[j + ((poles_dim1) << (1))];
- if (j < *k) {
- difrj = -difr[j + difr_dim1];
- dsigjp = -poles[j + 1 + ((poles_dim1) << (1))];
- }
- if (z__[j] == 0. || poles[j + ((poles_dim1) << (1))] == 0.) {
- work[j] = 0.;
- } else {
- work[j] = -poles[j + ((poles_dim1) << (1))] * z__[j] /
- diflj / (poles[j + ((poles_dim1) << (1))] + dj);
- }
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))]
- == 0.) {
- work[i__] = 0.;
- } else {
- work[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
- i__] / (dlamc3_(&poles[i__ + ((poles_dim1) <<
- (1))], &dsigj) - diflj) / (poles[i__ + ((
- poles_dim1) << (1))] + dj);
- }
-/* L30: */
- }
- i__2 = *k;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))]
- == 0.) {
- work[i__] = 0.;
- } else {
- work[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
- i__] / (dlamc3_(&poles[i__ + ((poles_dim1) <<
- (1))], &dsigjp) + difrj) / (poles[i__ + ((
- poles_dim1) << (1))] + dj);
- }
-/* L40: */
- }
- work[1] = -1.;
- temp = dnrm2_(k, &work[1], &c__1);
- dgemv_("T", k, nrhs, &c_b15, &bx[bx_offset], ldbx, &work[1], &
- c__1, &c_b29, &b[j + b_dim1], ldb);
- dlascl_("G", &c__0, &c__0, &temp, &c_b15, &c__1, nrhs, &b[j +
- b_dim1], ldb, info);
-/* L50: */
- }
- }
-
-/* Move the deflated rows of BX to B also. */
-
- if (*k < max(m,n)) {
- i__1 = n - *k;
- dlacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1
- + b_dim1], ldb);
- }
- } else {
-
-/*
- Apply back the right orthogonal transformations.
-
- Step (1R): apply back the new right singular vector matrix
- to B.
-*/
-
- if (*k == 1) {
- dcopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
- } else {
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dsigj = poles[j + ((poles_dim1) << (1))];
- if (z__[j] == 0.) {
- work[j] = 0.;
- } else {
- work[j] = -z__[j] / difl[j] / (dsigj + poles[j +
- poles_dim1]) / difr[j + ((difr_dim1) << (1))];
- }
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- if (z__[j] == 0.) {
- work[i__] = 0.;
- } else {
- d__1 = -poles[i__ + 1 + ((poles_dim1) << (1))];
- work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difr[
- i__ + difr_dim1]) / (dsigj + poles[i__ +
- poles_dim1]) / difr[i__ + ((difr_dim1) << (1))
- ];
- }
-/* L60: */
- }
- i__2 = *k;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- if (z__[j] == 0.) {
- work[i__] = 0.;
- } else {
- d__1 = -poles[i__ + ((poles_dim1) << (1))];
- work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difl[
- i__]) / (dsigj + poles[i__ + poles_dim1]) /
- difr[i__ + ((difr_dim1) << (1))];
- }
-/* L70: */
- }
- dgemv_("T", k, nrhs, &c_b15, &b[b_offset], ldb, &work[1], &
- c__1, &c_b29, &bx[j + bx_dim1], ldbx);
-/* L80: */
- }
- }
-
-/*
- Step (2R): if SQRE = 1, apply back the rotation that is
- related to the right null space of the subproblem.
-*/
-
- if (*sqre == 1) {
- dcopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
- drot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__,
- s);
- }
- if (*k < max(m,n)) {
- i__1 = n - *k;
- dlacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 +
- bx_dim1], ldbx);
- }
-
-/* Step (3R): permute rows of B. */
-
- dcopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
- if (*sqre == 1) {
- dcopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
- }
- i__1 = n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- dcopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1],
- ldb);
-/* L90: */
- }
-
-/* Step (4R): apply back the Givens rotations performed. */
-
- for (i__ = *givptr; i__ >= 1; --i__) {
- d__1 = -givnum[i__ + givnum_dim1];
- drot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
- ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
- i__ + ((givnum_dim1) << (1))], &d__1);
-/* L100: */
- }
- }
-
- return 0;
-
-/* End of DLALS0 */
-
-} /* dlals0_ */
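-
-/*
-    Illustrative sketch, not part of the original translation: step (2L)
-    of DLALS0 above, as plain C on a single right-hand-side column
-    (hypothetical names; index 0 is unused to keep the Fortran-style
-    1-based addressing).  Row nl+1 of b moves to row 1 of bx, and row
-    perm[j] of b moves to row j for j = 2..n.
-*/
-#if 0
-#include <stdio.h>
-
-static void permute_rows(int n, int nl, const int *perm,
-                         const double *b, double *bx)
-{
-    int j;
-    bx[1] = b[nl + 1];               /* (NL+1)-st row goes first */
-    for (j = 2; j <= n; ++j)
-        bx[j] = b[perm[j]];          /* PERM(J)-th row goes to row J */
-}
-
-int main(void)
-{
-    double b[5] = { 0., 10., 20., 30., 40. };
-    double bx[5] = { 0. };
-    int perm[5] = { 0, 0, 3, 1, 4 }; /* perm[1] unused by the routine */
-    permute_rows(4, 1, perm, b, bx);
-    printf("%g %g %g %g\n", bx[1], bx[2], bx[3], bx[4]);
-    return 0;
-}
-#endif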
-
-/* Subroutine */ int dlalsa_(integer *icompq, integer *smlsiz, integer *n,
- integer *nrhs, doublereal *b, integer *ldb, doublereal *bx, integer *
- ldbx, doublereal *u, integer *ldu, doublereal *vt, integer *k,
- doublereal *difl, doublereal *difr, doublereal *z__, doublereal *
- poles, integer *givptr, integer *givcol, integer *ldgcol, integer *
- perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal *
- work, integer *iwork, integer *info)
-{
- /* System generated locals */
- integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, b_dim1,
- b_offset, bx_dim1, bx_offset, difl_dim1, difl_offset, difr_dim1,
- difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset,
- u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1,
- i__2;
-
- /* Builtin functions */
- integer pow_ii(integer *, integer *);
-
- /* Local variables */
- static integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl,
- ndb1, nlp1, lvl2, nrp1, nlvl, sqre;
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *);
- static integer inode, ndiml, ndimr;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *), dlals0_(integer *, integer *, integer *,
- integer *, integer *, doublereal *, integer *, doublereal *,
- integer *, integer *, integer *, integer *, integer *, doublereal
- *, integer *, doublereal *, doublereal *, doublereal *,
- doublereal *, integer *, doublereal *, doublereal *, doublereal *,
- integer *), dlasdt_(integer *, integer *, integer *, integer *,
- integer *, integer *, integer *), xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
-    DLALSA is an intermediate step in solving the least squares problem
-    by computing the SVD of the coefficient matrix in compact form (the
-    singular vectors are computed as products of simple orthogonal
-    matrices).
-
- If ICOMPQ = 0, DLALSA applies the inverse of the left singular vector
- matrix of an upper bidiagonal matrix to the right hand side; and if
- ICOMPQ = 1, DLALSA applies the right singular vector matrix to the
- right hand side. The singular vector matrices were generated in
-    compact form by DLASDA.
-
- Arguments
- =========
-
-
- ICOMPQ (input) INTEGER
- Specifies whether the left or the right singular vector
- matrix is involved.
- = 0: Left singular vector matrix
- = 1: Right singular vector matrix
-
- SMLSIZ (input) INTEGER
- The maximum size of the subproblems at the bottom of the
- computation tree.
-
- N (input) INTEGER
- The row and column dimensions of the upper bidiagonal matrix.
-
- NRHS (input) INTEGER
- The number of columns of B and BX. NRHS must be at least 1.
-
- B (input) DOUBLE PRECISION array, dimension ( LDB, NRHS )
- On input, B contains the right hand sides of the least
- squares problem in rows 1 through M. On output, B contains
- the solution X in rows 1 through N.
-
- LDB (input) INTEGER
- The leading dimension of B in the calling subprogram.
- LDB must be at least max(1,MAX( M, N ) ).
-
- BX (output) DOUBLE PRECISION array, dimension ( LDBX, NRHS )
- On exit, the result of applying the left or right singular
- vector matrix to B.
-
- LDBX (input) INTEGER
- The leading dimension of BX.
-
- U (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ).
- On entry, U contains the left singular vector matrices of all
- subproblems at the bottom level.
-
-    LDU    (input) INTEGER, LDU >= N.
- The leading dimension of arrays U, VT, DIFL, DIFR,
- POLES, GIVNUM, and Z.
-
- VT (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ).
- On entry, VT' contains the right singular vector matrices of
- all subproblems at the bottom level.
-
- K (input) INTEGER array, dimension ( N ).
-
- DIFL (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ).
- where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1.
-
- DIFR (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
- On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record
- distances between singular values on the I-th level and
- singular values on the (I -1)-th level, and DIFR(*, 2 * I)
- record the normalizing factors of the right singular vectors
- matrices of subproblems on I-th level.
-
- Z (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ).
- On entry, Z(1, I) contains the components of the deflation-
- adjusted updating row vector for subproblems on the I-th
- level.
-
- POLES (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
- On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old
- singular values involved in the secular equations on the I-th
- level.
-
- GIVPTR (input) INTEGER array, dimension ( N ).
- On entry, GIVPTR( I ) records the number of Givens
- rotations performed on the I-th problem on the computation
- tree.
-
- GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ).
- On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the
- locations of Givens rotations performed on the I-th level on
- the computation tree.
-
-    LDGCOL (input) INTEGER, LDGCOL >= N.
- The leading dimension of arrays GIVCOL and PERM.
-
- PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ).
- On entry, PERM(*, I) records permutations done on the I-th
- level of the computation tree.
-
- GIVNUM (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
- On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S-
- values of Givens rotations performed on the I-th level on the
- computation tree.
-
- C (input) DOUBLE PRECISION array, dimension ( N ).
- On entry, if the I-th subproblem is not square,
- C( I ) contains the C-value of a Givens rotation related to
- the right null space of the I-th subproblem.
-
- S (input) DOUBLE PRECISION array, dimension ( N ).
- On entry, if the I-th subproblem is not square,
- S( I ) contains the S-value of a Givens rotation related to
- the right null space of the I-th subproblem.
-
- WORK (workspace) DOUBLE PRECISION array.
- The dimension must be at least N.
-
- IWORK (workspace) INTEGER array.
- The dimension must be at least 3 * N
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Ren-Cang Li, Computer Science Division, University of
- California at Berkeley, USA
- Osni Marques, LBNL/NERSC, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
- bx_dim1 = *ldbx;
- bx_offset = 1 + bx_dim1 * 1;
- bx -= bx_offset;
- givnum_dim1 = *ldu;
- givnum_offset = 1 + givnum_dim1 * 1;
- givnum -= givnum_offset;
- poles_dim1 = *ldu;
- poles_offset = 1 + poles_dim1 * 1;
- poles -= poles_offset;
- z_dim1 = *ldu;
- z_offset = 1 + z_dim1 * 1;
- z__ -= z_offset;
- difr_dim1 = *ldu;
- difr_offset = 1 + difr_dim1 * 1;
- difr -= difr_offset;
- difl_dim1 = *ldu;
- difl_offset = 1 + difl_dim1 * 1;
- difl -= difl_offset;
- vt_dim1 = *ldu;
- vt_offset = 1 + vt_dim1 * 1;
- vt -= vt_offset;
- u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
- u -= u_offset;
- --k;
- --givptr;
- perm_dim1 = *ldgcol;
- perm_offset = 1 + perm_dim1 * 1;
- perm -= perm_offset;
- givcol_dim1 = *ldgcol;
- givcol_offset = 1 + givcol_dim1 * 1;
- givcol -= givcol_offset;
- --c__;
- --s;
- --work;
- --iwork;
-
- /* Function Body */
- *info = 0;
-
- if (*icompq < 0 || *icompq > 1) {
- *info = -1;
- } else if (*smlsiz < 3) {
- *info = -2;
- } else if (*n < *smlsiz) {
- *info = -3;
- } else if (*nrhs < 1) {
- *info = -4;
- } else if (*ldb < *n) {
- *info = -6;
- } else if (*ldbx < *n) {
- *info = -8;
- } else if (*ldu < *n) {
- *info = -10;
- } else if (*ldgcol < *n) {
- *info = -19;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLALSA", &i__1);
- return 0;
- }
-
-/* Book-keeping and setting up the computation tree. */
-
- inode = 1;
- ndiml = inode + *n;
- ndimr = ndiml + *n;
-
- dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
- smlsiz);
-
-/*
- The following code applies back the left singular vector factors.
- For applying back the right singular vector factors, go to 50.
-*/
-
- if (*icompq == 1) {
- goto L50;
- }
-
-/*
- The nodes on the bottom level of the tree were solved
- by DLASDQ. The corresponding left and right singular vector
- matrices are in explicit form. First apply back the left
- singular vector matrices.
-*/
-
- ndb1 = (nd + 1) / 2;
- i__1 = nd;
- for (i__ = ndb1; i__ <= i__1; ++i__) {
-
-/*
- IC : center row of each node
- NL : number of rows of left subproblem
- NR : number of rows of right subproblem
- NLF: starting row of the left subproblem
- NRF: starting row of the right subproblem
-*/
-
- i1 = i__ - 1;
- ic = iwork[inode + i1];
- nl = iwork[ndiml + i1];
- nr = iwork[ndimr + i1];
- nlf = ic - nl;
- nrf = ic + 1;
- dgemm_("T", "N", &nl, nrhs, &nl, &c_b15, &u[nlf + u_dim1], ldu, &b[
- nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx);
- dgemm_("T", "N", &nr, nrhs, &nr, &c_b15, &u[nrf + u_dim1], ldu, &b[
- nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx);
-/* L10: */
- }
-
-/*
- Next copy the rows of B that correspond to unchanged rows
- in the bidiagonal matrix to BX.
-*/
-
- i__1 = nd;
- for (i__ = 1; i__ <= i__1; ++i__) {
- ic = iwork[inode + i__ - 1];
- dcopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
-/* L20: */
- }
-
-/*
- Finally go through the left singular vector matrices of all
- the other subproblems bottom-up on the tree.
-*/
-
- j = pow_ii(&c__2, &nlvl);
- sqre = 0;
-
- for (lvl = nlvl; lvl >= 1; --lvl) {
- lvl2 = ((lvl) << (1)) - 1;
-
-/*
- find the first node LF and last node LL on
- the current level LVL
-*/
-
- if (lvl == 1) {
- lf = 1;
- ll = 1;
- } else {
- i__1 = lvl - 1;
- lf = pow_ii(&c__2, &i__1);
- ll = ((lf) << (1)) - 1;
- }
- i__1 = ll;
- for (i__ = lf; i__ <= i__1; ++i__) {
- im1 = i__ - 1;
- ic = iwork[inode + im1];
- nl = iwork[ndiml + im1];
- nr = iwork[ndimr + im1];
- nlf = ic - nl;
- nrf = ic + 1;
- --j;
- dlals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
- b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
- givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
- givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
- poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
- lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
- j], &s[j], &work[1], info);
-/* L30: */
- }
-/* L40: */
- }
- goto L90;
-
-/* ICOMPQ = 1: applying back the right singular vector factors. */
-
-L50:
-
-/*
- First now go through the right singular vector matrices of all
- the tree nodes top-down.
-*/
-
- j = 0;
- i__1 = nlvl;
- for (lvl = 1; lvl <= i__1; ++lvl) {
- lvl2 = ((lvl) << (1)) - 1;
-
-/*
- Find the first node LF and last node LL on
- the current level LVL.
-*/
-
- if (lvl == 1) {
- lf = 1;
- ll = 1;
- } else {
- i__2 = lvl - 1;
- lf = pow_ii(&c__2, &i__2);
- ll = ((lf) << (1)) - 1;
- }
- i__2 = lf;
- for (i__ = ll; i__ >= i__2; --i__) {
- im1 = i__ - 1;
- ic = iwork[inode + im1];
- nl = iwork[ndiml + im1];
- nr = iwork[ndimr + im1];
- nlf = ic - nl;
- nrf = ic + 1;
- if (i__ == ll) {
- sqre = 0;
- } else {
- sqre = 1;
- }
- ++j;
- dlals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
- nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
- givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
- givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
- poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
- lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
- j], &s[j], &work[1], info);
-/* L60: */
- }
-/* L70: */
- }
-
-/*
- The nodes on the bottom level of the tree were solved
- by DLASDQ. The corresponding right singular vector
- matrices are in explicit form. Apply them back.
-*/
-
- ndb1 = (nd + 1) / 2;
- i__1 = nd;
- for (i__ = ndb1; i__ <= i__1; ++i__) {
- i1 = i__ - 1;
- ic = iwork[inode + i1];
- nl = iwork[ndiml + i1];
- nr = iwork[ndimr + i1];
- nlp1 = nl + 1;
- if (i__ == nd) {
- nrp1 = nr;
- } else {
- nrp1 = nr + 1;
- }
- nlf = ic - nl;
- nrf = ic + 1;
- dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b15, &vt[nlf + vt_dim1], ldu,
- &b[nlf + b_dim1], ldb, &c_b29, &bx[nlf + bx_dim1], ldbx);
- dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b15, &vt[nrf + vt_dim1], ldu,
- &b[nrf + b_dim1], ldb, &c_b29, &bx[nrf + bx_dim1], ldbx);
-/* L80: */
- }
-
-L90:
-
- return 0;
-
-/* End of DLALSA */
-
-} /* dlalsa_ */
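-
-/*
-    Illustrative sketch, not part of the original translation: the
-    level/node bookkeeping DLALSA uses when walking the computation tree.
-    On level lvl (1-based, root = 1) the first node is lf = 2^(lvl-1) and
-    the last is ll = 2*lf - 1, matching the pow_ii calls above.
-*/
-#if 0
-#include <stdio.h>
-
-int main(void)
-{
-    int nlvl = 4, lvl;
-    for (lvl = 1; lvl <= nlvl; ++lvl) {
-        int lf = 1 << (lvl - 1);     /* first node on this level */
-        int ll = 2 * lf - 1;         /* last node on this level  */
-        printf("level %d: nodes %d..%d\n", lvl, lf, ll);
-    }
-    return 0;
-}
-#endif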
-
-/* Subroutine */ int dlalsd_(char *uplo, integer *smlsiz, integer *n, integer
- *nrhs, doublereal *d__, doublereal *e, doublereal *b, integer *ldb,
- doublereal *rcond, integer *rank, doublereal *work, integer *iwork,
- integer *info)
-{
- /* System generated locals */
- integer b_dim1, b_offset, i__1, i__2;
- doublereal d__1;
-
- /* Builtin functions */
- double log(doublereal), d_sign(doublereal *, doublereal *);
-
- /* Local variables */
- static integer c__, i__, j, k;
- static doublereal r__;
- static integer s, u, z__;
- static doublereal cs;
- static integer bx;
- static doublereal sn;
- static integer st, vt, nm1, st1;
- static doublereal eps;
- static integer iwk;
- static doublereal tol;
- static integer difl, difr, perm, nsub;
- extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *);
- static integer nlvl, sqre, bxst;
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *),
- dcopy_(integer *, doublereal *, integer *, doublereal *, integer
- *);
- static integer poles, sizei, nsize, nwork, icmpq1, icmpq2;
-
- extern /* Subroutine */ int dlasda_(integer *, integer *, integer *,
- integer *, doublereal *, doublereal *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, doublereal *,
- doublereal *, integer *, integer *, integer *, integer *,
- doublereal *, doublereal *, doublereal *, doublereal *, integer *,
- integer *), dlalsa_(integer *, integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, integer *, doublereal *, doublereal *,
- doublereal *, doublereal *, integer *, integer *, integer *,
- integer *, doublereal *, doublereal *, doublereal *, doublereal *,
- integer *, integer *), dlascl_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, integer *, doublereal *,
- integer *, integer *);
- extern integer idamax_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer
- *, integer *, integer *, doublereal *, doublereal *, doublereal *,
- integer *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, integer *), dlacpy_(char *, integer *,
- integer *, doublereal *, integer *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *,
- doublereal *, doublereal *), dlaset_(char *, integer *, integer *,
- doublereal *, doublereal *, doublereal *, integer *),
- xerbla_(char *, integer *);
- static integer givcol;
- extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
- extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
- integer *);
- static doublereal orgnrm;
- static integer givnum, givptr, smlszp;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- DLALSD uses the singular value decomposition of A to solve the least
- squares problem of finding X to minimize the Euclidean norm of each
- column of A*X-B, where A is N-by-N upper bidiagonal, and X and B
- are N-by-NRHS. The solution X overwrites B.
-
- The singular values of A smaller than RCOND times the largest
- singular value are treated as zero in solving the least squares
- problem; in this case a minimum norm solution is returned.
- The actual singular values are returned in D in ascending order.
-
- This code makes very mild assumptions about floating point
- arithmetic. It will work on machines with a guard digit in
- add/subtract, or on those binary machines without guard digits
- which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2.
- It could conceivably fail on hexadecimal or decimal machines
- without guard digits, but we know of none.
-
- Arguments
- =========
-
- UPLO (input) CHARACTER*1
- = 'U': D and E define an upper bidiagonal matrix.
- = 'L': D and E define a lower bidiagonal matrix.
-
- SMLSIZ (input) INTEGER
- The maximum size of the subproblems at the bottom of the
- computation tree.
-
- N (input) INTEGER
- The dimension of the bidiagonal matrix. N >= 0.
-
- NRHS (input) INTEGER
- The number of columns of B. NRHS must be at least 1.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry D contains the main diagonal of the bidiagonal
- matrix. On exit, if INFO = 0, D contains its singular values.
-
- E (input) DOUBLE PRECISION array, dimension (N-1)
- Contains the super-diagonal entries of the bidiagonal matrix.
- On exit, E has been destroyed.
-
- B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
- On input, B contains the right hand sides of the least
- squares problem. On output, B contains the solution X.
-
- LDB (input) INTEGER
- The leading dimension of B in the calling subprogram.
- LDB must be at least max(1,N).
-
- RCOND (input) DOUBLE PRECISION
- The singular values of A less than or equal to RCOND times
- the largest singular value are treated as zero in solving
- the least squares problem. If RCOND is negative,
- machine precision is used instead.
- For example, if diag(S)*X=B were the least squares problem,
- where diag(S) is a diagonal matrix of singular values, the
- solution would be X(i) = B(i) / S(i) if S(i) is greater than
- RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to
- RCOND*max(S).
-
- RANK (output) INTEGER
- The number of singular values of A greater than RCOND times
- the largest singular value.
-
- WORK (workspace) DOUBLE PRECISION array, dimension at least
- (9*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2),
- where NLVL = max(0, INT(log_2 (N/(SMLSIZ+1))) + 1).
-
- IWORK (workspace) INTEGER array, dimension at least
- (3*N*NLVL + 11*N)
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-          > 0: The algorithm failed to compute a singular value while
- working on the submatrix lying in rows and columns
- INFO/(N+1) through MOD(INFO,N+1).
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Ren-Cang Li, Computer Science Division, University of
- California at Berkeley, USA
- Osni Marques, LBNL/NERSC, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --e;
- b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
- b -= b_offset;
- --work;
- --iwork;
-
- /* Function Body */
- *info = 0;
-
- if (*n < 0) {
- *info = -3;
- } else if (*nrhs < 1) {
- *info = -4;
- } else if (*ldb < 1 || *ldb < *n) {
- *info = -8;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLALSD", &i__1);
- return 0;
- }
-
- eps = EPSILON;
-
-/* Set up the tolerance. */
-
- if (*rcond <= 0. || *rcond >= 1.) {
- *rcond = eps;
- }
-
- *rank = 0;
-
-/* Quick return if possible. */
-
- if (*n == 0) {
- return 0;
- } else if (*n == 1) {
- if (d__[1] == 0.) {
- dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[b_offset], ldb);
- } else {
- *rank = 1;
- dlascl_("G", &c__0, &c__0, &d__[1], &c_b15, &c__1, nrhs, &b[
- b_offset], ldb, info);
- d__[1] = abs(d__[1]);
- }
- return 0;
- }
-
-/* Rotate the matrix if it is lower bidiagonal. */
-
- if (*(unsigned char *)uplo == 'L') {
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
- d__[i__] = r__;
- e[i__] = sn * d__[i__ + 1];
- d__[i__ + 1] = cs * d__[i__ + 1];
- if (*nrhs == 1) {
- drot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
- c__1, &cs, &sn);
- } else {
- work[((i__) << (1)) - 1] = cs;
- work[i__ * 2] = sn;
- }
-/* L10: */
- }
- if (*nrhs > 1) {
- i__1 = *nrhs;
- for (i__ = 1; i__ <= i__1; ++i__) {
- i__2 = *n - 1;
- for (j = 1; j <= i__2; ++j) {
- cs = work[((j) << (1)) - 1];
- sn = work[j * 2];
- drot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ *
- b_dim1], &c__1, &cs, &sn);
-/* L20: */
- }
-/* L30: */
- }
- }
- }
-
-/* Scale. */
-
- nm1 = *n - 1;
- orgnrm = dlanst_("M", n, &d__[1], &e[1]);
- if (orgnrm == 0.) {
- dlaset_("A", n, nrhs, &c_b29, &c_b29, &b[b_offset], ldb);
- return 0;
- }
-
- dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, &c__1, &d__[1], n, info);
- dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &nm1, &c__1, &e[1], &nm1,
- info);
-
-/*
- If N is smaller than the minimum divide size SMLSIZ, then solve
- the problem with another solver.
-*/
-
- if (*n <= *smlsiz) {
- nwork = *n * *n + 1;
- dlaset_("A", n, n, &c_b29, &c_b15, &work[1], n);
- dlasdq_("U", &c__0, n, n, &c__0, nrhs, &d__[1], &e[1], &work[1], n, &
- work[1], n, &b[b_offset], ldb, &work[nwork], info);
- if (*info != 0) {
- return 0;
- }
- tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if (d__[i__] <= tol) {
- dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &b[i__ + b_dim1],
- ldb);
- } else {
- dlascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &b[
- i__ + b_dim1], ldb, info);
- ++(*rank);
- }
-/* L40: */
- }
- dgemm_("T", "N", n, nrhs, n, &c_b15, &work[1], n, &b[b_offset], ldb, &
- c_b29, &work[nwork], n);
- dlacpy_("A", n, nrhs, &work[nwork], n, &b[b_offset], ldb);
-
-/* Unscale. */
-
- dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n,
- info);
- dlasrt_("D", n, &d__[1], info);
- dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset],
- ldb, info);
-
- return 0;
- }
-
-/* Book-keeping and setting up some constants. */
-
- nlvl = (integer) (log((doublereal) (*n) / (doublereal) (*smlsiz + 1)) /
- log(2.)) + 1;
-
- smlszp = *smlsiz + 1;
-
- u = 1;
- vt = *smlsiz * *n + 1;
- difl = vt + smlszp * *n;
- difr = difl + nlvl * *n;
- z__ = difr + ((nlvl * *n) << (1));
- c__ = z__ + nlvl * *n;
- s = c__ + *n;
- poles = s + *n;
- givnum = poles + ((nlvl) << (1)) * *n;
- bx = givnum + ((nlvl) << (1)) * *n;
- nwork = bx + *n * *nrhs;
-
- sizei = *n + 1;
- k = sizei + *n;
- givptr = k + *n;
- perm = givptr + *n;
- givcol = perm + nlvl * *n;
- iwk = givcol + ((nlvl * *n) << (1));
-
- st = 1;
- sqre = 0;
- icmpq1 = 1;
- icmpq2 = 0;
- nsub = 0;
-
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if ((d__1 = d__[i__], abs(d__1)) < eps) {
- d__[i__] = d_sign(&eps, &d__[i__]);
- }
-/* L50: */
- }
-
- i__1 = nm1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) {
- ++nsub;
- iwork[nsub] = st;
-
-/*
- Subproblem found. First determine its size and then
- apply divide and conquer on it.
-*/
-
- if (i__ < nm1) {
-
-/* A subproblem with E(I) small for I < NM1. */
-
- nsize = i__ - st + 1;
- iwork[sizei + nsub - 1] = nsize;
- } else if ((d__1 = e[i__], abs(d__1)) >= eps) {
-
-/* A subproblem with E(NM1) not too small but I = NM1. */
-
- nsize = *n - st + 1;
- iwork[sizei + nsub - 1] = nsize;
- } else {
-
-/*
- A subproblem with E(NM1) small. This implies an
- 1-by-1 subproblem at D(N), which is not solved
- explicitly.
-*/
-
- nsize = i__ - st + 1;
- iwork[sizei + nsub - 1] = nsize;
- ++nsub;
- iwork[nsub] = *n;
- iwork[sizei + nsub - 1] = 1;
- dcopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
- }
- st1 = st - 1;
- if (nsize == 1) {
-
-/*
- This is a 1-by-1 subproblem and is not solved
- explicitly.
-*/
-
- dcopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
- } else if (nsize <= *smlsiz) {
-
-/* This is a small subproblem and is solved by DLASDQ. */
-
- dlaset_("A", &nsize, &nsize, &c_b29, &c_b15, &work[vt + st1],
- n);
- dlasdq_("U", &c__0, &nsize, &nsize, &c__0, nrhs, &d__[st], &e[
- st], &work[vt + st1], n, &work[nwork], n, &b[st +
- b_dim1], ldb, &work[nwork], info);
- if (*info != 0) {
- return 0;
- }
- dlacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
- st1], n);
- } else {
-
-/* A large problem. Solve it using divide and conquer. */
-
- dlasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
- work[u + st1], n, &work[vt + st1], &iwork[k + st1], &
- work[difl + st1], &work[difr + st1], &work[z__ + st1],
- &work[poles + st1], &iwork[givptr + st1], &iwork[
- givcol + st1], n, &iwork[perm + st1], &work[givnum +
- st1], &work[c__ + st1], &work[s + st1], &work[nwork],
- &iwork[iwk], info);
- if (*info != 0) {
- return 0;
- }
- bxst = bx + st1;
- dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
- work[bxst], n, &work[u + st1], n, &work[vt + st1], &
- iwork[k + st1], &work[difl + st1], &work[difr + st1],
- &work[z__ + st1], &work[poles + st1], &iwork[givptr +
- st1], &iwork[givcol + st1], n, &iwork[perm + st1], &
- work[givnum + st1], &work[c__ + st1], &work[s + st1],
- &work[nwork], &iwork[iwk], info);
- if (*info != 0) {
- return 0;
- }
- }
- st = i__ + 1;
- }
-/* L60: */
- }
-
-/* Apply the singular values and treat the tiny ones as zero. */
-
- tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
-
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
-
-/*
- Some of the elements in D can be negative because 1-by-1
- subproblems were not solved explicitly.
-*/
-
- if ((d__1 = d__[i__], abs(d__1)) <= tol) {
- dlaset_("A", &c__1, nrhs, &c_b29, &c_b29, &work[bx + i__ - 1], n);
- } else {
- ++(*rank);
- dlascl_("G", &c__0, &c__0, &d__[i__], &c_b15, &c__1, nrhs, &work[
- bx + i__ - 1], n, info);
- }
- d__[i__] = (d__1 = d__[i__], abs(d__1));
-/* L70: */
- }
-
-/* Now apply back the right singular vectors. */
-
- icmpq2 = 1;
- i__1 = nsub;
- for (i__ = 1; i__ <= i__1; ++i__) {
- st = iwork[i__];
- st1 = st - 1;
- nsize = iwork[sizei + i__ - 1];
- bxst = bx + st1;
- if (nsize == 1) {
- dcopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
- } else if (nsize <= *smlsiz) {
- dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b15, &work[vt + st1], n,
- &work[bxst], n, &c_b29, &b[st + b_dim1], ldb);
- } else {
- dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
- b_dim1], ldb, &work[u + st1], n, &work[vt + st1], &iwork[
- k + st1], &work[difl + st1], &work[difr + st1], &work[z__
- + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[
- givcol + st1], n, &iwork[perm + st1], &work[givnum + st1],
- &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[
- iwk], info);
- if (*info != 0) {
- return 0;
- }
- }
-/* L80: */
- }
-
-/* Unscale and sort the singular values. */
-
- dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, n, &c__1, &d__[1], n, info);
- dlasrt_("D", n, &d__[1], info);
- dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, n, nrhs, &b[b_offset], ldb,
- info);
-
- return 0;
-
-/* End of DLALSD */
-
-} /* dlalsd_ */
-
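-/*
-   Note (illustrative commentary, not part of the translated LAPACK
-   source): the rank decision in DLALSD above uses a relative threshold
-   tol = RCOND * max_i |D(i)|.  Each singular value at or below tol has
-   its 1-by-NRHS block of WORK zeroed (that solution component is
-   discarded), while each kept value is divided out via DLASCL and
-   counted into RANK.  For example, with D = (5, 1, 1e-12) and
-   RCOND = 1e-8, tol = 5e-8 and the routine reports RANK = 2.
-*/
-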
-/* Subroutine */ int dlamrg_(integer *n1, integer *n2, doublereal *a, integer
- *dtrd1, integer *dtrd2, integer *index)
-{
- /* System generated locals */
- integer i__1;
-
- /* Local variables */
- static integer i__, ind1, ind2, n1sv, n2sv;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- DLAMRG will create a permutation list which will merge the elements
- of A (which is composed of two independently sorted sets) into a
- single set which is sorted in ascending order.
-
- Arguments
- =========
-
- N1 (input) INTEGER
- N2 (input) INTEGER
-          These arguments contain the respective lengths of the two
- sorted lists to be merged.
-
- A (input) DOUBLE PRECISION array, dimension (N1+N2)
- The first N1 elements of A contain a list of numbers which
- are sorted in either ascending or descending order. Likewise
- for the final N2 elements.
-
- DTRD1 (input) INTEGER
- DTRD2 (input) INTEGER
- These are the strides to be taken through the array A.
- Allowable strides are 1 and -1. They indicate whether a
- subset of A is sorted in ascending (DTRDx = 1) or descending
- (DTRDx = -1) order.
-
- INDEX (output) INTEGER array, dimension (N1+N2)
- On exit this array will contain a permutation such that
- if B( I ) = A( INDEX( I ) ) for I=1,N1+N2, then B will be
- sorted in ascending order.
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- --index;
- --a;
-
- /* Function Body */
- n1sv = *n1;
- n2sv = *n2;
- if (*dtrd1 > 0) {
- ind1 = 1;
- } else {
- ind1 = *n1;
- }
- if (*dtrd2 > 0) {
- ind2 = *n1 + 1;
- } else {
- ind2 = *n1 + *n2;
- }
- i__ = 1;
-/* while ( (N1SV > 0) & (N2SV > 0) ) */
-L10:
- if ((n1sv > 0 && n2sv > 0)) {
- if (a[ind1] <= a[ind2]) {
- index[i__] = ind1;
- ++i__;
- ind1 += *dtrd1;
- --n1sv;
- } else {
- index[i__] = ind2;
- ++i__;
- ind2 += *dtrd2;
- --n2sv;
- }
- goto L10;
- }
-/* end while */
- if (n1sv == 0) {
- i__1 = n2sv;
- for (n1sv = 1; n1sv <= i__1; ++n1sv) {
- index[i__] = ind2;
- ++i__;
- ind2 += *dtrd2;
-/* L20: */
- }
- } else {
-/* N2SV .EQ. 0 */
- i__1 = n1sv;
- for (n2sv = 1; n2sv <= i__1; ++n2sv) {
- index[i__] = ind1;
- ++i__;
- ind1 += *dtrd1;
-/* L30: */
- }
- }
-
- return 0;
-
-/* End of DLAMRG */
-
-} /* dlamrg_ */
-
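-/*
-   Illustrative sketch (hypothetical helper, not part of the translated
-   LAPACK source): the DLAMRG merge above restated with plain C types,
-   0-based indexing, and both strides fixed at +1 (DLAMRG itself also
-   accepts stride -1 for runs sorted in descending order).  Guarded with
-   #if 0 so it is never compiled.
-*/
-#if 0
-static void lamrg_sketch(int n1, int n2, const double *a, int *index)
-{
-    int i1 = 0;              /* cursor into the first sorted run  */
-    int i2 = n1;             /* cursor into the second sorted run */
-    int i = 0;
-    while (i1 < n1 && i2 < n1 + n2)
-        index[i++] = (a[i1] <= a[i2]) ? i1++ : i2++;
-    while (i1 < n1)          /* drain whichever run is left over  */
-        index[i++] = i1++;
-    while (i2 < n1 + n2)
-        index[i++] = i2++;
-}
-/* a = {1, 3, 2, 4} with n1 = n2 = 2 yields index = {0, 2, 1, 3}. */
-#endif
-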
-doublereal dlange_(char *norm, integer *m, integer *n, doublereal *a, integer
- *lda, doublereal *work)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
- doublereal ret_val, d__1, d__2, d__3;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static integer i__, j;
- static doublereal sum, scale;
- extern logical lsame_(char *, char *);
- static doublereal value;
- extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
- doublereal *, doublereal *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLANGE returns the value of the one norm, or the Frobenius norm, or
- the infinity norm, or the element of largest absolute value of a
- real matrix A.
-
- Description
- ===========
-
- DLANGE returns the value
-
- DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm'
- (
- ( norm1(A), NORM = '1', 'O' or 'o'
- (
- ( normI(A), NORM = 'I' or 'i'
- (
- ( normF(A), NORM = 'F', 'f', 'E' or 'e'
-
- where norm1 denotes the one norm of a matrix (maximum column sum),
- normI denotes the infinity norm of a matrix (maximum row sum) and
- normF denotes the Frobenius norm of a matrix (square root of sum of
- squares). Note that max(abs(A(i,j))) is not a matrix norm.
-
- Arguments
- =========
-
- NORM (input) CHARACTER*1
- Specifies the value to be returned in DLANGE as described
- above.
-
- M (input) INTEGER
- The number of rows of the matrix A. M >= 0. When M = 0,
- DLANGE is set to zero.
-
- N (input) INTEGER
- The number of columns of the matrix A. N >= 0. When N = 0,
- DLANGE is set to zero.
-
- A (input) DOUBLE PRECISION array, dimension (LDA,N)
- The m by n matrix A.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(M,1).
-
- WORK (workspace) DOUBLE PRECISION array, dimension (LWORK),
- where LWORK >= M when NORM = 'I'; otherwise, WORK is not
- referenced.
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --work;
-
- /* Function Body */
- if (min(*m,*n) == 0) {
- value = 0.;
- } else if (lsame_(norm, "M")) {
-
-/* Find max(abs(A(i,j))). */
-
- value = 0.;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
-/* Computing MAX */
- d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
- value = max(d__2,d__3);
-/* L10: */
- }
-/* L20: */
- }
- } else if (lsame_(norm, "O") || *(unsigned char *)
- norm == '1') {
-
-/* Find norm1(A). */
-
- value = 0.;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = 0.;
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
-/* L30: */
- }
- value = max(value,sum);
-/* L40: */
- }
- } else if (lsame_(norm, "I")) {
-
-/* Find normI(A). */
-
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- work[i__] = 0.;
-/* L50: */
- }
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
-/* L60: */
- }
-/* L70: */
- }
- value = 0.;
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
-/* Computing MAX */
- d__1 = value, d__2 = work[i__];
- value = max(d__1,d__2);
-/* L80: */
- }
- } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
-
-/* Find normF(A). */
-
- scale = 0.;
- sum = 1.;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- dlassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
-/* L90: */
- }
- value = scale * sqrt(sum);
- }
-
- ret_val = value;
- return ret_val;
-
-/* End of DLANGE */
-
-} /* dlange_ */
-
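-/*
-   Illustrative sketch (hypothetical helper, not part of the translated
-   LAPACK source): the NORM = '1' branch of DLANGE above, i.e. the
-   maximum absolute column sum of an m-by-n column-major matrix, with
-   plain C types (fabs from <math.h>).  Guarded with #if 0.
-*/
-#if 0
-static double one_norm_sketch(int m, int n, const double *a, int lda)
-{
-    double value = 0.;
-    for (int j = 0; j < n; ++j) {
-        double sum = 0.;
-        for (int i = 0; i < m; ++i)
-            sum += fabs(a[i + j * lda]);  /* walk column j */
-        if (sum > value)
-            value = sum;
-    }
-    return value;                         /* 0. when min(m,n) == 0 */
-}
-#endif
-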
-doublereal dlanhs_(char *norm, integer *n, doublereal *a, integer *lda,
- doublereal *work)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
- doublereal ret_val, d__1, d__2, d__3;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static integer i__, j;
- static doublereal sum, scale;
- extern logical lsame_(char *, char *);
- static doublereal value;
- extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
- doublereal *, doublereal *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLANHS returns the value of the one norm, or the Frobenius norm, or
- the infinity norm, or the element of largest absolute value of a
- Hessenberg matrix A.
-
- Description
- ===========
-
- DLANHS returns the value
-
- DLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm'
- (
- ( norm1(A), NORM = '1', 'O' or 'o'
- (
- ( normI(A), NORM = 'I' or 'i'
- (
- ( normF(A), NORM = 'F', 'f', 'E' or 'e'
-
- where norm1 denotes the one norm of a matrix (maximum column sum),
- normI denotes the infinity norm of a matrix (maximum row sum) and
- normF denotes the Frobenius norm of a matrix (square root of sum of
- squares). Note that max(abs(A(i,j))) is not a matrix norm.
-
- Arguments
- =========
-
- NORM (input) CHARACTER*1
- Specifies the value to be returned in DLANHS as described
- above.
-
- N (input) INTEGER
- The order of the matrix A. N >= 0. When N = 0, DLANHS is
- set to zero.
-
- A (input) DOUBLE PRECISION array, dimension (LDA,N)
- The n by n upper Hessenberg matrix A; the part of A below the
- first sub-diagonal is not referenced.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(N,1).
-
- WORK (workspace) DOUBLE PRECISION array, dimension (LWORK),
- where LWORK >= N when NORM = 'I'; otherwise, WORK is not
- referenced.
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --work;
-
- /* Function Body */
- if (*n == 0) {
- value = 0.;
- } else if (lsame_(norm, "M")) {
-
-/* Find max(abs(A(i,j))). */
-
- value = 0.;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
-/* Computing MIN */
- i__3 = *n, i__4 = j + 1;
- i__2 = min(i__3,i__4);
- for (i__ = 1; i__ <= i__2; ++i__) {
-/* Computing MAX */
- d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
- value = max(d__2,d__3);
-/* L10: */
- }
-/* L20: */
- }
- } else if (lsame_(norm, "O") || *(unsigned char *)
- norm == '1') {
-
-/* Find norm1(A). */
-
- value = 0.;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = 0.;
-/* Computing MIN */
- i__3 = *n, i__4 = j + 1;
- i__2 = min(i__3,i__4);
- for (i__ = 1; i__ <= i__2; ++i__) {
- sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
-/* L30: */
- }
- value = max(value,sum);
-/* L40: */
- }
- } else if (lsame_(norm, "I")) {
-
-/* Find normI(A). */
-
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- work[i__] = 0.;
-/* L50: */
- }
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
-/* Computing MIN */
- i__3 = *n, i__4 = j + 1;
- i__2 = min(i__3,i__4);
- for (i__ = 1; i__ <= i__2; ++i__) {
- work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
-/* L60: */
- }
-/* L70: */
- }
- value = 0.;
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
-/* Computing MAX */
- d__1 = value, d__2 = work[i__];
- value = max(d__1,d__2);
-/* L80: */
- }
- } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
-
-/* Find normF(A). */
-
- scale = 0.;
- sum = 1.;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
-/* Computing MIN */
- i__3 = *n, i__4 = j + 1;
- i__2 = min(i__3,i__4);
- dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
-/* L90: */
- }
- value = scale * sqrt(sum);
- }
-
- ret_val = value;
- return ret_val;
-
-/* End of DLANHS */
-
-} /* dlanhs_ */
-
-doublereal dlanst_(char *norm, integer *n, doublereal *d__, doublereal *e)
-{
- /* System generated locals */
- integer i__1;
- doublereal ret_val, d__1, d__2, d__3, d__4, d__5;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static integer i__;
- static doublereal sum, scale;
- extern logical lsame_(char *, char *);
- static doublereal anorm;
- extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
- doublereal *, doublereal *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DLANST returns the value of the one norm, or the Frobenius norm, or
- the infinity norm, or the element of largest absolute value of a
- real symmetric tridiagonal matrix A.
-
- Description
- ===========
-
- DLANST returns the value
-
- DLANST = ( max(abs(A(i,j))), NORM = 'M' or 'm'
- (
- ( norm1(A), NORM = '1', 'O' or 'o'
- (
- ( normI(A), NORM = 'I' or 'i'
- (
- ( normF(A), NORM = 'F', 'f', 'E' or 'e'
-
- where norm1 denotes the one norm of a matrix (maximum column sum),
- normI denotes the infinity norm of a matrix (maximum row sum) and
- normF denotes the Frobenius norm of a matrix (square root of sum of
- squares). Note that max(abs(A(i,j))) is not a matrix norm.
-
- Arguments
- =========
-
- NORM (input) CHARACTER*1
- Specifies the value to be returned in DLANST as described
- above.
-
- N (input) INTEGER
- The order of the matrix A. N >= 0. When N = 0, DLANST is
- set to zero.
-
- D (input) DOUBLE PRECISION array, dimension (N)
- The diagonal elements of A.
-
- E (input) DOUBLE PRECISION array, dimension (N-1)
- The (n-1) sub-diagonal or super-diagonal elements of A.
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- --e;
- --d__;
-
- /* Function Body */
- if (*n <= 0) {
- anorm = 0.;
- } else if (lsame_(norm, "M")) {
-
-/* Find max(abs(A(i,j))). */
-
- anorm = (d__1 = d__[*n], abs(d__1));
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
-/* Computing MAX */
- d__2 = anorm, d__3 = (d__1 = d__[i__], abs(d__1));
- anorm = max(d__2,d__3);
-/* Computing MAX */
- d__2 = anorm, d__3 = (d__1 = e[i__], abs(d__1));
- anorm = max(d__2,d__3);
-/* L10: */
- }
- } else if (lsame_(norm, "O") || *(unsigned char *)
- norm == '1' || lsame_(norm, "I")) {
-
-/* Find norm1(A). */
-
- if (*n == 1) {
- anorm = abs(d__[1]);
- } else {
-/* Computing MAX */
- d__3 = abs(d__[1]) + abs(e[1]), d__4 = (d__1 = e[*n - 1], abs(
- d__1)) + (d__2 = d__[*n], abs(d__2));
- anorm = max(d__3,d__4);
- i__1 = *n - 1;
- for (i__ = 2; i__ <= i__1; ++i__) {
-/* Computing MAX */
- d__4 = anorm, d__5 = (d__1 = d__[i__], abs(d__1)) + (d__2 = e[
- i__], abs(d__2)) + (d__3 = e[i__ - 1], abs(d__3));
- anorm = max(d__4,d__5);
-/* L20: */
- }
- }
- } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
-
-/* Find normF(A). */
-
- scale = 0.;
- sum = 1.;
- if (*n > 1) {
- i__1 = *n - 1;
- dlassq_(&i__1, &e[1], &c__1, &scale, &sum);
- sum *= 2;
- }
- dlassq_(n, &d__[1], &c__1, &scale, &sum);
- anorm = scale * sqrt(sum);
- }
-
- ret_val = anorm;
- return ret_val;
-
-/* End of DLANST */
-
-} /* dlanst_ */
-
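-/*
-   Illustrative sketch (hypothetical helper, not part of the translated
-   LAPACK source): for a symmetric tridiagonal matrix norm1 = normI, and
-   column i contributes |e(i-1)| + |d(i)| + |e(i)| (dropping the e terms
-   that fall outside the matrix).  That is what the '1'/'O'/'I' branch
-   of DLANST above computes; here restated with 0-based plain C.
-   Guarded with #if 0.
-*/
-#if 0
-static double st_one_norm_sketch(int n, const double *d, const double *e)
-{
-    if (n <= 0)
-        return 0.;
-    if (n == 1)
-        return fabs(d[0]);
-    double anorm = fabs(d[0]) + fabs(e[0]);          /* first column */
-    double last = fabs(e[n - 2]) + fabs(d[n - 1]);   /* last column  */
-    if (last > anorm)
-        anorm = last;
-    for (int i = 1; i < n - 1; ++i) {                /* interior     */
-        double s = fabs(e[i - 1]) + fabs(d[i]) + fabs(e[i]);
-        if (s > anorm)
-            anorm = s;
-    }
-    return anorm;
-}
-#endif
-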
-doublereal dlansy_(char *norm, char *uplo, integer *n, doublereal *a, integer
- *lda, doublereal *work)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
- doublereal ret_val, d__1, d__2, d__3;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static integer i__, j;
- static doublereal sum, absa, scale;
- extern logical lsame_(char *, char *);
- static doublereal value;
- extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
- doublereal *, doublereal *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLANSY returns the value of the one norm, or the Frobenius norm, or
- the infinity norm, or the element of largest absolute value of a
- real symmetric matrix A.
-
- Description
- ===========
-
- DLANSY returns the value
-
- DLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm'
- (
- ( norm1(A), NORM = '1', 'O' or 'o'
- (
- ( normI(A), NORM = 'I' or 'i'
- (
- ( normF(A), NORM = 'F', 'f', 'E' or 'e'
-
- where norm1 denotes the one norm of a matrix (maximum column sum),
- normI denotes the infinity norm of a matrix (maximum row sum) and
- normF denotes the Frobenius norm of a matrix (square root of sum of
- squares). Note that max(abs(A(i,j))) is not a matrix norm.
-
- Arguments
- =========
-
- NORM (input) CHARACTER*1
- Specifies the value to be returned in DLANSY as described
- above.
-
- UPLO (input) CHARACTER*1
- Specifies whether the upper or lower triangular part of the
- symmetric matrix A is to be referenced.
- = 'U': Upper triangular part of A is referenced
- = 'L': Lower triangular part of A is referenced
-
- N (input) INTEGER
- The order of the matrix A. N >= 0. When N = 0, DLANSY is
- set to zero.
-
- A (input) DOUBLE PRECISION array, dimension (LDA,N)
- The symmetric matrix A. If UPLO = 'U', the leading n by n
- upper triangular part of A contains the upper triangular part
- of the matrix A, and the strictly lower triangular part of A
- is not referenced. If UPLO = 'L', the leading n by n lower
- triangular part of A contains the lower triangular part of
- the matrix A, and the strictly upper triangular part of A is
- not referenced.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(N,1).
-
- WORK (workspace) DOUBLE PRECISION array, dimension (LWORK),
- where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise,
- WORK is not referenced.
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --work;
-
- /* Function Body */
- if (*n == 0) {
- value = 0.;
- } else if (lsame_(norm, "M")) {
-
-/* Find max(abs(A(i,j))). */
-
- value = 0.;
- if (lsame_(uplo, "U")) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = j;
- for (i__ = 1; i__ <= i__2; ++i__) {
-/* Computing MAX */
- d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
- d__1));
- value = max(d__2,d__3);
-/* L10: */
- }
-/* L20: */
- }
- } else {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = j; i__ <= i__2; ++i__) {
-/* Computing MAX */
- d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
- d__1));
- value = max(d__2,d__3);
-/* L30: */
- }
-/* L40: */
- }
- }
- } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
-
-/* Find normI(A) ( = norm1(A), since A is symmetric). */
-
- value = 0.;
- if (lsame_(uplo, "U")) {
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = 0.;
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- absa = (d__1 = a[i__ + j * a_dim1], abs(d__1));
- sum += absa;
- work[i__] += absa;
-/* L50: */
- }
- work[j] = sum + (d__1 = a[j + j * a_dim1], abs(d__1));
-/* L60: */
- }
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
-/* Computing MAX */
- d__1 = value, d__2 = work[i__];
- value = max(d__1,d__2);
-/* L70: */
- }
- } else {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- work[i__] = 0.;
-/* L80: */
- }
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = work[j] + (d__1 = a[j + j * a_dim1], abs(d__1));
- i__2 = *n;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- absa = (d__1 = a[i__ + j * a_dim1], abs(d__1));
- sum += absa;
- work[i__] += absa;
-/* L90: */
- }
- value = max(value,sum);
-/* L100: */
- }
- }
- } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
-
-/* Find normF(A). */
-
- scale = 0.;
- sum = 1.;
- if (lsame_(uplo, "U")) {
- i__1 = *n;
- for (j = 2; j <= i__1; ++j) {
- i__2 = j - 1;
- dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
-/* L110: */
- }
- } else {
- i__1 = *n - 1;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n - j;
- dlassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
-/* L120: */
- }
- }
- sum *= 2;
- i__1 = *lda + 1;
- dlassq_(n, &a[a_offset], &i__1, &scale, &sum);
- value = scale * sqrt(sum);
- }
-
- ret_val = value;
- return ret_val;
-
-/* End of DLANSY */
-
-} /* dlansy_ */
-
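-/*
-   Note (illustrative commentary, not part of the translated LAPACK
-   source): because only one triangle of A is stored, the '1'/'O'/'I'
-   branch of DLANSY above cannot sum whole columns directly.  For
-   UPLO = 'U' it walks the stored part of column j once, adding
-   |a(i,j)| both into column j's running sum and into WORK(i), which
-   accumulates the mirrored contribution |a(j,i)| for column i; since
-   norm1(A) = normI(A) for symmetric A, the answer is the max over WORK.
-*/
-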
-/* Subroutine */ int dlanv2_(doublereal *a, doublereal *b, doublereal *c__,
- doublereal *d__, doublereal *rt1r, doublereal *rt1i, doublereal *rt2r,
- doublereal *rt2i, doublereal *cs, doublereal *sn)
-{
- /* System generated locals */
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double d_sign(doublereal *, doublereal *), sqrt(doublereal);
-
- /* Local variables */
- static doublereal p, z__, aa, bb, cc, dd, cs1, sn1, sab, sac, eps, tau,
- temp, scale, bcmax, bcmis, sigma;
-
-
-
-/*
-    -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLANV2 computes the Schur factorization of a real 2-by-2 nonsymmetric
- matrix in standard form:
-
- [ A B ] = [ CS -SN ] [ AA BB ] [ CS SN ]
- [ C D ] [ SN CS ] [ CC DD ] [-SN CS ]
-
- where either
- 1) CC = 0 so that AA and DD are real eigenvalues of the matrix, or
- 2) AA = DD and BB*CC < 0, so that AA + or - sqrt(BB*CC) are complex
- conjugate eigenvalues.
-
- Arguments
- =========
-
- A (input/output) DOUBLE PRECISION
- B (input/output) DOUBLE PRECISION
- C (input/output) DOUBLE PRECISION
- D (input/output) DOUBLE PRECISION
- On entry, the elements of the input matrix.
- On exit, they are overwritten by the elements of the
- standardised Schur form.
-
- RT1R (output) DOUBLE PRECISION
- RT1I (output) DOUBLE PRECISION
- RT2R (output) DOUBLE PRECISION
- RT2I (output) DOUBLE PRECISION
- The real and imaginary parts of the eigenvalues. If the
- eigenvalues are a complex conjugate pair, RT1I > 0.
-
- CS (output) DOUBLE PRECISION
- SN (output) DOUBLE PRECISION
- Parameters of the rotation matrix.
-
- Further Details
- ===============
-
- Modified by V. Sima, Research Institute for Informatics, Bucharest,
- Romania, to reduce the risk of cancellation errors,
- when computing real eigenvalues, and to ensure, if possible, that
- abs(RT1R) >= abs(RT2R).
-
- =====================================================================
-*/
-
-
- eps = PRECISION;
- if (*c__ == 0.) {
- *cs = 1.;
- *sn = 0.;
- goto L10;
-
- } else if (*b == 0.) {
-
-/* Swap rows and columns */
-
- *cs = 0.;
- *sn = 1.;
- temp = *d__;
- *d__ = *a;
- *a = temp;
- *b = -(*c__);
- *c__ = 0.;
- goto L10;
-    } else if ((*a - *d__ == 0. && d_sign(&c_b15, b) != d_sign(&c_b15, c__))) {
- *cs = 1.;
- *sn = 0.;
- goto L10;
- } else {
-
- temp = *a - *d__;
- p = temp * .5;
-/* Computing MAX */
- d__1 = abs(*b), d__2 = abs(*c__);
- bcmax = max(d__1,d__2);
-/* Computing MIN */
- d__1 = abs(*b), d__2 = abs(*c__);
- bcmis = min(d__1,d__2) * d_sign(&c_b15, b) * d_sign(&c_b15, c__);
-/* Computing MAX */
- d__1 = abs(p);
- scale = max(d__1,bcmax);
- z__ = p / scale * p + bcmax / scale * bcmis;
-
-/*
- If Z is of the order of the machine accuracy, postpone the
- decision on the nature of eigenvalues
-*/
-
- if (z__ >= eps * 4.) {
-
-/* Real eigenvalues. Compute A and D. */
-
- d__1 = sqrt(scale) * sqrt(z__);
- z__ = p + d_sign(&d__1, &p);
- *a = *d__ + z__;
- *d__ -= bcmax / z__ * bcmis;
-
-/* Compute B and the rotation matrix */
-
- tau = dlapy2_(c__, &z__);
- *cs = z__ / tau;
- *sn = *c__ / tau;
- *b -= *c__;
- *c__ = 0.;
- } else {
-
-/*
- Complex eigenvalues, or real (almost) equal eigenvalues.
- Make diagonal elements equal.
-*/
-
- sigma = *b + *c__;
- tau = dlapy2_(&sigma, &temp);
- *cs = sqrt((abs(sigma) / tau + 1.) * .5);
- *sn = -(p / (tau * *cs)) * d_sign(&c_b15, &sigma);
-
-/*
- Compute [ AA BB ] = [ A B ] [ CS -SN ]
- [ CC DD ] [ C D ] [ SN CS ]
-*/
-
- aa = *a * *cs + *b * *sn;
- bb = -(*a) * *sn + *b * *cs;
- cc = *c__ * *cs + *d__ * *sn;
- dd = -(*c__) * *sn + *d__ * *cs;
-
-/*
- Compute [ A B ] = [ CS SN ] [ AA BB ]
- [ C D ] [-SN CS ] [ CC DD ]
-*/
-
- *a = aa * *cs + cc * *sn;
- *b = bb * *cs + dd * *sn;
- *c__ = -aa * *sn + cc * *cs;
- *d__ = -bb * *sn + dd * *cs;
-
- temp = (*a + *d__) * .5;
- *a = temp;
- *d__ = temp;
-
- if (*c__ != 0.) {
- if (*b != 0.) {
- if (d_sign(&c_b15, b) == d_sign(&c_b15, c__)) {
-
-/* Real eigenvalues: reduce to upper triangular form */
-
- sab = sqrt((abs(*b)));
- sac = sqrt((abs(*c__)));
- d__1 = sab * sac;
- p = d_sign(&d__1, c__);
- tau = 1. / sqrt((d__1 = *b + *c__, abs(d__1)));
- *a = temp + p;
- *d__ = temp - p;
- *b -= *c__;
- *c__ = 0.;
- cs1 = sab * tau;
- sn1 = sac * tau;
- temp = *cs * cs1 - *sn * sn1;
- *sn = *cs * sn1 + *sn * cs1;
- *cs = temp;
- }
- } else {
- *b = -(*c__);
- *c__ = 0.;
- temp = *cs;
- *cs = -(*sn);
- *sn = temp;
- }
- }
- }
-
- }
-
-L10:
-
-/* Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). */
-
- *rt1r = *a;
- *rt2r = *d__;
- if (*c__ == 0.) {
- *rt1i = 0.;
- *rt2i = 0.;
- } else {
- *rt1i = sqrt((abs(*b))) * sqrt((abs(*c__)));
- *rt2i = -(*rt1i);
- }
- return 0;
-
-/* End of DLANV2 */
-
-} /* dlanv2_ */
-
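-/*
-   Worked example (illustrative commentary, not part of the translated
-   LAPACK source): for the input
-
-       [ A  B ]   [ 1  4 ]
-       [ C  D ] = [ 1  1 ],
-
-   the real-eigenvalue branch above runs with p = 0, bcmax = 4,
-   bcmis = 1, scale = 4, z = 0 + (4/4)*1 = 1 >= 4*eps, then
-   z := p + sign(sqrt(scale)*sqrt(z), p) = 2, A := D + z = 3 and
-   D := D - (bcmax/z)*bcmis = -1, the roots of lambda^2 - 2*lambda - 3.
-   The rotation comes from tau = dlapy2(C, z) = sqrt(5), giving
-   CS = 2/sqrt(5), SN = 1/sqrt(5), and finally B := B - C = 3, C := 0.
-*/
-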
-doublereal dlapy2_(doublereal *x, doublereal *y)
-{
- /* System generated locals */
- doublereal ret_val, d__1;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal w, z__, xabs, yabs;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLAPY2 returns sqrt(x**2+y**2), taking care not to cause unnecessary
- overflow.
-
- Arguments
- =========
-
- X (input) DOUBLE PRECISION
- Y (input) DOUBLE PRECISION
- X and Y specify the values x and y.
-
- =====================================================================
-*/
-
-
- xabs = abs(*x);
- yabs = abs(*y);
- w = max(xabs,yabs);
- z__ = min(xabs,yabs);
- if (z__ == 0.) {
- ret_val = w;
- } else {
-/* Computing 2nd power */
- d__1 = z__ / w;
- ret_val = w * sqrt(d__1 * d__1 + 1.);
- }
- return ret_val;
-
-/* End of DLAPY2 */
-
-} /* dlapy2_ */
-
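-/*
-   Illustrative sketch (hypothetical helper, not part of the translated
-   LAPACK source): the scaling trick used by DLAPY2 above.  Naively,
-   x = y = 1e200 overflows in x*x even though the result is
-   representable; factoring out w = max(|x|, |y|) keeps the squared
-   ratio <= 1.  Guarded with #if 0.
-*/
-#if 0
-static double hypot_sketch(double x, double y)
-{
-    double xa = fabs(x), ya = fabs(y);
-    double w = xa > ya ? xa : ya;   /* larger magnitude  */
-    double z = xa > ya ? ya : xa;   /* smaller magnitude */
-    if (z == 0.)
-        return w;                   /* also covers (0, 0) */
-    double r = z / w;               /* r <= 1, so r*r cannot overflow */
-    return w * sqrt(1. + r * r);
-}
-#endif
-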
-doublereal dlapy3_(doublereal *x, doublereal *y, doublereal *z__)
-{
- /* System generated locals */
- doublereal ret_val, d__1, d__2, d__3;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal w, xabs, yabs, zabs;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLAPY3 returns sqrt(x**2+y**2+z**2), taking care not to cause
- unnecessary overflow.
-
- Arguments
- =========
-
- X (input) DOUBLE PRECISION
- Y (input) DOUBLE PRECISION
- Z (input) DOUBLE PRECISION
- X, Y and Z specify the values x, y and z.
-
- =====================================================================
-*/
-
-
- xabs = abs(*x);
- yabs = abs(*y);
- zabs = abs(*z__);
-/* Computing MAX */
- d__1 = max(xabs,yabs);
- w = max(d__1,zabs);
- if (w == 0.) {
- ret_val = 0.;
- } else {
-/* Computing 2nd power */
- d__1 = xabs / w;
-/* Computing 2nd power */
- d__2 = yabs / w;
-/* Computing 2nd power */
- d__3 = zabs / w;
- ret_val = w * sqrt(d__1 * d__1 + d__2 * d__2 + d__3 * d__3);
- }
- return ret_val;
-
-/* End of DLAPY3 */
-
-} /* dlapy3_ */
-
-/* Subroutine */ int dlarf_(char *side, integer *m, integer *n, doublereal *v,
- integer *incv, doublereal *tau, doublereal *c__, integer *ldc,
- doublereal *work)
-{
- /* System generated locals */
- integer c_dim1, c_offset;
- doublereal d__1;
-
- /* Local variables */
- extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- integer *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, integer *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DLARF applies a real elementary reflector H to a real m by n matrix
- C, from either the left or the right. H is represented in the form
-
- H = I - tau * v * v'
-
- where tau is a real scalar and v is a real vector.
-
- If tau = 0, then H is taken to be the unit matrix.
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- = 'L': form H * C
- = 'R': form C * H
-
- M (input) INTEGER
- The number of rows of the matrix C.
-
- N (input) INTEGER
- The number of columns of the matrix C.
-
- V (input) DOUBLE PRECISION array, dimension
- (1 + (M-1)*abs(INCV)) if SIDE = 'L'
- or (1 + (N-1)*abs(INCV)) if SIDE = 'R'
- The vector v in the representation of H. V is not used if
- TAU = 0.
-
- INCV (input) INTEGER
- The increment between elements of v. INCV <> 0.
-
- TAU (input) DOUBLE PRECISION
- The value tau in the representation of H.
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
- On entry, the m by n matrix C.
- On exit, C is overwritten by the matrix H * C if SIDE = 'L',
- or C * H if SIDE = 'R'.
-
- LDC (input) INTEGER
- The leading dimension of the array C. LDC >= max(1,M).
-
- WORK (workspace) DOUBLE PRECISION array, dimension
- (N) if SIDE = 'L'
- or (M) if SIDE = 'R'
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- --v;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- if (lsame_(side, "L")) {
-
-/* Form H * C */
-
- if (*tau != 0.) {
-
-/* w := C' * v */
-
- dgemv_("Transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1],
- incv, &c_b29, &work[1], &c__1);
-
-/* C := C - v * w' */
-
- d__1 = -(*tau);
- dger_(m, n, &d__1, &v[1], incv, &work[1], &c__1, &c__[c_offset],
- ldc);
- }
- } else {
-
-/* Form C * H */
-
- if (*tau != 0.) {
-
-/* w := C * v */
-
- dgemv_("No transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1],
- incv, &c_b29, &work[1], &c__1);
-
-/* C := C - w * v' */
-
- d__1 = -(*tau);
- dger_(m, n, &d__1, &work[1], &c__1, &v[1], incv, &c__[c_offset],
- ldc);
- }
- }
- return 0;
-
-/* End of DLARF */
-
-} /* dlarf_ */
-
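-/*
-   Illustrative sketch (hypothetical helper, not part of the translated
-   LAPACK source): applying H = I - tau*v*v' from the left never forms
-   H explicitly; as in DLARF above it costs one matrix-vector product
-   plus one rank-1 update, H*C = C - tau * v * (C'*v)'.  Guarded with
-   #if 0.
-*/
-#if 0
-static void larf_left_sketch(int m, int n, const double *v, double tau,
-                             double *c, int ldc, double *work /* len n */)
-{
-    /* work := C' * v  (the DGEMV step) */
-    for (int j = 0; j < n; ++j) {
-        double s = 0.;
-        for (int i = 0; i < m; ++i)
-            s += c[i + j * ldc] * v[i];
-        work[j] = s;
-    }
-    /* C := C - tau * v * work'  (the DGER rank-1 update) */
-    for (int j = 0; j < n; ++j)
-        for (int i = 0; i < m; ++i)
-            c[i + j * ldc] -= tau * v[i] * work[j];
-}
-#endif
-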
-/* Subroutine */ int dlarfb_(char *side, char *trans, char *direct, char *
- storev, integer *m, integer *n, integer *k, doublereal *v, integer *
- ldv, doublereal *t, integer *ldt, doublereal *c__, integer *ldc,
- doublereal *work, integer *ldwork)
-{
- /* System generated locals */
- integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
- work_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, j;
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *), dtrmm_(char *, char *, char *, char *,
- integer *, integer *, doublereal *, doublereal *, integer *,
- doublereal *, integer *);
- static char transt[1];
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DLARFB applies a real block reflector H or its transpose H' to a
- real m by n matrix C, from either the left or the right.
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- = 'L': apply H or H' from the Left
- = 'R': apply H or H' from the Right
-
- TRANS (input) CHARACTER*1
- = 'N': apply H (No transpose)
- = 'T': apply H' (Transpose)
-
- DIRECT (input) CHARACTER*1
- Indicates how H is formed from a product of elementary
- reflectors
- = 'F': H = H(1) H(2) . . . H(k) (Forward)
- = 'B': H = H(k) . . . H(2) H(1) (Backward)
-
- STOREV (input) CHARACTER*1
- Indicates how the vectors which define the elementary
- reflectors are stored:
- = 'C': Columnwise
- = 'R': Rowwise
-
- M (input) INTEGER
- The number of rows of the matrix C.
-
- N (input) INTEGER
- The number of columns of the matrix C.
-
- K (input) INTEGER
- The order of the matrix T (= the number of elementary
- reflectors whose product defines the block reflector).
-
- V (input) DOUBLE PRECISION array, dimension
- (LDV,K) if STOREV = 'C'
- (LDV,M) if STOREV = 'R' and SIDE = 'L'
- (LDV,N) if STOREV = 'R' and SIDE = 'R'
- The matrix V. See further details.
-
- LDV (input) INTEGER
- The leading dimension of the array V.
- If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M);
- if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N);
- if STOREV = 'R', LDV >= K.
-
- T (input) DOUBLE PRECISION array, dimension (LDT,K)
- The triangular k by k matrix T in the representation of the
- block reflector.
-
- LDT (input) INTEGER
- The leading dimension of the array T. LDT >= K.
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
- On entry, the m by n matrix C.
- On exit, C is overwritten by H*C or H'*C or C*H or C*H'.
-
- LDC (input) INTEGER
-            The leading dimension of the array C. LDC >= max(1,M).
-
- WORK (workspace) DOUBLE PRECISION array, dimension (LDWORK,K)
-
- LDWORK (input) INTEGER
- The leading dimension of the array WORK.
- If SIDE = 'L', LDWORK >= max(1,N);
- if SIDE = 'R', LDWORK >= max(1,M).
-
- =====================================================================
-
-
- Quick return if possible
-*/
-
- /* Parameter adjustments */
- v_dim1 = *ldv;
- v_offset = 1 + v_dim1 * 1;
- v -= v_offset;
- t_dim1 = *ldt;
- t_offset = 1 + t_dim1 * 1;
- t -= t_offset;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- work_dim1 = *ldwork;
- work_offset = 1 + work_dim1 * 1;
- work -= work_offset;
-
- /* Function Body */
- if (*m <= 0 || *n <= 0) {
- return 0;
- }
-
- if (lsame_(trans, "N")) {
- *(unsigned char *)transt = 'T';
- } else {
- *(unsigned char *)transt = 'N';
- }
-
- if (lsame_(storev, "C")) {
-
- if (lsame_(direct, "F")) {
-
-/*
- Let V = ( V1 ) (first K rows)
- ( V2 )
- where V1 is unit lower triangular.
-*/
-
- if (lsame_(side, "L")) {
-
-/*
- Form H * C or H' * C where C = ( C1 )
- ( C2 )
-
- W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
-
- W := C1'
-*/
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
- &c__1);
-/* L10: */
- }
-
-/* W := W * V1 */
-
- dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b15,
- &v[v_offset], ldv, &work[work_offset], ldwork);
- if (*m > *k) {
-
-/* W := W + C2'*V2 */
-
- i__1 = *m - *k;
- dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b15, &
- c__[*k + 1 + c_dim1], ldc, &v[*k + 1 + v_dim1],
- ldv, &c_b15, &work[work_offset], ldwork);
- }
-
-/* W := W * T' or W * T */
-
- dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b15, &t[
- t_offset], ldt, &work[work_offset], ldwork);
-
-/* C := C - V * W' */
-
- if (*m > *k) {
-
-/* C2 := C2 - V2 * W' */
-
- i__1 = *m - *k;
- dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b151,
- &v[*k + 1 + v_dim1], ldv, &work[work_offset],
- ldwork, &c_b15, &c__[*k + 1 + c_dim1], ldc);
- }
-
-/* W := W * V1' */
-
- dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b15, &
- v[v_offset], ldv, &work[work_offset], ldwork);
-
-/* C1 := C1 - W' */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
-/* L20: */
- }
-/* L30: */
- }
-
- } else if (lsame_(side, "R")) {
-
-/*
- Form C * H or C * H' where C = ( C1 C2 )
-
- W := C * V = (C1*V1 + C2*V2) (stored in WORK)
-
- W := C1
-*/
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
- work_dim1 + 1], &c__1);
-/* L40: */
- }
-
-/* W := W * V1 */
-
- dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b15,
- &v[v_offset], ldv, &work[work_offset], ldwork);
- if (*n > *k) {
-
-/* W := W + C2 * V2 */
-
- i__1 = *n - *k;
- dgemm_("No transpose", "No transpose", m, k, &i__1, &
- c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k +
- 1 + v_dim1], ldv, &c_b15, &work[work_offset],
- ldwork);
- }
-
-/* W := W * T or W * T' */
-
- dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b15, &t[
- t_offset], ldt, &work[work_offset], ldwork);
-
-/* C := C - W * V' */
-
- if (*n > *k) {
-
-/* C2 := C2 - W * V2' */
-
- i__1 = *n - *k;
- dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b151,
- &work[work_offset], ldwork, &v[*k + 1 + v_dim1],
- ldv, &c_b15, &c__[(*k + 1) * c_dim1 + 1], ldc);
- }
-
-/* W := W * V1' */
-
- dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b15, &
- v[v_offset], ldv, &work[work_offset], ldwork);
-
-/* C1 := C1 - W */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
-/* L50: */
- }
-/* L60: */
- }
- }
-
- } else {
-
-/*
- Let V = ( V1 )
- ( V2 ) (last K rows)
- where V2 is unit upper triangular.
-*/
-
- if (lsame_(side, "L")) {
-
-/*
- Form H * C or H' * C where C = ( C1 )
- ( C2 )
-
- W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
-
- W := C2'
-*/
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
- work_dim1 + 1], &c__1);
-/* L70: */
- }
-
-/* W := W * V2 */
-
- dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b15,
- &v[*m - *k + 1 + v_dim1], ldv, &work[work_offset],
- ldwork);
- if (*m > *k) {
-
-/* W := W + C1'*V1 */
-
- i__1 = *m - *k;
- dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b15, &
- c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, &
- work[work_offset], ldwork);
- }
-
-/* W := W * T' or W * T */
-
- dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b15, &t[
- t_offset], ldt, &work[work_offset], ldwork);
-
-/* C := C - V * W' */
-
- if (*m > *k) {
-
-/* C1 := C1 - V1 * W' */
-
- i__1 = *m - *k;
- dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b151,
- &v[v_offset], ldv, &work[work_offset], ldwork, &
-                            c_b15, &c__[c_offset], ldc);
- }
-
-/* W := W * V2' */
-
- dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b15, &
- v[*m - *k + 1 + v_dim1], ldv, &work[work_offset],
- ldwork);
-
-/* C2 := C2 - W' */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j *
- work_dim1];
-/* L80: */
- }
-/* L90: */
- }
-
- } else if (lsame_(side, "R")) {
-
-/*
- Form C * H or C * H' where C = ( C1 C2 )
-
- W := C * V = (C1*V1 + C2*V2) (stored in WORK)
-
- W := C2
-*/
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
- j * work_dim1 + 1], &c__1);
-/* L100: */
- }
-
-/* W := W * V2 */
-
- dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b15,
- &v[*n - *k + 1 + v_dim1], ldv, &work[work_offset],
- ldwork);
- if (*n > *k) {
-
-/* W := W + C1 * V1 */
-
- i__1 = *n - *k;
- dgemm_("No transpose", "No transpose", m, k, &i__1, &
- c_b15, &c__[c_offset], ldc, &v[v_offset], ldv, &
- c_b15, &work[work_offset], ldwork);
- }
-
-/* W := W * T or W * T' */
-
- dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b15, &t[
- t_offset], ldt, &work[work_offset], ldwork);
-
-/* C := C - W * V' */
-
- if (*n > *k) {
-
-/* C1 := C1 - W * V1' */
-
- i__1 = *n - *k;
- dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b151,
- &work[work_offset], ldwork, &v[v_offset], ldv, &
-                            c_b15, &c__[c_offset], ldc);
- }
-
-/* W := W * V2' */
-
- dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b15, &
- v[*n - *k + 1 + v_dim1], ldv, &work[work_offset],
- ldwork);
-
-/* C2 := C2 - W */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j *
- work_dim1];
-/* L110: */
- }
-/* L120: */
- }
- }
- }
-
- } else if (lsame_(storev, "R")) {
-
- if (lsame_(direct, "F")) {
-
-/*
- Let V = ( V1 V2 ) (V1: first K columns)
- where V1 is unit upper triangular.
-*/
-
- if (lsame_(side, "L")) {
-
-/*
- Form H * C or H' * C where C = ( C1 )
- ( C2 )
-
- W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
-
- W := C1'
-*/
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
- &c__1);
-/* L130: */
- }
-
-/* W := W * V1' */
-
- dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b15, &
- v[v_offset], ldv, &work[work_offset], ldwork);
- if (*m > *k) {
-
-/* W := W + C2'*V2' */
-
- i__1 = *m - *k;
- dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b15, &
- c__[*k + 1 + c_dim1], ldc, &v[(*k + 1) * v_dim1 +
- 1], ldv, &c_b15, &work[work_offset], ldwork);
- }
-
-/* W := W * T' or W * T */
-
- dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b15, &t[
- t_offset], ldt, &work[work_offset], ldwork);
-
-/* C := C - V' * W' */
-
- if (*m > *k) {
-
-/* C2 := C2 - V2' * W' */
-
- i__1 = *m - *k;
- dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b151, &v[
- (*k + 1) * v_dim1 + 1], ldv, &work[work_offset],
- ldwork, &c_b15, &c__[*k + 1 + c_dim1], ldc);
- }
-
-/* W := W * V1 */
-
- dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b15,
- &v[v_offset], ldv, &work[work_offset], ldwork);
-
-/* C1 := C1 - W' */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
-/* L140: */
- }
-/* L150: */
- }
-
- } else if (lsame_(side, "R")) {
-
-/*
- Form C * H or C * H' where C = ( C1 C2 )
-
- W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
-
- W := C1
-*/
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
- work_dim1 + 1], &c__1);
-/* L160: */
- }
-
-/* W := W * V1' */
-
- dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b15, &
- v[v_offset], ldv, &work[work_offset], ldwork);
- if (*n > *k) {
-
-/* W := W + C2 * V2' */
-
- i__1 = *n - *k;
- dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b15, &
- c__[(*k + 1) * c_dim1 + 1], ldc, &v[(*k + 1) *
- v_dim1 + 1], ldv, &c_b15, &work[work_offset],
- ldwork);
- }
-
-/* W := W * T or W * T' */
-
- dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b15, &t[
- t_offset], ldt, &work[work_offset], ldwork);
-
-/* C := C - W * V */
-
- if (*n > *k) {
-
-/* C2 := C2 - W * V2 */
-
- i__1 = *n - *k;
- dgemm_("No transpose", "No transpose", m, &i__1, k, &
- c_b151, &work[work_offset], ldwork, &v[(*k + 1) *
- v_dim1 + 1], ldv, &c_b15, &c__[(*k + 1) * c_dim1
- + 1], ldc);
- }
-
-/* W := W * V1 */
-
- dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b15,
- &v[v_offset], ldv, &work[work_offset], ldwork);
-
-/* C1 := C1 - W */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
-/* L170: */
- }
-/* L180: */
- }
-
- }
-
- } else {
-
-/*
- Let V = ( V1 V2 ) (V2: last K columns)
- where V2 is unit lower triangular.
-*/
-
- if (lsame_(side, "L")) {
-
-/*
- Form H * C or H' * C where C = ( C1 )
- ( C2 )
-
- W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
-
- W := C2'
-*/
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
- work_dim1 + 1], &c__1);
-/* L190: */
- }
-
-/* W := W * V2' */
-
- dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b15, &
- v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[work_offset]
- , ldwork);
- if (*m > *k) {
-
-/* W := W + C1'*V1' */
-
- i__1 = *m - *k;
- dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b15, &
- c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, &
- work[work_offset], ldwork);
- }
-
-/* W := W * T' or W * T */
-
- dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b15, &t[
- t_offset], ldt, &work[work_offset], ldwork);
-
-/* C := C - V' * W' */
-
- if (*m > *k) {
-
-/* C1 := C1 - V1' * W' */
-
- i__1 = *m - *k;
- dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b151, &v[
- v_offset], ldv, &work[work_offset], ldwork, &
- c_b15, &c__[c_offset], ldc);
- }
-
-/* W := W * V2 */
-
- dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b15,
- &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[
- work_offset], ldwork);
-
-/* C2 := C2 - W' */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j *
- work_dim1];
-/* L200: */
- }
-/* L210: */
- }
-
- } else if (lsame_(side, "R")) {
-
-/*
- Form C * H or C * H' where C = ( C1 C2 )
-
- W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
-
- W := C2
-*/
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
- j * work_dim1 + 1], &c__1);
-/* L220: */
- }
-
-/* W := W * V2' */
-
- dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b15, &
- v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[work_offset]
- , ldwork);
- if (*n > *k) {
-
-/* W := W + C1 * V1' */
-
- i__1 = *n - *k;
- dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b15, &
- c__[c_offset], ldc, &v[v_offset], ldv, &c_b15, &
- work[work_offset], ldwork);
- }
-
-/* W := W * T or W * T' */
-
- dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b15, &t[
- t_offset], ldt, &work[work_offset], ldwork);
-
-/* C := C - W * V */
-
- if (*n > *k) {
-
-/* C1 := C1 - W * V1 */
-
- i__1 = *n - *k;
- dgemm_("No transpose", "No transpose", m, &i__1, k, &
- c_b151, &work[work_offset], ldwork, &v[v_offset],
- ldv, &c_b15, &c__[c_offset], ldc);
- }
-
-/* W := W * V2 */
-
- dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b15,
- &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[
- work_offset], ldwork);
-
-/* C1 := C1 - W */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j *
- work_dim1];
-/* L230: */
- }
-/* L240: */
- }
-
- }
-
- }
- }
-
- return 0;
-
-/* End of DLARFB */
-
-} /* dlarfb_ */
-
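-/*
-   Note (illustrative commentary, not part of the translated LAPACK
-   source): with the block representation H = I - V*T*V', every
-   SIDE = 'L' case above reduces to three BLAS-3 steps
-
-       W := C' * V        (an n-by-k workspace)
-       W := W * T'        (or W * T, when applying H' instead of H)
-       C := C - V * W'
-
-   where the triangular part of V (unit diagonal plus zero triangle) is
-   applied with DTRMM so those entries are never read from memory, and
-   only the rectangular remainder of V needs a DGEMM.
-*/
-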
-/* Subroutine */ int dlarfg_(integer *n, doublereal *alpha, doublereal *x,
- integer *incx, doublereal *tau)
-{
- /* System generated locals */
- integer i__1;
- doublereal d__1;
-
- /* Builtin functions */
- double d_sign(doublereal *, doublereal *);
-
- /* Local variables */
- static integer j, knt;
- static doublereal beta;
- extern doublereal dnrm2_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *);
- static doublereal xnorm;
-
- static doublereal safmin, rsafmn;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- DLARFG generates a real elementary reflector H of order n, such
- that
-
- H * ( alpha ) = ( beta ), H' * H = I.
- ( x ) ( 0 )
-
- where alpha and beta are scalars, and x is an (n-1)-element real
- vector. H is represented in the form
-
- H = I - tau * ( 1 ) * ( 1 v' ) ,
- ( v )
-
- where tau is a real scalar and v is a real (n-1)-element
- vector.
-
- If the elements of x are all zero, then tau = 0 and H is taken to be
- the unit matrix.
-
- Otherwise 1 <= tau <= 2.
-
- Arguments
- =========
-
- N (input) INTEGER
- The order of the elementary reflector.
-
- ALPHA (input/output) DOUBLE PRECISION
- On entry, the value alpha.
- On exit, it is overwritten with the value beta.
-
- X (input/output) DOUBLE PRECISION array, dimension
- (1+(N-2)*abs(INCX))
- On entry, the vector x.
- On exit, it is overwritten with the vector v.
-
- INCX (input) INTEGER
- The increment between elements of X. INCX > 0.
-
- TAU (output) DOUBLE PRECISION
- The value tau.
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- --x;
-
- /* Function Body */
- if (*n <= 1) {
- *tau = 0.;
- return 0;
- }
-
- i__1 = *n - 1;
- xnorm = dnrm2_(&i__1, &x[1], incx);
-
- if (xnorm == 0.) {
-
-/* H = I */
-
- *tau = 0.;
- } else {
-
-/* general case */
-
- d__1 = dlapy2_(alpha, &xnorm);
- beta = -d_sign(&d__1, alpha);
- safmin = SAFEMINIMUM / EPSILON;
- if (abs(beta) < safmin) {
-
-/* XNORM, BETA may be inaccurate; scale X and recompute them */
-
- rsafmn = 1. / safmin;
- knt = 0;
-L10:
- ++knt;
- i__1 = *n - 1;
- dscal_(&i__1, &rsafmn, &x[1], incx);
- beta *= rsafmn;
- *alpha *= rsafmn;
- if (abs(beta) < safmin) {
- goto L10;
- }
-
-/* New BETA is at most 1, at least SAFMIN */
-
- i__1 = *n - 1;
- xnorm = dnrm2_(&i__1, &x[1], incx);
- d__1 = dlapy2_(alpha, &xnorm);
- beta = -d_sign(&d__1, alpha);
- *tau = (beta - *alpha) / beta;
- i__1 = *n - 1;
- d__1 = 1. / (*alpha - beta);
- dscal_(&i__1, &d__1, &x[1], incx);
-
-/* If ALPHA is subnormal, it may lose relative accuracy */
-
- *alpha = beta;
- i__1 = knt;
- for (j = 1; j <= i__1; ++j) {
- *alpha *= safmin;
-/* L20: */
- }
- } else {
- *tau = (beta - *alpha) / beta;
- i__1 = *n - 1;
- d__1 = 1. / (*alpha - beta);
- dscal_(&i__1, &d__1, &x[1], incx);
- *alpha = beta;
- }
- }
-
- return 0;
-
-/* End of DLARFG */
-
-} /* dlarfg_ */
-
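-/*
-   Worked example (illustrative commentary, not part of the translated
-   LAPACK source): for n = 2, alpha = 3, x = (4), DLARFG above computes
-   xnorm = 4, beta = -sign(sqrt(3^2 + 4^2), 3) = -5,
-   tau = (beta - alpha)/beta = 1.6 and v = x/(alpha - beta) = (0.5), so
-
-       H = I - tau * ( 1 ) * ( 1  v ) = [ -0.6  -0.8 ]
-                     ( v )              [ -0.8   0.6 ]
-
-   and indeed H * (3, 4)' = (-5, 0)', with 1 <= tau <= 2 as documented.
-*/
-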
-/* Subroutine */ int dlarft_(char *direct, char *storev, integer *n, integer *
- k, doublereal *v, integer *ldv, doublereal *tau, doublereal *t,
- integer *ldt)
-{
- /* System generated locals */
- integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3;
- doublereal d__1;
-
- /* Local variables */
- static integer i__, j;
- static doublereal vii;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, integer *), dtrmv_(char *,
- char *, char *, integer *, doublereal *, integer *, doublereal *,
- integer *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DLARFT forms the triangular factor T of a real block reflector H
- of order n, which is defined as a product of k elementary reflectors.
-
- If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular;
-
- If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular.
-
- If STOREV = 'C', the vector which defines the elementary reflector
- H(i) is stored in the i-th column of the array V, and
-
- H = I - V * T * V'
-
- If STOREV = 'R', the vector which defines the elementary reflector
- H(i) is stored in the i-th row of the array V, and
-
- H = I - V' * T * V
-
- Arguments
- =========
-
- DIRECT (input) CHARACTER*1
- Specifies the order in which the elementary reflectors are
- multiplied to form the block reflector:
- = 'F': H = H(1) H(2) . . . H(k) (Forward)
- = 'B': H = H(k) . . . H(2) H(1) (Backward)
-
- STOREV (input) CHARACTER*1
- Specifies how the vectors which define the elementary
- reflectors are stored (see also Further Details):
- = 'C': columnwise
- = 'R': rowwise
-
- N (input) INTEGER
- The order of the block reflector H. N >= 0.
-
- K (input) INTEGER
- The order of the triangular factor T (= the number of
- elementary reflectors). K >= 1.
-
- V (input/output) DOUBLE PRECISION array, dimension
- (LDV,K) if STOREV = 'C'
- (LDV,N) if STOREV = 'R'
- The matrix V. See further details.
-
- LDV (input) INTEGER
- The leading dimension of the array V.
- If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K.
-
- TAU (input) DOUBLE PRECISION array, dimension (K)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i).
-
- T (output) DOUBLE PRECISION array, dimension (LDT,K)
- The k by k triangular factor T of the block reflector.
- If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is
- lower triangular. The rest of the array is not used.
-
- LDT (input) INTEGER
- The leading dimension of the array T. LDT >= K.
-
- Further Details
- ===============
-
- The shape of the matrix V and the storage of the vectors which define
- the H(i) is best illustrated by the following example with n = 5 and
- k = 3. The elements equal to 1 are not stored; the corresponding
- array elements are modified but restored on exit. The rest of the
- array is not used.
-
- DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R':
-
- V = ( 1 ) V = ( 1 v1 v1 v1 v1 )
- ( v1 1 ) ( 1 v2 v2 v2 )
- ( v1 v2 1 ) ( 1 v3 v3 )
- ( v1 v2 v3 )
- ( v1 v2 v3 )
-
- DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R':
-
- V = ( v1 v2 v3 ) V = ( v1 v1 1 )
- ( v1 v2 v3 ) ( v2 v2 v2 1 )
- ( 1 v2 v3 ) ( v3 v3 v3 v3 1 )
- ( 1 v3 )
- ( 1 )
-
- =====================================================================
-
-
- Quick return if possible
-*/
-
- /* Parameter adjustments */
- v_dim1 = *ldv;
- v_offset = 1 + v_dim1 * 1;
- v -= v_offset;
- --tau;
- t_dim1 = *ldt;
- t_offset = 1 + t_dim1 * 1;
- t -= t_offset;
-
- /* Function Body */
- if (*n == 0) {
- return 0;
- }
-
- if (lsame_(direct, "F")) {
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if (tau[i__] == 0.) {
-
-/* H(i) = I */
-
- i__2 = i__;
- for (j = 1; j <= i__2; ++j) {
- t[j + i__ * t_dim1] = 0.;
-/* L10: */
- }
- } else {
-
-/* general case */
-
- vii = v[i__ + i__ * v_dim1];
- v[i__ + i__ * v_dim1] = 1.;
- if (lsame_(storev, "C")) {
-
-/* T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */
-
- i__2 = *n - i__ + 1;
- i__3 = i__ - 1;
- d__1 = -tau[i__];
- dgemv_("Transpose", &i__2, &i__3, &d__1, &v[i__ + v_dim1],
- ldv, &v[i__ + i__ * v_dim1], &c__1, &c_b29, &t[
- i__ * t_dim1 + 1], &c__1);
- } else {
-
-/* T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */
-
- i__2 = i__ - 1;
- i__3 = *n - i__ + 1;
- d__1 = -tau[i__];
- dgemv_("No transpose", &i__2, &i__3, &d__1, &v[i__ *
- v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, &
- c_b29, &t[i__ * t_dim1 + 1], &c__1);
- }
- v[i__ + i__ * v_dim1] = vii;
-
-/* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */
-
- i__2 = i__ - 1;
- dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[
- t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1);
- t[i__ + i__ * t_dim1] = tau[i__];
- }
-/* L20: */
- }
- } else {
- for (i__ = *k; i__ >= 1; --i__) {
- if (tau[i__] == 0.) {
-
-/* H(i) = I */
-
- i__1 = *k;
- for (j = i__; j <= i__1; ++j) {
- t[j + i__ * t_dim1] = 0.;
-/* L30: */
- }
- } else {
-
-/* general case */
-
- if (i__ < *k) {
- if (lsame_(storev, "C")) {
- vii = v[*n - *k + i__ + i__ * v_dim1];
- v[*n - *k + i__ + i__ * v_dim1] = 1.;
-
-/*
- T(i+1:k,i) :=
- - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i)
-*/
-
- i__1 = *n - *k + i__;
- i__2 = *k - i__;
- d__1 = -tau[i__];
- dgemv_("Transpose", &i__1, &i__2, &d__1, &v[(i__ + 1)
- * v_dim1 + 1], ldv, &v[i__ * v_dim1 + 1], &
- c__1, &c_b29, &t[i__ + 1 + i__ * t_dim1], &
- c__1);
- v[*n - *k + i__ + i__ * v_dim1] = vii;
- } else {
- vii = v[i__ + (*n - *k + i__) * v_dim1];
- v[i__ + (*n - *k + i__) * v_dim1] = 1.;
-
-/*
- T(i+1:k,i) :=
- - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)'
-*/
-
- i__1 = *k - i__;
- i__2 = *n - *k + i__;
- d__1 = -tau[i__];
- dgemv_("No transpose", &i__1, &i__2, &d__1, &v[i__ +
- 1 + v_dim1], ldv, &v[i__ + v_dim1], ldv, &
- c_b29, &t[i__ + 1 + i__ * t_dim1], &c__1);
- v[i__ + (*n - *k + i__) * v_dim1] = vii;
- }
-
-/* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */
-
- i__1 = *k - i__;
- dtrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__
- + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ *
-                        t_dim1], &c__1);
- }
- t[i__ + i__ * t_dim1] = tau[i__];
- }
-/* L40: */
- }
- }
- return 0;
-
-/* End of DLARFT */
-
-} /* dlarft_ */
-
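-/*
-   Note (illustrative commentary, not part of the translated LAPACK
-   source): the DIRECT = 'F' loop above builds T one column at a time.
-   Multiplying out (I - V T V') * (I - tau(i)*v(i)*v(i)') shows that
-   the new column is
-
-       T(1:i-1, i) = -tau(i) * T(1:i-1, 1:i-1) * (V(:,1:i-1)' * v(i)),
-       T(i, i)     =  tau(i),
-
-   which is exactly the DGEMV (the product V'*v(i) scaled by -tau(i))
-   followed by the triangular DTRMV in the loop body.
-*/
-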
-/* Subroutine */ int dlarfx_(char *side, integer *m, integer *n, doublereal *
- v, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work)
-{
- /* System generated locals */
- integer c_dim1, c_offset, i__1;
- doublereal d__1;
-
- /* Local variables */
- static integer j;
- static doublereal t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5,
- v6, v7, v8, v9, t10, v10, sum;
- extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- integer *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, integer *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DLARFX applies a real elementary reflector H to a real m by n
- matrix C, from either the left or the right. H is represented in the
- form
-
- H = I - tau * v * v'
-
- where tau is a real scalar and v is a real vector.
-
-    If tau = 0, then H is taken to be the unit matrix.
-
- This version uses inline code if H has order < 11.
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- = 'L': form H * C
- = 'R': form C * H
-
- M (input) INTEGER
- The number of rows of the matrix C.
-
- N (input) INTEGER
- The number of columns of the matrix C.
-
- V (input) DOUBLE PRECISION array, dimension (M) if SIDE = 'L'
- or (N) if SIDE = 'R'
- The vector v in the representation of H.
-
- TAU (input) DOUBLE PRECISION
- The value tau in the representation of H.
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
- On entry, the m by n matrix C.
- On exit, C is overwritten by the matrix H * C if SIDE = 'L',
- or C * H if SIDE = 'R'.
-
- LDC (input) INTEGER
-            The leading dimension of the array C. LDC >= max(1,M).
-
- WORK (workspace) DOUBLE PRECISION array, dimension
- (N) if SIDE = 'L'
- or (M) if SIDE = 'R'
- WORK is not referenced if H has order < 11.
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- --v;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- if (*tau == 0.) {
- return 0;
- }
- if (lsame_(side, "L")) {
-
-/* Form H * C, where H has order m. */
-
- switch (*m) {
- case 1: goto L10;
- case 2: goto L30;
- case 3: goto L50;
- case 4: goto L70;
- case 5: goto L90;
- case 6: goto L110;
- case 7: goto L130;
- case 8: goto L150;
- case 9: goto L170;
- case 10: goto L190;
- }
-
-/*
- Code for general M
-
- w := C'*v
-*/
-
- dgemv_("Transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], &c__1, &
- c_b29, &work[1], &c__1);
-
-/* C := C - tau * v * w' */
-
- d__1 = -(*tau);
- dger_(m, n, &d__1, &v[1], &c__1, &work[1], &c__1, &c__[c_offset], ldc)
- ;
- goto L410;
-L10:
-
-/* Special code for 1 x 1 Householder */
-
- t1 = 1. - *tau * v[1] * v[1];
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- c__[j * c_dim1 + 1] = t1 * c__[j * c_dim1 + 1];
-/* L20: */
- }
- goto L410;
-L30:
-
-/* Special code for 2 x 2 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2];
- c__[j * c_dim1 + 1] -= sum * t1;
- c__[j * c_dim1 + 2] -= sum * t2;
-/* L40: */
- }
- goto L410;
-L50:
-
-/* Special code for 3 x 3 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
- c__[j * c_dim1 + 3];
- c__[j * c_dim1 + 1] -= sum * t1;
- c__[j * c_dim1 + 2] -= sum * t2;
- c__[j * c_dim1 + 3] -= sum * t3;
-/* L60: */
- }
- goto L410;
-L70:
-
-/* Special code for 4 x 4 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
- c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4];
- c__[j * c_dim1 + 1] -= sum * t1;
- c__[j * c_dim1 + 2] -= sum * t2;
- c__[j * c_dim1 + 3] -= sum * t3;
- c__[j * c_dim1 + 4] -= sum * t4;
-/* L80: */
- }
- goto L410;
-L90:
-
-/* Special code for 5 x 5 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
- c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
- j * c_dim1 + 5];
- c__[j * c_dim1 + 1] -= sum * t1;
- c__[j * c_dim1 + 2] -= sum * t2;
- c__[j * c_dim1 + 3] -= sum * t3;
- c__[j * c_dim1 + 4] -= sum * t4;
- c__[j * c_dim1 + 5] -= sum * t5;
-/* L100: */
- }
- goto L410;
-L110:
-
-/* Special code for 6 x 6 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- v6 = v[6];
- t6 = *tau * v6;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
- c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
- j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6];
- c__[j * c_dim1 + 1] -= sum * t1;
- c__[j * c_dim1 + 2] -= sum * t2;
- c__[j * c_dim1 + 3] -= sum * t3;
- c__[j * c_dim1 + 4] -= sum * t4;
- c__[j * c_dim1 + 5] -= sum * t5;
- c__[j * c_dim1 + 6] -= sum * t6;
-/* L120: */
- }
- goto L410;
-L130:
-
-/* Special code for 7 x 7 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- v6 = v[6];
- t6 = *tau * v6;
- v7 = v[7];
- t7 = *tau * v7;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
- c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
- j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
- c_dim1 + 7];
- c__[j * c_dim1 + 1] -= sum * t1;
- c__[j * c_dim1 + 2] -= sum * t2;
- c__[j * c_dim1 + 3] -= sum * t3;
- c__[j * c_dim1 + 4] -= sum * t4;
- c__[j * c_dim1 + 5] -= sum * t5;
- c__[j * c_dim1 + 6] -= sum * t6;
- c__[j * c_dim1 + 7] -= sum * t7;
-/* L140: */
- }
- goto L410;
-L150:
-
-/* Special code for 8 x 8 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- v6 = v[6];
- t6 = *tau * v6;
- v7 = v[7];
- t7 = *tau * v7;
- v8 = v[8];
- t8 = *tau * v8;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
- c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
- j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
- c_dim1 + 7] + v8 * c__[j * c_dim1 + 8];
- c__[j * c_dim1 + 1] -= sum * t1;
- c__[j * c_dim1 + 2] -= sum * t2;
- c__[j * c_dim1 + 3] -= sum * t3;
- c__[j * c_dim1 + 4] -= sum * t4;
- c__[j * c_dim1 + 5] -= sum * t5;
- c__[j * c_dim1 + 6] -= sum * t6;
- c__[j * c_dim1 + 7] -= sum * t7;
- c__[j * c_dim1 + 8] -= sum * t8;
-/* L160: */
- }
- goto L410;
-L170:
-
-/* Special code for 9 x 9 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- v6 = v[6];
- t6 = *tau * v6;
- v7 = v[7];
- t7 = *tau * v7;
- v8 = v[8];
- t8 = *tau * v8;
- v9 = v[9];
- t9 = *tau * v9;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
- c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
- j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
- c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
- c_dim1 + 9];
- c__[j * c_dim1 + 1] -= sum * t1;
- c__[j * c_dim1 + 2] -= sum * t2;
- c__[j * c_dim1 + 3] -= sum * t3;
- c__[j * c_dim1 + 4] -= sum * t4;
- c__[j * c_dim1 + 5] -= sum * t5;
- c__[j * c_dim1 + 6] -= sum * t6;
- c__[j * c_dim1 + 7] -= sum * t7;
- c__[j * c_dim1 + 8] -= sum * t8;
- c__[j * c_dim1 + 9] -= sum * t9;
-/* L180: */
- }
- goto L410;
-L190:
-
-/* Special code for 10 x 10 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- v6 = v[6];
- t6 = *tau * v6;
- v7 = v[7];
- t7 = *tau * v7;
- v8 = v[8];
- t8 = *tau * v8;
- v9 = v[9];
- t9 = *tau * v9;
- v10 = v[10];
- t10 = *tau * v10;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
- c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
- j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
- c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
- c_dim1 + 9] + v10 * c__[j * c_dim1 + 10];
- c__[j * c_dim1 + 1] -= sum * t1;
- c__[j * c_dim1 + 2] -= sum * t2;
- c__[j * c_dim1 + 3] -= sum * t3;
- c__[j * c_dim1 + 4] -= sum * t4;
- c__[j * c_dim1 + 5] -= sum * t5;
- c__[j * c_dim1 + 6] -= sum * t6;
- c__[j * c_dim1 + 7] -= sum * t7;
- c__[j * c_dim1 + 8] -= sum * t8;
- c__[j * c_dim1 + 9] -= sum * t9;
- c__[j * c_dim1 + 10] -= sum * t10;
-/* L200: */
- }
- goto L410;
- } else {
-
-/* Form C * H, where H has order n. */
-
- switch (*n) {
- case 1: goto L210;
- case 2: goto L230;
- case 3: goto L250;
- case 4: goto L270;
- case 5: goto L290;
- case 6: goto L310;
- case 7: goto L330;
- case 8: goto L350;
- case 9: goto L370;
- case 10: goto L390;
- }
-
-/*
- Code for general N
-
- w := C * v
-*/
-
- dgemv_("No transpose", m, n, &c_b15, &c__[c_offset], ldc, &v[1], &
- c__1, &c_b29, &work[1], &c__1);
-
-/* C := C - tau * w * v' */
-
- d__1 = -(*tau);
- dger_(m, n, &d__1, &work[1], &c__1, &v[1], &c__1, &c__[c_offset], ldc)
- ;
- goto L410;
-L210:
-
-/* Special code for 1 x 1 Householder */
-
- t1 = 1. - *tau * v[1] * v[1];
- i__1 = *m;
- for (j = 1; j <= i__1; ++j) {
- c__[j + c_dim1] = t1 * c__[j + c_dim1];
-/* L220: */
- }
- goto L410;
-L230:
-
-/* Special code for 2 x 2 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- i__1 = *m;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))];
- c__[j + c_dim1] -= sum * t1;
- c__[j + ((c_dim1) << (1))] -= sum * t2;
-/* L240: */
- }
- goto L410;
-L250:
-
-/* Special code for 3 x 3 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- i__1 = *m;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
- * c__[j + c_dim1 * 3];
- c__[j + c_dim1] -= sum * t1;
- c__[j + ((c_dim1) << (1))] -= sum * t2;
- c__[j + c_dim1 * 3] -= sum * t3;
-/* L260: */
- }
- goto L410;
-L270:
-
-/* Special code for 4 x 4 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- i__1 = *m;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
- * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))];
- c__[j + c_dim1] -= sum * t1;
- c__[j + ((c_dim1) << (1))] -= sum * t2;
- c__[j + c_dim1 * 3] -= sum * t3;
- c__[j + ((c_dim1) << (2))] -= sum * t4;
-/* L280: */
- }
- goto L410;
-L290:
-
-/* Special code for 5 x 5 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- i__1 = *m;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
- * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
- v5 * c__[j + c_dim1 * 5];
- c__[j + c_dim1] -= sum * t1;
- c__[j + ((c_dim1) << (1))] -= sum * t2;
- c__[j + c_dim1 * 3] -= sum * t3;
- c__[j + ((c_dim1) << (2))] -= sum * t4;
- c__[j + c_dim1 * 5] -= sum * t5;
-/* L300: */
- }
- goto L410;
-L310:
-
-/* Special code for 6 x 6 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- v6 = v[6];
- t6 = *tau * v6;
- i__1 = *m;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
- * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
- v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6];
- c__[j + c_dim1] -= sum * t1;
- c__[j + ((c_dim1) << (1))] -= sum * t2;
- c__[j + c_dim1 * 3] -= sum * t3;
- c__[j + ((c_dim1) << (2))] -= sum * t4;
- c__[j + c_dim1 * 5] -= sum * t5;
- c__[j + c_dim1 * 6] -= sum * t6;
-/* L320: */
- }
- goto L410;
-L330:
-
-/* Special code for 7 x 7 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- v6 = v[6];
- t6 = *tau * v6;
- v7 = v[7];
- t7 = *tau * v7;
- i__1 = *m;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
- * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
- v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
- c__[j + c_dim1 * 7];
- c__[j + c_dim1] -= sum * t1;
- c__[j + ((c_dim1) << (1))] -= sum * t2;
- c__[j + c_dim1 * 3] -= sum * t3;
- c__[j + ((c_dim1) << (2))] -= sum * t4;
- c__[j + c_dim1 * 5] -= sum * t5;
- c__[j + c_dim1 * 6] -= sum * t6;
- c__[j + c_dim1 * 7] -= sum * t7;
-/* L340: */
- }
- goto L410;
-L350:
-
-/* Special code for 8 x 8 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- v6 = v[6];
- t6 = *tau * v6;
- v7 = v[7];
- t7 = *tau * v7;
- v8 = v[8];
- t8 = *tau * v8;
- i__1 = *m;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
- * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
- v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
- c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))];
- c__[j + c_dim1] -= sum * t1;
- c__[j + ((c_dim1) << (1))] -= sum * t2;
- c__[j + c_dim1 * 3] -= sum * t3;
- c__[j + ((c_dim1) << (2))] -= sum * t4;
- c__[j + c_dim1 * 5] -= sum * t5;
- c__[j + c_dim1 * 6] -= sum * t6;
- c__[j + c_dim1 * 7] -= sum * t7;
- c__[j + ((c_dim1) << (3))] -= sum * t8;
-/* L360: */
- }
- goto L410;
-L370:
-
-/* Special code for 9 x 9 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- v6 = v[6];
- t6 = *tau * v6;
- v7 = v[7];
- t7 = *tau * v7;
- v8 = v[8];
- t8 = *tau * v8;
- v9 = v[9];
- t9 = *tau * v9;
- i__1 = *m;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
- * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
- v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
- c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))] +
- v9 * c__[j + c_dim1 * 9];
- c__[j + c_dim1] -= sum * t1;
- c__[j + ((c_dim1) << (1))] -= sum * t2;
- c__[j + c_dim1 * 3] -= sum * t3;
- c__[j + ((c_dim1) << (2))] -= sum * t4;
- c__[j + c_dim1 * 5] -= sum * t5;
- c__[j + c_dim1 * 6] -= sum * t6;
- c__[j + c_dim1 * 7] -= sum * t7;
- c__[j + ((c_dim1) << (3))] -= sum * t8;
- c__[j + c_dim1 * 9] -= sum * t9;
-/* L380: */
- }
- goto L410;
-L390:
-
-/* Special code for 10 x 10 Householder */
-
- v1 = v[1];
- t1 = *tau * v1;
- v2 = v[2];
- t2 = *tau * v2;
- v3 = v[3];
- t3 = *tau * v3;
- v4 = v[4];
- t4 = *tau * v4;
- v5 = v[5];
- t5 = *tau * v5;
- v6 = v[6];
- t6 = *tau * v6;
- v7 = v[7];
- t7 = *tau * v7;
- v8 = v[8];
- t8 = *tau * v8;
- v9 = v[9];
- t9 = *tau * v9;
- v10 = v[10];
- t10 = *tau * v10;
- i__1 = *m;
- for (j = 1; j <= i__1; ++j) {
- sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
- * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
- v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
- c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))] +
- v9 * c__[j + c_dim1 * 9] + v10 * c__[j + c_dim1 * 10];
- c__[j + c_dim1] -= sum * t1;
- c__[j + ((c_dim1) << (1))] -= sum * t2;
- c__[j + c_dim1 * 3] -= sum * t3;
- c__[j + ((c_dim1) << (2))] -= sum * t4;
- c__[j + c_dim1 * 5] -= sum * t5;
- c__[j + c_dim1 * 6] -= sum * t6;
- c__[j + c_dim1 * 7] -= sum * t7;
- c__[j + ((c_dim1) << (3))] -= sum * t8;
- c__[j + c_dim1 * 9] -= sum * t9;
- c__[j + c_dim1 * 10] -= sum * t10;
-/* L400: */
- }
- goto L410;
- }
-L410:
- return 0;
-
-/* End of DLARFX */
-
-} /* dlarfx_ */
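
Every unrolled case above performs the same rank-one update as the
general path: w = C'*v, then C := C - tau*v*w'. A sketch of that
left-side update in plain 0-based, row-major C (the deleted code is
column-major with Fortran-style 1-based offsets; apply_householder_left
is an illustrative name, not a LAPACK routine):

    static void apply_householder_left(int m, int n, double tau,
                                       const double *v, double *C)
    {
        /* H*C with H = I - tau*v*v': for each column j form the
           scalar w = v'*C(:,j), then subtract tau*w*v from it */
        for (int j = 0; j < n; ++j) {
            double w = 0.0;
            for (int i = 0; i < m; ++i)
                w += v[i] * C[i * n + j];
            for (int i = 0; i < m; ++i)
                C[i * n + j] -= tau * v[i] * w;
        }
    }
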
-
-/* Subroutine */ int dlartg_(doublereal *f, doublereal *g, doublereal *cs,
- doublereal *sn, doublereal *r__)
-{
- /* Initialized data */
-
- static logical first = TRUE_;
-
- /* System generated locals */
- integer i__1;
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double log(doublereal), pow_di(doublereal *, integer *), sqrt(doublereal);
-
- /* Local variables */
- static integer i__;
- static doublereal f1, g1, eps, scale;
- static integer count;
- static doublereal safmn2, safmx2;
-
- static doublereal safmin;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
-    DLARTG generates a plane rotation so that
-
- [ CS SN ] . [ F ] = [ R ] where CS**2 + SN**2 = 1.
- [ -SN CS ] [ G ] [ 0 ]
-
- This is a slower, more accurate version of the BLAS1 routine DROTG,
- with the following other differences:
- F and G are unchanged on return.
- If G=0, then CS=1 and SN=0.
- If F=0 and (G .ne. 0), then CS=0 and SN=1 without doing any
- floating point operations (saves work in DBDSQR when
- there are zeros on the diagonal).
-
- If F exceeds G in magnitude, CS will be positive.
-
- Arguments
- =========
-
- F (input) DOUBLE PRECISION
- The first component of vector to be rotated.
-
- G (input) DOUBLE PRECISION
- The second component of vector to be rotated.
-
- CS (output) DOUBLE PRECISION
- The cosine of the rotation.
-
- SN (output) DOUBLE PRECISION
- The sine of the rotation.
-
- R (output) DOUBLE PRECISION
- The nonzero component of the rotated vector.
-
- =====================================================================
-*/
-
-
- if (first) {
- first = FALSE_;
- safmin = SAFEMINIMUM;
- eps = EPSILON;
- d__1 = BASE;
- i__1 = (integer) (log(safmin / eps) / log(BASE) /
- 2.);
- safmn2 = pow_di(&d__1, &i__1);
- safmx2 = 1. / safmn2;
- }
- if (*g == 0.) {
- *cs = 1.;
- *sn = 0.;
- *r__ = *f;
- } else if (*f == 0.) {
- *cs = 0.;
- *sn = 1.;
- *r__ = *g;
- } else {
- f1 = *f;
- g1 = *g;
-/* Computing MAX */
- d__1 = abs(f1), d__2 = abs(g1);
- scale = max(d__1,d__2);
- if (scale >= safmx2) {
- count = 0;
-L10:
- ++count;
- f1 *= safmn2;
- g1 *= safmn2;
-/* Computing MAX */
- d__1 = abs(f1), d__2 = abs(g1);
- scale = max(d__1,d__2);
- if (scale >= safmx2) {
- goto L10;
- }
-/* Computing 2nd power */
- d__1 = f1;
-/* Computing 2nd power */
- d__2 = g1;
- *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
- *cs = f1 / *r__;
- *sn = g1 / *r__;
- i__1 = count;
- for (i__ = 1; i__ <= i__1; ++i__) {
- *r__ *= safmx2;
-/* L20: */
- }
- } else if (scale <= safmn2) {
- count = 0;
-L30:
- ++count;
- f1 *= safmx2;
- g1 *= safmx2;
-/* Computing MAX */
- d__1 = abs(f1), d__2 = abs(g1);
- scale = max(d__1,d__2);
- if (scale <= safmn2) {
- goto L30;
- }
-/* Computing 2nd power */
- d__1 = f1;
-/* Computing 2nd power */
- d__2 = g1;
- *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
- *cs = f1 / *r__;
- *sn = g1 / *r__;
- i__1 = count;
- for (i__ = 1; i__ <= i__1; ++i__) {
- *r__ *= safmn2;
-/* L40: */
- }
- } else {
-/* Computing 2nd power */
- d__1 = f1;
-/* Computing 2nd power */
- d__2 = g1;
- *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
- *cs = f1 / *r__;
- *sn = g1 / *r__;
- }
- if ((abs(*f) > abs(*g) && *cs < 0.)) {
- *cs = -(*cs);
- *sn = -(*sn);
- *r__ = -(*r__);
- }
- }
- return 0;
-
-/* End of DLARTG */
-
-} /* dlartg_ */
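
Stripped of the SAFMN2/SAFMX2 rescaling loop, the rotation reduces to
the sketch below; hypot is the C99 library routine, while the deleted
code reaches the same sqrt(f*f + g*g) only after scaling f and g into
a safe range:

    #include <math.h>

    static void plane_rotation(double f, double g,
                               double *cs, double *sn, double *r)
    {
        if (g == 0.0) {                /* G=0: CS=1, SN=0 */
            *cs = 1.0; *sn = 0.0; *r = f;
        } else if (f == 0.0) {         /* F=0: CS=0, SN=1, no flops */
            *cs = 0.0; *sn = 1.0; *r = g;
        } else {
            *r = hypot(f, g);
            *cs = f / *r;
            *sn = g / *r;
            if (fabs(f) > fabs(g) && *cs < 0.0) {
                /* keep CS positive when |F| > |G|, as documented */
                *cs = -*cs; *sn = -*sn; *r = -*r;
            }
        }
    }
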
-
-/* Subroutine */ int dlas2_(doublereal *f, doublereal *g, doublereal *h__,
- doublereal *ssmin, doublereal *ssmax)
-{
- /* System generated locals */
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal c__, fa, ga, ha, as, at, au, fhmn, fhmx;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- DLAS2 computes the singular values of the 2-by-2 matrix
- [ F G ]
- [ 0 H ].
- On return, SSMIN is the smaller singular value and SSMAX is the
- larger singular value.
-
- Arguments
- =========
-
- F (input) DOUBLE PRECISION
- The (1,1) element of the 2-by-2 matrix.
-
- G (input) DOUBLE PRECISION
- The (1,2) element of the 2-by-2 matrix.
-
- H (input) DOUBLE PRECISION
- The (2,2) element of the 2-by-2 matrix.
-
- SSMIN (output) DOUBLE PRECISION
- The smaller singular value.
-
- SSMAX (output) DOUBLE PRECISION
- The larger singular value.
-
- Further Details
- ===============
-
- Barring over/underflow, all output quantities are correct to within
- a few units in the last place (ulps), even in the absence of a guard
- digit in addition/subtraction.
-
- In IEEE arithmetic, the code works correctly if one matrix element is
- infinite.
-
- Overflow will not occur unless the largest singular value itself
- overflows, or is within a few ulps of overflow. (On machines with
- partial overflow, like the Cray, overflow may occur if the largest
- singular value is within a factor of 2 of overflow.)
-
- Underflow is harmless if underflow is gradual. Otherwise, results
- may correspond to a matrix modified by perturbations of size near
- the underflow threshold.
-
- ====================================================================
-*/
-
-
- fa = abs(*f);
- ga = abs(*g);
- ha = abs(*h__);
- fhmn = min(fa,ha);
- fhmx = max(fa,ha);
- if (fhmn == 0.) {
- *ssmin = 0.;
- if (fhmx == 0.) {
- *ssmax = ga;
- } else {
-/* Computing 2nd power */
- d__1 = min(fhmx,ga) / max(fhmx,ga);
- *ssmax = max(fhmx,ga) * sqrt(d__1 * d__1 + 1.);
- }
- } else {
- if (ga < fhmx) {
- as = fhmn / fhmx + 1.;
- at = (fhmx - fhmn) / fhmx;
-/* Computing 2nd power */
- d__1 = ga / fhmx;
- au = d__1 * d__1;
- c__ = 2. / (sqrt(as * as + au) + sqrt(at * at + au));
- *ssmin = fhmn * c__;
- *ssmax = fhmx / c__;
- } else {
- au = fhmx / ga;
- if (au == 0.) {
-
-/*
- Avoid possible harmful underflow if exponent range
- asymmetric (true SSMIN may not underflow even if
- AU underflows)
-*/
-
- *ssmin = fhmn * fhmx / ga;
- *ssmax = ga;
- } else {
- as = fhmn / fhmx + 1.;
- at = (fhmx - fhmn) / fhmx;
-/* Computing 2nd power */
- d__1 = as * au;
-/* Computing 2nd power */
- d__2 = at * au;
- c__ = 1. / (sqrt(d__1 * d__1 + 1.) + sqrt(d__2 * d__2 + 1.));
- *ssmin = fhmn * c__ * au;
- *ssmin += *ssmin;
- *ssmax = ga / (c__ + c__);
- }
- }
- }
- return 0;
-
-/* End of DLAS2 */
-
-} /* dlas2_ */
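
The quantities above satisfy two textbook identities:
SSMIN**2 + SSMAX**2 = F**2 + G**2 + H**2 and SSMIN*SSMAX = |F*H|.
A naive sketch that solves the resulting quadratic directly, accurate
in benign cases but without the careful factoring against over- and
underflow that the deleted routine exists for:

    #include <math.h>

    static void sv2x2_naive(double f, double g, double h,
                            double *ssmin, double *ssmax)
    {
        /* squared singular values of [f g; 0 h] solve
           s^4 - T*s^2 + D^2 = 0, T = f^2+g^2+h^2, D = |f*h| */
        double T = f * f + g * g + h * h;
        double D = fabs(f * h);
        double disc = T * T - 4.0 * D * D;
        if (disc < 0.0)                /* guard rounding-induced */
            disc = 0.0;                /* negative discriminants  */
        *ssmax = sqrt(0.5 * (T + sqrt(disc)));
        *ssmin = (*ssmax > 0.0) ? D / *ssmax : 0.0;
    }
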
-
-/* Subroutine */ int dlascl_(char *type__, integer *kl, integer *ku,
- doublereal *cfrom, doublereal *cto, integer *m, integer *n,
- doublereal *a, integer *lda, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
-
- /* Local variables */
- static integer i__, j, k1, k2, k3, k4;
- static doublereal mul, cto1;
- static logical done;
- static doublereal ctoc;
- extern logical lsame_(char *, char *);
- static integer itype;
- static doublereal cfrom1;
-
- static doublereal cfromc;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static doublereal bignum, smlnum;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DLASCL multiplies the M by N real matrix A by the real scalar
- CTO/CFROM. This is done without over/underflow as long as the final
- result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
- A may be full, upper triangular, lower triangular, upper Hessenberg,
- or banded.
-
- Arguments
- =========
-
- TYPE (input) CHARACTER*1
-            TYPE indicates the storage type of the input matrix.
- = 'G': A is a full matrix.
- = 'L': A is a lower triangular matrix.
- = 'U': A is an upper triangular matrix.
- = 'H': A is an upper Hessenberg matrix.
-            = 'B':  A is a symmetric band matrix with lower bandwidth KL
-                    and upper bandwidth KU and with only the lower
-                    half stored.
-            = 'Q':  A is a symmetric band matrix with lower bandwidth KL
-                    and upper bandwidth KU and with only the upper
-                    half stored.
- = 'Z': A is a band matrix with lower bandwidth KL and upper
- bandwidth KU.
-
- KL (input) INTEGER
- The lower bandwidth of A. Referenced only if TYPE = 'B',
- 'Q' or 'Z'.
-
- KU (input) INTEGER
- The upper bandwidth of A. Referenced only if TYPE = 'B',
- 'Q' or 'Z'.
-
- CFROM (input) DOUBLE PRECISION
- CTO (input) DOUBLE PRECISION
- The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
- without over/underflow if the final result CTO*A(I,J)/CFROM
- can be represented without over/underflow. CFROM must be
- nonzero.
-
- M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,M)
- The matrix to be multiplied by CTO/CFROM. See TYPE for the
- storage type.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- INFO (output) INTEGER
- 0 - successful exit
- <0 - if INFO = -i, the i-th argument had an illegal value.
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- *info = 0;
-
- if (lsame_(type__, "G")) {
- itype = 0;
- } else if (lsame_(type__, "L")) {
- itype = 1;
- } else if (lsame_(type__, "U")) {
- itype = 2;
- } else if (lsame_(type__, "H")) {
- itype = 3;
- } else if (lsame_(type__, "B")) {
- itype = 4;
- } else if (lsame_(type__, "Q")) {
- itype = 5;
- } else if (lsame_(type__, "Z")) {
- itype = 6;
- } else {
- itype = -1;
- }
-
- if (itype == -1) {
- *info = -1;
- } else if (*cfrom == 0.) {
- *info = -4;
- } else if (*m < 0) {
- *info = -6;
- } else if (*n < 0 || (itype == 4 && *n != *m) || (itype == 5 && *n != *m))
- {
- *info = -7;
- } else if ((itype <= 3 && *lda < max(1,*m))) {
- *info = -9;
- } else if (itype >= 4) {
-/* Computing MAX */
- i__1 = *m - 1;
- if (*kl < 0 || *kl > max(i__1,0)) {
- *info = -2;
- } else /* if(complicated condition) */ {
-/* Computing MAX */
- i__1 = *n - 1;
- if (*ku < 0 || *ku > max(i__1,0) || ((itype == 4 || itype == 5) &&
- *kl != *ku)) {
- *info = -3;
- } else if ((itype == 4 && *lda < *kl + 1) || (itype == 5 && *lda <
- *ku + 1) || (itype == 6 && *lda < ((*kl) << (1)) + *ku +
- 1)) {
- *info = -9;
- }
- }
- }
-
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLASCL", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0 || *m == 0) {
- return 0;
- }
-
-/* Get machine parameters */
-
- smlnum = SAFEMINIMUM;
- bignum = 1. / smlnum;
-
- cfromc = *cfrom;
- ctoc = *cto;
-
-L10:
- cfrom1 = cfromc * smlnum;
- cto1 = ctoc / bignum;
- if ((abs(cfrom1) > abs(ctoc) && ctoc != 0.)) {
- mul = smlnum;
- done = FALSE_;
- cfromc = cfrom1;
- } else if (abs(cto1) > abs(cfromc)) {
- mul = bignum;
- done = FALSE_;
- ctoc = cto1;
- } else {
- mul = ctoc / cfromc;
- done = TRUE_;
- }
-
- if (itype == 0) {
-
-/* Full matrix */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] *= mul;
-/* L20: */
- }
-/* L30: */
- }
-
- } else if (itype == 1) {
-
-/* Lower triangular matrix */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = j; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] *= mul;
-/* L40: */
- }
-/* L50: */
- }
-
- } else if (itype == 2) {
-
-/* Upper triangular matrix */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = min(j,*m);
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] *= mul;
-/* L60: */
- }
-/* L70: */
- }
-
- } else if (itype == 3) {
-
-/* Upper Hessenberg matrix */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
-/* Computing MIN */
- i__3 = j + 1;
- i__2 = min(i__3,*m);
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] *= mul;
-/* L80: */
- }
-/* L90: */
- }
-
- } else if (itype == 4) {
-
-/* Lower half of a symmetric band matrix */
-
- k3 = *kl + 1;
- k4 = *n + 1;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
-/* Computing MIN */
- i__3 = k3, i__4 = k4 - j;
- i__2 = min(i__3,i__4);
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] *= mul;
-/* L100: */
- }
-/* L110: */
- }
-
- } else if (itype == 5) {
-
-/* Upper half of a symmetric band matrix */
-
- k1 = *ku + 2;
- k3 = *ku + 1;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
-/* Computing MAX */
- i__2 = k1 - j;
- i__3 = k3;
- for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
- a[i__ + j * a_dim1] *= mul;
-/* L120: */
- }
-/* L130: */
- }
-
- } else if (itype == 6) {
-
-/* Band matrix */
-
- k1 = *kl + *ku + 2;
- k2 = *kl + 1;
- k3 = ((*kl) << (1)) + *ku + 1;
- k4 = *kl + *ku + 1 + *m;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
-/* Computing MAX */
- i__3 = k1 - j;
-/* Computing MIN */
- i__4 = k3, i__5 = k4 - j;
- i__2 = min(i__4,i__5);
- for (i__ = max(i__3,k2); i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] *= mul;
-/* L140: */
- }
-/* L150: */
- }
-
- }
-
- if (! done) {
- goto L10;
- }
-
- return 0;
-
-/* End of DLASCL */
-
-} /* dlascl_ */
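
The L10 loop above is the essential trick: CTO/CFROM is never formed
outright; the matrix is repeatedly multiplied by SMLNUM or BIGNUM until
the remaining factor is itself representable. The same loop for a plain
full matrix, sketched with DBL_MIN standing in for SAFEMINIMUM:

    #include <float.h>
    #include <math.h>

    /* requires cfrom != 0, as DLASCL does */
    static void scale_full(int m, int n, double cfrom, double cto,
                           double *a)
    {
        const double smlnum = DBL_MIN;   /* stand-in for SAFEMINIMUM */
        const double bignum = 1.0 / smlnum;
        double cfromc = cfrom, ctoc = cto;
        int done;

        do {
            double cfrom1 = cfromc * smlnum;
            double cto1 = ctoc / bignum;
            double mul;
            if (fabs(cfrom1) > fabs(ctoc) && ctoc != 0.0) {
                mul = smlnum; done = 0; cfromc = cfrom1;
            } else if (fabs(cto1) > fabs(cfromc)) {
                mul = bignum; done = 0; ctoc = cto1;
            } else {
                mul = ctoc / cfromc; done = 1;  /* final safe factor */
            }
            for (int i = 0; i < m * n; ++i)
                a[i] *= mul;
        } while (!done);
    }
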
-
-/* Subroutine */ int dlasd0_(integer *n, integer *sqre, doublereal *d__,
- doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer *
- ldvt, integer *smlsiz, integer *iwork, doublereal *work, integer *
- info)
-{
- /* System generated locals */
- integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
-
- /* Builtin functions */
- integer pow_ii(integer *, integer *);
-
- /* Local variables */
- static integer i__, j, m, i1, ic, lf, nd, ll, nl, nr, im1, ncc, nlf, nrf,
- iwk, lvl, ndb1, nlp1, nrp1;
- static doublereal beta;
- static integer idxq, nlvl;
- static doublereal alpha;
- static integer inode, ndiml, idxqc, ndimr, itemp, sqrei;
- extern /* Subroutine */ int dlasd1_(integer *, integer *, integer *,
- doublereal *, doublereal *, doublereal *, doublereal *, integer *,
- doublereal *, integer *, integer *, integer *, doublereal *,
- integer *), dlasdq_(char *, integer *, integer *, integer *,
- integer *, integer *, doublereal *, doublereal *, doublereal *,
- integer *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, integer *), dlasdt_(integer *, integer *,
- integer *, integer *, integer *, integer *, integer *), xerbla_(
- char *, integer *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- Using a divide and conquer approach, DLASD0 computes the singular
- value decomposition (SVD) of a real upper bidiagonal N-by-M
- matrix B with diagonal D and offdiagonal E, where M = N + SQRE.
- The algorithm computes orthogonal matrices U and VT such that
- B = U * S * VT. The singular values S are overwritten on D.
-
- A related subroutine, DLASDA, computes only the singular values,
- and optionally, the singular vectors in compact form.
-
- Arguments
- =========
-
- N (input) INTEGER
- On entry, the row dimension of the upper bidiagonal matrix.
- This is also the dimension of the main diagonal array D.
-
- SQRE (input) INTEGER
- Specifies the column dimension of the bidiagonal matrix.
- = 0: The bidiagonal matrix has column dimension M = N;
- = 1: The bidiagonal matrix has column dimension M = N+1;
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry D contains the main diagonal of the bidiagonal
- matrix.
- On exit D, if INFO = 0, contains its singular values.
-
- E (input) DOUBLE PRECISION array, dimension (M-1)
- Contains the subdiagonal entries of the bidiagonal matrix.
- On exit, E has been destroyed.
-
-    U      (output) DOUBLE PRECISION array, dimension at least (LDU, N)
- On exit, U contains the left singular vectors.
-
- LDU (input) INTEGER
- On entry, leading dimension of U.
-
- VT (output) DOUBLE PRECISION array, dimension at least (LDVT, M)
- On exit, VT' contains the right singular vectors.
-
- LDVT (input) INTEGER
- On entry, leading dimension of VT.
-
- SMLSIZ (input) INTEGER
- On entry, maximum size of the subproblems at the
- bottom of the computation tree.
-
- IWORK INTEGER work array.
- Dimension must be at least (8 * N)
-
- WORK DOUBLE PRECISION work array.
- Dimension must be at least (3 * M**2 + 2 * M)
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-           > 0:  if INFO = 1, a singular value did not converge
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --e;
- u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
- u -= u_offset;
- vt_dim1 = *ldvt;
- vt_offset = 1 + vt_dim1 * 1;
- vt -= vt_offset;
- --iwork;
- --work;
-
- /* Function Body */
- *info = 0;
-
- if (*n < 0) {
- *info = -1;
- } else if (*sqre < 0 || *sqre > 1) {
- *info = -2;
- }
-
- m = *n + *sqre;
-
- if (*ldu < *n) {
- *info = -6;
- } else if (*ldvt < m) {
- *info = -8;
- } else if (*smlsiz < 3) {
- *info = -9;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLASD0", &i__1);
- return 0;
- }
-
-/* If the input matrix is too small, call DLASDQ to find the SVD. */
-
- if (*n <= *smlsiz) {
- dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset],
- ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], info);
- return 0;
- }
-
-/* Set up the computation tree. */
-
- inode = 1;
- ndiml = inode + *n;
- ndimr = ndiml + *n;
- idxq = ndimr + *n;
- iwk = idxq + *n;
- dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
- smlsiz);
-
-/*
- For the nodes on bottom level of the tree, solve
- their subproblems by DLASDQ.
-*/
-
- ndb1 = (nd + 1) / 2;
- ncc = 0;
- i__1 = nd;
- for (i__ = ndb1; i__ <= i__1; ++i__) {
-
-/*
- IC : center row of each node
- NL : number of rows of left subproblem
- NR : number of rows of right subproblem
- NLF: starting row of the left subproblem
- NRF: starting row of the right subproblem
-*/
-
- i1 = i__ - 1;
- ic = iwork[inode + i1];
- nl = iwork[ndiml + i1];
- nlp1 = nl + 1;
- nr = iwork[ndimr + i1];
- nrp1 = nr + 1;
- nlf = ic - nl;
- nrf = ic + 1;
- sqrei = 1;
- dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &vt[
- nlf + nlf * vt_dim1], ldvt, &u[nlf + nlf * u_dim1], ldu, &u[
- nlf + nlf * u_dim1], ldu, &work[1], info);
- if (*info != 0) {
- return 0;
- }
- itemp = idxq + nlf - 2;
- i__2 = nl;
- for (j = 1; j <= i__2; ++j) {
- iwork[itemp + j] = j;
-/* L10: */
- }
- if (i__ == nd) {
- sqrei = *sqre;
- } else {
- sqrei = 1;
- }
- nrp1 = nr + sqrei;
- dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &vt[
- nrf + nrf * vt_dim1], ldvt, &u[nrf + nrf * u_dim1], ldu, &u[
- nrf + nrf * u_dim1], ldu, &work[1], info);
- if (*info != 0) {
- return 0;
- }
- itemp = idxq + ic;
- i__2 = nr;
- for (j = 1; j <= i__2; ++j) {
- iwork[itemp + j - 1] = j;
-/* L20: */
- }
-/* L30: */
- }
-
-/* Now conquer each subproblem bottom-up. */
-
- for (lvl = nlvl; lvl >= 1; --lvl) {
-
-/*
- Find the first node LF and last node LL on the
- current level LVL.
-*/
-
- if (lvl == 1) {
- lf = 1;
- ll = 1;
- } else {
- i__1 = lvl - 1;
- lf = pow_ii(&c__2, &i__1);
- ll = ((lf) << (1)) - 1;
- }
- i__1 = ll;
- for (i__ = lf; i__ <= i__1; ++i__) {
- im1 = i__ - 1;
- ic = iwork[inode + im1];
- nl = iwork[ndiml + im1];
- nr = iwork[ndimr + im1];
- nlf = ic - nl;
- if ((*sqre == 0 && i__ == ll)) {
- sqrei = *sqre;
- } else {
- sqrei = 1;
- }
- idxqc = idxq + nlf - 1;
- alpha = d__[ic];
- beta = e[ic];
- dlasd1_(&nl, &nr, &sqrei, &d__[nlf], &alpha, &beta, &u[nlf + nlf *
- u_dim1], ldu, &vt[nlf + nlf * vt_dim1], ldvt, &iwork[
- idxqc], &iwork[iwk], &work[1], info);
- if (*info != 0) {
- return 0;
- }
-/* L40: */
- }
-/* L50: */
- }
-
- return 0;
-
-/* End of DLASD0 */
-
-} /* dlasd0_ */
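
DLASD0 walks its computation tree iteratively: DLASDT lays the tree
out, the loop ending at L30 solves the leaves with DLASDQ, and the loop
ending at L50 merges results bottom-up with DLASD1. The control flow is
easier to see as a recursion; in this schematic sketch, solve_small and
merge are placeholders for DLASDQ and DLASD1, not LAPACK names:

    /* hypothetical stand-ins for DLASDQ and DLASD1 */
    static void solve_small(int lo, int hi) { (void)lo; (void)hi; }
    static void merge(int lo, int ic, int hi) { (void)lo; (void)ic; (void)hi; }

    static void dc_svd(int lo, int hi, int smlsiz)
    {
        int n = hi - lo + 1;
        if (n <= smlsiz) {
            solve_small(lo, hi);     /* leaf: direct bidiagonal SVD */
            return;
        }
        int ic = lo + n / 2;         /* center row splits the problem */
        dc_svd(lo, ic - 1, smlsiz);  /* left subproblem (NL rows) */
        dc_svd(ic + 1, hi, smlsiz);  /* right subproblem (NR rows) */
        merge(lo, ic, hi);           /* rank-one merge of the two SVDs */
    }
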
-
-/* Subroutine */ int dlasd1_(integer *nl, integer *nr, integer *sqre,
- doublereal *d__, doublereal *alpha, doublereal *beta, doublereal *u,
- integer *ldu, doublereal *vt, integer *ldvt, integer *idxq, integer *
- iwork, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer u_dim1, u_offset, vt_dim1, vt_offset, i__1;
- doublereal d__1, d__2;
-
- /* Local variables */
- static integer i__, k, m, n, n1, n2, iq, iz, iu2, ldq, idx, ldu2, ivt2,
- idxc, idxp, ldvt2;
- extern /* Subroutine */ int dlasd2_(integer *, integer *, integer *,
- integer *, doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, integer *,
- integer *, integer *, integer *, integer *, integer *), dlasd3_(
- integer *, integer *, integer *, integer *, doublereal *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- integer *, integer *, integer *, doublereal *, integer *),
- dlascl_(char *, integer *, integer *, doublereal *, doublereal *,
- integer *, integer *, doublereal *, integer *, integer *),
- dlamrg_(integer *, integer *, doublereal *, integer *, integer *,
- integer *);
- static integer isigma;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static doublereal orgnrm;
- static integer coltyp;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLASD1 computes the SVD of an upper bidiagonal N-by-M matrix B,
- where N = NL + NR + 1 and M = N + SQRE. DLASD1 is called from DLASD0.
-
- A related subroutine DLASD7 handles the case in which the singular
- values (and the singular vectors in factored form) are desired.
-
- DLASD1 computes the SVD as follows:
-
- ( D1(in) 0 0 0 )
- B = U(in) * ( Z1' a Z2' b ) * VT(in)
- ( 0 0 D2(in) 0 )
-
- = U(out) * ( D(out) 0) * VT(out)
-
- where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M
-    with ALPHA and BETA in the (NL+1)-th and (NL+2)-th entries and zeros
- elsewhere; and the entry b is empty if SQRE = 0.
-
-    The left singular vectors of the original matrix are stored in U, the
-    transpose of the right singular vectors is stored in VT, and the
-    singular values are in D. The algorithm consists of three stages:
-
- The first stage consists of deflating the size of the problem
- when there are multiple singular values or when there are zeros in
-    the Z vector. For each such occurrence the dimension of the
- secular equation problem is reduced by one. This stage is
- performed by the routine DLASD2.
-
- The second stage consists of calculating the updated
- singular values. This is done by finding the square roots of the
- roots of the secular equation via the routine DLASD4 (as called
- by DLASD3). This routine also calculates the singular vectors of
- the current problem.
-
- The final stage consists of computing the updated singular vectors
- directly using the updated singular values. The singular vectors
- for the current problem are multiplied with the singular vectors
- from the overall problem.
-
- Arguments
- =========
-
- NL (input) INTEGER
- The row dimension of the upper block. NL >= 1.
-
- NR (input) INTEGER
- The row dimension of the lower block. NR >= 1.
-
- SQRE (input) INTEGER
- = 0: the lower block is an NR-by-NR square matrix.
- = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
-
- The bidiagonal matrix has row dimension N = NL + NR + 1,
- and column dimension M = N + SQRE.
-
- D (input/output) DOUBLE PRECISION array,
- dimension (N = NL+NR+1).
-           On entry D(1:NL) contains the singular values of the
- upper block; and D(NL+2:N) contains the singular values of
- the lower block. On exit D(1:N) contains the singular values
- of the modified matrix.
-
- ALPHA (input) DOUBLE PRECISION
- Contains the diagonal element associated with the added row.
-
- BETA (input) DOUBLE PRECISION
- Contains the off-diagonal element associated with the added
- row.
-
- U (input/output) DOUBLE PRECISION array, dimension(LDU,N)
- On entry U(1:NL, 1:NL) contains the left singular vectors of
- the upper block; U(NL+2:N, NL+2:N) contains the left singular
- vectors of the lower block. On exit U contains the left
- singular vectors of the bidiagonal matrix.
-
- LDU (input) INTEGER
- The leading dimension of the array U. LDU >= max( 1, N ).
-
- VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M)
- where M = N + SQRE.
- On entry VT(1:NL+1, 1:NL+1)' contains the right singular
- vectors of the upper block; VT(NL+2:M, NL+2:M)' contains
- the right singular vectors of the lower block. On exit
- VT' contains the right singular vectors of the
- bidiagonal matrix.
-
- LDVT (input) INTEGER
- The leading dimension of the array VT. LDVT >= max( 1, M ).
-
- IDXQ (output) INTEGER array, dimension(N)
- This contains the permutation which will reintegrate the
- subproblem just solved back into sorted order, i.e.
- D( IDXQ( I = 1, N ) ) will be in ascending order.
-
- IWORK (workspace) INTEGER array, dimension( 4 * N )
-
- WORK (workspace) DOUBLE PRECISION array, dimension( 3*M**2 + 2*M )
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-           > 0:  if INFO = 1, a singular value did not converge
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
- u -= u_offset;
- vt_dim1 = *ldvt;
- vt_offset = 1 + vt_dim1 * 1;
- vt -= vt_offset;
- --idxq;
- --iwork;
- --work;
-
- /* Function Body */
- *info = 0;
-
- if (*nl < 1) {
- *info = -1;
- } else if (*nr < 1) {
- *info = -2;
- } else if (*sqre < 0 || *sqre > 1) {
- *info = -3;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLASD1", &i__1);
- return 0;
- }
-
- n = *nl + *nr + 1;
- m = n + *sqre;
-
-/*
- The following values are for bookkeeping purposes only. They are
- integer pointers which indicate the portion of the workspace
- used by a particular array in DLASD2 and DLASD3.
-*/
-
- ldu2 = n;
- ldvt2 = m;
-
- iz = 1;
- isigma = iz + m;
- iu2 = isigma + n;
- ivt2 = iu2 + ldu2 * n;
- iq = ivt2 + ldvt2 * m;
-
- idx = 1;
- idxc = idx + n;
- coltyp = idxc + n;
- idxp = coltyp + n;
-
-/*
- Scale.
-
- Computing MAX
-*/
- d__1 = abs(*alpha), d__2 = abs(*beta);
- orgnrm = max(d__1,d__2);
- d__[*nl + 1] = 0.;
- i__1 = n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if ((d__1 = d__[i__], abs(d__1)) > orgnrm) {
- orgnrm = (d__1 = d__[i__], abs(d__1));
- }
-/* L10: */
- }
- dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &n, &c__1, &d__[1], &n, info);
- *alpha /= orgnrm;
- *beta /= orgnrm;
-
-/* Deflate singular values. */
-
- dlasd2_(nl, nr, sqre, &k, &d__[1], &work[iz], alpha, beta, &u[u_offset],
- ldu, &vt[vt_offset], ldvt, &work[isigma], &work[iu2], &ldu2, &
- work[ivt2], &ldvt2, &iwork[idxp], &iwork[idx], &iwork[idxc], &
- idxq[1], &iwork[coltyp], info);
-
-/* Solve Secular Equation and update singular vectors. */
-
- ldq = k;
- dlasd3_(nl, nr, sqre, &k, &d__[1], &work[iq], &ldq, &work[isigma], &u[
- u_offset], ldu, &work[iu2], &ldu2, &vt[vt_offset], ldvt, &work[
- ivt2], &ldvt2, &iwork[idxc], &iwork[coltyp], &work[iz], info);
- if (*info != 0) {
- return 0;
- }
-
-/* Unscale. */
-
- dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &n, &c__1, &d__[1], &n, info);
-
-/* Prepare the IDXQ sorting permutation. */
-
- n1 = k;
- n2 = n - k;
- dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
-
- return 0;
-
-/* End of DLASD1 */
-
-} /* dlasd1_ */
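
The closing DLAMRG call prepares IDXQ from two already-sorted runs: the
K non-deflated values followed by the N-K deflated ones. A 0-based
sketch of that merge for two ascending runs (the Fortran routine is
1-based, and its stride arguments also accept a descending run, which
is how it is invoked above with stride -1 for the second run):

    static void merge_perm(int n1, int n2, const double *d, int *idx)
    {
        /* d[0..n1-1] and d[n1..n1+n2-1] each ascending; fill idx so
           that d[idx[0]] <= d[idx[1]] <= ... over all n1+n2 entries */
        int i = 0, j = n1, k = 0;
        while (i < n1 && j < n1 + n2)
            idx[k++] = (d[i] <= d[j]) ? i++ : j++;
        while (i < n1)
            idx[k++] = i++;
        while (j < n1 + n2)
            idx[k++] = j++;
    }
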
-
-/* Subroutine */ int dlasd2_(integer *nl, integer *nr, integer *sqre, integer
- *k, doublereal *d__, doublereal *z__, doublereal *alpha, doublereal *
- beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt,
- doublereal *dsigma, doublereal *u2, integer *ldu2, doublereal *vt2,
- integer *ldvt2, integer *idxp, integer *idx, integer *idxc, integer *
- idxq, integer *coltyp, integer *info)
-{
- /* System generated locals */
- integer u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset,
- vt2_dim1, vt2_offset, i__1;
- doublereal d__1, d__2;
-
- /* Local variables */
- static doublereal c__;
- static integer i__, j, m, n;
- static doublereal s;
- static integer k2;
- static doublereal z1;
- static integer ct, jp;
- static doublereal eps, tau, tol;
- static integer psm[4], nlp1, nlp2, idxi, idxj;
- extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *);
- static integer ctot[4], idxjp;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static integer jprev;
-
- extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
- integer *, integer *, integer *), dlacpy_(char *, integer *,
- integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *,
- doublereal *, doublereal *, integer *), xerbla_(char *,
- integer *);
- static doublereal hlftol;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- DLASD2 merges the two sets of singular values together into a single
- sorted set. Then it tries to deflate the size of the problem.
- There are two ways in which deflation can occur: when two or more
- singular values are close together or if there is a tiny entry in the
- Z vector. For each such occurrence the order of the related secular
- equation problem is reduced by one.
-
- DLASD2 is called from DLASD1.
-
- Arguments
- =========
-
- NL (input) INTEGER
- The row dimension of the upper block. NL >= 1.
-
- NR (input) INTEGER
- The row dimension of the lower block. NR >= 1.
-
- SQRE (input) INTEGER
- = 0: the lower block is an NR-by-NR square matrix.
- = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
-
- The bidiagonal matrix has N = NL + NR + 1 rows and
- M = N + SQRE >= N columns.
-
- K (output) INTEGER
-           Contains the dimension of the non-deflated matrix.
-           This is the order of the related secular equation. 1 <= K <= N.
-
- D (input/output) DOUBLE PRECISION array, dimension(N)
- On entry D contains the singular values of the two submatrices
- to be combined. On exit D contains the trailing (N-K) updated
- singular values (those which were deflated) sorted into
- increasing order.
-
- ALPHA (input) DOUBLE PRECISION
- Contains the diagonal element associated with the added row.
-
- BETA (input) DOUBLE PRECISION
- Contains the off-diagonal element associated with the added
- row.
-
- U (input/output) DOUBLE PRECISION array, dimension(LDU,N)
- On entry U contains the left singular vectors of two
- submatrices in the two square blocks with corners at (1,1),
- (NL, NL), and (NL+2, NL+2), (N,N).
- On exit U contains the trailing (N-K) updated left singular
- vectors (those which were deflated) in its last N-K columns.
-
- LDU (input) INTEGER
- The leading dimension of the array U. LDU >= N.
-
- Z (output) DOUBLE PRECISION array, dimension(N)
- On exit Z contains the updating row vector in the secular
- equation.
-
- DSIGMA (output) DOUBLE PRECISION array, dimension (N)
- Contains a copy of the diagonal elements (K-1 singular values
- and one zero) in the secular equation.
-
- U2 (output) DOUBLE PRECISION array, dimension(LDU2,N)
- Contains a copy of the first K-1 left singular vectors which
- will be used by DLASD3 in a matrix multiply (DGEMM) to solve
- for the new left singular vectors. U2 is arranged into four
- blocks. The first block contains a column with 1 at NL+1 and
- zero everywhere else; the second block contains non-zero
- entries only at and above NL; the third contains non-zero
- entries only below NL+1; and the fourth is dense.
-
- LDU2 (input) INTEGER
- The leading dimension of the array U2. LDU2 >= N.
-
- VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M)
- On entry VT' contains the right singular vectors of two
- submatrices in the two square blocks with corners at (1,1),
- (NL+1, NL+1), and (NL+2, NL+2), (M,M).
- On exit VT' contains the trailing (N-K) updated right singular
- vectors (those which were deflated) in its last N-K columns.
-           In case SQRE = 1, the last row of VT spans the right null
- space.
-
- LDVT (input) INTEGER
- The leading dimension of the array VT. LDVT >= M.
-
- VT2 (output) DOUBLE PRECISION array, dimension(LDVT2,N)
- VT2' contains a copy of the first K right singular vectors
- which will be used by DLASD3 in a matrix multiply (DGEMM) to
- solve for the new right singular vectors. VT2 is arranged into
- three blocks. The first block contains a row that corresponds
- to the special 0 diagonal element in SIGMA; the second block
- contains non-zeros only at and before NL +1; the third block
- contains non-zeros only at and after NL +2.
-
- LDVT2 (input) INTEGER
- The leading dimension of the array VT2. LDVT2 >= M.
-
- IDXP (workspace) INTEGER array, dimension(N)
- This will contain the permutation used to place deflated
- values of D at the end of the array. On output IDXP(2:K)
- points to the nondeflated D-values and IDXP(K+1:N)
- points to the deflated singular values.
-
- IDX (workspace) INTEGER array, dimension(N)
- This will contain the permutation used to sort the contents of
- D into ascending order.
-
- IDXC (output) INTEGER array, dimension(N)
- This will contain the permutation used to arrange the columns
- of the deflated U matrix into three groups: the first group
- contains non-zero entries only at and above NL, the second
- contains non-zero entries only below NL+2, and the third is
- dense.
-
- COLTYP (workspace/output) INTEGER array, dimension(N)
- As workspace, this will contain a label which will indicate
- which of the following types a column in the U2 matrix or a
- row in the VT2 matrix is:
- 1 : non-zero in the upper half only
- 2 : non-zero in the lower half only
- 3 : dense
- 4 : deflated
-
-           On exit, it is an array of dimension 4, with COLTYP(I) being
-           the number of columns of type I.
-
- IDXQ (input) INTEGER array, dimension(N)
- This contains the permutation which separately sorts the two
- sub-problems in D into ascending order. Note that entries in
-           the first half of this permutation must first be moved one
- position backward; and entries in the second half
- must first have NL+1 added to their values.
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --z__;
- u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
- u -= u_offset;
- vt_dim1 = *ldvt;
- vt_offset = 1 + vt_dim1 * 1;
- vt -= vt_offset;
- --dsigma;
- u2_dim1 = *ldu2;
- u2_offset = 1 + u2_dim1 * 1;
- u2 -= u2_offset;
- vt2_dim1 = *ldvt2;
- vt2_offset = 1 + vt2_dim1 * 1;
- vt2 -= vt2_offset;
- --idxp;
- --idx;
- --idxc;
- --idxq;
- --coltyp;
-
- /* Function Body */
- *info = 0;
-
- if (*nl < 1) {
- *info = -1;
- } else if (*nr < 1) {
- *info = -2;
- } else if ((*sqre != 1 && *sqre != 0)) {
- *info = -3;
- }
-
- n = *nl + *nr + 1;
- m = n + *sqre;
-
- if (*ldu < n) {
- *info = -10;
- } else if (*ldvt < m) {
- *info = -12;
- } else if (*ldu2 < n) {
- *info = -15;
- } else if (*ldvt2 < m) {
- *info = -17;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLASD2", &i__1);
- return 0;
- }
-
- nlp1 = *nl + 1;
- nlp2 = *nl + 2;
-
-/*
- Generate the first part of the vector Z; and move the singular
- values in the first part of D one position backward.
-*/
-
- z1 = *alpha * vt[nlp1 + nlp1 * vt_dim1];
- z__[1] = z1;
- for (i__ = *nl; i__ >= 1; --i__) {
- z__[i__ + 1] = *alpha * vt[i__ + nlp1 * vt_dim1];
- d__[i__ + 1] = d__[i__];
- idxq[i__ + 1] = idxq[i__] + 1;
-/* L10: */
- }
-
-/* Generate the second part of the vector Z. */
-
- i__1 = m;
- for (i__ = nlp2; i__ <= i__1; ++i__) {
- z__[i__] = *beta * vt[i__ + nlp2 * vt_dim1];
-/* L20: */
- }
-
-/* Initialize some reference arrays. */
-
- i__1 = nlp1;
- for (i__ = 2; i__ <= i__1; ++i__) {
- coltyp[i__] = 1;
-/* L30: */
- }
- i__1 = n;
- for (i__ = nlp2; i__ <= i__1; ++i__) {
- coltyp[i__] = 2;
-/* L40: */
- }
-
-/* Sort the singular values into increasing order */
-
- i__1 = n;
- for (i__ = nlp2; i__ <= i__1; ++i__) {
- idxq[i__] += nlp1;
-/* L50: */
- }
-
-/*
-    DSIGMA, IDXC, and the first column of U2
- are used as storage space.
-*/
-
- i__1 = n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- dsigma[i__] = d__[idxq[i__]];
- u2[i__ + u2_dim1] = z__[idxq[i__]];
- idxc[i__] = coltyp[idxq[i__]];
-/* L60: */
- }
-
- dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
-
- i__1 = n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- idxi = idx[i__] + 1;
- d__[i__] = dsigma[idxi];
- z__[i__] = u2[idxi + u2_dim1];
- coltyp[i__] = idxc[idxi];
-/* L70: */
- }
-
-/* Calculate the allowable deflation tolerance */
-
- eps = EPSILON;
-/* Computing MAX */
- d__1 = abs(*alpha), d__2 = abs(*beta);
- tol = max(d__1,d__2);
-/* Computing MAX */
- d__2 = (d__1 = d__[n], abs(d__1));
- tol = eps * 8. * max(d__2,tol);
-
-/*
- There are 2 kinds of deflation -- first a value in the z-vector
- is small, second two (or more) singular values are very close
- together (their difference is small).
-
- If the value in the z-vector is small, we simply permute the
- array so that the corresponding singular value is moved to the
- end.
-
- If two values in the D-vector are close, we perform a two-sided
- rotation designed to make one of the corresponding z-vector
- entries zero, and then permute the array so that the deflated
- singular value is moved to the end.
-
- If there are multiple singular values then the problem deflates.
-    Here the number of equal singular values is found. As each equal
- singular value is found, an elementary reflector is computed to
- rotate the corresponding singular subspace so that the
- corresponding components of Z are zero in this new basis.
-*/
-
- *k = 1;
- k2 = n + 1;
- i__1 = n;
- for (j = 2; j <= i__1; ++j) {
- if ((d__1 = z__[j], abs(d__1)) <= tol) {
-
-/* Deflate due to small z component. */
-
- --k2;
- idxp[k2] = j;
- coltyp[j] = 4;
- if (j == n) {
- goto L120;
- }
- } else {
- jprev = j;
- goto L90;
- }
-/* L80: */
- }
-L90:
- j = jprev;
-L100:
- ++j;
- if (j > n) {
- goto L110;
- }
- if ((d__1 = z__[j], abs(d__1)) <= tol) {
-
-/* Deflate due to small z component. */
-
- --k2;
- idxp[k2] = j;
- coltyp[j] = 4;
- } else {
-
-/* Check if singular values are close enough to allow deflation. */
-
- if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) {
-
-/* Deflation is possible. */
-
- s = z__[jprev];
- c__ = z__[j];
-
-/*
- Find sqrt(a**2+b**2) without overflow or
- destructive underflow.
-*/
-
- tau = dlapy2_(&c__, &s);
- c__ /= tau;
- s = -s / tau;
- z__[j] = tau;
- z__[jprev] = 0.;
-
-/*
- Apply back the Givens rotation to the left and right
- singular vector matrices.
-*/
-
- idxjp = idxq[idx[jprev] + 1];
- idxj = idxq[idx[j] + 1];
- if (idxjp <= nlp1) {
- --idxjp;
- }
- if (idxj <= nlp1) {
- --idxj;
- }
- drot_(&n, &u[idxjp * u_dim1 + 1], &c__1, &u[idxj * u_dim1 + 1], &
- c__1, &c__, &s);
- drot_(&m, &vt[idxjp + vt_dim1], ldvt, &vt[idxj + vt_dim1], ldvt, &
- c__, &s);
- if (coltyp[j] != coltyp[jprev]) {
- coltyp[j] = 3;
- }
- coltyp[jprev] = 4;
- --k2;
- idxp[k2] = jprev;
- jprev = j;
- } else {
- ++(*k);
- u2[*k + u2_dim1] = z__[jprev];
- dsigma[*k] = d__[jprev];
- idxp[*k] = jprev;
- jprev = j;
- }
- }
- goto L100;
-L110:
-
-/* Record the last singular value. */
-
- ++(*k);
- u2[*k + u2_dim1] = z__[jprev];
- dsigma[*k] = d__[jprev];
- idxp[*k] = jprev;
-
-L120:
-
-/*
- Count up the total number of the various types of columns, then
- form a permutation which positions the four column types into
- four groups of uniform structure (although one or more of these
- groups may be empty).
-*/
-
- for (j = 1; j <= 4; ++j) {
- ctot[j - 1] = 0;
-/* L130: */
- }
- i__1 = n;
- for (j = 2; j <= i__1; ++j) {
- ct = coltyp[j];
- ++ctot[ct - 1];
-/* L140: */
- }
-
-/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
-
- psm[0] = 2;
- psm[1] = ctot[0] + 2;
- psm[2] = psm[1] + ctot[1];
- psm[3] = psm[2] + ctot[2];
-
-/*
- Fill out the IDXC array so that the permutation which it induces
- will place all type-1 columns first, all type-2 columns next,
- then all type-3's, and finally all type-4's, starting from the
- second column. This applies similarly to the rows of VT.
-*/
-
- i__1 = n;
- for (j = 2; j <= i__1; ++j) {
- jp = idxp[j];
- ct = coltyp[jp];
- idxc[psm[ct - 1]] = j;
- ++psm[ct - 1];
-/* L150: */
- }
-
-/*
- Sort the singular values and corresponding singular vectors into
- DSIGMA, U2, and VT2 respectively. The singular values/vectors
- which were not deflated go into the first K slots of DSIGMA, U2,
- and VT2 respectively, while those which were deflated go into the
- last N - K slots, except that the first column/row will be treated
- separately.
-*/
-
- i__1 = n;
- for (j = 2; j <= i__1; ++j) {
- jp = idxp[j];
- dsigma[j] = d__[jp];
- idxj = idxq[idx[idxp[idxc[j]]] + 1];
- if (idxj <= nlp1) {
- --idxj;
- }
- dcopy_(&n, &u[idxj * u_dim1 + 1], &c__1, &u2[j * u2_dim1 + 1], &c__1);
- dcopy_(&m, &vt[idxj + vt_dim1], ldvt, &vt2[j + vt2_dim1], ldvt2);
-/* L160: */
- }
-
-/* Determine DSIGMA(1), DSIGMA(2) and Z(1) */
-
- dsigma[1] = 0.;
- hlftol = tol / 2.;
- if (abs(dsigma[2]) <= hlftol) {
- dsigma[2] = hlftol;
- }
- if (m > n) {
- z__[1] = dlapy2_(&z1, &z__[m]);
- if (z__[1] <= tol) {
- c__ = 1.;
- s = 0.;
- z__[1] = tol;
- } else {
- c__ = z1 / z__[1];
- s = z__[m] / z__[1];
- }
- } else {
- if (abs(z1) <= tol) {
- z__[1] = tol;
- } else {
- z__[1] = z1;
- }
- }
-
-/* Move the rest of the updating row to Z. */
-
- i__1 = *k - 1;
- dcopy_(&i__1, &u2[u2_dim1 + 2], &c__1, &z__[2], &c__1);
-
-/*
- Determine the first column of U2, the first row of VT2 and the
- last row of VT.
-*/
-
- dlaset_("A", &n, &c__1, &c_b29, &c_b29, &u2[u2_offset], ldu2);
- u2[nlp1 + u2_dim1] = 1.;
- if (m > n) {
- i__1 = nlp1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- vt[m + i__ * vt_dim1] = -s * vt[nlp1 + i__ * vt_dim1];
- vt2[i__ * vt2_dim1 + 1] = c__ * vt[nlp1 + i__ * vt_dim1];
-/* L170: */
- }
- i__1 = m;
- for (i__ = nlp2; i__ <= i__1; ++i__) {
- vt2[i__ * vt2_dim1 + 1] = s * vt[m + i__ * vt_dim1];
- vt[m + i__ * vt_dim1] = c__ * vt[m + i__ * vt_dim1];
-/* L180: */
- }
- } else {
- dcopy_(&m, &vt[nlp1 + vt_dim1], ldvt, &vt2[vt2_dim1 + 1], ldvt2);
- }
- if (m > n) {
- dcopy_(&m, &vt[m + vt_dim1], ldvt, &vt2[m + vt2_dim1], ldvt2);
- }
-
-/*
- The deflated singular values and their corresponding vectors go
- into the back of D, U, and V respectively.
-*/
-
- if (n > *k) {
- i__1 = n - *k;
- dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
- i__1 = n - *k;
- dlacpy_("A", &n, &i__1, &u2[(*k + 1) * u2_dim1 + 1], ldu2, &u[(*k + 1)
- * u_dim1 + 1], ldu);
- i__1 = n - *k;
- dlacpy_("A", &i__1, &m, &vt2[*k + 1 + vt2_dim1], ldvt2, &vt[*k + 1 +
- vt_dim1], ldvt);
- }
-
-/* Copy CTOT into COLTYP for referencing in DLASD3. */
-
- for (j = 1; j <= 4; ++j) {
- coltyp[j] = ctot[j - 1];
-/* L190: */
- }
-
- return 0;
-
-/* End of DLASD2 */
-
-} /* dlasd2_ */
-
-/* Subroutine */ int dlasd3_(integer *nl, integer *nr, integer *sqre, integer
- *k, doublereal *d__, doublereal *q, integer *ldq, doublereal *dsigma,
- doublereal *u, integer *ldu, doublereal *u2, integer *ldu2,
- doublereal *vt, integer *ldvt, doublereal *vt2, integer *ldvt2,
- integer *idxc, integer *ctot, doublereal *z__, integer *info)
-{
- /* System generated locals */
- integer q_dim1, q_offset, u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1,
- vt_offset, vt2_dim1, vt2_offset, i__1, i__2;
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double sqrt(doublereal), d_sign(doublereal *, doublereal *);
-
- /* Local variables */
- static integer i__, j, m, n, jc;
- static doublereal rho;
- static integer nlp1, nlp2, nrp1;
- static doublereal temp;
- extern doublereal dnrm2_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *);
- static integer ctemp;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static integer ktemp;
- extern doublereal dlamc3_(doublereal *, doublereal *);
- extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, integer *), dlascl_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, integer *, doublereal *,
- integer *, integer *), dlacpy_(char *, integer *, integer
- *, doublereal *, integer *, doublereal *, integer *),
- xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- DLASD3 finds all the square roots of the roots of the secular
- equation, as defined by the values in D and Z. It makes the
- appropriate calls to DLASD4 and then updates the singular
- vectors by matrix multiplication.
-
- This code makes very mild assumptions about floating point
- arithmetic. It will work on machines with a guard digit in
- add/subtract, or on those binary machines without guard digits
- which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2.
- It could conceivably fail on hexadecimal or decimal machines
- without guard digits, but we know of none.
-
- DLASD3 is called from DLASD1.
-
- Arguments
- =========
-
- NL (input) INTEGER
- The row dimension of the upper block. NL >= 1.
-
- NR (input) INTEGER
- The row dimension of the lower block. NR >= 1.
-
- SQRE (input) INTEGER
- = 0: the lower block is an NR-by-NR square matrix.
- = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
-
- The bidiagonal matrix has N = NL + NR + 1 rows and
- M = N + SQRE >= N columns.
-
- K (input) INTEGER
- The size of the secular equation, 1 <= K <= N.
-
- D (output) DOUBLE PRECISION array, dimension(K)
- On exit the square roots of the roots of the secular equation,
- in ascending order.
-
- Q (workspace) DOUBLE PRECISION array,
- dimension at least (LDQ,K).
-
- LDQ (input) INTEGER
- The leading dimension of the array Q. LDQ >= K.
-
- DSIGMA (input) DOUBLE PRECISION array, dimension(K)
- The first K elements of this array contain the old roots
- of the deflated updating problem. These are the poles
- of the secular equation.
-
- U (input) DOUBLE PRECISION array, dimension (LDU, N)
- The last N - K columns of this matrix contain the deflated
- left singular vectors.
-
- LDU (input) INTEGER
- The leading dimension of the array U. LDU >= N.
-
- U2 (input) DOUBLE PRECISION array, dimension (LDU2, N)
- The first K columns of this matrix contain the non-deflated
- left singular vectors for the split problem.
-
- LDU2 (input) INTEGER
- The leading dimension of the array U2. LDU2 >= N.
-
- VT (input) DOUBLE PRECISION array, dimension (LDVT, M)
- The last M - K columns of VT' contain the deflated
- right singular vectors.
-
- LDVT (input) INTEGER
- The leading dimension of the array VT. LDVT >= N.
-
- VT2 (input) DOUBLE PRECISION array, dimension (LDVT2, N)
- The first K columns of VT2' contain the non-deflated
- right singular vectors for the split problem.
-
- LDVT2 (input) INTEGER
- The leading dimension of the array VT2. LDVT2 >= N.
-
- IDXC (input) INTEGER array, dimension ( N )
- The permutation used to arrange the columns of U (and rows of
- VT) into three groups: the first group contains non-zero
- entries only at and above (or before) NL+1; the second
- contains non-zero entries only at and below (or after) NL+2;
- and the third is dense. The first column of U and the first
- row of VT are treated separately, however.
-
- The rows of the singular vectors found by DLASD4
- must be likewise permuted before the matrix multiplies can
- take place.
-
- CTOT (input) INTEGER array, dimension ( 4 )
- A count of the total number of the various types of columns
- in U (or rows in VT), as described in IDXC. The fourth column
- type is any column which has been deflated.
-
- Z (input) DOUBLE PRECISION array, dimension (K)
- The first K elements of this array contain the components
- of the deflation-adjusted updating row vector.
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
- > 0: if INFO = 1, a singular value did not converge
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
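/*
   Editorial sketch, not from the original file: after Z has been
   normalized (so that RHO carries ||Z||**2), the secular equation
   whose roots DLASD4 locates for this routine is

       f(sigma) = 1/RHO + sum_j Z(j)**2 / ((DSIGMA(j) - sigma) *
                                           (DSIGMA(j) + sigma)) = 0.

   A direct, unsafeguarded evaluation for exposition only -- the real
   code keeps the differences d(j) -/+ sigma explicitly to avoid
   cancellation:
*/
static double secular_f(int k, const double *d, const double *z,
                        double rho, double sigma)
{
    double f = 1. / rho;
    int j;

    for (j = 0; j < k; ++j)
        f += z[j] * z[j] / ((d[j] - sigma) * (d[j] + sigma));
    return f;
}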
-
- /* Parameter adjustments */
- --d__;
- q_dim1 = *ldq;
- q_offset = 1 + q_dim1 * 1;
- q -= q_offset;
- --dsigma;
- u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
- u -= u_offset;
- u2_dim1 = *ldu2;
- u2_offset = 1 + u2_dim1 * 1;
- u2 -= u2_offset;
- vt_dim1 = *ldvt;
- vt_offset = 1 + vt_dim1 * 1;
- vt -= vt_offset;
- vt2_dim1 = *ldvt2;
- vt2_offset = 1 + vt2_dim1 * 1;
- vt2 -= vt2_offset;
- --idxc;
- --ctot;
- --z__;
-
- /* Function Body */
- *info = 0;
-
- if (*nl < 1) {
- *info = -1;
- } else if (*nr < 1) {
- *info = -2;
- } else if ((*sqre != 1 && *sqre != 0)) {
- *info = -3;
- }
-
- n = *nl + *nr + 1;
- m = n + *sqre;
- nlp1 = *nl + 1;
- nlp2 = *nl + 2;
-
- if (*k < 1 || *k > n) {
- *info = -4;
- } else if (*ldq < *k) {
- *info = -7;
- } else if (*ldu < n) {
- *info = -10;
- } else if (*ldu2 < n) {
- *info = -12;
- } else if (*ldvt < m) {
- *info = -14;
- } else if (*ldvt2 < m) {
- *info = -16;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLASD3", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*k == 1) {
- d__[1] = abs(z__[1]);
- dcopy_(&m, &vt2[vt2_dim1 + 1], ldvt2, &vt[vt_dim1 + 1], ldvt);
- if (z__[1] > 0.) {
- dcopy_(&n, &u2[u2_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
- } else {
- i__1 = n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- u[i__ + u_dim1] = -u2[i__ + u2_dim1];
-/* L10: */
- }
- }
- return 0;
- }
-
-/*
- Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can
- be computed with high relative accuracy (barring over/underflow).
- This is a problem on machines without a guard digit in
- add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
- The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I),
- which on any of these machines zeros out the bottommost
- bit of DSIGMA(I) if it is 1; this makes the subsequent
- subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation
- occurs. On binary machines with a guard digit (almost all
- machines) it does not change DSIGMA(I) at all. On hexadecimal
- and decimal machines with a guard digit, it slightly
- changes the bottommost bits of DSIGMA(I). It does not account
- for hexadecimal or decimal machines without guard digits
- (we know of none). We use a subroutine call to compute
- 2*DSIGMA(I) to prevent optimizing compilers from eliminating
- this code.
-*/
-
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
-/* L20: */
- }
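/*
   Editorial sketch, not from the original file: the point of routing
   DSIGMA(I) + DSIGMA(I) through the opaque function dlamc3_ above is
   to force the sum to be rounded to working precision before the
   subtraction, so the compiler cannot fold 2*x - x away. A portable C
   analogue of the same mechanism uses a volatile temporary:
*/
static double force_rounded_double(double x)
{
    volatile double t = x + x;  /* stored, so it must really be rounded */
    return t - x;               /* equals x wherever a guard digit exists */
}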
-
-/* Keep a copy of Z. */
-
- dcopy_(k, &z__[1], &c__1, &q[q_offset], &c__1);
-
-/* Normalize Z. */
-
- rho = dnrm2_(k, &z__[1], &c__1);
- dlascl_("G", &c__0, &c__0, &rho, &c_b15, k, &c__1, &z__[1], k, info);
- rho *= rho;
-
-/* Find the new singular values. */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dlasd4_(k, &j, &dsigma[1], &z__[1], &u[j * u_dim1 + 1], &rho, &d__[j],
- &vt[j * vt_dim1 + 1], info);
-
-/* If the zero finder fails, the computation is terminated. */
-
- if (*info != 0) {
- return 0;
- }
-/* L30: */
- }
-
-/* Compute updated Z. */
-
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- z__[i__] = u[i__ + *k * u_dim1] * vt[i__ + *k * vt_dim1];
- i__2 = i__ - 1;
- for (j = 1; j <= i__2; ++j) {
- z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
- i__] - dsigma[j]) / (dsigma[i__] + dsigma[j]);
-/* L40: */
- }
- i__2 = *k - 1;
- for (j = i__; j <= i__2; ++j) {
- z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
- i__] - dsigma[j + 1]) / (dsigma[i__] + dsigma[j + 1]);
-/* L50: */
- }
- d__2 = sqrt((d__1 = z__[i__], abs(d__1)));
- z__[i__] = d_sign(&d__2, &q[i__ + q_dim1]);
-/* L60: */
- }
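/*
   Editorial sketch, not from the original file: the loop above rebuilds
   the updating vector (at its original, unnormalized scale) from the
   computed roots via the Loewner-type formula

       Z(i)**2 = prod_j (sigma(j)**2 - d(i)**2)
                 / prod_{j != i} (d(j)**2 - d(i)**2),

   evaluated with the differences d(i) -/+ sigma(j) already stored in U
   and VT, so no squared quantity is ever formed by cancellation. The
   direct form, for exposition only:
*/
static double updated_z_squared(int k, int i, const double *d,
                                const double *sigma)
{
    double num = 1., den = 1.;
    int j;

    for (j = 0; j < k; ++j) {
        num *= (sigma[j] - d[i]) * (sigma[j] + d[i]);
        if (j != i)
            den *= (d[j] - d[i]) * (d[j] + d[i]);
    }
    return num / den;
}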
-
-/*
- Compute left singular vectors of the modified diagonal matrix,
- and store related information for the right singular vectors.
-*/
-
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- vt[i__ * vt_dim1 + 1] = z__[1] / u[i__ * u_dim1 + 1] / vt[i__ *
- vt_dim1 + 1];
- u[i__ * u_dim1 + 1] = -1.;
- i__2 = *k;
- for (j = 2; j <= i__2; ++j) {
- vt[j + i__ * vt_dim1] = z__[j] / u[j + i__ * u_dim1] / vt[j + i__
- * vt_dim1];
- u[j + i__ * u_dim1] = dsigma[j] * vt[j + i__ * vt_dim1];
-/* L70: */
- }
- temp = dnrm2_(k, &u[i__ * u_dim1 + 1], &c__1);
- q[i__ * q_dim1 + 1] = u[i__ * u_dim1 + 1] / temp;
- i__2 = *k;
- for (j = 2; j <= i__2; ++j) {
- jc = idxc[j];
- q[j + i__ * q_dim1] = u[jc + i__ * u_dim1] / temp;
-/* L80: */
- }
-/* L90: */
- }
-
-/* Update the left singular vector matrix. */
-
- if (*k == 2) {
- dgemm_("N", "N", &n, k, k, &c_b15, &u2[u2_offset], ldu2, &q[q_offset],
- ldq, &c_b29, &u[u_offset], ldu);
- goto L100;
- }
- if (ctot[1] > 0) {
- dgemm_("N", "N", nl, k, &ctot[1], &c_b15, &u2[((u2_dim1) << (1)) + 1],
- ldu2, &q[q_dim1 + 2], ldq, &c_b29, &u[u_dim1 + 1], ldu);
- if (ctot[3] > 0) {
- ktemp = ctot[1] + 2 + ctot[2];
- dgemm_("N", "N", nl, k, &ctot[3], &c_b15, &u2[ktemp * u2_dim1 + 1]
- , ldu2, &q[ktemp + q_dim1], ldq, &c_b15, &u[u_dim1 + 1],
- ldu);
- }
- } else if (ctot[3] > 0) {
- ktemp = ctot[1] + 2 + ctot[2];
- dgemm_("N", "N", nl, k, &ctot[3], &c_b15, &u2[ktemp * u2_dim1 + 1],
- ldu2, &q[ktemp + q_dim1], ldq, &c_b29, &u[u_dim1 + 1], ldu);
- } else {
- dlacpy_("F", nl, k, &u2[u2_offset], ldu2, &u[u_offset], ldu);
- }
- dcopy_(k, &q[q_dim1 + 1], ldq, &u[nlp1 + u_dim1], ldu);
- ktemp = ctot[1] + 2;
- ctemp = ctot[2] + ctot[3];
- dgemm_("N", "N", nr, k, &ctemp, &c_b15, &u2[nlp2 + ktemp * u2_dim1], ldu2,
- &q[ktemp + q_dim1], ldq, &c_b29, &u[nlp2 + u_dim1], ldu);
-
-/* Generate the right singular vectors. */
-
-L100:
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- temp = dnrm2_(k, &vt[i__ * vt_dim1 + 1], &c__1);
- q[i__ + q_dim1] = vt[i__ * vt_dim1 + 1] / temp;
- i__2 = *k;
- for (j = 2; j <= i__2; ++j) {
- jc = idxc[j];
- q[i__ + j * q_dim1] = vt[jc + i__ * vt_dim1] / temp;
-/* L110: */
- }
-/* L120: */
- }
-
-/* Update the right singular vector matrix. */
-
- if (*k == 2) {
- dgemm_("N", "N", k, &m, k, &c_b15, &q[q_offset], ldq, &vt2[vt2_offset]
- , ldvt2, &c_b29, &vt[vt_offset], ldvt);
- return 0;
- }
- ktemp = ctot[1] + 1;
- dgemm_("N", "N", k, &nlp1, &ktemp, &c_b15, &q[q_dim1 + 1], ldq, &vt2[
- vt2_dim1 + 1], ldvt2, &c_b29, &vt[vt_dim1 + 1], ldvt);
- ktemp = ctot[1] + 2 + ctot[2];
- if (ktemp <= *ldvt2) {
- dgemm_("N", "N", k, &nlp1, &ctot[3], &c_b15, &q[ktemp * q_dim1 + 1],
- ldq, &vt2[ktemp + vt2_dim1], ldvt2, &c_b15, &vt[vt_dim1 + 1],
- ldvt);
- }
-
- ktemp = ctot[1] + 1;
- nrp1 = *nr + *sqre;
- if (ktemp > 1) {
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- q[i__ + ktemp * q_dim1] = q[i__ + q_dim1];
-/* L130: */
- }
- i__1 = m;
- for (i__ = nlp2; i__ <= i__1; ++i__) {
- vt2[ktemp + i__ * vt2_dim1] = vt2[i__ * vt2_dim1 + 1];
-/* L140: */
- }
- }
- ctemp = ctot[2] + 1 + ctot[3];
- dgemm_("N", "N", k, &nrp1, &ctemp, &c_b15, &q[ktemp * q_dim1 + 1], ldq, &
- vt2[ktemp + nlp2 * vt2_dim1], ldvt2, &c_b29, &vt[nlp2 * vt_dim1 +
- 1], ldvt);
-
- return 0;
-
-/* End of DLASD3 */
-
-} /* dlasd3_ */
-
-
-/* Subroutine */ int dlasd4_(integer *n, integer *i__, doublereal *d__,
- doublereal *z__, doublereal *delta, doublereal *rho, doublereal *
- sigma, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer i__1;
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal a, b, c__;
- static integer j;
- static doublereal w, dd[3];
- static integer ii;
- static doublereal dw, zz[3];
- static integer ip1;
- static doublereal eta, phi, eps, tau, psi;
- static integer iim1, iip1;
- static doublereal dphi, dpsi;
- static integer iter;
- static doublereal temp, prew, sg2lb, sg2ub, temp1, temp2, dtiim, delsq,
- dtiip;
- static integer niter;
- static doublereal dtisq;
- static logical swtch;
- static doublereal dtnsq;
- extern /* Subroutine */ int dlaed6_(integer *, logical *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *, integer *)
- , dlasd5_(integer *, doublereal *, doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *);
- static doublereal delsq2, dtnsq1;
- static logical swtch3;
-
- static logical orgati;
- static doublereal erretm, dtipsq, rhoinv;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- This subroutine computes the square root of the I-th updated
- eigenvalue of a positive symmetric rank-one modification to
- a positive diagonal matrix whose entries are given as the squares
- of the corresponding entries in the array d. It is assumed that
-
- 0 <= D(i) < D(j) for i < j
-
- and that RHO > 0. This is arranged by the calling routine, and is
- no loss in generality. The rank-one modified system is thus
-
- diag( D ) * diag( D ) + RHO * Z * Z_transpose,
-
- where we assume the Euclidean norm of Z is 1.
-
- The method consists of approximating the rational functions in the
- secular equation by simpler interpolating rational functions.
-
- Arguments
- =========
-
- N (input) INTEGER
- The length of all arrays.
-
- I (input) INTEGER
- The index of the eigenvalue to be computed. 1 <= I <= N.
-
- D (input) DOUBLE PRECISION array, dimension ( N )
- The original eigenvalues. It is assumed that they are in
- order, 0 <= D(I) < D(J) for I < J.
-
- Z (input) DOUBLE PRECISION array, dimension ( N )
- The components of the updating vector.
-
- DELTA (output) DOUBLE PRECISION array, dimension ( N )
- If N .ne. 1, DELTA contains (D(j) - sigma_I) in its j-th
- component. If N = 1, then DELTA(1) = 1. The vector DELTA
- contains the information necessary to construct the
- (singular) eigenvectors.
-
- RHO (input) DOUBLE PRECISION
- The scalar in the symmetric updating formula.
-
- SIGMA (output) DOUBLE PRECISION
- The computed sigma_I, the square root of the I-th updated eigenvalue.
-
- WORK (workspace) DOUBLE PRECISION array, dimension ( N )
- If N .ne. 1, WORK contains (D(j) + sigma_I) in its j-th
- component. If N = 1, then WORK( 1 ) = 1.
-
- INFO (output) INTEGER
- = 0: successful exit
- > 0: if INFO = 1, the updating process failed.
-
- Internal Parameters
- ===================
-
- Logical variable ORGATI (origin-at-i?) is used for distinguishing
- whether D(i) or D(i+1) is treated as the origin.
-
- ORGATI = .true. origin at i
- ORGATI = .false. origin at i+1
-
- Logical variable SWTCH3 (switch-for-3-poles?) is for noting
- if we are working with THREE poles!
-
- MAXIT is the maximum number of iterations allowed for each
- eigenvalue.
-
- Further Details
- ===============
-
- Based on contributions by
- Ren-Cang Li, Computer Science Division, University of California
- at Berkeley, USA
-
- =====================================================================
-
-
- Since this routine is called in an inner loop, we do no argument
- checking.
-
- Quick return for N=1 and 2.
-*/
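/*
   Editorial sketch, not from the original file: each interpolation step
   below reduces to a root of a quadratic c*eta**2 - a*eta + b = 0, and
   the code always picks whichever of two algebraically equivalent
   formulas for that root avoids subtracting nearly equal quantities.
   That choice in isolation (mirrors the a <= 0 / a > 0 branches below):
*/
#include <math.h>   /* fabs, sqrt */

static double stable_quadratic_root(double a, double b, double c)
{
    /* clamp a tiny negative discriminant produced by roundoff */
    double s = sqrt(fabs(a * a - 4. * b * c));

    if (a <= 0.)
        return (a - s) / (2. * c);   /* no cancellation when a <= 0 */
    return 2. * b / (a + s);         /* same root; safe when a > 0 */
}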
-
- /* Parameter adjustments */
- --work;
- --delta;
- --z__;
- --d__;
-
- /* Function Body */
- *info = 0;
- if (*n == 1) {
-
-/* Presumably, I=1 upon entry */
-
- *sigma = sqrt(d__[1] * d__[1] + *rho * z__[1] * z__[1]);
- delta[1] = 1.;
- work[1] = 1.;
- return 0;
- }
- if (*n == 2) {
- dlasd5_(i__, &d__[1], &z__[1], &delta[1], rho, sigma, &work[1]);
- return 0;
- }
-
-/* Compute machine epsilon */
-
- eps = EPSILON;
- rhoinv = 1. / *rho;
-
-/* The case I = N */
-
- if (*i__ == *n) {
-
-/* Initialize some basic variables */
-
- ii = *n - 1;
- niter = 1;
-
-/* Calculate initial guess */
-
- temp = *rho / 2.;
-
-/*
- If ||Z||_2 is not one, then TEMP should be set to
- RHO * ||Z||_2^2 / TWO
-*/
-
- temp1 = temp / (d__[*n] + sqrt(d__[*n] * d__[*n] + temp));
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- work[j] = d__[j] + d__[*n] + temp1;
- delta[j] = d__[j] - d__[*n] - temp1;
-/* L10: */
- }
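/*
   Editorial sketch, not from the original file: steps are computed in
   sigma**2 but applied in sigma, using the cancellation-free identity
   employed just above (and repeatedly below):

       sqrt(sigma**2 + tau) - sigma = tau / (sigma + sqrt(sigma**2 + tau)).
*/
#include <math.h>   /* sqrt */

static double sigma_step(double sigma, double tau)
{
    /* returns sigma_new - sigma, where sigma_new**2 = sigma**2 + tau */
    return tau / (sigma + sqrt(sigma * sigma + tau));
}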
-
- psi = 0.;
- i__1 = *n - 2;
- for (j = 1; j <= i__1; ++j) {
- psi += z__[j] * z__[j] / (delta[j] * work[j]);
-/* L20: */
- }
-
- c__ = rhoinv + psi;
- w = c__ + z__[ii] * z__[ii] / (delta[ii] * work[ii]) + z__[*n] * z__[*
- n] / (delta[*n] * work[*n]);
-
- if (w <= 0.) {
- temp1 = sqrt(d__[*n] * d__[*n] + *rho);
- temp = z__[*n - 1] * z__[*n - 1] / ((d__[*n - 1] + temp1) * (d__[*
- n] - d__[*n - 1] + *rho / (d__[*n] + temp1))) + z__[*n] *
- z__[*n] / *rho;
-
-/*
- The following TAU is to approximate
- SIGMA_n^2 - D( N )*D( N )
-*/
-
- if (c__ <= temp) {
- tau = *rho;
- } else {
- delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
- a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*
- n];
- b = z__[*n] * z__[*n] * delsq;
- if (a < 0.) {
- tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
- } else {
- tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
- }
- }
-
-/*
- It can be proved that
- D(N)^2+RHO/2 <= SIGMA_n^2 < D(N)^2+TAU <= D(N)^2+RHO
-*/
-
- } else {
- delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
- a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
- b = z__[*n] * z__[*n] * delsq;
-
-/*
- The following TAU is to approximate
- SIGMA_n^2 - D( N )*D( N )
-*/
-
- if (a < 0.) {
- tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
- } else {
- tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
- }
-
-/*
- It can be proved that
- D(N)^2 < D(N)^2+TAU < SIGMA(N)^2 < D(N)^2+RHO/2
-*/
-
- }
-
-/* The following ETA is to approximate SIGMA_n - D( N ) */
-
- eta = tau / (d__[*n] + sqrt(d__[*n] * d__[*n] + tau));
-
- *sigma = d__[*n] + eta;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] = d__[j] - d__[*i__] - eta;
- work[j] = d__[j] + d__[*i__] + eta;
-/* L30: */
- }
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = ii;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / (delta[j] * work[j]);
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L40: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- temp = z__[*n] / (delta[*n] * work[*n]);
- phi = z__[*n] * temp;
- dphi = temp * temp;
- erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
- + dphi);
-
- w = rhoinv + phi + psi;
-
-/* Test for convergence */
-
- if (abs(w) <= eps * erretm) {
- goto L240;
- }
-
-/* Calculate the new step */
-
- ++niter;
- dtnsq1 = work[*n - 1] * delta[*n - 1];
- dtnsq = work[*n] * delta[*n];
- c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
- a = (dtnsq + dtnsq1) * w - dtnsq * dtnsq1 * (dpsi + dphi);
- b = dtnsq * dtnsq1 * w;
- if (c__ < 0.) {
- c__ = abs(c__);
- }
- if (c__ == 0.) {
- eta = *rho - *sigma * *sigma;
- } else if (a >= 0.) {
- eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__
- * 2.);
- } else {
- eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
- );
- }
-
-/*
- Note, eta should be positive if w is negative, and
- eta should be negative otherwise. However,
- if for some reason caused by roundoff, eta*w > 0,
- we simply use one Newton step instead. This way
- will guarantee eta*w < 0.
-*/
-
- if (w * eta > 0.) {
- eta = -w / (dpsi + dphi);
- }
- temp = eta - dtnsq;
- if (temp > *rho) {
- eta = *rho + dtnsq;
- }
-
- tau += eta;
- eta /= *sigma + sqrt(eta + *sigma * *sigma);
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] -= eta;
- work[j] += eta;
-/* L50: */
- }
-
- *sigma += eta;
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = ii;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / (work[j] * delta[j]);
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L60: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- temp = z__[*n] / (work[*n] * delta[*n]);
- phi = z__[*n] * temp;
- dphi = temp * temp;
- erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
- + dphi);
-
- w = rhoinv + phi + psi;
-
-/* Main loop to update the values of the array DELTA */
-
- iter = niter + 1;
-
- for (niter = iter; niter <= MAXITERLOOPS; ++niter) {
-
-/* Test for convergence */
-
- if (abs(w) <= eps * erretm) {
- goto L240;
- }
-
-/* Calculate the new step */
-
- dtnsq1 = work[*n - 1] * delta[*n - 1];
- dtnsq = work[*n] * delta[*n];
- c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
- a = (dtnsq + dtnsq1) * w - dtnsq1 * dtnsq * (dpsi + dphi);
- b = dtnsq1 * dtnsq * w;
- if (a >= 0.) {
- eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
- c__ * 2.);
- } else {
- eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(
- d__1))));
- }
-
-/*
- Note, eta should be positive if w is negative, and
- eta should be negative otherwise. However,
- if for some reason caused by roundoff, eta*w > 0,
- we simply use one Newton step instead. This way
- will guarantee eta*w < 0.
-*/
-
- if (w * eta > 0.) {
- eta = -w / (dpsi + dphi);
- }
- temp = eta - dtnsq;
- if (temp <= 0.) {
- eta /= 2.;
- }
-
- tau += eta;
- eta /= *sigma + sqrt(eta + *sigma * *sigma);
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- delta[j] -= eta;
- work[j] += eta;
-/* L70: */
- }
-
- *sigma += eta;
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = ii;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / (work[j] * delta[j]);
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L80: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- temp = z__[*n] / (work[*n] * delta[*n]);
- phi = z__[*n] * temp;
- dphi = temp * temp;
- erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (
- dpsi + dphi);
-
- w = rhoinv + phi + psi;
-/* L90: */
- }
-
-/* Return with INFO = 1, NITER = MAXIT and not converged */
-
- *info = 1;
- goto L240;
-
-/* End for the case I = N */
-
- } else {
-
-/* The case for I < N */
-
- niter = 1;
- ip1 = *i__ + 1;
-
-/* Calculate initial guess */
-
- delsq = (d__[ip1] - d__[*i__]) * (d__[ip1] + d__[*i__]);
- delsq2 = delsq / 2.;
- temp = delsq2 / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + delsq2));
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- work[j] = d__[j] + d__[*i__] + temp;
- delta[j] = d__[j] - d__[*i__] - temp;
-/* L100: */
- }
-
- psi = 0.;
- i__1 = *i__ - 1;
- for (j = 1; j <= i__1; ++j) {
- psi += z__[j] * z__[j] / (work[j] * delta[j]);
-/* L110: */
- }
-
- phi = 0.;
- i__1 = *i__ + 2;
- for (j = *n; j >= i__1; --j) {
- phi += z__[j] * z__[j] / (work[j] * delta[j]);
-/* L120: */
- }
- c__ = rhoinv + psi + phi;
- w = c__ + z__[*i__] * z__[*i__] / (work[*i__] * delta[*i__]) + z__[
- ip1] * z__[ip1] / (work[ip1] * delta[ip1]);
-
- if (w > 0.) {
-
-/*
- d(i)^2 < the ith sigma^2 < (d(i)^2+d(i+1)^2)/2
-
- We choose d(i) as origin.
-*/
-
- orgati = TRUE_;
- sg2lb = 0.;
- sg2ub = delsq2;
- a = c__ * delsq + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
- b = z__[*i__] * z__[*i__] * delsq;
- if (a > 0.) {
- tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
- d__1))));
- } else {
- tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
- c__ * 2.);
- }
-
-/*
- TAU now is an estimate of SIGMA^2 - D( I )^2. The
- following, however, is the corresponding estimate of
- SIGMA - D( I ).
-*/
-
- eta = tau / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + tau));
- } else {
-
-/*
- (d(i)^2+d(i+1)^2)/2 <= the ith sigma^2 < d(i+1)^2
-
- We choose d(i+1) as origin.
-*/
-
- orgati = FALSE_;
- sg2lb = -delsq2;
- sg2ub = 0.;
- a = c__ * delsq - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
- b = z__[ip1] * z__[ip1] * delsq;
- if (a < 0.) {
- tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs(
- d__1))));
- } else {
- tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) /
- (c__ * 2.);
- }
-
-/*
- TAU now is an estimate of SIGMA^2 - D( IP1 )^2. The
- following, however, is the corresponding estimate of
- SIGMA - D( IP1 ).
-*/
-
- eta = tau / (d__[ip1] + sqrt((d__1 = d__[ip1] * d__[ip1] + tau,
- abs(d__1))));
- }
-
- if (orgati) {
- ii = *i__;
- *sigma = d__[*i__] + eta;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- work[j] = d__[j] + d__[*i__] + eta;
- delta[j] = d__[j] - d__[*i__] - eta;
-/* L130: */
- }
- } else {
- ii = *i__ + 1;
- *sigma = d__[ip1] + eta;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- work[j] = d__[j] + d__[ip1] + eta;
- delta[j] = d__[j] - d__[ip1] - eta;
-/* L140: */
- }
- }
- iim1 = ii - 1;
- iip1 = ii + 1;
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = iim1;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / (work[j] * delta[j]);
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L150: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- dphi = 0.;
- phi = 0.;
- i__1 = iip1;
- for (j = *n; j >= i__1; --j) {
- temp = z__[j] / (work[j] * delta[j]);
- phi += z__[j] * temp;
- dphi += temp * temp;
- erretm += phi;
-/* L160: */
- }
-
- w = rhoinv + phi + psi;
-
-/*
- W is the value of the secular function with
- its ii-th element removed.
-*/
-
- swtch3 = FALSE_;
- if (orgati) {
- if (w < 0.) {
- swtch3 = TRUE_;
- }
- } else {
- if (w > 0.) {
- swtch3 = TRUE_;
- }
- }
- if (ii == 1 || ii == *n) {
- swtch3 = FALSE_;
- }
-
- temp = z__[ii] / (work[ii] * delta[ii]);
- dw = dpsi + dphi + temp * temp;
- temp = z__[ii] * temp;
- w += temp;
- erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. +
- abs(tau) * dw;
-
-/* Test for convergence */
-
- if (abs(w) <= eps * erretm) {
- goto L240;
- }
-
- if (w <= 0.) {
- sg2lb = max(sg2lb,tau);
- } else {
- sg2ub = min(sg2ub,tau);
- }
-
-/* Calculate the new step */
-
- ++niter;
- if (! swtch3) {
- dtipsq = work[ip1] * delta[ip1];
- dtisq = work[*i__] * delta[*i__];
- if (orgati) {
-/* Computing 2nd power */
- d__1 = z__[*i__] / dtisq;
- c__ = w - dtipsq * dw + delsq * (d__1 * d__1);
- } else {
-/* Computing 2nd power */
- d__1 = z__[ip1] / dtipsq;
- c__ = w - dtisq * dw - delsq * (d__1 * d__1);
- }
- a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
- b = dtipsq * dtisq * w;
- if (c__ == 0.) {
- if (a == 0.) {
- if (orgati) {
- a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi +
- dphi);
- } else {
- a = z__[ip1] * z__[ip1] + dtisq * dtisq * (dpsi +
- dphi);
- }
- }
- eta = b / a;
- } else if (a <= 0.) {
- eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
- c__ * 2.);
- } else {
- eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
- d__1))));
- }
- } else {
-
-/* Interpolation using THREE most relevant poles */
-
- dtiim = work[iim1] * delta[iim1];
- dtiip = work[iip1] * delta[iip1];
- temp = rhoinv + psi + phi;
- if (orgati) {
- temp1 = z__[iim1] / dtiim;
- temp1 *= temp1;
- c__ = temp - dtiip * (dpsi + dphi) - (d__[iim1] - d__[iip1]) *
- (d__[iim1] + d__[iip1]) * temp1;
- zz[0] = z__[iim1] * z__[iim1];
- if (dpsi < temp1) {
- zz[2] = dtiip * dtiip * dphi;
- } else {
- zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
- }
- } else {
- temp1 = z__[iip1] / dtiip;
- temp1 *= temp1;
- c__ = temp - dtiim * (dpsi + dphi) - (d__[iip1] - d__[iim1]) *
- (d__[iim1] + d__[iip1]) * temp1;
- if (dphi < temp1) {
- zz[0] = dtiim * dtiim * dpsi;
- } else {
- zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
- }
- zz[2] = z__[iip1] * z__[iip1];
- }
- zz[1] = z__[ii] * z__[ii];
- dd[0] = dtiim;
- dd[1] = delta[ii] * work[ii];
- dd[2] = dtiip;
- dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
- if (*info != 0) {
- goto L240;
- }
- }
-
-/*
- Note, eta should be positive if w is negative, and
- eta should be negative otherwise. However,
- if for some reason caused by roundoff, eta*w > 0,
- we simply use one Newton step instead. This way
- will guarantee eta*w < 0.
-*/
-
- if (w * eta >= 0.) {
- eta = -w / dw;
- }
- if (orgati) {
- temp1 = work[*i__] * delta[*i__];
- temp = eta - temp1;
- } else {
- temp1 = work[ip1] * delta[ip1];
- temp = eta - temp1;
- }
- if (temp > sg2ub || temp < sg2lb) {
- if (w < 0.) {
- eta = (sg2ub - tau) / 2.;
- } else {
- eta = (sg2lb - tau) / 2.;
- }
- }
-
- tau += eta;
- eta /= *sigma + sqrt(*sigma * *sigma + eta);
-
- prew = w;
-
- *sigma += eta;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- work[j] += eta;
- delta[j] -= eta;
-/* L170: */
- }
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = iim1;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / (work[j] * delta[j]);
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L180: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- dphi = 0.;
- phi = 0.;
- i__1 = iip1;
- for (j = *n; j >= i__1; --j) {
- temp = z__[j] / (work[j] * delta[j]);
- phi += z__[j] * temp;
- dphi += temp * temp;
- erretm += phi;
-/* L190: */
- }
-
- temp = z__[ii] / (work[ii] * delta[ii]);
- dw = dpsi + dphi + temp * temp;
- temp = z__[ii] * temp;
- w = rhoinv + phi + psi + temp;
- erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. +
- abs(tau) * dw;
-
- if (w <= 0.) {
- sg2lb = max(sg2lb,tau);
- } else {
- sg2ub = min(sg2ub,tau);
- }
-
- swtch = FALSE_;
- if (orgati) {
- if (-w > abs(prew) / 10.) {
- swtch = TRUE_;
- }
- } else {
- if (w > abs(prew) / 10.) {
- swtch = TRUE_;
- }
- }
-
-/* Main loop to update the values of the array DELTA and WORK */
-
- iter = niter + 1;
-
- for (niter = iter; niter <= MAXITERLOOPS; ++niter) {
-
-/* Test for convergence */
-
- if (abs(w) <= eps * erretm) {
- goto L240;
- }
-
-/* Calculate the new step */
-
- if (! swtch3) {
- dtipsq = work[ip1] * delta[ip1];
- dtisq = work[*i__] * delta[*i__];
- if (! swtch) {
- if (orgati) {
-/* Computing 2nd power */
- d__1 = z__[*i__] / dtisq;
- c__ = w - dtipsq * dw + delsq * (d__1 * d__1);
- } else {
-/* Computing 2nd power */
- d__1 = z__[ip1] / dtipsq;
- c__ = w - dtisq * dw - delsq * (d__1 * d__1);
- }
- } else {
- temp = z__[ii] / (work[ii] * delta[ii]);
- if (orgati) {
- dpsi += temp * temp;
- } else {
- dphi += temp * temp;
- }
- c__ = w - dtisq * dpsi - dtipsq * dphi;
- }
- a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
- b = dtipsq * dtisq * w;
- if (c__ == 0.) {
- if (a == 0.) {
- if (! swtch) {
- if (orgati) {
- a = z__[*i__] * z__[*i__] + dtipsq * dtipsq *
- (dpsi + dphi);
- } else {
- a = z__[ip1] * z__[ip1] + dtisq * dtisq * (
- dpsi + dphi);
- }
- } else {
- a = dtisq * dtisq * dpsi + dtipsq * dtipsq * dphi;
- }
- }
- eta = b / a;
- } else if (a <= 0.) {
- eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))))
- / (c__ * 2.);
- } else {
- eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__,
- abs(d__1))));
- }
- } else {
-
-/* Interpolation using THREE most relevant poles */
-
- dtiim = work[iim1] * delta[iim1];
- dtiip = work[iip1] * delta[iip1];
- temp = rhoinv + psi + phi;
- if (swtch) {
- c__ = temp - dtiim * dpsi - dtiip * dphi;
- zz[0] = dtiim * dtiim * dpsi;
- zz[2] = dtiip * dtiip * dphi;
- } else {
- if (orgati) {
- temp1 = z__[iim1] / dtiim;
- temp1 *= temp1;
- temp2 = (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[
- iip1]) * temp1;
- c__ = temp - dtiip * (dpsi + dphi) - temp2;
- zz[0] = z__[iim1] * z__[iim1];
- if (dpsi < temp1) {
- zz[2] = dtiip * dtiip * dphi;
- } else {
- zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
- }
- } else {
- temp1 = z__[iip1] / dtiip;
- temp1 *= temp1;
- temp2 = (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[
- iip1]) * temp1;
- c__ = temp - dtiim * (dpsi + dphi) - temp2;
- if (dphi < temp1) {
- zz[0] = dtiim * dtiim * dpsi;
- } else {
- zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
- }
- zz[2] = z__[iip1] * z__[iip1];
- }
- }
- dd[0] = dtiim;
- dd[1] = delta[ii] * work[ii];
- dd[2] = dtiip;
- dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
- if (*info != 0) {
- goto L240;
- }
- }
-
-/*
- Note, eta should be positive if w is negative, and
- eta should be negative otherwise. However,
- if for some reason caused by roundoff, eta*w > 0,
- we simply use one Newton step instead. This way
- will guarantee eta*w < 0.
-*/
-
- if (w * eta >= 0.) {
- eta = -w / dw;
- }
- if (orgati) {
- temp1 = work[*i__] * delta[*i__];
- temp = eta - temp1;
- } else {
- temp1 = work[ip1] * delta[ip1];
- temp = eta - temp1;
- }
- if (temp > sg2ub || temp < sg2lb) {
- if (w < 0.) {
- eta = (sg2ub - tau) / 2.;
- } else {
- eta = (sg2lb - tau) / 2.;
- }
- }
-
- tau += eta;
- eta /= *sigma + sqrt(*sigma * *sigma + eta);
-
- *sigma += eta;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- work[j] += eta;
- delta[j] -= eta;
-/* L200: */
- }
-
- prew = w;
-
-/* Evaluate PSI and the derivative DPSI */
-
- dpsi = 0.;
- psi = 0.;
- erretm = 0.;
- i__1 = iim1;
- for (j = 1; j <= i__1; ++j) {
- temp = z__[j] / (work[j] * delta[j]);
- psi += z__[j] * temp;
- dpsi += temp * temp;
- erretm += psi;
-/* L210: */
- }
- erretm = abs(erretm);
-
-/* Evaluate PHI and the derivative DPHI */
-
- dphi = 0.;
- phi = 0.;
- i__1 = iip1;
- for (j = *n; j >= i__1; --j) {
- temp = z__[j] / (work[j] * delta[j]);
- phi += z__[j] * temp;
- dphi += temp * temp;
- erretm += phi;
-/* L220: */
- }
-
- temp = z__[ii] / (work[ii] * delta[ii]);
- dw = dpsi + dphi + temp * temp;
- temp = z__[ii] * temp;
- w = rhoinv + phi + psi + temp;
- erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3.
- + abs(tau) * dw;
- if ((w * prew > 0. && abs(w) > abs(prew) / 10.)) {
- swtch = ! swtch;
- }
-
- if (w <= 0.) {
- sg2lb = max(sg2lb,tau);
- } else {
- sg2ub = min(sg2ub,tau);
- }
-
-/* L230: */
- }
-
-/* Return with INFO = 1, NITER = MAXIT and not converged */
-
- *info = 1;
-
- }
-
-L240:
- return 0;
-
-/* End of DLASD4 */
-
-} /* dlasd4_ */
-
-/* Subroutine */ int dlasd5_(integer *i__, doublereal *d__, doublereal *z__,
- doublereal *delta, doublereal *rho, doublereal *dsigma, doublereal *
- work)
-{
- /* System generated locals */
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal b, c__, w, del, tau, delsq;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- This subroutine computes the square root of the I-th eigenvalue
- of a positive symmetric rank-one modification of a 2-by-2 diagonal
- matrix
-
- diag( D ) * diag( D ) + RHO * Z * transpose(Z).
-
- The diagonal entries in the array D are assumed to satisfy
-
- 0 <= D(i) < D(j) for i < j .
-
- We also assume RHO > 0 and that the Euclidean norm of the vector
- Z is one.
-
- Arguments
- =========
-
- I (input) INTEGER
- The index of the eigenvalue to be computed. I = 1 or I = 2.
-
- D (input) DOUBLE PRECISION array, dimension ( 2 )
- The original eigenvalues. We assume 0 <= D(1) < D(2).
-
- Z (input) DOUBLE PRECISION array, dimension ( 2 )
- The components of the updating vector.
-
- DELTA (output) DOUBLE PRECISION array, dimension ( 2 )
- Contains (D(j) - sigma_I) in its j-th component.
- The vector DELTA contains the information necessary
- to construct the eigenvectors.
-
- RHO (input) DOUBLE PRECISION
- The scalar in the symmetric updating formula.
-
- DSIGMA (output) DOUBLE PRECISION
- The computed sigma_I, the square root of the I-th updated eigenvalue.
-
- WORK (workspace) DOUBLE PRECISION array, dimension ( 2 )
- WORK contains (D(j) + sigma_I) in its j-th component.
-
- Further Details
- ===============
-
- Based on contributions by
- Ren-Cang Li, Computer Science Division, University of California
- at Berkeley, USA
-
- =====================================================================
-*/
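/*
   Editorial sketch, not from the original file: for the 2-by-2 case the
   secular equation collapses to a quadratic in tau,

       tau**2 - b*tau - c = 0,   b = -delsq + rho*(z1**2 + z2**2),
                                 c = rho * z2**2 * delsq,

   for the I = 2 root, and the branches below evaluate the appropriate
   root in whichever form avoids cancellation. The largest-root solve in
   isolation (mirrors the I = 2 branch):
*/
#include <math.h>   /* sqrt */

static double quad_largest_root(double b, double c)
{
    /* largest root of tau**2 - b*tau - c = 0, assuming c >= 0 */
    if (b > 0.)
        return (b + sqrt(b * b + 4. * c)) / 2.;
    return 2. * c / (-b + sqrt(b * b + 4. * c));
}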
-
-
- /* Parameter adjustments */
- --work;
- --delta;
- --z__;
- --d__;
-
- /* Function Body */
- del = d__[2] - d__[1];
- delsq = del * (d__[2] + d__[1]);
- if (*i__ == 1) {
- w = *rho * 4. * (z__[2] * z__[2] / (d__[1] + d__[2] * 3.) - z__[1] *
- z__[1] / (d__[1] * 3. + d__[2])) / del + 1.;
- if (w > 0.) {
- b = delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
- c__ = *rho * z__[1] * z__[1] * delsq;
-
-/*
- B > ZERO, always
-
- The following TAU is DSIGMA * DSIGMA - D( 1 ) * D( 1 )
-*/
-
- tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1))));
-
-/* The following TAU is DSIGMA - D( 1 ) */
-
- tau /= d__[1] + sqrt(d__[1] * d__[1] + tau);
- *dsigma = d__[1] + tau;
- delta[1] = -tau;
- delta[2] = del - tau;
- work[1] = d__[1] * 2. + tau;
- work[2] = d__[1] + tau + d__[2];
-/*
- DELTA( 1 ) = -Z( 1 ) / TAU
- DELTA( 2 ) = Z( 2 ) / ( DEL-TAU )
-*/
- } else {
- b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
- c__ = *rho * z__[2] * z__[2] * delsq;
-
-/* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
-
- if (b > 0.) {
- tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.));
- } else {
- tau = (b - sqrt(b * b + c__ * 4.)) / 2.;
- }
-
-/* The following TAU is DSIGMA - D( 2 ) */
-
- tau /= d__[2] + sqrt((d__1 = d__[2] * d__[2] + tau, abs(d__1)));
- *dsigma = d__[2] + tau;
- delta[1] = -(del + tau);
- delta[2] = -tau;
- work[1] = d__[1] + tau + d__[2];
- work[2] = d__[2] * 2. + tau;
-/*
- DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU )
- DELTA( 2 ) = -Z( 2 ) / TAU
-*/
- }
-/*
- TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) )
- DELTA( 1 ) = DELTA( 1 ) / TEMP
- DELTA( 2 ) = DELTA( 2 ) / TEMP
-*/
- } else {
-
-/* Now I=2 */
-
- b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
- c__ = *rho * z__[2] * z__[2] * delsq;
-
-/* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
-
- if (b > 0.) {
- tau = (b + sqrt(b * b + c__ * 4.)) / 2.;
- } else {
- tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.));
- }
-
-/* The following TAU is DSIGMA - D( 2 ) */
-
- tau /= d__[2] + sqrt(d__[2] * d__[2] + tau);
- *dsigma = d__[2] + tau;
- delta[1] = -(del + tau);
- delta[2] = -tau;
- work[1] = d__[1] + tau + d__[2];
- work[2] = d__[2] * 2. + tau;
-/*
- DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU )
- DELTA( 2 ) = -Z( 2 ) / TAU
- TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) )
- DELTA( 1 ) = DELTA( 1 ) / TEMP
- DELTA( 2 ) = DELTA( 2 ) / TEMP
-*/
- }
- return 0;
-
-/* End of DLASD5 */
-
-} /* dlasd5_ */
-
-/* Subroutine */ int dlasd6_(integer *icompq, integer *nl, integer *nr,
- integer *sqre, doublereal *d__, doublereal *vf, doublereal *vl,
- doublereal *alpha, doublereal *beta, integer *idxq, integer *perm,
- integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum,
- integer *ldgnum, doublereal *poles, doublereal *difl, doublereal *
- difr, doublereal *z__, integer *k, doublereal *c__, doublereal *s,
- doublereal *work, integer *iwork, integer *info)
-{
- /* System generated locals */
- integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset,
- poles_dim1, poles_offset, i__1;
- doublereal d__1, d__2;
-
- /* Local variables */
- static integer i__, m, n, n1, n2, iw, idx, idxc, idxp, ivfw, ivlw;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *), dlasd7_(integer *, integer *, integer *,
- integer *, integer *, doublereal *, doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *, integer *, integer *,
- integer *, integer *, integer *, integer *, integer *, doublereal
- *, integer *, doublereal *, doublereal *, integer *), dlasd8_(
- integer *, integer *, doublereal *, doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *, integer *, doublereal *,
- doublereal *, integer *), dlascl_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, integer *, doublereal *,
- integer *, integer *), dlamrg_(integer *, integer *,
- doublereal *, integer *, integer *, integer *);
- static integer isigma;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static doublereal orgnrm;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLASD6 computes the SVD of an updated upper bidiagonal matrix B
- obtained by merging two smaller ones by appending a row. This
- routine is used only for the problem which requires all singular
- values and optionally singular vector matrices in factored form.
- B is an N-by-M matrix with N = NL + NR + 1 and M = N + SQRE.
- A related subroutine, DLASD1, handles the case in which all singular
- values and singular vectors of the bidiagonal matrix are desired.
-
- DLASD6 computes the SVD as follows:
-
- ( D1(in) 0 0 0 )
- B = U(in) * ( Z1' a Z2' b ) * VT(in)
- ( 0 0 D2(in) 0 )
-
- = U(out) * ( D(out) 0) * VT(out)
-
- where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M
- with ALPHA and BETA in the (NL+1)-th and (NL+2)-th entries and zeros
- elsewhere; and the entry b is empty if SQRE = 0.
-
- The singular values of B can be computed using D1, D2, the first
- components of all the right singular vectors of the lower block, and
- the last components of all the right singular vectors of the upper
- block. These components are stored and updated in VF and VL,
- respectively, in DLASD6. Hence U and VT are not explicitly
- referenced.
-
- The singular values are stored in D. The algorithm consists of two
- stages:
-
- The first stage consists of deflating the size of the problem
- when there are multiple singular values or if there is a zero
- in the Z vector. For each such occurrence the dimension of the
- secular equation problem is reduced by one. This stage is
- performed by the routine DLASD7.
-
- The second stage consists of calculating the updated
- singular values. This is done by finding the roots of the
- secular equation via the routine DLASD4 (as called by DLASD8).
- This routine also updates VF and VL and computes the distances
- between the updated singular values and the old singular
- values.
-
- DLASD6 is called from DLASDA.
-
- Arguments
- =========
-
- ICOMPQ (input) INTEGER
- Specifies whether singular vectors are to be computed in
- factored form:
- = 0: Compute singular values only.
- = 1: Compute singular vectors in factored form as well.
-
- NL (input) INTEGER
- The row dimension of the upper block. NL >= 1.
-
- NR (input) INTEGER
- The row dimension of the lower block. NR >= 1.
-
- SQRE (input) INTEGER
- = 0: the lower block is an NR-by-NR square matrix.
- = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
-
- The bidiagonal matrix has row dimension N = NL + NR + 1,
- and column dimension M = N + SQRE.
-
- D (input/output) DOUBLE PRECISION array, dimension ( NL+NR+1 ).
- On entry D(1:NL) contains the singular values of the
- upper block, and D(NL+2:N) contains the singular values
- of the lower block. On exit D(1:N) contains the singular
- values of the modified matrix.
-
- VF (input/output) DOUBLE PRECISION array, dimension ( M )
- On entry, VF(1:NL+1) contains the first components of all
- right singular vectors of the upper block; and VF(NL+2:M)
- contains the first components of all right singular vectors
- of the lower block. On exit, VF contains the first components
- of all right singular vectors of the bidiagonal matrix.
-
- VL (input/output) DOUBLE PRECISION array, dimension ( M )
- On entry, VL(1:NL+1) contains the last components of all
- right singular vectors of the upper block; and VL(NL+2:M)
- contains the last components of all right singular vectors of
- the lower block. On exit, VL contains the last components of
- all right singular vectors of the bidiagonal matrix.
-
- ALPHA (input) DOUBLE PRECISION
- Contains the diagonal element associated with the added row.
-
- BETA (input) DOUBLE PRECISION
- Contains the off-diagonal element associated with the added
- row.
-
- IDXQ (output) INTEGER array, dimension ( N )
- This contains the permutation which will reintegrate the
- subproblem just solved back into sorted order, i.e.
- D( IDXQ( I = 1, N ) ) will be in ascending order.
-
- PERM (output) INTEGER array, dimension ( N )
- The permutations (from deflation and sorting) to be applied
- to each block. Not referenced if ICOMPQ = 0.
-
- GIVPTR (output) INTEGER
- The number of Givens rotations which took place in this
- subproblem. Not referenced if ICOMPQ = 0.
-
- GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 )
- Each pair of numbers indicates a pair of columns that take
- part in a Givens rotation. Not referenced if ICOMPQ = 0.
-
- LDGCOL (input) INTEGER
- The leading dimension of GIVCOL, must be at least N.
-
- GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
- Each number indicates the C or S value to be used in the
- corresponding Givens rotation. Not referenced if ICOMPQ = 0.
-
- LDGNUM (input) INTEGER
- The leading dimension of GIVNUM and POLES, must be at least N.
-
- POLES (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
- On exit, POLES(1,*) is an array containing the new singular
- values obtained from solving the secular equation, and
- POLES(2,*) is an array containing the poles in the secular
- equation. Not referenced if ICOMPQ = 0.
-
- DIFL (output) DOUBLE PRECISION array, dimension ( N )
- On exit, DIFL(I) is the distance between I-th updated
- (undeflated) singular value and the I-th (undeflated) old
- singular value.
-
- DIFR (output) DOUBLE PRECISION array,
- dimension ( LDGNUM, 2 ) if ICOMPQ = 1 and
- dimension ( N ) if ICOMPQ = 0.
- On exit, DIFR(I, 1) is the distance between I-th updated
- (undeflated) singular value and the I+1-th (undeflated) old
- singular value.
-
- If ICOMPQ = 1, DIFR(1:K,2) is an array containing the
- normalizing factors for the right singular vector matrix.
-
- See DLASD8 for details on DIFL and DIFR.
-
- Z (output) DOUBLE PRECISION array, dimension ( M )
- The first elements of this array contain the components
- of the deflation-adjusted updating row vector.
-
- K (output) INTEGER
- Contains the dimension of the non-deflated matrix.
- This is the order of the related secular equation. 1 <= K <= N.
-
- C (output) DOUBLE PRECISION
- C contains garbage if SQRE = 0 and the C-value of a Givens
- rotation related to the right null space if SQRE = 1.
-
- S (output) DOUBLE PRECISION
- S contains garbage if SQRE = 0 and the S-value of a Givens
- rotation related to the right null space if SQRE = 1.
-
- WORK (workspace) DOUBLE PRECISION array, dimension ( 4 * M )
-
- IWORK (workspace) INTEGER array, dimension ( 3 * N )
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
- > 0: if INFO = 1, a singular value did not converge
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --vf;
- --vl;
- --idxq;
- --perm;
- givcol_dim1 = *ldgcol;
- givcol_offset = 1 + givcol_dim1 * 1;
- givcol -= givcol_offset;
- poles_dim1 = *ldgnum;
- poles_offset = 1 + poles_dim1 * 1;
- poles -= poles_offset;
- givnum_dim1 = *ldgnum;
- givnum_offset = 1 + givnum_dim1 * 1;
- givnum -= givnum_offset;
- --difl;
- --difr;
- --z__;
- --work;
- --iwork;
-
- /* Function Body */
- *info = 0;
- n = *nl + *nr + 1;
- m = n + *sqre;
-
- if (*icompq < 0 || *icompq > 1) {
- *info = -1;
- } else if (*nl < 1) {
- *info = -2;
- } else if (*nr < 1) {
- *info = -3;
- } else if (*sqre < 0 || *sqre > 1) {
- *info = -4;
- } else if (*ldgcol < n) {
- *info = -14;
- } else if (*ldgnum < n) {
- *info = -16;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLASD6", &i__1);
- return 0;
- }
-
-/*
- The following values are for bookkeeping purposes only. They are
- integer pointers which indicate the portion of the workspace
- used by a particular array in DLASD7 and DLASD8.
-*/
-
- isigma = 1;
- iw = isigma + n;
- ivfw = iw + m;
- ivlw = ivfw + m;
-
- idx = 1;
- idxc = idx + n;
- idxp = idxc + n;
-
-/*
- Scale.
-
- Computing MAX
-*/
- d__1 = abs(*alpha), d__2 = abs(*beta);
- orgnrm = max(d__1,d__2);
- d__[*nl + 1] = 0.;
- i__1 = n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if ((d__1 = d__[i__], abs(d__1)) > orgnrm) {
- orgnrm = (d__1 = d__[i__], abs(d__1));
- }
-/* L10: */
- }
- dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &n, &c__1, &d__[1], &n, info);
- *alpha /= orgnrm;
- *beta /= orgnrm;
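/*
   Editorial sketch, not from the original file: the surrounding code
   follows a scale / solve / unscale pattern -- normalize so the largest
   of |ALPHA|, |BETA| and |D(i)| is one, run the deflation and secular
   solves at unit scale, then scale the singular values back. In
   miniature, with illustrative names:
*/
#include <math.h>   /* fabs */

static void scaled_solve_sketch(double *d, int n, double *alpha, double *beta)
{
    double s = fabs(*alpha) > fabs(*beta) ? fabs(*alpha) : fabs(*beta);
    int i;

    for (i = 0; i < n; ++i)
        if (fabs(d[i]) > s)
            s = fabs(d[i]);
    if (s == 0.)
        return;                    /* nothing to scale */
    for (i = 0; i < n; ++i)
        d[i] /= s;                 /* now max magnitude is 1 */
    *alpha /= s;
    *beta /= s;
    /* ... deflate and solve the unit-scaled problem here ... */
    for (i = 0; i < n; ++i)
        d[i] *= s;                 /* restore the original scale */
}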
-
-/* Sort and Deflate singular values. */
-
- dlasd7_(icompq, nl, nr, sqre, k, &d__[1], &z__[1], &work[iw], &vf[1], &
- work[ivfw], &vl[1], &work[ivlw], alpha, beta, &work[isigma], &
- iwork[idx], &iwork[idxp], &idxq[1], &perm[1], givptr, &givcol[
- givcol_offset], ldgcol, &givnum[givnum_offset], ldgnum, c__, s,
- info);
-
-/* Solve Secular Equation, compute DIFL, DIFR, and update VF, VL. */
-
- dlasd8_(icompq, k, &d__[1], &z__[1], &vf[1], &vl[1], &difl[1], &difr[1],
- ldgnum, &work[isigma], &work[iw], info);
-
-/* Save the poles if ICOMPQ = 1. */
-
- if (*icompq == 1) {
- dcopy_(k, &d__[1], &c__1, &poles[poles_dim1 + 1], &c__1);
- dcopy_(k, &work[isigma], &c__1, &poles[((poles_dim1) << (1)) + 1], &
- c__1);
- }
-
-/* Unscale. */
-
- dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &n, &c__1, &d__[1], &n, info);
-
-/* Prepare the IDXQ sorting permutation. */
-
- n1 = *k;
- n2 = n - *k;
- dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
-
- return 0;
-
-/* End of DLASD6 */
-
-} /* dlasd6_ */
-
-/* Subroutine */ int dlasd7_(integer *icompq, integer *nl, integer *nr,
- integer *sqre, integer *k, doublereal *d__, doublereal *z__,
- doublereal *zw, doublereal *vf, doublereal *vfw, doublereal *vl,
- doublereal *vlw, doublereal *alpha, doublereal *beta, doublereal *
- dsigma, integer *idx, integer *idxp, integer *idxq, integer *perm,
- integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum,
- integer *ldgnum, doublereal *c__, doublereal *s, integer *info)
-{
- /* System generated locals */
- integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, i__1;
- doublereal d__1, d__2;
-
- /* Local variables */
- static integer i__, j, m, n, k2;
- static doublereal z1;
- static integer jp;
- static doublereal eps, tau, tol;
- static integer nlp1, nlp2, idxi, idxj;
- extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *);
- static integer idxjp;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static integer jprev;
-
- extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
- integer *, integer *, integer *), xerbla_(char *, integer *);
- static doublereal hlftol;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLASD7 merges the two sets of singular values together into a single
- sorted set. Then it tries to deflate the size of the problem. There
- are two ways in which deflation can occur: when two or more singular
- values are close together or when there is a tiny entry in the Z
- vector. For each such occurrence the order of the related
- secular equation problem is reduced by one.
-
- DLASD7 is called from DLASD6.
-
- Arguments
- =========
-
- ICOMPQ (input) INTEGER
- Specifies whether singular vectors are to be computed
- in compact form, as follows:
- = 0: Compute singular values only.
- = 1: Compute singular vectors of upper
- bidiagonal matrix in compact form.
-
- NL (input) INTEGER
- The row dimension of the upper block. NL >= 1.
-
- NR (input) INTEGER
- The row dimension of the lower block. NR >= 1.
-
- SQRE (input) INTEGER
- = 0: the lower block is an NR-by-NR square matrix.
- = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
-
- The bidiagonal matrix has
- N = NL + NR + 1 rows and
- M = N + SQRE >= N columns.
-
- K (output) INTEGER
- Contains the dimension of the non-deflated matrix, this is
- the order of the related secular equation. 1 <= K <= N.
-
- D (input/output) DOUBLE PRECISION array, dimension ( N )
- On entry D contains the singular values of the two submatrices
- to be combined. On exit D contains the trailing (N-K) updated
- singular values (those which were deflated) sorted into
- increasing order.
-
- Z (output) DOUBLE PRECISION array, dimension ( M )
- On exit Z contains the updating row vector in the secular
- equation.
-
- ZW (workspace) DOUBLE PRECISION array, dimension ( M )
- Workspace for Z.
-
- VF (input/output) DOUBLE PRECISION array, dimension ( M )
- On entry, VF(1:NL+1) contains the first components of all
- right singular vectors of the upper block; and VF(NL+2:M)
- contains the first components of all right singular vectors
- of the lower block. On exit, VF contains the first components
- of all right singular vectors of the bidiagonal matrix.
-
- VFW (workspace) DOUBLE PRECISION array, dimension ( M )
- Workspace for VF.
-
- VL (input/output) DOUBLE PRECISION array, dimension ( M )
- On entry, VL(1:NL+1) contains the last components of all
- right singular vectors of the upper block; and VL(NL+2:M)
- contains the last components of all right singular vectors
- of the lower block. On exit, VL contains the last components
- of all right singular vectors of the bidiagonal matrix.
-
- VLW (workspace) DOUBLE PRECISION array, dimension ( M )
- Workspace for VL.
-
- ALPHA (input) DOUBLE PRECISION
- Contains the diagonal element associated with the added row.
-
- BETA (input) DOUBLE PRECISION
- Contains the off-diagonal element associated with the added
- row.
-
- DSIGMA (output) DOUBLE PRECISION array, dimension ( N )
- Contains a copy of the diagonal elements (K-1 singular values
- and one zero) in the secular equation.
-
- IDX (workspace) INTEGER array, dimension ( N )
- This will contain the permutation used to sort the contents of
- D into ascending order.
-
- IDXP (workspace) INTEGER array, dimension ( N )
- This will contain the permutation used to place deflated
- values of D at the end of the array. On output IDXP(2:K)
- points to the nondeflated D-values and IDXP(K+1:N)
- points to the deflated singular values.
-
- IDXQ (input) INTEGER array, dimension ( N )
- This contains the permutation which separately sorts the two
- sub-problems in D into ascending order. Note that entries in
- the first half of this permutation must first be moved one
- position backward; and entries in the second half
- must first have NL+1 added to their values.
-
- PERM (output) INTEGER array, dimension ( N )
- The permutations (from deflation and sorting) to be applied
- to each singular block. Not referenced if ICOMPQ = 0.
-
- GIVPTR (output) INTEGER
- The number of Givens rotations which took place in this
- subproblem. Not referenced if ICOMPQ = 0.
-
- GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 )
-          Each pair of numbers indicates a pair of columns that take
-          part in a Givens rotation. Not referenced if ICOMPQ = 0.
-
- LDGCOL (input) INTEGER
- The leading dimension of GIVCOL, must be at least N.
-
- GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
- Each number indicates the C or S value to be used in the
- corresponding Givens rotation. Not referenced if ICOMPQ = 0.
-
- LDGNUM (input) INTEGER
- The leading dimension of GIVNUM, must be at least N.
-
- C (output) DOUBLE PRECISION
-          C contains garbage if SQRE = 0 and the C-value of a Givens
- rotation related to the right null space if SQRE = 1.
-
- S (output) DOUBLE PRECISION
-          S contains garbage if SQRE = 0 and the S-value of a Givens
- rotation related to the right null space if SQRE = 1.
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --z__;
- --zw;
- --vf;
- --vfw;
- --vl;
- --vlw;
- --dsigma;
- --idx;
- --idxp;
- --idxq;
- --perm;
- givcol_dim1 = *ldgcol;
- givcol_offset = 1 + givcol_dim1 * 1;
- givcol -= givcol_offset;
- givnum_dim1 = *ldgnum;
- givnum_offset = 1 + givnum_dim1 * 1;
- givnum -= givnum_offset;
-
- /* Function Body */
- *info = 0;
- n = *nl + *nr + 1;
- m = n + *sqre;
-
- if (*icompq < 0 || *icompq > 1) {
- *info = -1;
- } else if (*nl < 1) {
- *info = -2;
- } else if (*nr < 1) {
- *info = -3;
- } else if (*sqre < 0 || *sqre > 1) {
- *info = -4;
- } else if (*ldgcol < n) {
- *info = -22;
- } else if (*ldgnum < n) {
- *info = -24;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLASD7", &i__1);
- return 0;
- }
-
- nlp1 = *nl + 1;
- nlp2 = *nl + 2;
- if (*icompq == 1) {
- *givptr = 0;
- }
-
-/*
- Generate the first part of the vector Z and move the singular
- values in the first part of D one position backward.
-*/
-
- z1 = *alpha * vl[nlp1];
- vl[nlp1] = 0.;
- tau = vf[nlp1];
- for (i__ = *nl; i__ >= 1; --i__) {
- z__[i__ + 1] = *alpha * vl[i__];
- vl[i__] = 0.;
- vf[i__ + 1] = vf[i__];
- d__[i__ + 1] = d__[i__];
- idxq[i__ + 1] = idxq[i__] + 1;
-/* L10: */
- }
- vf[1] = tau;
-
-/* Generate the second part of the vector Z. */
-
- i__1 = m;
- for (i__ = nlp2; i__ <= i__1; ++i__) {
- z__[i__] = *beta * vf[i__];
- vf[i__] = 0.;
-/* L20: */
- }
-
-/* Sort the singular values into increasing order */
-
- i__1 = n;
- for (i__ = nlp2; i__ <= i__1; ++i__) {
- idxq[i__] += nlp1;
-/* L30: */
- }
-
-/* DSIGMA, ZW, VFW, and VLW are used as storage space. */
-
- i__1 = n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- dsigma[i__] = d__[idxq[i__]];
- zw[i__] = z__[idxq[i__]];
- vfw[i__] = vf[idxq[i__]];
- vlw[i__] = vl[idxq[i__]];
-/* L40: */
- }
-
- dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
-
- i__1 = n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- idxi = idx[i__] + 1;
- d__[i__] = dsigma[idxi];
- z__[i__] = zw[idxi];
- vf[i__] = vfw[idxi];
- vl[i__] = vlw[idxi];
-/* L50: */
- }
-
-/* Calculate the allowable deflation tolerance */
-
- eps = EPSILON;
-/* Computing MAX */
- d__1 = abs(*alpha), d__2 = abs(*beta);
- tol = max(d__1,d__2);
-/* Computing MAX */
- d__2 = (d__1 = d__[n], abs(d__1));
- tol = eps * 64. * max(d__2,tol);
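-/*
-   i.e. TOL = 64 * EPS * max( |ALPHA|, |BETA|, |D(N)| ), where D(N)
-   is the largest of the entries D(2:N) just sorted into increasing
-   order above.
-*/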
-
-/*
- There are 2 kinds of deflation -- first a value in the z-vector
- is small, second two (or more) singular values are very close
- together (their difference is small).
-
- If the value in the z-vector is small, we simply permute the
- array so that the corresponding singular value is moved to the
- end.
-
- If two values in the D-vector are close, we perform a two-sided
- rotation designed to make one of the corresponding z-vector
- entries zero, and then permute the array so that the deflated
- singular value is moved to the end.
-
- If there are multiple singular values then the problem deflates.
-    Here the number of equal singular values is found. As each equal
- singular value is found, an elementary reflector is computed to
- rotate the corresponding singular subspace so that the
- corresponding components of Z are zero in this new basis.
-*/
-
- *k = 1;
- k2 = n + 1;
- i__1 = n;
- for (j = 2; j <= i__1; ++j) {
- if ((d__1 = z__[j], abs(d__1)) <= tol) {
-
-/* Deflate due to small z component. */
-
- --k2;
- idxp[k2] = j;
- if (j == n) {
- goto L100;
- }
- } else {
- jprev = j;
- goto L70;
- }
-/* L60: */
- }
-L70:
- j = jprev;
-L80:
- ++j;
- if (j > n) {
- goto L90;
- }
- if ((d__1 = z__[j], abs(d__1)) <= tol) {
-
-/* Deflate due to small z component. */
-
- --k2;
- idxp[k2] = j;
- } else {
-
-/* Check if singular values are close enough to allow deflation. */
-
- if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) {
-
-/* Deflation is possible. */
-
- *s = z__[jprev];
- *c__ = z__[j];
-
-/*
- Find sqrt(a**2+b**2) without overflow or
- destructive underflow.
-*/
-
- tau = dlapy2_(c__, s);
- z__[j] = tau;
- z__[jprev] = 0.;
- *c__ /= tau;
- *s = -(*s) / tau;
-
-/* Record the appropriate Givens rotation */
-
- if (*icompq == 1) {
- ++(*givptr);
- idxjp = idxq[idx[jprev] + 1];
- idxj = idxq[idx[j] + 1];
- if (idxjp <= nlp1) {
- --idxjp;
- }
- if (idxj <= nlp1) {
- --idxj;
- }
- givcol[*givptr + ((givcol_dim1) << (1))] = idxjp;
- givcol[*givptr + givcol_dim1] = idxj;
- givnum[*givptr + ((givnum_dim1) << (1))] = *c__;
- givnum[*givptr + givnum_dim1] = *s;
- }
- drot_(&c__1, &vf[jprev], &c__1, &vf[j], &c__1, c__, s);
- drot_(&c__1, &vl[jprev], &c__1, &vl[j], &c__1, c__, s);
- --k2;
- idxp[k2] = jprev;
- jprev = j;
- } else {
- ++(*k);
- zw[*k] = z__[jprev];
- dsigma[*k] = d__[jprev];
- idxp[*k] = jprev;
- jprev = j;
- }
- }
- goto L80;
-L90:
-
-/* Record the last singular value. */
-
- ++(*k);
- zw[*k] = z__[jprev];
- dsigma[*k] = d__[jprev];
- idxp[*k] = jprev;
-
-L100:
-
-/*
- Sort the singular values into DSIGMA. The singular values which
- were not deflated go into the first K slots of DSIGMA, except
- that DSIGMA(1) is treated separately.
-*/
-
- i__1 = n;
- for (j = 2; j <= i__1; ++j) {
- jp = idxp[j];
- dsigma[j] = d__[jp];
- vfw[j] = vf[jp];
- vlw[j] = vl[jp];
-/* L110: */
- }
- if (*icompq == 1) {
- i__1 = n;
- for (j = 2; j <= i__1; ++j) {
- jp = idxp[j];
- perm[j] = idxq[idx[jp] + 1];
- if (perm[j] <= nlp1) {
- --perm[j];
- }
-/* L120: */
- }
- }
-
-/*
- The deflated singular values go back into the last N - K slots of
- D.
-*/
-
- i__1 = n - *k;
- dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
-
-/*
- Determine DSIGMA(1), DSIGMA(2), Z(1), VF(1), VL(1), VF(M), and
- VL(M).
-*/
-
- dsigma[1] = 0.;
- hlftol = tol / 2.;
- if (abs(dsigma[2]) <= hlftol) {
- dsigma[2] = hlftol;
- }
- if (m > n) {
- z__[1] = dlapy2_(&z1, &z__[m]);
- if (z__[1] <= tol) {
- *c__ = 1.;
- *s = 0.;
- z__[1] = tol;
- } else {
- *c__ = z1 / z__[1];
- *s = -z__[m] / z__[1];
- }
- drot_(&c__1, &vf[m], &c__1, &vf[1], &c__1, c__, s);
- drot_(&c__1, &vl[m], &c__1, &vl[1], &c__1, c__, s);
- } else {
- if (abs(z1) <= tol) {
- z__[1] = tol;
- } else {
- z__[1] = z1;
- }
- }
-
-/* Restore Z, VF, and VL. */
-
- i__1 = *k - 1;
- dcopy_(&i__1, &zw[2], &c__1, &z__[2], &c__1);
- i__1 = n - 1;
- dcopy_(&i__1, &vfw[2], &c__1, &vf[2], &c__1);
- i__1 = n - 1;
- dcopy_(&i__1, &vlw[2], &c__1, &vl[2], &c__1);
-
- return 0;
-
-/* End of DLASD7 */
-
-} /* dlasd7_ */
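-
-/*
-   The deflation branch in DLASD7 above leans on dlapy2_ to form
-   sqrt(a**2 + b**2) without overflow or destructive underflow.  A
-   minimal standalone sketch of that idea (illustrative only, not the
-   dlapy2_ source):
-*/
-
-#include <math.h>
-
-static double hypot_sketch(double x, double y)
-{
-    double w, z, t;
-    x = fabs(x);
-    y = fabs(y);
-    w = x > y ? x : y;          /* larger magnitude */
-    z = x > y ? y : x;          /* smaller magnitude */
-    if (z == 0.) {
-        return w;               /* avoids 0/0 when both are zero */
-    }
-    t = z / w;                  /* t <= 1, so t * t cannot overflow */
-    return w * sqrt(t * t + 1.);
-}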
-
-/* Subroutine */ int dlasd8_(integer *icompq, integer *k, doublereal *d__,
- doublereal *z__, doublereal *vf, doublereal *vl, doublereal *difl,
- doublereal *difr, integer *lddifr, doublereal *dsigma, doublereal *
- work, integer *info)
-{
- /* System generated locals */
- integer difr_dim1, difr_offset, i__1, i__2;
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double sqrt(doublereal), d_sign(doublereal *, doublereal *);
-
- /* Local variables */
- static integer i__, j;
- static doublereal dj, rho;
- static integer iwk1, iwk2, iwk3;
- extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
- integer *);
- static doublereal temp;
- extern doublereal dnrm2_(integer *, doublereal *, integer *);
- static integer iwk2i, iwk3i;
- static doublereal diflj, difrj, dsigj;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- extern doublereal dlamc3_(doublereal *, doublereal *);
- extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, integer *), dlascl_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, integer *, doublereal *,
- integer *, integer *), dlaset_(char *, integer *, integer
- *, doublereal *, doublereal *, doublereal *, integer *),
- xerbla_(char *, integer *);
- static doublereal dsigjp;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
- Courant Institute, NAG Ltd., and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLASD8 finds the square roots of the roots of the secular equation,
- as defined by the values in DSIGMA and Z. It makes the appropriate
- calls to DLASD4, and stores, for each element in D, the distance
- to its two nearest poles (elements in DSIGMA). It also updates
- the arrays VF and VL, the first and last components of all the
- right singular vectors of the original bidiagonal matrix.
-
- DLASD8 is called from DLASD6.
-
- Arguments
- =========
-
- ICOMPQ (input) INTEGER
- Specifies whether singular vectors are to be computed in
- factored form in the calling routine:
- = 0: Compute singular values only.
- = 1: Compute singular vectors in factored form as well.
-
- K (input) INTEGER
- The number of terms in the rational function to be solved
- by DLASD4. K >= 1.
-
- D (output) DOUBLE PRECISION array, dimension ( K )
- On output, D contains the updated singular values.
-
- Z (input) DOUBLE PRECISION array, dimension ( K )
- The first K elements of this array contain the components
- of the deflation-adjusted updating row vector.
-
- VF (input/output) DOUBLE PRECISION array, dimension ( K )
- On entry, VF contains information passed through DBEDE8.
- On exit, VF contains the first K components of the first
- components of all right singular vectors of the bidiagonal
- matrix.
-
- VL (input/output) DOUBLE PRECISION array, dimension ( K )
- On entry, VL contains information passed through DBEDE8.
- On exit, VL contains the first K components of the last
- components of all right singular vectors of the bidiagonal
- matrix.
-
- DIFL (output) DOUBLE PRECISION array, dimension ( K )
- On exit, DIFL(I) = D(I) - DSIGMA(I).
-
- DIFR (output) DOUBLE PRECISION array,
- dimension ( LDDIFR, 2 ) if ICOMPQ = 1 and
- dimension ( K ) if ICOMPQ = 0.
-          On exit, DIFR(I,1) = D(I) - DSIGMA(I+1); DIFR(K,1) is not
-          defined and will not be referenced.
-
- If ICOMPQ = 1, DIFR(1:K,2) is an array containing the
- normalizing factors for the right singular vector matrix.
-
- LDDIFR (input) INTEGER
- The leading dimension of DIFR, must be at least K.
-
- DSIGMA (input) DOUBLE PRECISION array, dimension ( K )
- The first K elements of this array contain the old roots
- of the deflated updating problem. These are the poles
- of the secular equation.
-
- WORK (workspace) DOUBLE PRECISION array, dimension at least 3 * K
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-          > 0:  if INFO = 1, a singular value did not converge
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --z__;
- --vf;
- --vl;
- --difl;
- difr_dim1 = *lddifr;
- difr_offset = 1 + difr_dim1 * 1;
- difr -= difr_offset;
- --dsigma;
- --work;
-
- /* Function Body */
- *info = 0;
-
- if (*icompq < 0 || *icompq > 1) {
- *info = -1;
- } else if (*k < 1) {
- *info = -2;
- } else if (*lddifr < *k) {
- *info = -9;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLASD8", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*k == 1) {
- d__[1] = abs(z__[1]);
- difl[1] = d__[1];
- if (*icompq == 1) {
- difl[2] = 1.;
- difr[((difr_dim1) << (1)) + 1] = 1.;
- }
- return 0;
- }
-
-/*
- Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can
- be computed with high relative accuracy (barring over/underflow).
- This is a problem on machines without a guard digit in
- add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
- The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I),
- which on any of these machines zeros out the bottommost
- bit of DSIGMA(I) if it is 1; this makes the subsequent
- subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation
- occurs. On binary machines with a guard digit (almost all
- machines) it does not change DSIGMA(I) at all. On hexadecimal
- and decimal machines with a guard digit, it slightly
- changes the bottommost bits of DSIGMA(I). It does not account
- for hexadecimal or decimal machines without guard digits
- (we know of none). We use a subroutine call to compute
-    2*DSIGMA(I) to prevent optimizing compilers from eliminating
- this code.
-*/
-
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
-/* L10: */
- }
-
-/* Book keeping. */
-
- iwk1 = 1;
- iwk2 = iwk1 + *k;
- iwk3 = iwk2 + *k;
- iwk2i = iwk2 - 1;
- iwk3i = iwk3 - 1;
-
-/* Normalize Z. */
-
- rho = dnrm2_(k, &z__[1], &c__1);
- dlascl_("G", &c__0, &c__0, &rho, &c_b15, k, &c__1, &z__[1], k, info);
- rho *= rho;
-
-/* Initialize WORK(IWK3). */
-
- dlaset_("A", k, &c__1, &c_b15, &c_b15, &work[iwk3], k);
-
-/*
- Compute the updated singular values, the arrays DIFL, DIFR,
- and the updated Z.
-*/
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- dlasd4_(k, &j, &dsigma[1], &z__[1], &work[iwk1], &rho, &d__[j], &work[
- iwk2], info);
-
-/* If the root finder fails, the computation is terminated. */
-
- if (*info != 0) {
- return 0;
- }
- work[iwk3i + j] = work[iwk3i + j] * work[j] * work[iwk2i + j];
- difl[j] = -work[j];
- difr[j + difr_dim1] = -work[j + 1];
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
- i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
- j]);
-/* L20: */
- }
- i__2 = *k;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
- i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
- j]);
-/* L30: */
- }
-/* L40: */
- }
-
-/* Compute updated Z. */
-
- i__1 = *k;
- for (i__ = 1; i__ <= i__1; ++i__) {
- d__2 = sqrt((d__1 = work[iwk3i + i__], abs(d__1)));
- z__[i__] = d_sign(&d__2, &z__[i__]);
-/* L50: */
- }
-
-/* Update VF and VL. */
-
- i__1 = *k;
- for (j = 1; j <= i__1; ++j) {
- diflj = difl[j];
- dj = d__[j];
- dsigj = -dsigma[j];
- if (j < *k) {
- difrj = -difr[j + difr_dim1];
- dsigjp = -dsigma[j + 1];
- }
- work[j] = -z__[j] / diflj / (dsigma[j] + dj);
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigj) - diflj) / (
- dsigma[i__] + dj);
-/* L60: */
- }
- i__2 = *k;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigjp) + difrj) /
- (dsigma[i__] + dj);
-/* L70: */
- }
- temp = dnrm2_(k, &work[1], &c__1);
- work[iwk2i + j] = ddot_(k, &work[1], &c__1, &vf[1], &c__1) / temp;
- work[iwk3i + j] = ddot_(k, &work[1], &c__1, &vl[1], &c__1) / temp;
- if (*icompq == 1) {
- difr[j + ((difr_dim1) << (1))] = temp;
- }
-/* L80: */
- }
-
- dcopy_(k, &work[iwk2], &c__1, &vf[1], &c__1);
- dcopy_(k, &work[iwk3], &c__1, &vl[1], &c__1);
-
- return 0;
-
-/* End of DLASD8 */
-
-} /* dlasd8_ */
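-
-/*
-   A minimal sketch of the dlamc3_-style trick DLASD8 above uses to
-   compute 2*DSIGMA(I): routing the sum through a function call
-   (assumed not to be inlined) forces 2*x to be rounded to a stored
-   double before x is subtracted back off, so an optimizing compiler
-   cannot fold the whole expression to x.  Illustrative only; the
-   real dlamc3_ belongs to the LAPACK machine-parameter routines.
-*/
-
-static doublereal add_stored(doublereal *a, doublereal *b)
-{
-    return *a + *b;                     /* stands in for dlamc3_ */
-}
-
-static doublereal round_like_dlamc3(doublereal x)
-{
-    return add_stored(&x, &x) - x;      /* fl(2*x) - x */
-}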
-
-/* Subroutine */ int dlasda_(integer *icompq, integer *smlsiz, integer *n,
- integer *sqre, doublereal *d__, doublereal *e, doublereal *u, integer
- *ldu, doublereal *vt, integer *k, doublereal *difl, doublereal *difr,
- doublereal *z__, doublereal *poles, integer *givptr, integer *givcol,
- integer *ldgcol, integer *perm, doublereal *givnum, doublereal *c__,
- doublereal *s, doublereal *work, integer *iwork, integer *info)
-{
- /* System generated locals */
- integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1,
- difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset,
- poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset,
- z_dim1, z_offset, i__1, i__2;
-
- /* Builtin functions */
- integer pow_ii(integer *, integer *);
-
- /* Local variables */
- static integer i__, j, m, i1, ic, lf, nd, ll, nl, vf, nr, vl, im1, ncc,
- nlf, nrf, vfi, iwk, vli, lvl, nru, ndb1, nlp1, lvl2, nrp1;
- static doublereal beta;
- static integer idxq, nlvl;
- static doublereal alpha;
- static integer inode, ndiml, ndimr, idxqi, itemp;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static integer sqrei;
- extern /* Subroutine */ int dlasd6_(integer *, integer *, integer *,
- integer *, doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, integer *, integer *, integer *, integer *,
- integer *, doublereal *, integer *, doublereal *, doublereal *,
- doublereal *, doublereal *, integer *, doublereal *, doublereal *,
- doublereal *, integer *, integer *);
- static integer nwork1, nwork2;
- extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer
- *, integer *, integer *, doublereal *, doublereal *, doublereal *,
- integer *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, integer *), dlasdt_(integer *, integer *,
- integer *, integer *, integer *, integer *, integer *), dlaset_(
- char *, integer *, integer *, doublereal *, doublereal *,
- doublereal *, integer *), xerbla_(char *, integer *);
- static integer smlszp;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- Using a divide and conquer approach, DLASDA computes the singular
- value decomposition (SVD) of a real upper bidiagonal N-by-M matrix
- B with diagonal D and offdiagonal E, where M = N + SQRE. The
- algorithm computes the singular values in the SVD B = U * S * VT.
- The orthogonal matrices U and VT are optionally computed in
- compact form.
-
- A related subroutine, DLASD0, computes the singular values and
- the singular vectors in explicit form.
-
- Arguments
- =========
-
- ICOMPQ (input) INTEGER
- Specifies whether singular vectors are to be computed
- in compact form, as follows
- = 0: Compute singular values only.
- = 1: Compute singular vectors of upper bidiagonal
- matrix in compact form.
-
- SMLSIZ (input) INTEGER
- The maximum size of the subproblems at the bottom of the
- computation tree.
-
- N (input) INTEGER
- The row dimension of the upper bidiagonal matrix. This is
- also the dimension of the main diagonal array D.
-
- SQRE (input) INTEGER
- Specifies the column dimension of the bidiagonal matrix.
- = 0: The bidiagonal matrix has column dimension M = N;
- = 1: The bidiagonal matrix has column dimension M = N + 1.
-
- D (input/output) DOUBLE PRECISION array, dimension ( N )
- On entry D contains the main diagonal of the bidiagonal
- matrix. On exit D, if INFO = 0, contains its singular values.
-
- E (input) DOUBLE PRECISION array, dimension ( M-1 )
- Contains the subdiagonal entries of the bidiagonal matrix.
- On exit, E has been destroyed.
-
- U (output) DOUBLE PRECISION array,
- dimension ( LDU, SMLSIZ ) if ICOMPQ = 1, and not referenced
- if ICOMPQ = 0. If ICOMPQ = 1, on exit, U contains the left
- singular vector matrices of all subproblems at the bottom
- level.
-
-   LDU    (input) INTEGER, LDU >= N.
- The leading dimension of arrays U, VT, DIFL, DIFR, POLES,
- GIVNUM, and Z.
-
- VT (output) DOUBLE PRECISION array,
- dimension ( LDU, SMLSIZ+1 ) if ICOMPQ = 1, and not referenced
- if ICOMPQ = 0. If ICOMPQ = 1, on exit, VT' contains the right
- singular vector matrices of all subproblems at the bottom
- level.
-
- K (output) INTEGER array,
- dimension ( N ) if ICOMPQ = 1 and dimension 1 if ICOMPQ = 0.
- If ICOMPQ = 1, on exit, K(I) is the dimension of the I-th
- secular equation on the computation tree.
-
- DIFL (output) DOUBLE PRECISION array, dimension ( LDU, NLVL ),
-          where NLVL = floor(log_2 (N/SMLSIZ)).
-
- DIFR (output) DOUBLE PRECISION array,
- dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1 and
- dimension ( N ) if ICOMPQ = 0.
- If ICOMPQ = 1, on exit, DIFL(1:N, I) and DIFR(1:N, 2 * I - 1)
- record distances between singular values on the I-th
-          level and singular values on the (I-1)-th level, and
- DIFR(1:N, 2 * I ) contains the normalizing factors for
- the right singular vector matrix. See DLASD8 for details.
-
- Z (output) DOUBLE PRECISION array,
- dimension ( LDU, NLVL ) if ICOMPQ = 1 and
- dimension ( N ) if ICOMPQ = 0.
- The first K elements of Z(1, I) contain the components of
- the deflation-adjusted updating row vector for subproblems
- on the I-th level.
-
- POLES (output) DOUBLE PRECISION array,
- dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not referenced
- if ICOMPQ = 0. If ICOMPQ = 1, on exit, POLES(1, 2*I - 1) and
- POLES(1, 2*I) contain the new and old singular values
- involved in the secular equations on the I-th level.
-
- GIVPTR (output) INTEGER array,
- dimension ( N ) if ICOMPQ = 1, and not referenced if
- ICOMPQ = 0. If ICOMPQ = 1, on exit, GIVPTR( I ) records
- the number of Givens rotations performed on the I-th
- problem on the computation tree.
-
- GIVCOL (output) INTEGER array,
- dimension ( LDGCOL, 2 * NLVL ) if ICOMPQ = 1, and not
- referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I,
- GIVCOL(1, 2 *I - 1) and GIVCOL(1, 2 *I) record the locations
- of Givens rotations performed on the I-th level on the
- computation tree.
-
-   LDGCOL (input) INTEGER, LDGCOL >= N.
- The leading dimension of arrays GIVCOL and PERM.
-
- PERM (output) INTEGER array,
- dimension ( LDGCOL, NLVL ) if ICOMPQ = 1, and not referenced
- if ICOMPQ = 0. If ICOMPQ = 1, on exit, PERM(1, I) records
- permutations done on the I-th level of the computation tree.
-
- GIVNUM (output) DOUBLE PRECISION array,
- dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not
- referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I,
- GIVNUM(1, 2 *I - 1) and GIVNUM(1, 2 *I) record the C- and S-
- values of Givens rotations performed on the I-th level on
- the computation tree.
-
- C (output) DOUBLE PRECISION array,
- dimension ( N ) if ICOMPQ = 1, and dimension 1 if ICOMPQ = 0.
- If ICOMPQ = 1 and the I-th subproblem is not square, on exit,
- C( I ) contains the C-value of a Givens rotation related to
- the right null space of the I-th subproblem.
-
- S (output) DOUBLE PRECISION array, dimension ( N ) if
- ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1
- and the I-th subproblem is not square, on exit, S( I )
- contains the S-value of a Givens rotation related to
- the right null space of the I-th subproblem.
-
- WORK (workspace) DOUBLE PRECISION array, dimension
- (6 * N + (SMLSIZ + 1)*(SMLSIZ + 1)).
-
- IWORK (workspace) INTEGER array.
- Dimension must be at least (7 * N).
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
-          > 0:  if INFO = 1, a singular value did not converge
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --e;
- givnum_dim1 = *ldu;
- givnum_offset = 1 + givnum_dim1 * 1;
- givnum -= givnum_offset;
- poles_dim1 = *ldu;
- poles_offset = 1 + poles_dim1 * 1;
- poles -= poles_offset;
- z_dim1 = *ldu;
- z_offset = 1 + z_dim1 * 1;
- z__ -= z_offset;
- difr_dim1 = *ldu;
- difr_offset = 1 + difr_dim1 * 1;
- difr -= difr_offset;
- difl_dim1 = *ldu;
- difl_offset = 1 + difl_dim1 * 1;
- difl -= difl_offset;
- vt_dim1 = *ldu;
- vt_offset = 1 + vt_dim1 * 1;
- vt -= vt_offset;
- u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
- u -= u_offset;
- --k;
- --givptr;
- perm_dim1 = *ldgcol;
- perm_offset = 1 + perm_dim1 * 1;
- perm -= perm_offset;
- givcol_dim1 = *ldgcol;
- givcol_offset = 1 + givcol_dim1 * 1;
- givcol -= givcol_offset;
- --c__;
- --s;
- --work;
- --iwork;
-
- /* Function Body */
- *info = 0;
-
- if (*icompq < 0 || *icompq > 1) {
- *info = -1;
- } else if (*smlsiz < 3) {
- *info = -2;
- } else if (*n < 0) {
- *info = -3;
- } else if (*sqre < 0 || *sqre > 1) {
- *info = -4;
- } else if (*ldu < *n + *sqre) {
- *info = -8;
- } else if (*ldgcol < *n) {
- *info = -17;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLASDA", &i__1);
- return 0;
- }
-
- m = *n + *sqre;
-
-/* If the input matrix is too small, call DLASDQ to find the SVD. */
-
- if (*n <= *smlsiz) {
- if (*icompq == 0) {
- dlasdq_("U", sqre, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
- vt_offset], ldu, &u[u_offset], ldu, &u[u_offset], ldu, &
- work[1], info);
- } else {
- dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
- , ldu, &u[u_offset], ldu, &u[u_offset], ldu, &work[1],
- info);
- }
- return 0;
- }
-
-/* Book-keeping and set up the computation tree. */
-
- inode = 1;
- ndiml = inode + *n;
- ndimr = ndiml + *n;
- idxq = ndimr + *n;
- iwk = idxq + *n;
-
- ncc = 0;
- nru = 0;
-
- smlszp = *smlsiz + 1;
- vf = 1;
- vl = vf + m;
- nwork1 = vl + m;
- nwork2 = nwork1 + smlszp * smlszp;
-
- dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
- smlsiz);
-
-/*
- for the nodes on bottom level of the tree, solve
- their subproblems by DLASDQ.
-*/
-
- ndb1 = (nd + 1) / 2;
- i__1 = nd;
- for (i__ = ndb1; i__ <= i__1; ++i__) {
-
-/*
- IC : center row of each node
- NL : number of rows of left subproblem
- NR : number of rows of right subproblem
- NLF: starting row of the left subproblem
- NRF: starting row of the right subproblem
-*/
-
- i1 = i__ - 1;
- ic = iwork[inode + i1];
- nl = iwork[ndiml + i1];
- nlp1 = nl + 1;
- nr = iwork[ndimr + i1];
- nlf = ic - nl;
- nrf = ic + 1;
- idxqi = idxq + nlf - 2;
- vfi = vf + nlf - 1;
- vli = vl + nlf - 1;
- sqrei = 1;
- if (*icompq == 0) {
- dlaset_("A", &nlp1, &nlp1, &c_b29, &c_b15, &work[nwork1], &smlszp);
- dlasdq_("U", &sqrei, &nl, &nlp1, &nru, &ncc, &d__[nlf], &e[nlf], &
- work[nwork1], &smlszp, &work[nwork2], &nl, &work[nwork2],
- &nl, &work[nwork2], info);
- itemp = nwork1 + nl * smlszp;
- dcopy_(&nlp1, &work[nwork1], &c__1, &work[vfi], &c__1);
- dcopy_(&nlp1, &work[itemp], &c__1, &work[vli], &c__1);
- } else {
- dlaset_("A", &nl, &nl, &c_b29, &c_b15, &u[nlf + u_dim1], ldu);
- dlaset_("A", &nlp1, &nlp1, &c_b29, &c_b15, &vt[nlf + vt_dim1],
- ldu);
- dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &
- vt[nlf + vt_dim1], ldu, &u[nlf + u_dim1], ldu, &u[nlf +
- u_dim1], ldu, &work[nwork1], info);
- dcopy_(&nlp1, &vt[nlf + vt_dim1], &c__1, &work[vfi], &c__1);
- dcopy_(&nlp1, &vt[nlf + nlp1 * vt_dim1], &c__1, &work[vli], &c__1)
- ;
- }
- if (*info != 0) {
- return 0;
- }
- i__2 = nl;
- for (j = 1; j <= i__2; ++j) {
- iwork[idxqi + j] = j;
-/* L10: */
- }
- if ((i__ == nd && *sqre == 0)) {
- sqrei = 0;
- } else {
- sqrei = 1;
- }
- idxqi += nlp1;
- vfi += nlp1;
- vli += nlp1;
- nrp1 = nr + sqrei;
- if (*icompq == 0) {
- dlaset_("A", &nrp1, &nrp1, &c_b29, &c_b15, &work[nwork1], &smlszp);
- dlasdq_("U", &sqrei, &nr, &nrp1, &nru, &ncc, &d__[nrf], &e[nrf], &
- work[nwork1], &smlszp, &work[nwork2], &nr, &work[nwork2],
- &nr, &work[nwork2], info);
- itemp = nwork1 + (nrp1 - 1) * smlszp;
- dcopy_(&nrp1, &work[nwork1], &c__1, &work[vfi], &c__1);
- dcopy_(&nrp1, &work[itemp], &c__1, &work[vli], &c__1);
- } else {
- dlaset_("A", &nr, &nr, &c_b29, &c_b15, &u[nrf + u_dim1], ldu);
- dlaset_("A", &nrp1, &nrp1, &c_b29, &c_b15, &vt[nrf + vt_dim1],
- ldu);
- dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &
- vt[nrf + vt_dim1], ldu, &u[nrf + u_dim1], ldu, &u[nrf +
- u_dim1], ldu, &work[nwork1], info);
- dcopy_(&nrp1, &vt[nrf + vt_dim1], &c__1, &work[vfi], &c__1);
- dcopy_(&nrp1, &vt[nrf + nrp1 * vt_dim1], &c__1, &work[vli], &c__1)
- ;
- }
- if (*info != 0) {
- return 0;
- }
- i__2 = nr;
- for (j = 1; j <= i__2; ++j) {
- iwork[idxqi + j] = j;
-/* L20: */
- }
-/* L30: */
- }
-
-/* Now conquer each subproblem bottom-up. */
-
- j = pow_ii(&c__2, &nlvl);
- for (lvl = nlvl; lvl >= 1; --lvl) {
- lvl2 = ((lvl) << (1)) - 1;
-
-/*
- Find the first node LF and last node LL on
- the current level LVL.
-*/
-
- if (lvl == 1) {
- lf = 1;
- ll = 1;
- } else {
- i__1 = lvl - 1;
- lf = pow_ii(&c__2, &i__1);
- ll = ((lf) << (1)) - 1;
- }
- i__1 = ll;
- for (i__ = lf; i__ <= i__1; ++i__) {
- im1 = i__ - 1;
- ic = iwork[inode + im1];
- nl = iwork[ndiml + im1];
- nr = iwork[ndimr + im1];
- nlf = ic - nl;
- nrf = ic + 1;
- if (i__ == ll) {
- sqrei = *sqre;
- } else {
- sqrei = 1;
- }
- vfi = vf + nlf - 1;
- vli = vl + nlf - 1;
- idxqi = idxq + nlf - 1;
- alpha = d__[ic];
- beta = e[ic];
- if (*icompq == 0) {
- dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
- work[vli], &alpha, &beta, &iwork[idxqi], &perm[
- perm_offset], &givptr[1], &givcol[givcol_offset],
- ldgcol, &givnum[givnum_offset], ldu, &poles[
- poles_offset], &difl[difl_offset], &difr[difr_offset],
- &z__[z_offset], &k[1], &c__[1], &s[1], &work[nwork1],
- &iwork[iwk], info);
- } else {
- --j;
- dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
- work[vli], &alpha, &beta, &iwork[idxqi], &perm[nlf +
- lvl * perm_dim1], &givptr[j], &givcol[nlf + lvl2 *
- givcol_dim1], ldgcol, &givnum[nlf + lvl2 *
- givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], &
- difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 *
- difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[j],
- &s[j], &work[nwork1], &iwork[iwk], info);
- }
- if (*info != 0) {
- return 0;
- }
-/* L40: */
- }
-/* L50: */
- }
-
- return 0;
-
-/* End of DLASDA */
-
-} /* dlasda_ */
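-
-/*
-   A minimal sketch of the bottom-up traversal in DLASDA above: with
-   the root at level 1, the nodes on level LVL are numbered
-   2**(LVL-1) .. 2**LVL - 1, which is exactly the LF..LL range the
-   conquer loop walks.  Hypothetical depth 3, standalone build
-   assumed (illustrative only):
-*/
-
-#include <stdio.h>
-
-int main(void)
-{
-    int nlvl = 3;                       /* hypothetical tree depth */
-    int lvl, lf, ll;
-    for (lvl = nlvl; lvl >= 1; --lvl) {
-        lf = lvl == 1 ? 1 : 1 << (lvl - 1);
-        ll = lvl == 1 ? 1 : (lf << 1) - 1;
-        printf("level %d: nodes %d..%d\n", lvl, lf, ll);
-    }
-    return 0;                           /* prints 4..7, 2..3, 1..1 */
-}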
-
-/* Subroutine */ int dlasdq_(char *uplo, integer *sqre, integer *n, integer *
- ncvt, integer *nru, integer *ncc, doublereal *d__, doublereal *e,
- doublereal *vt, integer *ldvt, doublereal *u, integer *ldu,
- doublereal *c__, integer *ldc, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
- i__2;
-
- /* Local variables */
- static integer i__, j;
- static doublereal r__, cs, sn;
- static integer np1, isub;
- static doublereal smin;
- static integer sqre1;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
- integer *, doublereal *, doublereal *, doublereal *, integer *), dswap_(integer *, doublereal *, integer *
- , doublereal *, integer *);
- static integer iuplo;
- extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *), xerbla_(char *,
- integer *), dbdsqr_(char *, integer *, integer *, integer
- *, integer *, doublereal *, doublereal *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- integer *);
- static logical rotate;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- DLASDQ computes the singular value decomposition (SVD) of a real
- (upper or lower) bidiagonal matrix with diagonal D and offdiagonal
- E, accumulating the transformations if desired. Letting B denote
- the input bidiagonal matrix, the algorithm computes orthogonal
- matrices Q and P such that B = Q * S * P' (P' denotes the transpose
- of P). The singular values S are overwritten on D.
-
- The input matrix U is changed to U * Q if desired.
- The input matrix VT is changed to P' * VT if desired.
- The input matrix C is changed to Q' * C if desired.
-
- See "Computing Small Singular Values of Bidiagonal Matrices With
- Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan,
- LAPACK Working Note #3, for a detailed description of the algorithm.
-
- Arguments
- =========
-
- UPLO (input) CHARACTER*1
- On entry, UPLO specifies whether the input bidiagonal matrix
-          is upper or lower bidiagonal, and whether it is square or
-          not.
- UPLO = 'U' or 'u' B is upper bidiagonal.
- UPLO = 'L' or 'l' B is lower bidiagonal.
-
- SQRE (input) INTEGER
- = 0: then the input matrix is N-by-N.
-        = 1: then the input matrix is N-by-(N+1) if UPLO = 'U' and
-             (N+1)-by-N if UPLO = 'L'.
-
- N (input) INTEGER
- On entry, N specifies the number of rows and columns
- in the matrix. N must be at least 0.
-
- NCVT (input) INTEGER
- On entry, NCVT specifies the number of columns of
- the matrix VT. NCVT must be at least 0.
-
- NRU (input) INTEGER
- On entry, NRU specifies the number of rows of
- the matrix U. NRU must be at least 0.
-
- NCC (input) INTEGER
- On entry, NCC specifies the number of columns of
- the matrix C. NCC must be at least 0.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, D contains the diagonal entries of the
- bidiagonal matrix whose SVD is desired. On normal exit,
- D contains the singular values in ascending order.
-
-   E      (input/output) DOUBLE PRECISION array,
-          dimension (N-1) if SQRE = 0 and N if SQRE = 1.
- On entry, the entries of E contain the offdiagonal entries
- of the bidiagonal matrix whose SVD is desired. On normal
- exit, E will contain 0. If the algorithm does not converge,
- D and E will contain the diagonal and superdiagonal entries
- of a bidiagonal matrix orthogonally equivalent to the one
- given as input.
-
- VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT)
- On entry, contains a matrix which on exit has been
- premultiplied by P', dimension N-by-NCVT if SQRE = 0
- and (N+1)-by-NCVT if SQRE = 1 (not referenced if NCVT=0).
-
- LDVT (input) INTEGER
- On entry, LDVT specifies the leading dimension of VT as
- declared in the calling (sub) program. LDVT must be at
- least 1. If NCVT is nonzero LDVT must also be at least N.
-
- U (input/output) DOUBLE PRECISION array, dimension (LDU, N)
- On entry, contains a matrix which on exit has been
- postmultiplied by Q, dimension NRU-by-N if SQRE = 0
- and NRU-by-(N+1) if SQRE = 1 (not referenced if NRU=0).
-
- LDU (input) INTEGER
- On entry, LDU specifies the leading dimension of U as
- declared in the calling (sub) program. LDU must be at
- least max( 1, NRU ) .
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC)
- On entry, contains an N-by-NCC matrix which on exit
- has been premultiplied by Q' dimension N-by-NCC if SQRE = 0
- and (N+1)-by-NCC if SQRE = 1 (not referenced if NCC=0).
-
- LDC (input) INTEGER
- On entry, LDC specifies the leading dimension of C as
- declared in the calling (sub) program. LDC must be at
- least 1. If NCC is nonzero, LDC must also be at least N.
-
- WORK (workspace) DOUBLE PRECISION array, dimension (4*N)
- Workspace. Only referenced if one of NCVT, NRU, or NCC is
- nonzero, and if N is at least 2.
-
- INFO (output) INTEGER
- On exit, a value of 0 indicates a successful exit.
- If INFO < 0, argument number -INFO is illegal.
- If INFO > 0, the algorithm did not converge, and INFO
- specifies how many superdiagonals did not converge.
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --e;
- vt_dim1 = *ldvt;
- vt_offset = 1 + vt_dim1 * 1;
- vt -= vt_offset;
- u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
- u -= u_offset;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- *info = 0;
- iuplo = 0;
- if (lsame_(uplo, "U")) {
- iuplo = 1;
- }
- if (lsame_(uplo, "L")) {
- iuplo = 2;
- }
- if (iuplo == 0) {
- *info = -1;
- } else if (*sqre < 0 || *sqre > 1) {
- *info = -2;
- } else if (*n < 0) {
- *info = -3;
- } else if (*ncvt < 0) {
- *info = -4;
- } else if (*nru < 0) {
- *info = -5;
- } else if (*ncc < 0) {
- *info = -6;
- } else if ((*ncvt == 0 && *ldvt < 1) || (*ncvt > 0 && *ldvt < max(1,*n)))
- {
- *info = -10;
- } else if (*ldu < max(1,*nru)) {
- *info = -12;
- } else if ((*ncc == 0 && *ldc < 1) || (*ncc > 0 && *ldc < max(1,*n))) {
- *info = -14;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLASDQ", &i__1);
- return 0;
- }
- if (*n == 0) {
- return 0;
- }
-
-/* ROTATE is true if any singular vectors are desired, false otherwise */
-
- rotate = *ncvt > 0 || *nru > 0 || *ncc > 0;
- np1 = *n + 1;
- sqre1 = *sqre;
-
-/*
- If matrix non-square upper bidiagonal, rotate to be lower
- bidiagonal. The rotations are on the right.
-*/
-
- if ((iuplo == 1 && sqre1 == 1)) {
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
- d__[i__] = r__;
- e[i__] = sn * d__[i__ + 1];
- d__[i__ + 1] = cs * d__[i__ + 1];
- if (rotate) {
- work[i__] = cs;
- work[*n + i__] = sn;
- }
-/* L10: */
- }
- dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
- d__[*n] = r__;
- e[*n] = 0.;
- if (rotate) {
- work[*n] = cs;
- work[*n + *n] = sn;
- }
- iuplo = 2;
- sqre1 = 0;
-
-/* Update singular vectors if desired. */
-
- if (*ncvt > 0) {
- dlasr_("L", "V", "F", &np1, ncvt, &work[1], &work[np1], &vt[
- vt_offset], ldvt);
- }
- }
-
-/*
- If matrix lower bidiagonal, rotate to be upper bidiagonal
- by applying Givens rotations on the left.
-*/
-
- if (iuplo == 2) {
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
- d__[i__] = r__;
- e[i__] = sn * d__[i__ + 1];
- d__[i__ + 1] = cs * d__[i__ + 1];
- if (rotate) {
- work[i__] = cs;
- work[*n + i__] = sn;
- }
-/* L20: */
- }
-
-/*
- If matrix (N+1)-by-N lower bidiagonal, one additional
- rotation is needed.
-*/
-
- if (sqre1 == 1) {
- dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
- d__[*n] = r__;
- if (rotate) {
- work[*n] = cs;
- work[*n + *n] = sn;
- }
- }
-
-/* Update singular vectors if desired. */
-
- if (*nru > 0) {
- if (sqre1 == 0) {
- dlasr_("R", "V", "F", nru, n, &work[1], &work[np1], &u[
- u_offset], ldu);
- } else {
- dlasr_("R", "V", "F", nru, &np1, &work[1], &work[np1], &u[
- u_offset], ldu);
- }
- }
- if (*ncc > 0) {
- if (sqre1 == 0) {
- dlasr_("L", "V", "F", n, ncc, &work[1], &work[np1], &c__[
- c_offset], ldc);
- } else {
- dlasr_("L", "V", "F", &np1, ncc, &work[1], &work[np1], &c__[
- c_offset], ldc);
- }
- }
- }
-
-/*
- Call DBDSQR to compute the SVD of the reduced real
- N-by-N upper bidiagonal matrix.
-*/
-
- dbdsqr_("U", n, ncvt, nru, ncc, &d__[1], &e[1], &vt[vt_offset], ldvt, &u[
- u_offset], ldu, &c__[c_offset], ldc, &work[1], info);
-
-/*
- Sort the singular values into ascending order (insertion sort on
- singular values, but only one transposition per singular vector)
-*/
-
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
-
-/* Scan for smallest D(I). */
-
- isub = i__;
- smin = d__[i__];
- i__2 = *n;
- for (j = i__ + 1; j <= i__2; ++j) {
- if (d__[j] < smin) {
- isub = j;
- smin = d__[j];
- }
-/* L30: */
- }
- if (isub != i__) {
-
-/* Swap singular values and vectors. */
-
- d__[isub] = d__[i__];
- d__[i__] = smin;
- if (*ncvt > 0) {
- dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[i__ + vt_dim1],
- ldvt);
- }
- if (*nru > 0) {
- dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[i__ * u_dim1 + 1]
- , &c__1);
- }
- if (*ncc > 0) {
- dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[i__ + c_dim1], ldc)
- ;
- }
- }
-/* L40: */
- }
-
- return 0;
-
-/* End of DLASDQ */
-
-} /* dlasdq_ */
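-
-/*
-   DLASDQ above drives its chasing with dlartg_, which builds a plane
-   (Givens) rotation [c s; -s c] taking (f, g) to (r, 0).  A naive
-   sketch of that construction, without the scaling safeguards the
-   real dlartg_ applies against overflow (illustrative only):
-*/
-
-#include <math.h>
-
-static void givens_naive(double f, double g,
-                         double *c, double *s, double *r)
-{
-    if (g == 0.) {
-        *c = 1.;
-        *s = 0.;
-        *r = f;
-    } else {
-        double t = sqrt(f * f + g * g);
-        *c = f / t;                     /*  c*f + s*g = r  */
-        *s = g / t;                     /* -s*f + c*g = 0  */
-        *r = t;
-    }
-}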
-
-/* Subroutine */ int dlasdt_(integer *n, integer *lvl, integer *nd, integer *
- inode, integer *ndiml, integer *ndimr, integer *msub)
-{
- /* System generated locals */
- integer i__1, i__2;
-
- /* Builtin functions */
- double log(doublereal);
-
- /* Local variables */
- static integer i__, il, ir, maxn;
- static doublereal temp;
- static integer nlvl, llst, ncrnt;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLASDT creates a tree of subproblems for bidiagonal divide and
- conquer.
-
- Arguments
- =========
-
- N (input) INTEGER
- On entry, the number of diagonal elements of the
- bidiagonal matrix.
-
- LVL (output) INTEGER
- On exit, the number of levels on the computation tree.
-
- ND (output) INTEGER
- On exit, the number of nodes on the tree.
-
- INODE (output) INTEGER array, dimension ( N )
- On exit, centers of subproblems.
-
- NDIML (output) INTEGER array, dimension ( N )
- On exit, row dimensions of left children.
-
- NDIMR (output) INTEGER array, dimension ( N )
- On exit, row dimensions of right children.
-
-   MSUB   (input) INTEGER
-          On entry, the maximum row dimension a subproblem at the
-          bottom of the tree may have.
-
- Further Details
- ===============
-
- Based on contributions by
- Ming Gu and Huan Ren, Computer Science Division, University of
- California at Berkeley, USA
-
- =====================================================================
-
-
- Find the number of levels on the tree.
-*/
-
- /* Parameter adjustments */
- --ndimr;
- --ndiml;
- --inode;
-
- /* Function Body */
- maxn = max(1,*n);
- temp = log((doublereal) maxn / (doublereal) (*msub + 1)) / log(2.);
- *lvl = (integer) temp + 1;
-
- i__ = *n / 2;
- inode[1] = i__ + 1;
- ndiml[1] = i__;
- ndimr[1] = *n - i__ - 1;
- il = 0;
- ir = 1;
- llst = 1;
- i__1 = *lvl - 1;
- for (nlvl = 1; nlvl <= i__1; ++nlvl) {
-
-/*
- Constructing the tree at (NLVL+1)-st level. The number of
- nodes created on this level is LLST * 2.
-*/
-
- i__2 = llst - 1;
- for (i__ = 0; i__ <= i__2; ++i__) {
- il += 2;
- ir += 2;
- ncrnt = llst + i__;
- ndiml[il] = ndiml[ncrnt] / 2;
- ndimr[il] = ndiml[ncrnt] - ndiml[il] - 1;
- inode[il] = inode[ncrnt] - ndimr[il] - 1;
- ndiml[ir] = ndimr[ncrnt] / 2;
- ndimr[ir] = ndimr[ncrnt] - ndiml[ir] - 1;
- inode[ir] = inode[ncrnt] + ndiml[ir] + 1;
-/* L10: */
- }
- llst <<= 1;
-/* L20: */
- }
- *nd = ((llst) << (1)) - 1;
-
- return 0;
-
-/* End of DLASDT */
-
-} /* dlasdt_ */
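-
-/*
-   A worked instance of the level-count formula in DLASDT above,
-   LVL = int( log2( max(1,N) / (MSUB+1) ) ) + 1.  For hypothetical
-   sizes N = 100 and MSUB = 25: log2(100/26) ~ 1.94, so LVL = 2
-   (illustrative only):
-*/
-
-#include <math.h>
-
-static int tree_levels(int n, int msub)
-{
-    int maxn = n > 1 ? n : 1;
-    double temp = log((double) maxn / (double) (msub + 1)) / log(2.);
-    return (int) temp + 1;              /* 2 for n = 100, msub = 25 */
-}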
-
-/* Subroutine */ int dlaset_(char *uplo, integer *m, integer *n, doublereal *
- alpha, doublereal *beta, doublereal *a, integer *lda)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, j;
- extern logical lsame_(char *, char *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLASET initializes an m-by-n matrix A to BETA on the diagonal and
- ALPHA on the offdiagonals.
-
- Arguments
- =========
-
- UPLO (input) CHARACTER*1
- Specifies the part of the matrix A to be set.
- = 'U': Upper triangular part is set; the strictly lower
- triangular part of A is not changed.
- = 'L': Lower triangular part is set; the strictly upper
- triangular part of A is not changed.
- Otherwise: All of the matrix A is set.
-
- M (input) INTEGER
- The number of rows of the matrix A. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix A. N >= 0.
-
- ALPHA (input) DOUBLE PRECISION
- The constant to which the offdiagonal elements are to be set.
-
- BETA (input) DOUBLE PRECISION
- The constant to which the diagonal elements are to be set.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On exit, the leading m-by-n submatrix of A is set as follows:
-
- if UPLO = 'U', A(i,j) = ALPHA, 1<=i<=j-1, 1<=j<=n,
- if UPLO = 'L', A(i,j) = ALPHA, j+1<=i<=m, 1<=j<=n,
- otherwise, A(i,j) = ALPHA, 1<=i<=m, 1<=j<=n, i.ne.j,
-
- and, for all UPLO, A(i,i) = BETA, 1<=i<=min(m,n).
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- if (lsame_(uplo, "U")) {
-
-/*
- Set the strictly upper triangular or trapezoidal part of the
- array to ALPHA.
-*/
-
- i__1 = *n;
- for (j = 2; j <= i__1; ++j) {
-/* Computing MIN */
- i__3 = j - 1;
- i__2 = min(i__3,*m);
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = *alpha;
-/* L10: */
- }
-/* L20: */
- }
-
- } else if (lsame_(uplo, "L")) {
-
-/*
- Set the strictly lower triangular or trapezoidal part of the
- array to ALPHA.
-*/
-
- i__1 = min(*m,*n);
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = *alpha;
-/* L30: */
- }
-/* L40: */
- }
-
- } else {
-
-/* Set the leading m-by-n submatrix to ALPHA. */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = *alpha;
-/* L50: */
- }
-/* L60: */
- }
- }
-
-/* Set the first min(M,N) diagonal elements to BETA. */
-
- i__1 = min(*m,*n);
- for (i__ = 1; i__ <= i__1; ++i__) {
- a[i__ + i__ * a_dim1] = *beta;
-/* L70: */
- }
-
- return 0;
-
-/* End of DLASET */
-
-} /* dlaset_ */
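-
-/*
-   A plain-C sketch of what the UPLO = 'A' branch of DLASET above
-   does for a column-major M-by-N array (illustrative only, not the
-   LAPACK calling convention):
-*/
-
-static void set_full(int m, int n, double alpha, double beta,
-                     double *a, int lda)
-{
-    int i, j;
-    for (j = 0; j < n; ++j) {
-        for (i = 0; i < m; ++i) {
-            a[i + j * lda] = i == j ? beta : alpha;
-        }
-    }
-}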
-
-/* Subroutine */ int dlasq1_(integer *n, doublereal *d__, doublereal *e,
- doublereal *work, integer *info)
-{
- /* System generated locals */
- integer i__1, i__2;
- doublereal d__1, d__2, d__3;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static integer i__;
- static doublereal eps;
- extern /* Subroutine */ int dlas2_(doublereal *, doublereal *, doublereal
- *, doublereal *, doublereal *);
- static doublereal scale;
- static integer iinfo;
- static doublereal sigmn;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static doublereal sigmx;
- extern /* Subroutine */ int dlasq2_(integer *, doublereal *, integer *);
-
- extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, integer *, doublereal *,
- integer *, integer *);
- static doublereal safmin;
- extern /* Subroutine */ int xerbla_(char *, integer *), dlasrt_(
- char *, integer *, doublereal *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- DLASQ1 computes the singular values of a real N-by-N bidiagonal
- matrix with diagonal D and off-diagonal E. The singular values
- are computed to high relative accuracy, in the absence of
- denormalization, underflow and overflow. The algorithm was first
- presented in
-
- "Accurate singular values and differential qd algorithms" by K. V.
- Fernando and B. N. Parlett, Numer. Math., Vol-67, No. 2, pp. 191-230,
- 1994,
-
- and the present implementation is described in "An implementation of
- the dqds Algorithm (Positive Case)", LAPACK Working Note.
-
- Arguments
- =========
-
- N (input) INTEGER
- The number of rows and columns in the matrix. N >= 0.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, D contains the diagonal elements of the
- bidiagonal matrix whose SVD is desired. On normal exit,
- D contains the singular values in decreasing order.
-
- E (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, elements E(1:N-1) contain the off-diagonal elements
- of the bidiagonal matrix whose SVD is desired.
- On exit, E is overwritten.
-
- WORK (workspace) DOUBLE PRECISION array, dimension (4*N)
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
- > 0: the algorithm failed
- = 1, a split was marked by a positive value in E
- = 2, current block of Z not diagonalized after 30*N
- iterations (in inner while loop)
- = 3, termination criterion of outer while loop not met
- (program created more than N unreduced blocks)
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- --work;
- --e;
- --d__;
-
- /* Function Body */
- *info = 0;
- if (*n < 0) {
- *info = -2;
- i__1 = -(*info);
- xerbla_("DLASQ1", &i__1);
- return 0;
- } else if (*n == 0) {
- return 0;
- } else if (*n == 1) {
- d__[1] = abs(d__[1]);
- return 0;
- } else if (*n == 2) {
- dlas2_(&d__[1], &e[1], &d__[2], &sigmn, &sigmx);
- d__[1] = sigmx;
- d__[2] = sigmn;
- return 0;
- }
-
-/* Estimate the largest singular value. */
-
- sigmx = 0.;
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- d__[i__] = (d__1 = d__[i__], abs(d__1));
-/* Computing MAX */
- d__2 = sigmx, d__3 = (d__1 = e[i__], abs(d__1));
- sigmx = max(d__2,d__3);
-/* L10: */
- }
- d__[*n] = (d__1 = d__[*n], abs(d__1));
-
-/* Early return if SIGMX is zero (matrix is already diagonal). */
-
- if (sigmx == 0.) {
- dlasrt_("D", n, &d__[1], &iinfo);
- return 0;
- }
-
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
-/* Computing MAX */
- d__1 = sigmx, d__2 = d__[i__];
- sigmx = max(d__1,d__2);
-/* L20: */
- }
-
-/*
- Copy D and E into WORK (in the Z format) and scale (squaring the
- input data makes scaling by a power of the radix pointless).
-*/
-
- eps = PRECISION;
- safmin = SAFEMINIMUM;
- scale = sqrt(eps / safmin);
- dcopy_(n, &d__[1], &c__1, &work[1], &c__2);
- i__1 = *n - 1;
- dcopy_(&i__1, &e[1], &c__1, &work[2], &c__2);
- i__1 = ((*n) << (1)) - 1;
- i__2 = ((*n) << (1)) - 1;
- dlascl_("G", &c__0, &c__0, &sigmx, &scale, &i__1, &c__1, &work[1], &i__2,
- &iinfo);
-
-/* Compute the q's and e's. */
-
- i__1 = ((*n) << (1)) - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
-/* Computing 2nd power */
- d__1 = work[i__];
- work[i__] = d__1 * d__1;
-/* L30: */
- }
- work[*n * 2] = 0.;
-
- dlasq2_(n, &work[1], info);
-
- if (*info == 0) {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- d__[i__] = sqrt(work[i__]);
-/* L40: */
- }
- dlascl_("G", &c__0, &c__0, &scale, &sigmx, n, &c__1, &d__[1], n, &
- iinfo);
- }
-
- return 0;
-
-/* End of DLASQ1 */
-
-} /* dlasq1_ */
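-
-/*
-   A minimal sketch of the interleaved Z layout DLASQ1 above hands to
-   DLASQ2: squared diagonal entries in the odd (1-based) slots,
-   squared off-diagonal entries in the even slots.  The dlascl_
-   scaling between the copy and the squaring is omitted here
-   (illustrative only):
-*/
-
-static void pack_z(int n, const double *d, const double *e, double *z)
-{
-    int i;
-    for (i = 0; i < n; ++i) {
-        z[2 * i] = d[i] * d[i];         /* q values: slots 1,3,5,... */
-    }
-    for (i = 0; i < n - 1; ++i) {
-        z[2 * i + 1] = e[i] * e[i];     /* e values: slots 2,4,6,... */
-    }
-    z[2 * n - 1] = 0.;                  /* mirrors work[2*n] = 0. above */
-}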
-
-/* Subroutine */ int dlasq2_(integer *n, doublereal *z__, integer *info)
-{
- /* System generated locals */
- integer i__1, i__2, i__3;
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal d__, e;
- static integer k;
- static doublereal s, t;
- static integer i0, i4, n0, pp;
- static doublereal eps, tol;
- static integer ipn4;
- static doublereal tol2;
- static logical ieee;
- static integer nbig;
- static doublereal dmin__, emin, emax;
- static integer ndiv, iter;
- static doublereal qmin, temp, qmax, zmax;
- static integer splt, nfail;
- static doublereal desig, trace, sigma;
- static integer iinfo;
- extern /* Subroutine */ int dlasq3_(integer *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, doublereal *, doublereal *,
- integer *, integer *, integer *, logical *);
-
- static integer iwhila, iwhilb;
- static doublereal oldemn, safmin;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
- integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
-   DLASQ2 computes all the eigenvalues of the symmetric positive
-   definite tridiagonal matrix associated with the qd array Z, to high
-   relative accuracy, in the absence of denormalization, underflow and
-   overflow.
-
-   To see the relation of Z to the tridiagonal matrix, let L be a
-   unit lower bidiagonal matrix with subdiagonals Z(2,4,6,...) and
-   let U be an upper bidiagonal matrix with 1's above and diagonal
-   Z(1,3,5,...). The tridiagonal is L*U or, if you prefer, the
-   symmetric tridiagonal to which it is similar.
-
- Note : DLASQ2 defines a logical variable, IEEE, which is true
-   on machines which follow the IEEE-754 floating-point standard in their
- handling of infinities and NaNs, and false otherwise. This variable
- is passed to DLASQ3.
-
- Arguments
- =========
-
- N (input) INTEGER
- The number of rows and columns in the matrix. N >= 0.
-
- Z (workspace) DOUBLE PRECISION array, dimension ( 4*N )
- On entry Z holds the qd array. On exit, entries 1 to N hold
- the eigenvalues in decreasing order, Z( 2*N+1 ) holds the
- trace, and Z( 2*N+2 ) holds the sum of the eigenvalues. If
- N > 2, then Z( 2*N+3 ) holds the iteration count, Z( 2*N+4 )
- holds NDIVS/NIN^2, and Z( 2*N+5 ) holds the percentage of
- shifts that failed.
-
- INFO (output) INTEGER
- = 0: successful exit
-          < 0: if the i-th argument is a scalar and had an illegal
-               value, then INFO = -i; if the i-th argument is an
-               array and its j-th entry had an illegal value, then
-               INFO = -(i*100+j)
- > 0: the algorithm failed
- = 1, a split was marked by a positive value in E
- = 2, current block of Z not diagonalized after 30*N
- iterations (in inner while loop)
- = 3, termination criterion of outer while loop not met
- (program created more than N unreduced blocks)
-
- Further Details
- ===============
- Local Variables: I0:N0 defines a current unreduced segment of Z.
- The shifts are accumulated in SIGMA. Iteration count is in ITER.
- Ping-pong is controlled by PP (alternates between 0 and 1).
-
- =====================================================================
-
-
- Test the input arguments.
- (in case DLASQ2 is not called by DLASQ1)
-*/
-
- /* Parameter adjustments */
- --z__;
-
- /* Function Body */
- *info = 0;
- eps = PRECISION;
- safmin = SAFEMINIMUM;
- tol = eps * 100.;
-/* Computing 2nd power */
- d__1 = tol;
- tol2 = d__1 * d__1;
-
- if (*n < 0) {
- *info = -1;
- xerbla_("DLASQ2", &c__1);
- return 0;
- } else if (*n == 0) {
- return 0;
- } else if (*n == 1) {
-
-/* 1-by-1 case. */
-
- if (z__[1] < 0.) {
- *info = -201;
- xerbla_("DLASQ2", &c__2);
- }
- return 0;
- } else if (*n == 2) {
-
-/* 2-by-2 case. */
-
- if (z__[2] < 0. || z__[3] < 0.) {
- *info = -2;
- xerbla_("DLASQ2", &c__2);
- return 0;
- } else if (z__[3] > z__[1]) {
- d__ = z__[3];
- z__[3] = z__[1];
- z__[1] = d__;
- }
- z__[5] = z__[1] + z__[2] + z__[3];
- if (z__[2] > z__[3] * tol2) {
- t = (z__[1] - z__[3] + z__[2]) * .5;
- s = z__[3] * (z__[2] / t);
- if (s <= t) {
- s = z__[3] * (z__[2] / (t * (sqrt(s / t + 1.) + 1.)));
- } else {
- s = z__[3] * (z__[2] / (t + sqrt(t) * sqrt(t + s)));
- }
- t = z__[1] + (s + z__[2]);
- z__[3] *= z__[1] / t;
- z__[1] = t;
- }
- z__[2] = z__[3];
- z__[6] = z__[2] + z__[1];
- return 0;
- }
-
-/* Check for negative data and compute sums of q's and e's. */
-
- z__[*n * 2] = 0.;
- emin = z__[2];
- qmax = 0.;
- zmax = 0.;
- d__ = 0.;
- e = 0.;
-
- i__1 = (*n - 1) << (1);
- for (k = 1; k <= i__1; k += 2) {
- if (z__[k] < 0.) {
- *info = -(k + 200);
- xerbla_("DLASQ2", &c__2);
- return 0;
- } else if (z__[k + 1] < 0.) {
- *info = -(k + 201);
- xerbla_("DLASQ2", &c__2);
- return 0;
- }
- d__ += z__[k];
- e += z__[k + 1];
-/* Computing MAX */
- d__1 = qmax, d__2 = z__[k];
- qmax = max(d__1,d__2);
-/* Computing MIN */
- d__1 = emin, d__2 = z__[k + 1];
- emin = min(d__1,d__2);
-/* Computing MAX */
- d__1 = max(qmax,zmax), d__2 = z__[k + 1];
- zmax = max(d__1,d__2);
-/* L10: */
- }
- if (z__[((*n) << (1)) - 1] < 0.) {
- *info = -(((*n) << (1)) + 199);
- xerbla_("DLASQ2", &c__2);
- return 0;
- }
- d__ += z__[((*n) << (1)) - 1];
-/* Computing MAX */
- d__1 = qmax, d__2 = z__[((*n) << (1)) - 1];
- qmax = max(d__1,d__2);
- zmax = max(qmax,zmax);
-
-/* Check for diagonality. */
-
- if (e == 0.) {
- i__1 = *n;
- for (k = 2; k <= i__1; ++k) {
- z__[k] = z__[((k) << (1)) - 1];
-/* L20: */
- }
- dlasrt_("D", n, &z__[1], &iinfo);
- z__[((*n) << (1)) - 1] = d__;
- return 0;
- }
-
- trace = d__ + e;
-
-/* Check for zero data. */
-
- if (trace == 0.) {
- z__[((*n) << (1)) - 1] = 0.;
- return 0;
- }
-
-/* Check whether the machine is IEEE conformable. */
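-/* (ilaenv_ ISPEC = 10 asks whether NaN arithmetic can be trusted not
-   to trap, ISPEC = 11 the same for infinity arithmetic; both must
-   answer 1 for the IEEE code paths handed to DLASQ3 to be taken.) */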
-
- ieee = (ilaenv_(&c__10, "DLASQ2", "N", &c__1, &c__2, &c__3, &c__4, (
- ftnlen)6, (ftnlen)1) == 1 && ilaenv_(&c__11, "DLASQ2", "N", &c__1,
- &c__2, &c__3, &c__4, (ftnlen)6, (ftnlen)1) == 1);
-
-/* Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). */
-
- for (k = (*n) << (1); k >= 2; k += -2) {
- z__[k * 2] = 0.;
- z__[((k) << (1)) - 1] = z__[k];
- z__[((k) << (1)) - 2] = 0.;
- z__[((k) << (1)) - 3] = z__[k - 1];
-/* L30: */
- }
-
- i0 = 1;
- n0 = *n;
-
-/* Reverse the qd-array, if warranted. */
-
- if (z__[((i0) << (2)) - 3] * 1.5 < z__[((n0) << (2)) - 3]) {
- ipn4 = (i0 + n0) << (2);
- i__1 = (i0 + n0 - 1) << (1);
- for (i4 = (i0) << (2); i4 <= i__1; i4 += 4) {
- temp = z__[i4 - 3];
- z__[i4 - 3] = z__[ipn4 - i4 - 3];
- z__[ipn4 - i4 - 3] = temp;
- temp = z__[i4 - 1];
- z__[i4 - 1] = z__[ipn4 - i4 - 5];
- z__[ipn4 - i4 - 5] = temp;
-/* L40: */
- }
- }
-
-/* Initial split checking via dqd and Li's test. */
-
- pp = 0;
-
- for (k = 1; k <= 2; ++k) {
-
- d__ = z__[((n0) << (2)) + pp - 3];
- i__1 = ((i0) << (2)) + pp;
- for (i4 = ((n0 - 1) << (2)) + pp; i4 >= i__1; i4 += -4) {
- if (z__[i4 - 1] <= tol2 * d__) {
- z__[i4 - 1] = -0.;
- d__ = z__[i4 - 3];
- } else {
- d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1]));
- }
-/* L50: */
- }
-
-/* dqd maps Z to ZZ plus Li's test. */
-
- emin = z__[((i0) << (2)) + pp + 1];
- d__ = z__[((i0) << (2)) + pp - 3];
- i__1 = ((n0 - 1) << (2)) + pp;
- for (i4 = ((i0) << (2)) + pp; i4 <= i__1; i4 += 4) {
- z__[i4 - ((pp) << (1)) - 2] = d__ + z__[i4 - 1];
- if (z__[i4 - 1] <= tol2 * d__) {
- z__[i4 - 1] = -0.;
- z__[i4 - ((pp) << (1)) - 2] = d__;
- z__[i4 - ((pp) << (1))] = 0.;
- d__ = z__[i4 + 1];
- } else if ((safmin * z__[i4 + 1] < z__[i4 - ((pp) << (1)) - 2] &&
- safmin * z__[i4 - ((pp) << (1)) - 2] < z__[i4 + 1])) {
- temp = z__[i4 + 1] / z__[i4 - ((pp) << (1)) - 2];
- z__[i4 - ((pp) << (1))] = z__[i4 - 1] * temp;
- d__ *= temp;
- } else {
- z__[i4 - ((pp) << (1))] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4
- - ((pp) << (1)) - 2]);
- d__ = z__[i4 + 1] * (d__ / z__[i4 - ((pp) << (1)) - 2]);
- }
-/* Computing MIN */
- d__1 = emin, d__2 = z__[i4 - ((pp) << (1))];
- emin = min(d__1,d__2);
-/* L60: */
- }
- z__[((n0) << (2)) - pp - 2] = d__;
-
-/* Now find qmax. */
-
- qmax = z__[((i0) << (2)) - pp - 2];
- i__1 = ((n0) << (2)) - pp - 2;
- for (i4 = ((i0) << (2)) - pp + 2; i4 <= i__1; i4 += 4) {
-/* Computing MAX */
- d__1 = qmax, d__2 = z__[i4];
- qmax = max(d__1,d__2);
-/* L70: */
- }
-
-/* Prepare for the next iteration on K. */
-
- pp = 1 - pp;
-/* L80: */
- }
-
- iter = 2;
- nfail = 0;
- ndiv = (n0 - i0) << (1);
-
- i__1 = *n + 1;
- for (iwhila = 1; iwhila <= i__1; ++iwhila) {
- if (n0 < 1) {
- goto L150;
- }
-
-/*
- While array unfinished do
-
-      E(N0) holds the value of SIGMA at the point where the
-      submatrix in I0:N0 splits from the rest of the array,
-      stored with its sign negated.
-*/
-
- desig = 0.;
- if (n0 == *n) {
- sigma = 0.;
- } else {
- sigma = -z__[((n0) << (2)) - 1];
- }
- if (sigma < 0.) {
- *info = 1;
- return 0;
- }
-
-/*
- Find last unreduced submatrix's top index I0, find QMAX and
- EMIN. Find Gershgorin-type bound if Q's much greater than E's.
-*/
-
- emax = 0.;
- if (n0 > i0) {
- emin = (d__1 = z__[((n0) << (2)) - 5], abs(d__1));
- } else {
- emin = 0.;
- }
- qmin = z__[((n0) << (2)) - 3];
- qmax = qmin;
- for (i4 = (n0) << (2); i4 >= 8; i4 += -4) {
- if (z__[i4 - 5] <= 0.) {
- goto L100;
- }
- if (qmin >= emax * 4.) {
-/* Computing MIN */
- d__1 = qmin, d__2 = z__[i4 - 3];
- qmin = min(d__1,d__2);
-/* Computing MAX */
- d__1 = emax, d__2 = z__[i4 - 5];
- emax = max(d__1,d__2);
- }
-/* Computing MAX */
- d__1 = qmax, d__2 = z__[i4 - 7] + z__[i4 - 5];
- qmax = max(d__1,d__2);
-/* Computing MIN */
- d__1 = emin, d__2 = z__[i4 - 5];
- emin = min(d__1,d__2);
-/* L90: */
- }
- i4 = 4;
-
-L100:
- i0 = i4 / 4;
-
-/* Store EMIN for passing to DLASQ3. */
-
- z__[((n0) << (2)) - 1] = emin;
-
-/*
- Put -(initial shift) into DMIN.
-
- Computing MAX
-*/
- d__1 = 0., d__2 = qmin - sqrt(qmin) * 2. * sqrt(emax);
- dmin__ = -max(d__1,d__2);
-
-/* Now I0:N0 is unreduced. PP = 0 for ping, PP = 1 for pong. */
-
- pp = 0;
-
- nbig = (n0 - i0 + 1) * 30;
- i__2 = nbig;
- for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) {
- if (i0 > n0) {
- goto L130;
- }
-
-/* While submatrix unfinished take a good dqds step. */
-
- dlasq3_(&i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, &
- nfail, &iter, &ndiv, &ieee);
-
- pp = 1 - pp;
-
-/* When EMIN is very small check for splits. */
-
- if ((pp == 0 && n0 - i0 >= 3)) {
- if (z__[n0 * 4] <= tol2 * qmax || z__[((n0) << (2)) - 1] <=
- tol2 * sigma) {
- splt = i0 - 1;
- qmax = z__[((i0) << (2)) - 3];
- emin = z__[((i0) << (2)) - 1];
- oldemn = z__[i0 * 4];
- i__3 = (n0 - 3) << (2);
- for (i4 = (i0) << (2); i4 <= i__3; i4 += 4) {
- if (z__[i4] <= tol2 * z__[i4 - 3] || z__[i4 - 1] <=
- tol2 * sigma) {
- z__[i4 - 1] = -sigma;
- splt = i4 / 4;
- qmax = 0.;
- emin = z__[i4 + 3];
- oldemn = z__[i4 + 4];
- } else {
-/* Computing MAX */
- d__1 = qmax, d__2 = z__[i4 + 1];
- qmax = max(d__1,d__2);
-/* Computing MIN */
- d__1 = emin, d__2 = z__[i4 - 1];
- emin = min(d__1,d__2);
-/* Computing MIN */
- d__1 = oldemn, d__2 = z__[i4];
- oldemn = min(d__1,d__2);
- }
-/* L110: */
- }
- z__[((n0) << (2)) - 1] = emin;
- z__[n0 * 4] = oldemn;
- i0 = splt + 1;
- }
- }
-
-/* L120: */
- }
-
- *info = 2;
- return 0;
-
-/* end IWHILB */
-
-L130:
-
-/* L140: */
- ;
- }
-
- *info = 3;
- return 0;
-
-/* end IWHILA */
-
-L150:
-
-/* Move q's to the front. */
-
- i__1 = *n;
- for (k = 2; k <= i__1; ++k) {
- z__[k] = z__[((k) << (2)) - 3];
-/* L160: */
- }
-
-/* Sort and compute sum of eigenvalues. */
-
- dlasrt_("D", n, &z__[1], &iinfo);
-
- e = 0.;
- for (k = *n; k >= 1; --k) {
- e += z__[k];
-/* L170: */
- }
-
-/* Store trace, sum(eigenvalues) and information on performance. */
-
- z__[((*n) << (1)) + 1] = trace;
- z__[((*n) << (1)) + 2] = e;
- z__[((*n) << (1)) + 3] = (doublereal) iter;
-/* Computing 2nd power */
- i__1 = *n;
- z__[((*n) << (1)) + 4] = (doublereal) ndiv / (doublereal) (i__1 * i__1);
- z__[((*n) << (1)) + 5] = nfail * 100. / (doublereal) iter;
- return 0;
-
-/* End of DLASQ2 */
-
-} /* dlasq2_ */
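-
-/*
-   Index map for the qd array (the macros are illustrative, not part
-   of LAPACK).  After the locality rearrangement in DLASQ2 above, Z is
-   laid out as (q1,qq1,e1,ee1,q2,qq2,e2,ee2,...), and the ping-pong
-   flag PP selects which copy is current:
-*/
-#define DQDS_Q(z, k, pp) ((z)[4 * (k) - 3 + (pp)]) /* current q(k), 1-based */
-#define DQDS_E(z, k, pp) ((z)[4 * (k) - 1 + (pp)]) /* current e(k), 1-based */
-/*
-   For example, z__[nn - 3] with nn = 4*N0 + PP in DLASQ3 below is
-   q(N0).
-*/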
-
-/* Subroutine */ int dlasq3_(integer *i0, integer *n0, doublereal *z__,
- integer *pp, doublereal *dmin__, doublereal *sigma, doublereal *desig,
- doublereal *qmax, integer *nfail, integer *iter, integer *ndiv,
- logical *ieee)
-{
- /* Initialized data */
-
- static integer ttype = 0;
- static doublereal dmin1 = 0.;
- static doublereal dmin2 = 0.;
- static doublereal dn = 0.;
- static doublereal dn1 = 0.;
- static doublereal dn2 = 0.;
- static doublereal tau = 0.;
-
- /* System generated locals */
- integer i__1;
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal s, t;
- static integer j4, nn;
- static doublereal eps, tol;
- static integer n0in, ipn4;
- static doublereal tol2, temp;
- extern /* Subroutine */ int dlasq4_(integer *, integer *, doublereal *,
- integer *, integer *, doublereal *, doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *, integer *)
- , dlasq5_(integer *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *, logical *), dlasq6_(
- integer *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *);
-
- static doublereal safmin;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- May 17, 2000
-
-
- Purpose
- =======
-
- DLASQ3 checks for deflation, computes a shift (TAU) and calls dqds.
- In case of failure it changes shifts, and tries again until output
- is positive.
-
- Arguments
- =========
-
- I0 (input) INTEGER
- First index.
-
- N0 (input) INTEGER
- Last index.
-
-    Z      (input/output) DOUBLE PRECISION array, dimension ( 4*N )
- Z holds the qd array.
-
- PP (input) INTEGER
- PP=0 for ping, PP=1 for pong.
-
- DMIN (output) DOUBLE PRECISION
- Minimum value of d.
-
- SIGMA (output) DOUBLE PRECISION
- Sum of shifts used in current segment.
-
- DESIG (input/output) DOUBLE PRECISION
- Lower order part of SIGMA
-
- QMAX (input) DOUBLE PRECISION
- Maximum value of q.
-
- NFAIL (output) INTEGER
- Number of times shift was too big.
-
- ITER (output) INTEGER
- Number of iterations.
-
- NDIV (output) INTEGER
- Number of divisions.
-
-    TTYPE  (output) INTEGER
-           Shift type.  (In this f2c translation TTYPE is a local
-           SAVE variable rather than an argument.)
-
- IEEE (input) LOGICAL
- Flag for IEEE or non IEEE arithmetic (passed to DLASQ5).
-
- =====================================================================
-*/
-
- /* Parameter adjustments */
- --z__;
-
- /* Function Body */
-
- n0in = *n0;
- eps = PRECISION;
- safmin = SAFEMINIMUM;
- tol = eps * 100.;
-/* Computing 2nd power */
- d__1 = tol;
- tol2 = d__1 * d__1;
-
-/* Check for deflation. */
-
-L10:
-
- if (*n0 < *i0) {
- return 0;
- }
- if (*n0 == *i0) {
- goto L20;
- }
- nn = ((*n0) << (2)) + *pp;
- if (*n0 == *i0 + 1) {
- goto L40;
- }
-
-/* Check whether E(N0-1) is negligible, 1 eigenvalue. */
-
- if ((z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) && z__[nn - ((*pp) << (1)
- ) - 4] > tol2 * z__[nn - 7])) {
- goto L30;
- }
-
-L20:
-
- z__[((*n0) << (2)) - 3] = z__[((*n0) << (2)) + *pp - 3] + *sigma;
- --(*n0);
- goto L10;
-
-/* Check whether E(N0-2) is negligible, 2 eigenvalues. */
-
-L30:
-
- if ((z__[nn - 9] > tol2 * *sigma && z__[nn - ((*pp) << (1)) - 8] > tol2 *
- z__[nn - 11])) {
- goto L50;
- }
-
-L40:
-
- if (z__[nn - 3] > z__[nn - 7]) {
- s = z__[nn - 3];
- z__[nn - 3] = z__[nn - 7];
- z__[nn - 7] = s;
- }
- if (z__[nn - 5] > z__[nn - 3] * tol2) {
- t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5;
- s = z__[nn - 3] * (z__[nn - 5] / t);
- if (s <= t) {
- s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt(s / t + 1.) + 1.)));
- } else {
- s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt(t) * sqrt(t + s)));
- }
- t = z__[nn - 7] + (s + z__[nn - 5]);
- z__[nn - 3] *= z__[nn - 7] / t;
- z__[nn - 7] = t;
- }
- z__[((*n0) << (2)) - 7] = z__[nn - 7] + *sigma;
- z__[((*n0) << (2)) - 3] = z__[nn - 3] + *sigma;
- *n0 += -2;
- goto L10;
-
-L50:
-
-/* Reverse the qd-array, if warranted. */
-
- if (*dmin__ <= 0. || *n0 < n0in) {
- if (z__[((*i0) << (2)) + *pp - 3] * 1.5 < z__[((*n0) << (2)) + *pp -
- 3]) {
- ipn4 = (*i0 + *n0) << (2);
- i__1 = (*i0 + *n0 - 1) << (1);
- for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
- temp = z__[j4 - 3];
- z__[j4 - 3] = z__[ipn4 - j4 - 3];
- z__[ipn4 - j4 - 3] = temp;
- temp = z__[j4 - 2];
- z__[j4 - 2] = z__[ipn4 - j4 - 2];
- z__[ipn4 - j4 - 2] = temp;
- temp = z__[j4 - 1];
- z__[j4 - 1] = z__[ipn4 - j4 - 5];
- z__[ipn4 - j4 - 5] = temp;
- temp = z__[j4];
- z__[j4] = z__[ipn4 - j4 - 4];
- z__[ipn4 - j4 - 4] = temp;
-/* L60: */
- }
- if (*n0 - *i0 <= 4) {
- z__[((*n0) << (2)) + *pp - 1] = z__[((*i0) << (2)) + *pp - 1];
- z__[((*n0) << (2)) - *pp] = z__[((*i0) << (2)) - *pp];
- }
-/* Computing MIN */
- d__1 = dmin2, d__2 = z__[((*n0) << (2)) + *pp - 1];
- dmin2 = min(d__1,d__2);
-/* Computing MIN */
- d__1 = z__[((*n0) << (2)) + *pp - 1], d__2 = z__[((*i0) << (2)) +
- *pp - 1], d__1 = min(d__1,d__2), d__2 = z__[((*i0) << (2))
- + *pp + 3];
- z__[((*n0) << (2)) + *pp - 1] = min(d__1,d__2);
-/* Computing MIN */
- d__1 = z__[((*n0) << (2)) - *pp], d__2 = z__[((*i0) << (2)) - *pp]
- , d__1 = min(d__1,d__2), d__2 = z__[((*i0) << (2)) - *pp
- + 4];
- z__[((*n0) << (2)) - *pp] = min(d__1,d__2);
-/* Computing MAX */
- d__1 = *qmax, d__2 = z__[((*i0) << (2)) + *pp - 3], d__1 = max(
- d__1,d__2), d__2 = z__[((*i0) << (2)) + *pp + 1];
- *qmax = max(d__1,d__2);
- *dmin__ = -0.;
- }
- }
-
-/*
- L70:
-
- Computing MIN
-*/
- d__1 = z__[((*n0) << (2)) + *pp - 1], d__2 = z__[((*n0) << (2)) + *pp - 9]
- , d__1 = min(d__1,d__2), d__2 = dmin2 + z__[((*n0) << (2)) - *pp];
- if (*dmin__ < 0. || safmin * *qmax < min(d__1,d__2)) {
-
-/* Choose a shift. */
-
- dlasq4_(i0, n0, &z__[1], pp, &n0in, dmin__, &dmin1, &dmin2, &dn, &dn1,
- &dn2, &tau, &ttype);
-
-/* Call dqds until DMIN > 0. */
-
-L80:
-
- dlasq5_(i0, n0, &z__[1], pp, &tau, dmin__, &dmin1, &dmin2, &dn, &dn1,
- &dn2, ieee);
-
- *ndiv += *n0 - *i0 + 2;
- ++(*iter);
-
-/* Check status. */
-
- if ((*dmin__ >= 0. && dmin1 > 0.)) {
-
-/* Success. */
-
- goto L100;
-
- } else if ((((*dmin__ < 0. && dmin1 > 0.) && z__[((*n0 - 1) << (2)) -
- *pp] < tol * (*sigma + dn1)) && abs(dn) < tol * *sigma)) {
-
-/* Convergence hidden by negative DN. */
-
- z__[((*n0 - 1) << (2)) - *pp + 2] = 0.;
- *dmin__ = 0.;
- goto L100;
- } else if (*dmin__ < 0.) {
-
-/* TAU too big. Select new TAU and try again. */
-
- ++(*nfail);
- if (ttype < -22) {
-
-/* Failed twice. Play it safe. */
-
- tau = 0.;
- } else if (dmin1 > 0.) {
-
-/* Late failure. Gives excellent shift. */
-
- tau = (tau + *dmin__) * (1. - eps * 2.);
- ttype += -11;
- } else {
-
-/* Early failure. Divide by 4. */
-
- tau *= .25;
- ttype += -12;
- }
- goto L80;
- } else if (*dmin__ != *dmin__) {
-
-/* NaN. */
-
- tau = 0.;
- goto L80;
- } else {
-
-/* Possible underflow. Play it safe. */
-
- goto L90;
- }
- }
-
-/* Risk of underflow. */
-
-L90:
- dlasq6_(i0, n0, &z__[1], pp, dmin__, &dmin1, &dmin2, &dn, &dn1, &dn2);
- *ndiv += *n0 - *i0 + 2;
- ++(*iter);
- tau = 0.;
-
-L100:
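-/*
-   The accumulated shift is maintained as the unevaluated sum
-   SIGMA + DESIG (high- and low-order parts).  The branch below folds
-   TAU into that pair with a compensated, Kahan-style update, so the
-   low-order bits lost when forming SIGMA + TAU are recovered in
-   DESIG.
-*/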
- if (tau < *sigma) {
- *desig += tau;
- t = *sigma + *desig;
- *desig -= t - *sigma;
- } else {
- t = *sigma + tau;
- *desig = *sigma - (t - tau) + *desig;
- }
- *sigma = t;
-
- return 0;
-
-/* End of DLASQ3 */
-
-} /* dlasq3_ */
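-
-/*
-   A minimal standalone restatement of the shift back-off policy used
-   in DLASQ3 above when DLASQ5 comes back with a negative DMIN.  A
-   sketch for orientation only; the helper name is ours, not LAPACK's:
-*/
-static double dlasq3_backoff_sketch(double tau, double dmin, double dmin1,
-                                    double eps, int *ttype)
-{
-    if (*ttype < -22) {
-        /* Failed twice: play it safe with a zero shift. */
-        return 0.;
-    } else if (dmin1 > 0.) {
-        /* Late failure: TAU + DMIN gives an excellent shift. */
-        *ttype += -11;
-        return (tau + dmin) * (1. - eps * 2.);
-    } else {
-        /* Early failure: divide the shift by 4. */
-        *ttype += -12;
-        return tau * .25;
-    }
-}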
-
-/* Subroutine */ int dlasq4_(integer *i0, integer *n0, doublereal *z__,
- integer *pp, integer *n0in, doublereal *dmin__, doublereal *dmin1,
- doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2,
- doublereal *tau, integer *ttype)
-{
- /* Initialized data */
-
- static doublereal g = 0.;
-
- /* System generated locals */
- integer i__1;
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal s, a2, b1, b2;
- static integer i4, nn, np;
- static doublereal gam, gap1, gap2;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- DLASQ4 computes an approximation TAU to the smallest eigenvalue
- using values of d from the previous transform.
-
-    Arguments
-    =========
-
- I0 (input) INTEGER
- First index.
-
- N0 (input) INTEGER
- Last index.
-
- Z (input) DOUBLE PRECISION array, dimension ( 4*N )
- Z holds the qd array.
-
- PP (input) INTEGER
- PP=0 for ping, PP=1 for pong.
-
-    N0IN  (input) INTEGER
- The value of N0 at start of EIGTEST.
-
- DMIN (input) DOUBLE PRECISION
- Minimum value of d.
-
- DMIN1 (input) DOUBLE PRECISION
- Minimum value of d, excluding D( N0 ).
-
- DMIN2 (input) DOUBLE PRECISION
- Minimum value of d, excluding D( N0 ) and D( N0-1 ).
-
- DN (input) DOUBLE PRECISION
- d(N)
-
- DN1 (input) DOUBLE PRECISION
- d(N-1)
-
- DN2 (input) DOUBLE PRECISION
- d(N-2)
-
- TAU (output) DOUBLE PRECISION
- This is the shift.
-
- TTYPE (output) INTEGER
- Shift type.
-
- Further Details
- ===============
- CNST1 = 9/16
-
- =====================================================================
-*/
-
- /* Parameter adjustments */
- --z__;
-
- /* Function Body */
-
-/*
- A negative DMIN forces the shift to take that absolute value
- TTYPE records the type of shift.
-*/
-
- if (*dmin__ <= 0.) {
- *tau = -(*dmin__);
- *ttype = -1;
- return 0;
- }
-
- nn = ((*n0) << (2)) + *pp;
- if (*n0in == *n0) {
-
-/* No eigenvalues deflated. */
-
- if (*dmin__ == *dn || *dmin__ == *dn1) {
-
- b1 = sqrt(z__[nn - 3]) * sqrt(z__[nn - 5]);
- b2 = sqrt(z__[nn - 7]) * sqrt(z__[nn - 9]);
- a2 = z__[nn - 7] + z__[nn - 5];
-
-/* Cases 2 and 3. */
-
- if ((*dmin__ == *dn && *dmin1 == *dn1)) {
- gap2 = *dmin2 - a2 - *dmin2 * .25;
- if ((gap2 > 0. && gap2 > b2)) {
- gap1 = a2 - *dn - b2 / gap2 * b2;
- } else {
- gap1 = a2 - *dn - (b1 + b2);
- }
- if ((gap1 > 0. && gap1 > b1)) {
-/* Computing MAX */
- d__1 = *dn - b1 / gap1 * b1, d__2 = *dmin__ * .5;
- s = max(d__1,d__2);
- *ttype = -2;
- } else {
- s = 0.;
- if (*dn > b1) {
- s = *dn - b1;
- }
- if (a2 > b1 + b2) {
-/* Computing MIN */
- d__1 = s, d__2 = a2 - (b1 + b2);
- s = min(d__1,d__2);
- }
-/* Computing MAX */
- d__1 = s, d__2 = *dmin__ * .333;
- s = max(d__1,d__2);
- *ttype = -3;
- }
- } else {
-
-/* Case 4. */
-
- *ttype = -4;
- s = *dmin__ * .25;
- if (*dmin__ == *dn) {
- gam = *dn;
- a2 = 0.;
- if (z__[nn - 5] > z__[nn - 7]) {
- return 0;
- }
- b2 = z__[nn - 5] / z__[nn - 7];
- np = nn - 9;
- } else {
- np = nn - ((*pp) << (1));
- b2 = z__[np - 2];
- gam = *dn1;
- if (z__[np - 4] > z__[np - 2]) {
- return 0;
- }
- a2 = z__[np - 4] / z__[np - 2];
- if (z__[nn - 9] > z__[nn - 11]) {
- return 0;
- }
- b2 = z__[nn - 9] / z__[nn - 11];
- np = nn - 13;
- }
-
-/* Approximate contribution to norm squared from I < NN-1. */
-
- a2 += b2;
- i__1 = ((*i0) << (2)) - 1 + *pp;
- for (i4 = np; i4 >= i__1; i4 += -4) {
- if (b2 == 0.) {
- goto L20;
- }
- b1 = b2;
- if (z__[i4] > z__[i4 - 2]) {
- return 0;
- }
- b2 *= z__[i4] / z__[i4 - 2];
- a2 += b2;
- if (max(b2,b1) * 100. < a2 || .563 < a2) {
- goto L20;
- }
-/* L10: */
- }
-L20:
- a2 *= 1.05;
-
-/* Rayleigh quotient residual bound. */
-
- if (a2 < .563) {
- s = gam * (1. - sqrt(a2)) / (a2 + 1.);
- }
- }
- } else if (*dmin__ == *dn2) {
-
-/* Case 5. */
-
- *ttype = -5;
- s = *dmin__ * .25;
-
-/* Compute contribution to norm squared from I > NN-2. */
-
- np = nn - ((*pp) << (1));
- b1 = z__[np - 2];
- b2 = z__[np - 6];
- gam = *dn2;
- if (z__[np - 8] > b2 || z__[np - 4] > b1) {
- return 0;
- }
- a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.);
-
-/* Approximate contribution to norm squared from I < NN-2. */
-
- if (*n0 - *i0 > 2) {
- b2 = z__[nn - 13] / z__[nn - 15];
- a2 += b2;
- i__1 = ((*i0) << (2)) - 1 + *pp;
- for (i4 = nn - 17; i4 >= i__1; i4 += -4) {
- if (b2 == 0.) {
- goto L40;
- }
- b1 = b2;
- if (z__[i4] > z__[i4 - 2]) {
- return 0;
- }
- b2 *= z__[i4] / z__[i4 - 2];
- a2 += b2;
- if (max(b2,b1) * 100. < a2 || .563 < a2) {
- goto L40;
- }
-/* L30: */
- }
-L40:
- a2 *= 1.05;
- }
-
- if (a2 < .563) {
- s = gam * (1. - sqrt(a2)) / (a2 + 1.);
- }
- } else {
-
-/* Case 6, no information to guide us. */
-
- if (*ttype == -6) {
- g += (1. - g) * .333;
- } else if (*ttype == -18) {
- g = .083250000000000005;
- } else {
- g = .25;
- }
- s = g * *dmin__;
- *ttype = -6;
- }
-
- } else if (*n0in == *n0 + 1) {
-
-/* One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. */
-
- if ((*dmin1 == *dn1 && *dmin2 == *dn2)) {
-
-/* Cases 7 and 8. */
-
- *ttype = -7;
- s = *dmin1 * .333;
- if (z__[nn - 5] > z__[nn - 7]) {
- return 0;
- }
- b1 = z__[nn - 5] / z__[nn - 7];
- b2 = b1;
- if (b2 == 0.) {
- goto L60;
- }
- i__1 = ((*i0) << (2)) - 1 + *pp;
- for (i4 = ((*n0) << (2)) - 9 + *pp; i4 >= i__1; i4 += -4) {
- a2 = b1;
- if (z__[i4] > z__[i4 - 2]) {
- return 0;
- }
- b1 *= z__[i4] / z__[i4 - 2];
- b2 += b1;
- if (max(b1,a2) * 100. < b2) {
- goto L60;
- }
-/* L50: */
- }
-L60:
- b2 = sqrt(b2 * 1.05);
-/* Computing 2nd power */
- d__1 = b2;
- a2 = *dmin1 / (d__1 * d__1 + 1.);
- gap2 = *dmin2 * .5 - a2;
- if ((gap2 > 0. && gap2 > b2 * a2)) {
-/* Computing MAX */
- d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2);
- s = max(d__1,d__2);
- } else {
-/* Computing MAX */
- d__1 = s, d__2 = a2 * (1. - b2 * 1.01);
- s = max(d__1,d__2);
- *ttype = -8;
- }
- } else {
-
-/* Case 9. */
-
- s = *dmin1 * .25;
- if (*dmin1 == *dn1) {
- s = *dmin1 * .5;
- }
- *ttype = -9;
- }
-
- } else if (*n0in == *n0 + 2) {
-
-/*
- Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN.
-
- Cases 10 and 11.
-*/
-
- if ((*dmin2 == *dn2 && z__[nn - 5] * 2. < z__[nn - 7])) {
- *ttype = -10;
- s = *dmin2 * .333;
- if (z__[nn - 5] > z__[nn - 7]) {
- return 0;
- }
- b1 = z__[nn - 5] / z__[nn - 7];
- b2 = b1;
- if (b2 == 0.) {
- goto L80;
- }
- i__1 = ((*i0) << (2)) - 1 + *pp;
- for (i4 = ((*n0) << (2)) - 9 + *pp; i4 >= i__1; i4 += -4) {
- if (z__[i4] > z__[i4 - 2]) {
- return 0;
- }
- b1 *= z__[i4] / z__[i4 - 2];
- b2 += b1;
- if (b1 * 100. < b2) {
- goto L80;
- }
-/* L70: */
- }
-L80:
- b2 = sqrt(b2 * 1.05);
-/* Computing 2nd power */
- d__1 = b2;
- a2 = *dmin2 / (d__1 * d__1 + 1.);
- gap2 = z__[nn - 7] + z__[nn - 9] - sqrt(z__[nn - 11]) * sqrt(z__[
- nn - 9]) - a2;
- if ((gap2 > 0. && gap2 > b2 * a2)) {
-/* Computing MAX */
- d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2);
- s = max(d__1,d__2);
- } else {
-/* Computing MAX */
- d__1 = s, d__2 = a2 * (1. - b2 * 1.01);
- s = max(d__1,d__2);
- }
- } else {
- s = *dmin2 * .25;
- *ttype = -11;
- }
- } else if (*n0in > *n0 + 2) {
-
-/* Case 12, more than two eigenvalues deflated. No information. */
-
- s = 0.;
- *ttype = -12;
- }
-
- *tau = s;
- return 0;
-
-/* End of DLASQ4 */
-
-} /* dlasq4_ */
-
-/* Subroutine */ int dlasq5_(integer *i0, integer *n0, doublereal *z__,
- integer *pp, doublereal *tau, doublereal *dmin__, doublereal *dmin1,
- doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2,
- logical *ieee)
-{
- /* System generated locals */
- integer i__1;
- doublereal d__1, d__2;
-
- /* Local variables */
- static doublereal d__;
- static integer j4, j4p2;
- static doublereal emin, temp;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- May 17, 2000
-
-
- Purpose
- =======
-
-    DLASQ5 computes one dqds transform in ping-pong form: one
-    version for IEEE machines, another for non-IEEE machines.
-
- Arguments
- =========
-
- I0 (input) INTEGER
- First index.
-
- N0 (input) INTEGER
- Last index.
-
-    Z     (input/output) DOUBLE PRECISION array, dimension ( 4*N )
- Z holds the qd array. EMIN is stored in Z(4*N0) to avoid
- an extra argument.
-
- PP (input) INTEGER
- PP=0 for ping, PP=1 for pong.
-
- TAU (input) DOUBLE PRECISION
- This is the shift.
-
- DMIN (output) DOUBLE PRECISION
- Minimum value of d.
-
- DMIN1 (output) DOUBLE PRECISION
- Minimum value of d, excluding D( N0 ).
-
- DMIN2 (output) DOUBLE PRECISION
- Minimum value of d, excluding D( N0 ) and D( N0-1 ).
-
- DN (output) DOUBLE PRECISION
- d(N0), the last value of d.
-
- DNM1 (output) DOUBLE PRECISION
- d(N0-1).
-
- DNM2 (output) DOUBLE PRECISION
- d(N0-2).
-
- IEEE (input) LOGICAL
- Flag for IEEE or non IEEE arithmetic.
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- --z__;
-
- /* Function Body */
- if (*n0 - *i0 - 1 <= 0) {
- return 0;
- }
-
- j4 = ((*i0) << (2)) + *pp - 3;
- emin = z__[j4 + 4];
- d__ = z__[j4] - *tau;
- *dmin__ = d__;
- *dmin1 = -z__[j4];
-
- if (*ieee) {
-
-/* Code for IEEE arithmetic. */
-
- if (*pp == 0) {
- i__1 = (*n0 - 3) << (2);
- for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
- z__[j4 - 2] = d__ + z__[j4 - 1];
- temp = z__[j4 + 1] / z__[j4 - 2];
- d__ = d__ * temp - *tau;
- *dmin__ = min(*dmin__,d__);
- z__[j4] = z__[j4 - 1] * temp;
-/* Computing MIN */
- d__1 = z__[j4];
- emin = min(d__1,emin);
-/* L10: */
- }
- } else {
- i__1 = (*n0 - 3) << (2);
- for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
- z__[j4 - 3] = d__ + z__[j4];
- temp = z__[j4 + 2] / z__[j4 - 3];
- d__ = d__ * temp - *tau;
- *dmin__ = min(*dmin__,d__);
- z__[j4 - 1] = z__[j4] * temp;
-/* Computing MIN */
- d__1 = z__[j4 - 1];
- emin = min(d__1,emin);
-/* L20: */
- }
- }
-
-/* Unroll last two steps. */
-
- *dnm2 = d__;
- *dmin2 = *dmin__;
- j4 = ((*n0 - 2) << (2)) - *pp;
- j4p2 = j4 + ((*pp) << (1)) - 1;
- z__[j4 - 2] = *dnm2 + z__[j4p2];
- z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
- *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
- *dmin__ = min(*dmin__,*dnm1);
-
- *dmin1 = *dmin__;
- j4 += 4;
- j4p2 = j4 + ((*pp) << (1)) - 1;
- z__[j4 - 2] = *dnm1 + z__[j4p2];
- z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
- *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
- *dmin__ = min(*dmin__,*dn);
-
- } else {
-
-/* Code for non IEEE arithmetic. */
-
- if (*pp == 0) {
- i__1 = (*n0 - 3) << (2);
- for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
- z__[j4 - 2] = d__ + z__[j4 - 1];
- if (d__ < 0.) {
- return 0;
- } else {
- z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
- d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau;
- }
- *dmin__ = min(*dmin__,d__);
-/* Computing MIN */
- d__1 = emin, d__2 = z__[j4];
- emin = min(d__1,d__2);
-/* L30: */
- }
- } else {
- i__1 = (*n0 - 3) << (2);
- for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
- z__[j4 - 3] = d__ + z__[j4];
- if (d__ < 0.) {
- return 0;
- } else {
- z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
- d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau;
- }
- *dmin__ = min(*dmin__,d__);
-/* Computing MIN */
- d__1 = emin, d__2 = z__[j4 - 1];
- emin = min(d__1,d__2);
-/* L40: */
- }
- }
-
-/* Unroll last two steps. */
-
- *dnm2 = d__;
- *dmin2 = *dmin__;
- j4 = ((*n0 - 2) << (2)) - *pp;
- j4p2 = j4 + ((*pp) << (1)) - 1;
- z__[j4 - 2] = *dnm2 + z__[j4p2];
- if (*dnm2 < 0.) {
- return 0;
- } else {
- z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
- *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
- }
- *dmin__ = min(*dmin__,*dnm1);
-
- *dmin1 = *dmin__;
- j4 += 4;
- j4p2 = j4 + ((*pp) << (1)) - 1;
- z__[j4 - 2] = *dnm1 + z__[j4p2];
- if (*dnm1 < 0.) {
- return 0;
- } else {
- z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
- *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
- }
- *dmin__ = min(*dmin__,*dn);
-
- }
-
- z__[j4 + 2] = *dn;
- z__[((*n0) << (2)) - *pp] = emin;
- return 0;
-
-/* End of DLASQ5 */
-
-} /* dlasq5_ */
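-
-/*
-   The loops above interleave this recurrence with the ping-pong
-   packing of Z.  On flat q/e arrays, one shifted dqds step is the
-   following; a sketch assuming IEEE arithmetic, with the two-step
-   unrolled tail of DLASQ5 omitted (helper name ours):
-*/
-static double dqds_step_sketch(int n, const double *q, const double *e,
-                               double *qq, double *ee, double tau)
-{
-    int i;
-    double d = q[0] - tau;
-    double dmin = d;
-
-    for (i = 0; i < n - 1; i++) {
-        double t;
-        qq[i] = d + e[i];         /* qhat(i) = d(i) + e(i)                  */
-        t = q[i + 1] / qq[i];
-        ee[i] = e[i] * t;         /* ehat(i) = e(i) * q(i+1) / qhat(i)      */
-        d = d * t - tau;          /* d(i+1) = d(i) * q(i+1) / qhat(i) - tau */
-        if (d < dmin) {
-            dmin = d;
-        }
-    }
-    qq[n - 1] = d;                /* qhat(n) = d(n) */
-    return dmin;                  /* dmin < 0 signals the shift was too big */
-}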
-
-/* Subroutine */ int dlasq6_(integer *i0, integer *n0, doublereal *z__,
- integer *pp, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2,
- doublereal *dn, doublereal *dnm1, doublereal *dnm2)
-{
- /* System generated locals */
- integer i__1;
- doublereal d__1, d__2;
-
- /* Local variables */
- static doublereal d__;
- static integer j4, j4p2;
- static doublereal emin, temp;
-
- static doublereal safmin;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1999
-
-
- Purpose
- =======
-
- DLASQ6 computes one dqd (shift equal to zero) transform in
- ping-pong form, with protection against underflow and overflow.
-
- Arguments
- =========
-
- I0 (input) INTEGER
- First index.
-
- N0 (input) INTEGER
- Last index.
-
-    Z     (input/output) DOUBLE PRECISION array, dimension ( 4*N )
- Z holds the qd array. EMIN is stored in Z(4*N0) to avoid
- an extra argument.
-
- PP (input) INTEGER
- PP=0 for ping, PP=1 for pong.
-
- DMIN (output) DOUBLE PRECISION
- Minimum value of d.
-
- DMIN1 (output) DOUBLE PRECISION
- Minimum value of d, excluding D( N0 ).
-
- DMIN2 (output) DOUBLE PRECISION
- Minimum value of d, excluding D( N0 ) and D( N0-1 ).
-
- DN (output) DOUBLE PRECISION
- d(N0), the last value of d.
-
- DNM1 (output) DOUBLE PRECISION
- d(N0-1).
-
- DNM2 (output) DOUBLE PRECISION
- d(N0-2).
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- --z__;
-
- /* Function Body */
- if (*n0 - *i0 - 1 <= 0) {
- return 0;
- }
-
- safmin = SAFEMINIMUM;
- j4 = ((*i0) << (2)) + *pp - 3;
- emin = z__[j4 + 4];
- d__ = z__[j4];
- *dmin__ = d__;
-
- if (*pp == 0) {
- i__1 = (*n0 - 3) << (2);
- for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
- z__[j4 - 2] = d__ + z__[j4 - 1];
- if (z__[j4 - 2] == 0.) {
- z__[j4] = 0.;
- d__ = z__[j4 + 1];
- *dmin__ = d__;
- emin = 0.;
- } else if ((safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4
- - 2] < z__[j4 + 1])) {
- temp = z__[j4 + 1] / z__[j4 - 2];
- z__[j4] = z__[j4 - 1] * temp;
- d__ *= temp;
- } else {
- z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
- d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]);
- }
- *dmin__ = min(*dmin__,d__);
-/* Computing MIN */
- d__1 = emin, d__2 = z__[j4];
- emin = min(d__1,d__2);
-/* L10: */
- }
- } else {
- i__1 = (*n0 - 3) << (2);
- for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
- z__[j4 - 3] = d__ + z__[j4];
- if (z__[j4 - 3] == 0.) {
- z__[j4 - 1] = 0.;
- d__ = z__[j4 + 2];
- *dmin__ = d__;
- emin = 0.;
- } else if ((safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4
- - 3] < z__[j4 + 2])) {
- temp = z__[j4 + 2] / z__[j4 - 3];
- z__[j4 - 1] = z__[j4] * temp;
- d__ *= temp;
- } else {
- z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
- d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]);
- }
- *dmin__ = min(*dmin__,d__);
-/* Computing MIN */
- d__1 = emin, d__2 = z__[j4 - 1];
- emin = min(d__1,d__2);
-/* L20: */
- }
- }
-
-/* Unroll last two steps. */
-
- *dnm2 = d__;
- *dmin2 = *dmin__;
- j4 = ((*n0 - 2) << (2)) - *pp;
- j4p2 = j4 + ((*pp) << (1)) - 1;
- z__[j4 - 2] = *dnm2 + z__[j4p2];
- if (z__[j4 - 2] == 0.) {
- z__[j4] = 0.;
- *dnm1 = z__[j4p2 + 2];
- *dmin__ = *dnm1;
- emin = 0.;
- } else if ((safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
- z__[j4p2 + 2])) {
- temp = z__[j4p2 + 2] / z__[j4 - 2];
- z__[j4] = z__[j4p2] * temp;
- *dnm1 = *dnm2 * temp;
- } else {
- z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
- *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]);
- }
- *dmin__ = min(*dmin__,*dnm1);
-
- *dmin1 = *dmin__;
- j4 += 4;
- j4p2 = j4 + ((*pp) << (1)) - 1;
- z__[j4 - 2] = *dnm1 + z__[j4p2];
- if (z__[j4 - 2] == 0.) {
- z__[j4] = 0.;
- *dn = z__[j4p2 + 2];
- *dmin__ = *dn;
- emin = 0.;
- } else if ((safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
- z__[j4p2 + 2])) {
- temp = z__[j4p2 + 2] / z__[j4 - 2];
- z__[j4] = z__[j4p2] * temp;
- *dn = *dnm1 * temp;
- } else {
- z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
- *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]);
- }
- *dmin__ = min(*dmin__,*dn);
-
- z__[j4 + 2] = *dn;
- z__[((*n0) << (2)) - *pp] = emin;
- return 0;
-
-/* End of DLASQ6 */
-
-} /* dlasq6_ */
-
-/* Subroutine */ int dlasr_(char *side, char *pivot, char *direct, integer *m,
- integer *n, doublereal *c__, doublereal *s, doublereal *a, integer *
- lda)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, j, info;
- static doublereal temp;
- extern logical lsame_(char *, char *);
- static doublereal ctemp, stemp;
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLASR performs the transformation
-
- A := P*A, when SIDE = 'L' or 'l' ( Left-hand side )
-
- A := A*P', when SIDE = 'R' or 'r' ( Right-hand side )
-
- where A is an m by n real matrix and P is an orthogonal matrix,
- consisting of a sequence of plane rotations determined by the
- parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l'
- and z = n when SIDE = 'R' or 'r' ):
-
- When DIRECT = 'F' or 'f' ( Forward sequence ) then
-
- P = P( z - 1 )*...*P( 2 )*P( 1 ),
-
- and when DIRECT = 'B' or 'b' ( Backward sequence ) then
-
- P = P( 1 )*P( 2 )*...*P( z - 1 ),
-
- where P( k ) is a plane rotation matrix for the following planes:
-
- when PIVOT = 'V' or 'v' ( Variable pivot ),
- the plane ( k, k + 1 )
-
- when PIVOT = 'T' or 't' ( Top pivot ),
- the plane ( 1, k + 1 )
-
- when PIVOT = 'B' or 'b' ( Bottom pivot ),
- the plane ( k, z )
-
- c( k ) and s( k ) must contain the cosine and sine that define the
- matrix P( k ). The two by two plane rotation part of the matrix
- P( k ), R( k ), is assumed to be of the form
-
- R( k ) = ( c( k ) s( k ) ).
- ( -s( k ) c( k ) )
-
- This version vectorises across rows of the array A when SIDE = 'L'.
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- Specifies whether the plane rotation matrix P is applied to
- A on the left or the right.
- = 'L': Left, compute A := P*A
- = 'R': Right, compute A:= A*P'
-
- DIRECT (input) CHARACTER*1
- Specifies whether P is a forward or backward sequence of
- plane rotations.
- = 'F': Forward, P = P( z - 1 )*...*P( 2 )*P( 1 )
- = 'B': Backward, P = P( 1 )*P( 2 )*...*P( z - 1 )
-
- PIVOT (input) CHARACTER*1
- Specifies the plane for which P(k) is a plane rotation
- matrix.
- = 'V': Variable pivot, the plane (k,k+1)
- = 'T': Top pivot, the plane (1,k+1)
- = 'B': Bottom pivot, the plane (k,z)
-
- M (input) INTEGER
- The number of rows of the matrix A. If m <= 1, an immediate
- return is effected.
-
- N (input) INTEGER
- The number of columns of the matrix A. If n <= 1, an
- immediate return is effected.
-
- C, S (input) DOUBLE PRECISION arrays, dimension
- (M-1) if SIDE = 'L'
- (N-1) if SIDE = 'R'
- c(k) and s(k) contain the cosine and sine that define the
- matrix P(k). The two by two plane rotation part of the
- matrix P(k), R(k), is assumed to be of the form
- R( k ) = ( c( k ) s( k ) ).
- ( -s( k ) c( k ) )
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
-            The m by n matrix A.  On exit, A is overwritten by P*A if
-            SIDE = 'L' or by A*P' if SIDE = 'R'.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- =====================================================================
-
-
- Test the input parameters
-*/
-
- /* Parameter adjustments */
- --c__;
- --s;
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- info = 0;
- if (! (lsame_(side, "L") || lsame_(side, "R"))) {
- info = 1;
- } else if (! (lsame_(pivot, "V") || lsame_(pivot,
- "T") || lsame_(pivot, "B"))) {
- info = 2;
- } else if (! (lsame_(direct, "F") || lsame_(direct,
- "B"))) {
- info = 3;
- } else if (*m < 0) {
- info = 4;
- } else if (*n < 0) {
- info = 5;
- } else if (*lda < max(1,*m)) {
- info = 9;
- }
- if (info != 0) {
- xerbla_("DLASR ", &info);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0) {
- return 0;
- }
- if (lsame_(side, "L")) {
-
-/* Form P * A */
-
- if (lsame_(pivot, "V")) {
- if (lsame_(direct, "F")) {
- i__1 = *m - 1;
- for (j = 1; j <= i__1; ++j) {
- ctemp = c__[j];
- stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
- i__2 = *n;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp = a[j + 1 + i__ * a_dim1];
- a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
- a[j + i__ * a_dim1];
- a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
- + i__ * a_dim1];
-/* L10: */
- }
- }
-/* L20: */
- }
- } else if (lsame_(direct, "B")) {
- for (j = *m - 1; j >= 1; --j) {
- ctemp = c__[j];
- stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- temp = a[j + 1 + i__ * a_dim1];
- a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
- a[j + i__ * a_dim1];
- a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
- + i__ * a_dim1];
-/* L30: */
- }
- }
-/* L40: */
- }
- }
- } else if (lsame_(pivot, "T")) {
- if (lsame_(direct, "F")) {
- i__1 = *m;
- for (j = 2; j <= i__1; ++j) {
- ctemp = c__[j - 1];
- stemp = s[j - 1];
- if (ctemp != 1. || stemp != 0.) {
- i__2 = *n;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp = a[j + i__ * a_dim1];
- a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
- i__ * a_dim1 + 1];
- a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
- i__ * a_dim1 + 1];
-/* L50: */
- }
- }
-/* L60: */
- }
- } else if (lsame_(direct, "B")) {
- for (j = *m; j >= 2; --j) {
- ctemp = c__[j - 1];
- stemp = s[j - 1];
- if (ctemp != 1. || stemp != 0.) {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- temp = a[j + i__ * a_dim1];
- a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
- i__ * a_dim1 + 1];
- a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
- i__ * a_dim1 + 1];
-/* L70: */
- }
- }
-/* L80: */
- }
- }
- } else if (lsame_(pivot, "B")) {
- if (lsame_(direct, "F")) {
- i__1 = *m - 1;
- for (j = 1; j <= i__1; ++j) {
- ctemp = c__[j];
- stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
- i__2 = *n;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp = a[j + i__ * a_dim1];
- a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
- + ctemp * temp;
- a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
- a_dim1] - stemp * temp;
-/* L90: */
- }
- }
-/* L100: */
- }
- } else if (lsame_(direct, "B")) {
- for (j = *m - 1; j >= 1; --j) {
- ctemp = c__[j];
- stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
- i__1 = *n;
- for (i__ = 1; i__ <= i__1; ++i__) {
- temp = a[j + i__ * a_dim1];
- a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
- + ctemp * temp;
- a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
- a_dim1] - stemp * temp;
-/* L110: */
- }
- }
-/* L120: */
- }
- }
- }
- } else if (lsame_(side, "R")) {
-
-/* Form A * P' */
-
- if (lsame_(pivot, "V")) {
- if (lsame_(direct, "F")) {
- i__1 = *n - 1;
- for (j = 1; j <= i__1; ++j) {
- ctemp = c__[j];
- stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp = a[i__ + (j + 1) * a_dim1];
- a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
- a[i__ + j * a_dim1];
- a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
- i__ + j * a_dim1];
-/* L130: */
- }
- }
-/* L140: */
- }
- } else if (lsame_(direct, "B")) {
- for (j = *n - 1; j >= 1; --j) {
- ctemp = c__[j];
- stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- temp = a[i__ + (j + 1) * a_dim1];
- a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
- a[i__ + j * a_dim1];
- a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
- i__ + j * a_dim1];
-/* L150: */
- }
- }
-/* L160: */
- }
- }
- } else if (lsame_(pivot, "T")) {
- if (lsame_(direct, "F")) {
- i__1 = *n;
- for (j = 2; j <= i__1; ++j) {
- ctemp = c__[j - 1];
- stemp = s[j - 1];
- if (ctemp != 1. || stemp != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp = a[i__ + j * a_dim1];
- a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
- i__ + a_dim1];
- a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
- a_dim1];
-/* L170: */
- }
- }
-/* L180: */
- }
- } else if (lsame_(direct, "B")) {
- for (j = *n; j >= 2; --j) {
- ctemp = c__[j - 1];
- stemp = s[j - 1];
- if (ctemp != 1. || stemp != 0.) {
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- temp = a[i__ + j * a_dim1];
- a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
- i__ + a_dim1];
- a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
- a_dim1];
-/* L190: */
- }
- }
-/* L200: */
- }
- }
- } else if (lsame_(pivot, "B")) {
- if (lsame_(direct, "F")) {
- i__1 = *n - 1;
- for (j = 1; j <= i__1; ++j) {
- ctemp = c__[j];
- stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
- i__2 = *m;
- for (i__ = 1; i__ <= i__2; ++i__) {
- temp = a[i__ + j * a_dim1];
- a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
- + ctemp * temp;
- a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
- a_dim1] - stemp * temp;
-/* L210: */
- }
- }
-/* L220: */
- }
- } else if (lsame_(direct, "B")) {
- for (j = *n - 1; j >= 1; --j) {
- ctemp = c__[j];
- stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
- i__1 = *m;
- for (i__ = 1; i__ <= i__1; ++i__) {
- temp = a[i__ + j * a_dim1];
- a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
- + ctemp * temp;
- a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
- a_dim1] - stemp * temp;
-/* L230: */
- }
- }
-/* L240: */
- }
- }
- }
- }
-
- return 0;
-
-/* End of DLASR */
-
-} /* dlasr_ */
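-
-/*
-   One concrete instance of the transformation above, for SIDE = 'L',
-   PIVOT = 'V', DIRECT = 'F', restated on a row-major m-by-n array.
-   A sketch for orientation only; LAPACK itself is column-major and
-   the helper name is ours:
-*/
-static void dlasr_lvf_sketch(int m, int n, const double *c, const double *s,
-                             double *a)
-{
-    int i, j;
-
-    for (j = 0; j < m - 1; j++) {        /* apply P(1), ..., P(m-1) */
-        double cj = c[j], sj = s[j];
-        if (cj != 1. || sj != 0.) {
-            for (i = 0; i < n; i++) {    /* rotate rows j and j+1 */
-                double temp = a[(j + 1) * n + i];
-                a[(j + 1) * n + i] = cj * temp - sj * a[j * n + i];
-                a[j * n + i] = sj * temp + cj * a[j * n + i];
-            }
-        }
-    }
-}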
-
-/* Subroutine */ int dlasrt_(char *id, integer *n, doublereal *d__, integer *
- info)
-{
- /* System generated locals */
- integer i__1, i__2;
-
- /* Local variables */
- static integer i__, j;
- static doublereal d1, d2, d3;
- static integer dir;
- static doublereal tmp;
- static integer endd;
- extern logical lsame_(char *, char *);
- static integer stack[64] /* was [2][32] */;
- static doublereal dmnmx;
- static integer start;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static integer stkpnt;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- Sort the numbers in D in increasing order (if ID = 'I') or
- in decreasing order (if ID = 'D' ).
-
- Use Quick Sort, reverting to Insertion sort on arrays of
- size <= 20. Dimension of STACK limits N to about 2**32.
-
- Arguments
- =========
-
- ID (input) CHARACTER*1
- = 'I': sort D in increasing order;
- = 'D': sort D in decreasing order.
-
- N (input) INTEGER
- The length of the array D.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the array to be sorted.
- On exit, D has been sorted into increasing order
- (D(1) <= ... <= D(N) ) or into decreasing order
- (D(1) >= ... >= D(N) ), depending on ID.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
-   Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
-
- /* Function Body */
- *info = 0;
- dir = -1;
- if (lsame_(id, "D")) {
- dir = 0;
- } else if (lsame_(id, "I")) {
- dir = 1;
- }
- if (dir == -1) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DLASRT", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n <= 1) {
- return 0;
- }
-
- stkpnt = 1;
- stack[0] = 1;
- stack[1] = *n;
-L10:
- start = stack[((stkpnt) << (1)) - 2];
- endd = stack[((stkpnt) << (1)) - 1];
- --stkpnt;
- if ((endd - start <= 20 && endd - start > 0)) {
-
-/* Do Insertion sort on D( START:ENDD ) */
-
- if (dir == 0) {
-
-/* Sort into decreasing order */
-
- i__1 = endd;
- for (i__ = start + 1; i__ <= i__1; ++i__) {
- i__2 = start + 1;
- for (j = i__; j >= i__2; --j) {
- if (d__[j] > d__[j - 1]) {
- dmnmx = d__[j];
- d__[j] = d__[j - 1];
- d__[j - 1] = dmnmx;
- } else {
- goto L30;
- }
-/* L20: */
- }
-L30:
- ;
- }
-
- } else {
-
-/* Sort into increasing order */
-
- i__1 = endd;
- for (i__ = start + 1; i__ <= i__1; ++i__) {
- i__2 = start + 1;
- for (j = i__; j >= i__2; --j) {
- if (d__[j] < d__[j - 1]) {
- dmnmx = d__[j];
- d__[j] = d__[j - 1];
- d__[j - 1] = dmnmx;
- } else {
- goto L50;
- }
-/* L40: */
- }
-L50:
- ;
- }
-
- }
-
- } else if (endd - start > 20) {
-
-/*
- Partition D( START:ENDD ) and stack parts, largest one first
-
- Choose partition entry as median of 3
-*/
-
- d1 = d__[start];
- d2 = d__[endd];
- i__ = (start + endd) / 2;
- d3 = d__[i__];
- if (d1 < d2) {
- if (d3 < d1) {
- dmnmx = d1;
- } else if (d3 < d2) {
- dmnmx = d3;
- } else {
- dmnmx = d2;
- }
- } else {
- if (d3 < d2) {
- dmnmx = d2;
- } else if (d3 < d1) {
- dmnmx = d3;
- } else {
- dmnmx = d1;
- }
- }
-
- if (dir == 0) {
-
-/* Sort into decreasing order */
-
- i__ = start - 1;
- j = endd + 1;
-L60:
-L70:
- --j;
- if (d__[j] < dmnmx) {
- goto L70;
- }
-L80:
- ++i__;
- if (d__[i__] > dmnmx) {
- goto L80;
- }
- if (i__ < j) {
- tmp = d__[i__];
- d__[i__] = d__[j];
- d__[j] = tmp;
- goto L60;
- }
- if (j - start > endd - j - 1) {
- ++stkpnt;
- stack[((stkpnt) << (1)) - 2] = start;
- stack[((stkpnt) << (1)) - 1] = j;
- ++stkpnt;
- stack[((stkpnt) << (1)) - 2] = j + 1;
- stack[((stkpnt) << (1)) - 1] = endd;
- } else {
- ++stkpnt;
- stack[((stkpnt) << (1)) - 2] = j + 1;
- stack[((stkpnt) << (1)) - 1] = endd;
- ++stkpnt;
- stack[((stkpnt) << (1)) - 2] = start;
- stack[((stkpnt) << (1)) - 1] = j;
- }
- } else {
-
-/* Sort into increasing order */
-
- i__ = start - 1;
- j = endd + 1;
-L90:
-L100:
- --j;
- if (d__[j] > dmnmx) {
- goto L100;
- }
-L110:
- ++i__;
- if (d__[i__] < dmnmx) {
- goto L110;
- }
- if (i__ < j) {
- tmp = d__[i__];
- d__[i__] = d__[j];
- d__[j] = tmp;
- goto L90;
- }
- if (j - start > endd - j - 1) {
- ++stkpnt;
- stack[((stkpnt) << (1)) - 2] = start;
- stack[((stkpnt) << (1)) - 1] = j;
- ++stkpnt;
- stack[((stkpnt) << (1)) - 2] = j + 1;
- stack[((stkpnt) << (1)) - 1] = endd;
- } else {
- ++stkpnt;
- stack[((stkpnt) << (1)) - 2] = j + 1;
- stack[((stkpnt) << (1)) - 1] = endd;
- ++stkpnt;
- stack[((stkpnt) << (1)) - 2] = start;
- stack[((stkpnt) << (1)) - 1] = j;
- }
- }
- }
- if (stkpnt > 0) {
- goto L10;
- }
- return 0;
-
-/* End of DLASRT */
-
-} /* dlasrt_ */
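-
-/*
-   The partitioning step above picks its pivot as the median of
-   D(START), D(ENDD) and the middle element.  Extracted as a
-   standalone helper (name ours), the selection reads:
-*/
-static double dlasrt_median3_sketch(double d1, double d2, double d3)
-{
-    if (d1 < d2) {
-        if (d3 < d1) {
-            return d1;
-        } else if (d3 < d2) {
-            return d3;
-        } else {
-            return d2;
-        }
-    } else if (d3 < d2) {
-        return d2;
-    } else if (d3 < d1) {
-        return d3;
-    } else {
-        return d1;
-    }
-}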
-
-/* Subroutine */ int dlassq_(integer *n, doublereal *x, integer *incx,
- doublereal *scale, doublereal *sumsq)
-{
- /* System generated locals */
- integer i__1, i__2;
- doublereal d__1;
-
- /* Local variables */
- static integer ix;
- static doublereal absxi;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLASSQ returns the values scl and smsq such that
-
- ( scl**2 )*smsq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq,
-
- where x( i ) = X( 1 + ( i - 1 )*INCX ). The value of sumsq is
- assumed to be non-negative and scl returns the value
-
- scl = max( scale, abs( x( i ) ) ).
-
- scale and sumsq must be supplied in SCALE and SUMSQ and
- scl and smsq are overwritten on SCALE and SUMSQ respectively.
-
- The routine makes only one pass through the vector x.
-
- Arguments
- =========
-
- N (input) INTEGER
- The number of elements to be used from the vector X.
-
- X (input) DOUBLE PRECISION array, dimension (N)
- The vector for which a scaled sum of squares is computed.
- x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n.
-
- INCX (input) INTEGER
- The increment between successive values of the vector X.
- INCX > 0.
-
- SCALE (input/output) DOUBLE PRECISION
- On entry, the value scale in the equation above.
- On exit, SCALE is overwritten with scl , the scaling factor
- for the sum of squares.
-
- SUMSQ (input/output) DOUBLE PRECISION
- On entry, the value sumsq in the equation above.
- On exit, SUMSQ is overwritten with smsq , the basic sum of
- squares from which scl has been factored out.
-
- =====================================================================
-*/
-
-
- /* Parameter adjustments */
- --x;
-
- /* Function Body */
- if (*n > 0) {
- i__1 = (*n - 1) * *incx + 1;
- i__2 = *incx;
- for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
- if (x[ix] != 0.) {
- absxi = (d__1 = x[ix], abs(d__1));
- if (*scale < absxi) {
-/* Computing 2nd power */
- d__1 = *scale / absxi;
- *sumsq = *sumsq * (d__1 * d__1) + 1;
- *scale = absxi;
- } else {
-/* Computing 2nd power */
- d__1 = absxi / *scale;
- *sumsq += d__1 * d__1;
- }
- }
-/* L10: */
- }
- }
- return 0;
-
-/* End of DLASSQ */
-
-} /* dlassq_ */
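-
-/*
-   The standard use of the SCALE/SUMSQ update above is a one-pass
-   2-norm whose intermediate squares cannot overflow; reference DNRM2
-   is built the same way.  A self-contained sketch (helper name ours):
-*/
-#include <math.h>
-
-static double dlassq_norm2_sketch(int n, const double *x)
-{
-    int i;
-    double scale = 0., sumsq = 1.;
-
-    for (i = 0; i < n; i++) {
-        if (x[i] != 0.) {
-            double absxi = fabs(x[i]);
-            if (scale < absxi) {
-                double r = scale / absxi;
-                sumsq = sumsq * (r * r) + 1.;   /* refactor to the new scale */
-                scale = absxi;
-            } else {
-                double r = absxi / scale;
-                sumsq += r * r;
-            }
-        }
-    }
-    return scale * sqrt(sumsq);                 /* scl * sqrt(smsq) */
-}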
-
-/* Subroutine */ int dlasv2_(doublereal *f, doublereal *g, doublereal *h__,
- doublereal *ssmin, doublereal *ssmax, doublereal *snr, doublereal *
- csr, doublereal *snl, doublereal *csl)
-{
- /* System generated locals */
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal), d_sign(doublereal *, doublereal *);
-
- /* Local variables */
- static doublereal a, d__, l, m, r__, s, t, fa, ga, ha, ft, gt, ht, mm, tt,
- clt, crt, slt, srt;
- static integer pmax;
- static doublereal temp;
- static logical swap;
- static doublereal tsign;
-
- static logical gasmal;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLASV2 computes the singular value decomposition of a 2-by-2
- triangular matrix
- [ F G ]
- [ 0 H ].
- On return, abs(SSMAX) is the larger singular value, abs(SSMIN) is the
- smaller singular value, and (CSL,SNL) and (CSR,SNR) are the left and
- right singular vectors for abs(SSMAX), giving the decomposition
-
- [ CSL SNL ] [ F G ] [ CSR -SNR ] = [ SSMAX 0 ]
- [-SNL CSL ] [ 0 H ] [ SNR CSR ] [ 0 SSMIN ].
-
- Arguments
- =========
-
- F (input) DOUBLE PRECISION
- The (1,1) element of the 2-by-2 matrix.
-
- G (input) DOUBLE PRECISION
- The (1,2) element of the 2-by-2 matrix.
-
- H (input) DOUBLE PRECISION
- The (2,2) element of the 2-by-2 matrix.
-
- SSMIN (output) DOUBLE PRECISION
- abs(SSMIN) is the smaller singular value.
-
- SSMAX (output) DOUBLE PRECISION
- abs(SSMAX) is the larger singular value.
-
- SNL (output) DOUBLE PRECISION
- CSL (output) DOUBLE PRECISION
- The vector (CSL, SNL) is a unit left singular vector for the
- singular value abs(SSMAX).
-
- SNR (output) DOUBLE PRECISION
- CSR (output) DOUBLE PRECISION
- The vector (CSR, SNR) is a unit right singular vector for the
- singular value abs(SSMAX).
-
- Further Details
- ===============
-
- Any input parameter may be aliased with any output parameter.
-
- Barring over/underflow and assuming a guard digit in subtraction, all
- output quantities are correct to within a few units in the last
- place (ulps).
-
- In IEEE arithmetic, the code works correctly if one matrix element is
- infinite.
-
- Overflow will not occur unless the largest singular value itself
- overflows or is within a few ulps of overflow. (On machines with
- partial overflow, like the Cray, overflow may occur if the largest
- singular value is within a factor of 2 of overflow.)
-
- Underflow is harmless if underflow is gradual. Otherwise, results
- may correspond to a matrix modified by perturbations of size near
- the underflow threshold.
-
- =====================================================================
-*/
-
-
- ft = *f;
- fa = abs(ft);
- ht = *h__;
- ha = abs(*h__);
-
-/*
- PMAX points to the maximum absolute element of matrix
- PMAX = 1 if F largest in absolute values
- PMAX = 2 if G largest in absolute values
- PMAX = 3 if H largest in absolute values
-*/
-
- pmax = 1;
- swap = ha > fa;
- if (swap) {
- pmax = 3;
- temp = ft;
- ft = ht;
- ht = temp;
- temp = fa;
- fa = ha;
- ha = temp;
-
-/* Now FA .ge. HA */
-
- }
- gt = *g;
- ga = abs(gt);
- if (ga == 0.) {
-
-/* Diagonal matrix */
-
- *ssmin = ha;
- *ssmax = fa;
- clt = 1.;
- crt = 1.;
- slt = 0.;
- srt = 0.;
- } else {
- gasmal = TRUE_;
- if (ga > fa) {
- pmax = 2;
- if (fa / ga < EPSILON) {
-
-/* Case of very large GA */
-
- gasmal = FALSE_;
- *ssmax = ga;
- if (ha > 1.) {
- *ssmin = fa / (ga / ha);
- } else {
- *ssmin = fa / ga * ha;
- }
- clt = 1.;
- slt = ht / gt;
- srt = 1.;
- crt = ft / gt;
- }
- }
- if (gasmal) {
-
-/* Normal case */
-
- d__ = fa - ha;
- if (d__ == fa) {
-
-/* Copes with infinite F or H */
-
- l = 1.;
- } else {
- l = d__ / fa;
- }
-
-/* Note that 0 .le. L .le. 1 */
-
- m = gt / ft;
-
-/* Note that abs(M) .le. 1/macheps */
-
- t = 2. - l;
-
-/* Note that T .ge. 1 */
-
- mm = m * m;
- tt = t * t;
- s = sqrt(tt + mm);
-
-/* Note that 1 .le. S .le. 1 + 1/macheps */
-
- if (l == 0.) {
- r__ = abs(m);
- } else {
- r__ = sqrt(l * l + mm);
- }
-
-/* Note that 0 .le. R .le. 1 + 1/macheps */
-
- a = (s + r__) * .5;
-
-/* Note that 1 .le. A .le. 1 + abs(M) */
-
- *ssmin = ha / a;
- *ssmax = fa * a;
- if (mm == 0.) {
-
-/* Note that M is very tiny */
-
- if (l == 0.) {
- t = d_sign(&c_b2804, &ft) * d_sign(&c_b15, &gt);
- } else {
- t = gt / d_sign(&d__, &ft) + m / t;
- }
- } else {
- t = (m / (s + t) + m / (r__ + l)) * (a + 1.);
- }
- l = sqrt(t * t + 4.);
- crt = 2. / l;
- srt = t / l;
- clt = (crt + srt * m) / a;
- slt = ht / ft * srt / a;
- }
- }
- if (swap) {
- *csl = srt;
- *snl = crt;
- *csr = slt;
- *snr = clt;
- } else {
- *csl = clt;
- *snl = slt;
- *csr = crt;
- *snr = srt;
- }
-
-/* Correct signs of SSMAX and SSMIN */
-
- if (pmax == 1) {
- tsign = d_sign(&c_b15, csr) * d_sign(&c_b15, csl) * d_sign(&c_b15, f);
- }
- if (pmax == 2) {
- tsign = d_sign(&c_b15, snr) * d_sign(&c_b15, csl) * d_sign(&c_b15, g);
- }
- if (pmax == 3) {
- tsign = d_sign(&c_b15, snr) * d_sign(&c_b15, snl) * d_sign(&c_b15,
- h__);
- }
- *ssmax = d_sign(ssmax, &tsign);
- d__1 = tsign * d_sign(&c_b15, f) * d_sign(&c_b15, h__);
- *ssmin = d_sign(ssmin, &d__1);
- return 0;
-
-/* End of DLASV2 */
-
-} /* dlasv2_ */
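-
-/*
-   A usage sketch: rotating [ F G; 0 H ] by the returned left and
-   right rotations should reproduce diag(SSMAX, SSMIN), per the
-   decomposition displayed above.  The helper name and the residual
-   check are ours, not LAPACK's; assumes <math.h> for fabs:
-*/
-#include <math.h>
-
-static double dlasv2_residual_sketch(doublereal f, doublereal g, doublereal h)
-{
-    doublereal ssmin, ssmax, snr, csr, snl, csl;
-    double b11, b12, b21, b22;
-
-    dlasv2_(&f, &g, &h, &ssmin, &ssmax, &snr, &csr, &snl, &csl);
-
-    /* B = [ CSL SNL; -SNL CSL ] * [ F G; 0 H ] * [ CSR -SNR; SNR CSR ] */
-    b11 = csl * f * csr + (csl * g + snl * h) * snr;
-    b12 = -csl * f * snr + (csl * g + snl * h) * csr;
-    b21 = -snl * f * csr + (csl * h - snl * g) * snr;
-    b22 = snl * f * snr + (csl * h - snl * g) * csr;
-
-    /* Off-diagonal entries should vanish to a few ulps. */
-    return fabs(b11 - ssmax) + fabs(b22 - ssmin) + fabs(b12) + fabs(b21);
-}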
-
-/* Subroutine */ int dlaswp_(integer *n, doublereal *a, integer *lda, integer
- *k1, integer *k2, integer *ipiv, integer *incx)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
-
- /* Local variables */
- static integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc;
- static doublereal temp;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DLASWP performs a series of row interchanges on the matrix A.
- One row interchange is initiated for each of rows K1 through K2 of A.
-
- Arguments
- =========
-
- N (input) INTEGER
- The number of columns of the matrix A.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the matrix of column dimension N to which the row
- interchanges will be applied.
- On exit, the permuted matrix.
-
- LDA (input) INTEGER
- The leading dimension of the array A.
-
- K1 (input) INTEGER
- The first element of IPIV for which a row interchange will
- be done.
-
- K2 (input) INTEGER
- The last element of IPIV for which a row interchange will
- be done.
-
-    IPIV    (input) INTEGER array, dimension (K1+(K2-K1)*abs(INCX))
- The vector of pivot indices. Only the elements in positions
- K1 through K2 of IPIV are accessed.
- IPIV(K) = L implies rows K and L are to be interchanged.
-
- INCX (input) INTEGER
-            The increment between successive values of IPIV.  If INCX
-            is negative, the pivots are applied in reverse order.
-
- Further Details
- ===============
-
- Modified by
- R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA
-
- =====================================================================
-
-
- Interchange row I with row IPIV(I) for each of rows K1 through K2.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --ipiv;
-
- /* Function Body */
- if (*incx > 0) {
- ix0 = *k1;
- i1 = *k1;
- i2 = *k2;
- inc = 1;
- } else if (*incx < 0) {
- ix0 = (1 - *k2) * *incx + 1;
- i1 = *k2;
- i2 = *k1;
- inc = -1;
- } else {
- return 0;
- }
-
- n32 = (*n / 32) << (5);
- if (n32 != 0) {
- i__1 = n32;
- for (j = 1; j <= i__1; j += 32) {
- ix = ix0;
- i__2 = i2;
- i__3 = inc;
- for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
- {
- ip = ipiv[ix];
- if (ip != i__) {
- i__4 = j + 31;
- for (k = j; k <= i__4; ++k) {
- temp = a[i__ + k * a_dim1];
- a[i__ + k * a_dim1] = a[ip + k * a_dim1];
- a[ip + k * a_dim1] = temp;
-/* L10: */
- }
- }
- ix += *incx;
-/* L20: */
- }
-/* L30: */
- }
- }
- if (n32 != *n) {
- ++n32;
- ix = ix0;
- i__1 = i2;
- i__3 = inc;
- for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) {
- ip = ipiv[ix];
- if (ip != i__) {
- i__2 = *n;
- for (k = n32; k <= i__2; ++k) {
- temp = a[i__ + k * a_dim1];
- a[i__ + k * a_dim1] = a[ip + k * a_dim1];
- a[ip + k * a_dim1] = temp;
-/* L40: */
- }
- }
- ix += *incx;
-/* L50: */
- }
- }
-
- return 0;
-
-/* End of DLASWP */
-
-} /* dlaswp_ */
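-
-/*
-   Net effect of the routine above, stripped of the 32-column
-   blocking: for K = K1, ..., K2, swap row K with row IPIV(K).  A
-   sketch for the INCX = 1 case on a row-major array (helper name
-   ours):
-*/
-static void dlaswp_sketch(int n, double *a, int k1, int k2, const int *ipiv)
-{
-    int i, k;
-
-    for (k = k1; k <= k2; k++) {        /* 1-based row indices */
-        int ip = ipiv[k - 1];           /* IPIV(K) is 1-based too */
-        if (ip != k) {
-            for (i = 0; i < n; i++) {   /* swap rows k and ip */
-                double temp = a[(k - 1) * n + i];
-                a[(k - 1) * n + i] = a[(ip - 1) * n + i];
-                a[(ip - 1) * n + i] = temp;
-            }
-        }
-    }
-}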
-
-/* Subroutine */ int dlatrd_(char *uplo, integer *n, integer *nb, doublereal *
- a, integer *lda, doublereal *e, doublereal *tau, doublereal *w,
- integer *ldw)
-{
- /* System generated locals */
- integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, iw;
- extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
- integer *);
- static doublereal alpha;
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, integer *), daxpy_(integer *,
- doublereal *, doublereal *, integer *, doublereal *, integer *),
- dsymv_(char *, integer *, doublereal *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *,
- doublereal *);
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DLATRD reduces NB rows and columns of a real symmetric matrix A to
- symmetric tridiagonal form by an orthogonal similarity
- transformation Q' * A * Q, and returns the matrices V and W which are
- needed to apply the transformation to the unreduced part of A.
-
- If UPLO = 'U', DLATRD reduces the last NB rows and columns of a
- matrix, of which the upper triangle is supplied;
- if UPLO = 'L', DLATRD reduces the first NB rows and columns of a
- matrix, of which the lower triangle is supplied.
-
- This is an auxiliary routine called by DSYTRD.
-
- Arguments
- =========
-
- UPLO (input) CHARACTER
- Specifies whether the upper or lower triangular part of the
- symmetric matrix A is stored:
- = 'U': Upper triangular
- = 'L': Lower triangular
-
- N (input) INTEGER
- The order of the matrix A.
-
- NB (input) INTEGER
- The number of rows and columns to be reduced.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the symmetric matrix A. If UPLO = 'U', the leading
- n-by-n upper triangular part of A contains the upper
- triangular part of the matrix A, and the strictly lower
- triangular part of A is not referenced. If UPLO = 'L', the
- leading n-by-n lower triangular part of A contains the lower
- triangular part of the matrix A, and the strictly upper
- triangular part of A is not referenced.
- On exit:
- if UPLO = 'U', the last NB columns have been reduced to
- tridiagonal form, with the diagonal elements overwriting
-            the diagonal elements of A; the elements above the diagonal,
-            with the array TAU, represent the orthogonal matrix Q as a
- product of elementary reflectors;
- if UPLO = 'L', the first NB columns have been reduced to
- tridiagonal form, with the diagonal elements overwriting
-            the diagonal elements of A; the elements below the diagonal,
-            with the array TAU, represent the orthogonal matrix Q as a
- product of elementary reflectors.
- See Further Details.
-
- LDA (input) INTEGER
-          The leading dimension of the array A.  LDA >= max(1,N).
-
- E (output) DOUBLE PRECISION array, dimension (N-1)
- If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal
- elements of the last NB columns of the reduced matrix;
- if UPLO = 'L', E(1:nb) contains the subdiagonal elements of
- the first NB columns of the reduced matrix.
-
- TAU (output) DOUBLE PRECISION array, dimension (N-1)
- The scalar factors of the elementary reflectors, stored in
- TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'.
- See Further Details.
-
- W (output) DOUBLE PRECISION array, dimension (LDW,NB)
- The n-by-nb matrix W required to update the unreduced part
- of A.
-
- LDW (input) INTEGER
- The leading dimension of the array W. LDW >= max(1,N).
-
- Further Details
- ===============
-
- If UPLO = 'U', the matrix Q is represented as a product of elementary
- reflectors
-
- Q = H(n) H(n-1) . . . H(n-nb+1).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i),
- and tau in TAU(i-1).
-
- If UPLO = 'L', the matrix Q is represented as a product of elementary
- reflectors
-
- Q = H(1) H(2) . . . H(nb).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i),
- and tau in TAU(i).
-
- The elements of the vectors v together form the n-by-nb matrix V
- which is needed, with W, to apply the transformation to the unreduced
- part of the matrix, using a symmetric rank-2k update of the form:
- A := A - V*W' - W*V'.
-
- The contents of A on exit are illustrated by the following examples
- with n = 5 and nb = 2:
-
- if UPLO = 'U': if UPLO = 'L':
-
- ( a a a v4 v5 ) ( d )
- ( a a v4 v5 ) ( 1 d )
- ( a 1 v5 ) ( v1 1 a )
- ( d 1 ) ( v1 v2 a a )
- ( d ) ( v1 v2 a a a )
-
- where d denotes a diagonal element of the reduced matrix, a denotes
- an element of the original matrix that is unchanged, and vi denotes
- an element of the vector defining H(i).
-
- =====================================================================
-
-
- Quick return if possible
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --e;
- --tau;
- w_dim1 = *ldw;
- w_offset = 1 + w_dim1 * 1;
- w -= w_offset;
-
- /* Function Body */
- if (*n <= 0) {
- return 0;
- }
-
- if (lsame_(uplo, "U")) {
-
-/* Reduce last NB columns of upper triangle */
-
- i__1 = *n - *nb + 1;
- for (i__ = *n; i__ >= i__1; --i__) {
- iw = i__ - *n + *nb;
- if (i__ < *n) {
-
-/* Update A(1:i,i) */
-
- i__2 = *n - i__;
- dgemv_("No transpose", &i__, &i__2, &c_b151, &a[(i__ + 1) *
- a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
- c_b15, &a[i__ * a_dim1 + 1], &c__1);
- i__2 = *n - i__;
- dgemv_("No transpose", &i__, &i__2, &c_b151, &w[(iw + 1) *
- w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
- c_b15, &a[i__ * a_dim1 + 1], &c__1);
- }
- if (i__ > 1) {
-
-/*
- Generate elementary reflector H(i) to annihilate
- A(1:i-2,i)
-*/
-
- i__2 = i__ - 1;
- dlarfg_(&i__2, &a[i__ - 1 + i__ * a_dim1], &a[i__ * a_dim1 +
- 1], &c__1, &tau[i__ - 1]);
- e[i__ - 1] = a[i__ - 1 + i__ * a_dim1];
- a[i__ - 1 + i__ * a_dim1] = 1.;
-
-/* Compute W(1:i-1,i) */
-
- i__2 = i__ - 1;
- dsymv_("Upper", &i__2, &c_b15, &a[a_offset], lda, &a[i__ *
- a_dim1 + 1], &c__1, &c_b29, &w[iw * w_dim1 + 1], &
- c__1);
- if (i__ < *n) {
- i__2 = i__ - 1;
- i__3 = *n - i__;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &w[(iw + 1) *
- w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &c__1, &
- c_b29, &w[i__ + 1 + iw * w_dim1], &c__1);
- i__2 = i__ - 1;
- i__3 = *n - i__;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[(i__ + 1)
- * a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], &
- c__1, &c_b15, &w[iw * w_dim1 + 1], &c__1);
- i__2 = i__ - 1;
- i__3 = *n - i__;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[(i__ + 1) *
- a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, &
- c_b29, &w[i__ + 1 + iw * w_dim1], &c__1);
- i__2 = i__ - 1;
- i__3 = *n - i__;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[(iw + 1)
- * w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
- c__1, &c_b15, &w[iw * w_dim1 + 1], &c__1);
- }
- i__2 = i__ - 1;
- dscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
- i__2 = i__ - 1;
- alpha = tau[i__ - 1] * -.5 * ddot_(&i__2, &w[iw * w_dim1 + 1],
- &c__1, &a[i__ * a_dim1 + 1], &c__1);
- i__2 = i__ - 1;
- daxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw *
- w_dim1 + 1], &c__1);
- }
-
-/* L10: */
- }
- } else {
-
-/* Reduce first NB columns of lower triangle */
-
- i__1 = *nb;
- for (i__ = 1; i__ <= i__1; ++i__) {
-
-/* Update A(i:n,i) */
-
- i__2 = *n - i__ + 1;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + a_dim1],
- lda, &w[i__ + w_dim1], ldw, &c_b15, &a[i__ + i__ * a_dim1]
- , &c__1);
- i__2 = *n - i__ + 1;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[i__ + w_dim1],
- ldw, &a[i__ + a_dim1], lda, &c_b15, &a[i__ + i__ * a_dim1]
- , &c__1);
- if (i__ < *n) {
-
-/*
- Generate elementary reflector H(i) to annihilate
- A(i+2:n,i)
-*/
-
- i__2 = *n - i__;
-/* Computing MIN */
- i__3 = i__ + 2;
- dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) +
- i__ * a_dim1], &c__1, &tau[i__]);
- e[i__] = a[i__ + 1 + i__ * a_dim1];
- a[i__ + 1 + i__ * a_dim1] = 1.;
-
-/* Compute W(i+1:n,i) */
-
- i__2 = *n - i__;
- dsymv_("Lower", &i__2, &c_b15, &a[i__ + 1 + (i__ + 1) *
- a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
- c_b29, &w[i__ + 1 + i__ * w_dim1], &c__1);
- i__2 = *n - i__;
- i__3 = i__ - 1;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &w[i__ + 1 + w_dim1]
- , ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &w[
- i__ * w_dim1 + 1], &c__1);
- i__2 = *n - i__;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[i__ + 1 +
- a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b15, &w[
- i__ + 1 + i__ * w_dim1], &c__1);
- i__2 = *n - i__;
- i__3 = i__ - 1;
- dgemv_("Transpose", &i__2, &i__3, &c_b15, &a[i__ + 1 + a_dim1]
- , lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &w[
- i__ * w_dim1 + 1], &c__1);
- i__2 = *n - i__;
- i__3 = i__ - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &w[i__ + 1 +
- w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b15, &w[
- i__ + 1 + i__ * w_dim1], &c__1);
- i__2 = *n - i__;
- dscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
- i__2 = *n - i__;
- alpha = tau[i__] * -.5 * ddot_(&i__2, &w[i__ + 1 + i__ *
- w_dim1], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
- i__2 = *n - i__;
- daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
- i__ + 1 + i__ * w_dim1], &c__1);
- }
-
-/* L20: */
- }
- }
-
- return 0;
-
-/* End of DLATRD */
-
-} /* dlatrd_ */
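-
-/*
-    Editor's note: an illustrative sketch, not part of the LAPACK
-    source.  It spells out the symmetric rank-2 update
-    A := A - V*W' - W*V' from the Further Details above; DSYTRD
-    performs this update (through DSYR2K) on the part of A that
-    DLATRD has not yet reduced.  The helper name is hypothetical.
-*/
-#if 0   /* illustrative sketch, never compiled */
-static void rank2_update_sketch(int n, int nb, double *a, int lda,
-                                const double *v, int ldv,
-                                const double *w, int ldw)
-{
-    /* Column-major, 0-based.  The full n-by-n block is updated for
-       clarity; the library routine touches only one triangle of A. */
-    int i, j, k;
-    for (j = 0; j < n; ++j) {
-        for (i = 0; i < n; ++i) {
-            double s = 0.;
-            for (k = 0; k < nb; ++k) {
-                s += v[i + k * ldv] * w[j + k * ldw]
-                   + w[i + k * ldw] * v[j + k * ldv];
-            }
-            a[i + j * lda] -= s;
-        }
-    }
-}
-#endif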
-
-/* Subroutine */ int dorg2r_(integer *m, integer *n, integer *k, doublereal *
- a, integer *lda, doublereal *tau, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
- doublereal d__1;
-
- /* Local variables */
- static integer i__, j, l;
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *), dlarf_(char *, integer *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DORG2R generates an m by n real matrix Q with orthonormal columns,
- which is defined as the first n columns of a product of k elementary
- reflectors of order m
-
- Q = H(1) H(2) . . . H(k)
-
- as returned by DGEQRF.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows of the matrix Q. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix Q. M >= N >= 0.
-
- K (input) INTEGER
- The number of elementary reflectors whose product defines the
- matrix Q. N >= K >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the i-th column must contain the vector which
- defines the elementary reflector H(i), for i = 1,2,...,k, as
- returned by DGEQRF in the first k columns of its array
- argument A.
- On exit, the m-by-n matrix Q.
-
- LDA (input) INTEGER
- The first dimension of the array A. LDA >= max(1,M).
-
- TAU (input) DOUBLE PRECISION array, dimension (K)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DGEQRF.
-
- WORK (workspace) DOUBLE PRECISION array, dimension (N)
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument has an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- if (*m < 0) {
- *info = -1;
- } else if (*n < 0 || *n > *m) {
- *info = -2;
- } else if (*k < 0 || *k > *n) {
- *info = -3;
- } else if (*lda < max(1,*m)) {
- *info = -5;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORG2R", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n <= 0) {
- return 0;
- }
-
-/* Initialise columns k+1:n to columns of the unit matrix */
-
- i__1 = *n;
- for (j = *k + 1; j <= i__1; ++j) {
- i__2 = *m;
- for (l = 1; l <= i__2; ++l) {
- a[l + j * a_dim1] = 0.;
-/* L10: */
- }
- a[j + j * a_dim1] = 1.;
-/* L20: */
- }
-
- for (i__ = *k; i__ >= 1; --i__) {
-
-/* Apply H(i) to A(i:m,i:n) from the left */
-
- if (i__ < *n) {
- a[i__ + i__ * a_dim1] = 1.;
- i__1 = *m - i__ + 1;
- i__2 = *n - i__;
- dlarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[
- i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
- }
- if (i__ < *m) {
- i__1 = *m - i__;
- d__1 = -tau[i__];
- dscal_(&i__1, &d__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
- }
- a[i__ + i__ * a_dim1] = 1. - tau[i__];
-
-/* Set A(1:i-1,i) to zero */
-
- i__1 = i__ - 1;
- for (l = 1; l <= i__1; ++l) {
- a[l + i__ * a_dim1] = 0.;
-/* L30: */
- }
-/* L40: */
- }
- return 0;
-
-/* End of DORG2R */
-
-} /* dorg2r_ */
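-
-/*
-    Editor's note: illustrative sketch only.  DORG2R builds Q by
-    applying the elementary reflectors H(i) = I - tau*v*v' described
-    above; the hypothetical helper below shows what one application
-    to a vector means: x := x - tau * v * (v'*x).
-*/
-#if 0   /* illustrative sketch, never compiled */
-static void apply_reflector_sketch(int n, double tau, const double *v,
-                                   double *x)
-{
-    double dot = 0.;
-    int i;
-    for (i = 0; i < n; ++i) {   /* dot = v'*x */
-        dot += v[i] * x[i];
-    }
-    for (i = 0; i < n; ++i) {   /* x := x - tau * v * dot */
-        x[i] -= tau * v[i] * dot;
-    }
-}
-#endif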
-
-/* Subroutine */ int dorgbr_(char *vect, integer *m, integer *n, integer *k,
- doublereal *a, integer *lda, doublereal *tau, doublereal *work,
- integer *lwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, j, nb, mn;
- extern logical lsame_(char *, char *);
- static integer iinfo;
- static logical wantq;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- extern /* Subroutine */ int dorglq_(integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- integer *), dorgqr_(integer *, integer *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *, integer *);
- static integer lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DORGBR generates one of the real orthogonal matrices Q or P**T
- determined by DGEBRD when reducing a real matrix A to bidiagonal
- form: A = Q * B * P**T. Q and P**T are defined as products of
- elementary reflectors H(i) or G(i) respectively.
-
- If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q
- is of order M:
- if m >= k, Q = H(1) H(2) . . . H(k) and DORGBR returns the first n
- columns of Q, where m >= n >= k;
- if m < k, Q = H(1) H(2) . . . H(m-1) and DORGBR returns Q as an
- M-by-M matrix.
-
- If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**T
- is of order N:
- if k < n, P**T = G(k) . . . G(2) G(1) and DORGBR returns the first m
- rows of P**T, where n >= m >= k;
- if k >= n, P**T = G(n-1) . . . G(2) G(1) and DORGBR returns P**T as
- an N-by-N matrix.
-
- Arguments
- =========
-
- VECT (input) CHARACTER*1
- Specifies whether the matrix Q or the matrix P**T is
- required, as defined in the transformation applied by DGEBRD:
- = 'Q': generate Q;
- = 'P': generate P**T.
-
- M (input) INTEGER
- The number of rows of the matrix Q or P**T to be returned.
- M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix Q or P**T to be returned.
- N >= 0.
- If VECT = 'Q', M >= N >= min(M,K);
- if VECT = 'P', N >= M >= min(N,K).
-
- K (input) INTEGER
- If VECT = 'Q', the number of columns in the original M-by-K
- matrix reduced by DGEBRD.
- If VECT = 'P', the number of rows in the original K-by-N
- matrix reduced by DGEBRD.
- K >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the vectors which define the elementary reflectors,
- as returned by DGEBRD.
- On exit, the M-by-N matrix Q or P**T.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,M).
-
- TAU (input) DOUBLE PRECISION array, dimension
- (min(M,K)) if VECT = 'Q'
- (min(N,K)) if VECT = 'P'
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i) or G(i), which determines Q or P**T, as
- returned by DGEBRD in its array argument TAUQ or TAUP.
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= max(1,min(M,N)).
- For optimum performance LWORK >= min(M,N)*NB, where NB
- is the optimal blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- wantq = lsame_(vect, "Q");
- mn = min(*m,*n);
- lquery = *lwork == -1;
- if ((! wantq && ! lsame_(vect, "P"))) {
- *info = -1;
- } else if (*m < 0) {
- *info = -2;
- } else if (*n < 0 || (wantq && (*n > *m || *n < min(*m,*k))) || (! wantq
- && (*m > *n || *m < min(*n,*k)))) {
- *info = -3;
- } else if (*k < 0) {
- *info = -4;
- } else if (*lda < max(1,*m)) {
- *info = -6;
- } else if ((*lwork < max(1,mn) && ! lquery)) {
- *info = -9;
- }
-
- if (*info == 0) {
- if (wantq) {
- nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (
- ftnlen)1);
- } else {
- nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (
- ftnlen)1);
- }
- lwkopt = max(1,mn) * nb;
- work[1] = (doublereal) lwkopt;
- }
-
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORGBR", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0) {
- work[1] = 1.;
- return 0;
- }
-
- if (wantq) {
-
-/*
- Form Q, determined by a call to DGEBRD to reduce an m-by-k
- matrix
-*/
-
- if (*m >= *k) {
-
-/* If m >= k, assume m >= n >= k */
-
- dorgqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
- iinfo);
-
- } else {
-
-/*
- If m < k, assume m = n
-
- Shift the vectors which define the elementary reflectors one
- column to the right, and set the first row and column of Q
- to those of the unit matrix
-*/
-
- for (j = *m; j >= 2; --j) {
- a[j * a_dim1 + 1] = 0.;
- i__1 = *m;
- for (i__ = j + 1; i__ <= i__1; ++i__) {
- a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
-/* L10: */
- }
-/* L20: */
- }
- a[a_dim1 + 1] = 1.;
- i__1 = *m;
- for (i__ = 2; i__ <= i__1; ++i__) {
- a[i__ + a_dim1] = 0.;
-/* L30: */
- }
- if (*m > 1) {
-
-/* Form Q(2:m,2:m) */
-
- i__1 = *m - 1;
- i__2 = *m - 1;
- i__3 = *m - 1;
- dorgqr_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
- tau[1], &work[1], lwork, &iinfo);
- }
- }
- } else {
-
-/*
- Form P', determined by a call to DGEBRD to reduce a k-by-n
- matrix
-*/
-
- if (*k < *n) {
-
-/* If k < n, assume k <= m <= n */
-
- dorglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
- iinfo);
-
- } else {
-
-/*
- If k >= n, assume m = n
-
- Shift the vectors which define the elementary reflectors one
- row downward, and set the first row and column of P' to
- those of the unit matrix
-*/
-
- a[a_dim1 + 1] = 1.;
- i__1 = *n;
- for (i__ = 2; i__ <= i__1; ++i__) {
- a[i__ + a_dim1] = 0.;
-/* L40: */
- }
- i__1 = *n;
- for (j = 2; j <= i__1; ++j) {
- for (i__ = j - 1; i__ >= 2; --i__) {
- a[i__ + j * a_dim1] = a[i__ - 1 + j * a_dim1];
-/* L50: */
- }
- a[j * a_dim1 + 1] = 0.;
-/* L60: */
- }
- if (*n > 1) {
-
-/* Form P'(2:n,2:n) */
-
- i__1 = *n - 1;
- i__2 = *n - 1;
- i__3 = *n - 1;
- dorglq_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
- tau[1], &work[1], lwork, &iinfo);
- }
- }
- }
- work[1] = (doublereal) lwkopt;
- return 0;
-
-/* End of DORGBR */
-
-} /* dorgbr_ */
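-
-/*
-    Editor's note: in the m < k branch of DORGBR above, Q has the
-    block form
-
-        Q = ( 1  0  )
-            ( 0  Q2 )
-
-    with Q2 of order m-1.  That is why the reflector vectors are
-    first shifted one column to the right and DORGQR is then called
-    only on the trailing (m-1)-by-(m-1) block; the k >= n branch for
-    P' is the row-wise analogue.
-*/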
-
-/* Subroutine */ int dorghr_(integer *n, integer *ilo, integer *ihi,
- doublereal *a, integer *lda, doublereal *tau, doublereal *work,
- integer *lwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, j, nb, nh, iinfo;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- integer *);
- static integer lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DORGHR generates a real orthogonal matrix Q which is defined as the
- product of IHI-ILO elementary reflectors of order N, as returned by
- DGEHRD:
-
- Q = H(ilo) H(ilo+1) . . . H(ihi-1).
-
- Arguments
- =========
-
- N (input) INTEGER
- The order of the matrix Q. N >= 0.
-
- ILO (input) INTEGER
- IHI (input) INTEGER
- ILO and IHI must have the same values as in the previous call
- of DGEHRD. Q is equal to the unit matrix except in the
- submatrix Q(ilo+1:ihi,ilo+1:ihi).
- 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the vectors which define the elementary reflectors,
- as returned by DGEHRD.
- On exit, the N-by-N orthogonal matrix Q.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- TAU (input) DOUBLE PRECISION array, dimension (N-1)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DGEHRD.
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= IHI-ILO.
- For optimum performance LWORK >= (IHI-ILO)*NB, where NB is
- the optimal blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- nh = *ihi - *ilo;
- lquery = *lwork == -1;
- if (*n < 0) {
- *info = -1;
- } else if (*ilo < 1 || *ilo > max(1,*n)) {
- *info = -2;
- } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
- *info = -3;
- } else if (*lda < max(1,*n)) {
- *info = -5;
- } else if ((*lwork < max(1,nh) && ! lquery)) {
- *info = -8;
- }
-
- if (*info == 0) {
- nb = ilaenv_(&c__1, "DORGQR", " ", &nh, &nh, &nh, &c_n1, (ftnlen)6, (
- ftnlen)1);
- lwkopt = max(1,nh) * nb;
- work[1] = (doublereal) lwkopt;
- }
-
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORGHR", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- work[1] = 1.;
- return 0;
- }
-
-/*
- Shift the vectors which define the elementary reflectors one
- column to the right, and set the first ilo and the last n-ihi
- rows and columns to those of the unit matrix
-*/
-
- i__1 = *ilo + 1;
- for (j = *ihi; j >= i__1; --j) {
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = 0.;
-/* L10: */
- }
- i__2 = *ihi;
- for (i__ = j + 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
-/* L20: */
- }
- i__2 = *n;
- for (i__ = *ihi + 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = 0.;
-/* L30: */
- }
-/* L40: */
- }
- i__1 = *ilo;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = 0.;
-/* L50: */
- }
- a[j + j * a_dim1] = 1.;
-/* L60: */
- }
- i__1 = *n;
- for (j = *ihi + 1; j <= i__1; ++j) {
- i__2 = *n;
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = 0.;
-/* L70: */
- }
- a[j + j * a_dim1] = 1.;
-/* L80: */
- }
-
- if (nh > 0) {
-
-/* Generate Q(ilo+1:ihi,ilo+1:ihi) */
-
- dorgqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[*
- ilo], &work[1], lwork, &iinfo);
- }
- work[1] = (doublereal) lwkopt;
- return 0;
-
-/* End of DORGHR */
-
-} /* dorghr_ */
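-
-/*
-    Editor's note: a small worked example of the layout DORGHR sets
-    up above.  For n = 6, ilo = 2, ihi = 5, columns 1, 2 and 6 of Q
-    become unit vectors, nh = ihi - ilo = 3, and DORGQR generates
-    only the 3-by-3 submatrix Q(3:5,3:5), matching the statement in
-    the header that Q equals the unit matrix outside
-    Q(ilo+1:ihi,ilo+1:ihi).
-*/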
-
-/* Subroutine */ int dorgl2_(integer *m, integer *n, integer *k, doublereal *
- a, integer *lda, doublereal *tau, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2;
- doublereal d__1;
-
- /* Local variables */
- static integer i__, j, l;
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *), dlarf_(char *, integer *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DORGL2 generates an m by n real matrix Q with orthonormal rows,
- which is defined as the first m rows of a product of k elementary
- reflectors of order n
-
- Q = H(k) . . . H(2) H(1)
-
- as returned by DGELQF.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows of the matrix Q. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix Q. N >= M.
-
- K (input) INTEGER
- The number of elementary reflectors whose product defines the
- matrix Q. M >= K >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the i-th row must contain the vector which defines
- the elementary reflector H(i), for i = 1,2,...,k, as returned
- by DGELQF in the first k rows of its array argument A.
- On exit, the m-by-n matrix Q.
-
- LDA (input) INTEGER
- The first dimension of the array A. LDA >= max(1,M).
-
- TAU (input) DOUBLE PRECISION array, dimension (K)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DGELQF.
-
- WORK (workspace) DOUBLE PRECISION array, dimension (M)
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument has an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- if (*m < 0) {
- *info = -1;
- } else if (*n < *m) {
- *info = -2;
- } else if (*k < 0 || *k > *m) {
- *info = -3;
- } else if (*lda < max(1,*m)) {
- *info = -5;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORGL2", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m <= 0) {
- return 0;
- }
-
- if (*k < *m) {
-
-/* Initialise rows k+1:m to rows of the unit matrix */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (l = *k + 1; l <= i__2; ++l) {
- a[l + j * a_dim1] = 0.;
-/* L10: */
- }
- if ((j > *k && j <= *m)) {
- a[j + j * a_dim1] = 1.;
- }
-/* L20: */
- }
- }
-
- for (i__ = *k; i__ >= 1; --i__) {
-
-/* Apply H(i) to A(i:m,i:n) from the right */
-
- if (i__ < *n) {
- if (i__ < *m) {
- a[i__ + i__ * a_dim1] = 1.;
- i__1 = *m - i__;
- i__2 = *n - i__ + 1;
- dlarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, &
- tau[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
- }
- i__1 = *n - i__;
- d__1 = -tau[i__];
- dscal_(&i__1, &d__1, &a[i__ + (i__ + 1) * a_dim1], lda);
- }
- a[i__ + i__ * a_dim1] = 1. - tau[i__];
-
-/* Set A(i,1:i-1) to zero */
-
- i__1 = i__ - 1;
- for (l = 1; l <= i__1; ++l) {
- a[i__ + l * a_dim1] = 0.;
-/* L30: */
- }
-/* L40: */
- }
- return 0;
-
-/* End of DORGL2 */
-
-} /* dorgl2_ */
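-
-/*
-    Editor's note: DORGL2 is the row-wise (LQ) analogue of DORG2R:
-    the reflectors are stored in rows of A and applied from the
-    right, so each DLARF call above performs C := C - tau*(C*v)*v'
-    on trailing rows instead of trailing columns.
-*/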
-
-/* Subroutine */ int dorglq_(integer *m, integer *n, integer *k, doublereal *
- a, integer *lda, doublereal *tau, doublereal *work, integer *lwork,
- integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
- extern /* Subroutine */ int dorgl2_(integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *),
- dlarfb_(char *, char *, char *, char *, integer *, integer *,
- integer *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static integer ldwork, lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DORGLQ generates an M-by-N real matrix Q with orthonormal rows,
- which is defined as the first M rows of a product of K elementary
- reflectors of order N
-
- Q = H(k) . . . H(2) H(1)
-
- as returned by DGELQF.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows of the matrix Q. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix Q. N >= M.
-
- K (input) INTEGER
- The number of elementary reflectors whose product defines the
- matrix Q. M >= K >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the i-th row must contain the vector which defines
- the elementary reflector H(i), for i = 1,2,...,k, as returned
- by DGELQF in the first k rows of its array argument A.
- On exit, the M-by-N matrix Q.
-
- LDA (input) INTEGER
- The first dimension of the array A. LDA >= max(1,M).
-
- TAU (input) DOUBLE PRECISION array, dimension (K)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DGELQF.
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= max(1,M).
- For optimum performance LWORK >= M*NB, where NB is
- the optimal blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument has an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
- lwkopt = max(1,*m) * nb;
- work[1] = (doublereal) lwkopt;
- lquery = *lwork == -1;
- if (*m < 0) {
- *info = -1;
- } else if (*n < *m) {
- *info = -2;
- } else if (*k < 0 || *k > *m) {
- *info = -3;
- } else if (*lda < max(1,*m)) {
- *info = -5;
- } else if ((*lwork < max(1,*m) && ! lquery)) {
- *info = -8;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORGLQ", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m <= 0) {
- work[1] = 1.;
- return 0;
- }
-
- nbmin = 2;
- nx = 0;
- iws = *m;
- if ((nb > 1 && nb < *k)) {
-
-/*
- Determine when to cross over from blocked to unblocked code.
-
- Computing MAX
-*/
- i__1 = 0, i__2 = ilaenv_(&c__3, "DORGLQ", " ", m, n, k, &c_n1, (
- ftnlen)6, (ftnlen)1);
- nx = max(i__1,i__2);
- if (nx < *k) {
-
-/* Determine if workspace is large enough for blocked code. */
-
- ldwork = *m;
- iws = ldwork * nb;
- if (*lwork < iws) {
-
-/*
- Not enough workspace to use optimal NB: reduce NB and
- determine the minimum value of NB.
-*/
-
- nb = *lwork / ldwork;
-/* Computing MAX */
- i__1 = 2, i__2 = ilaenv_(&c__2, "DORGLQ", " ", m, n, k, &c_n1,
- (ftnlen)6, (ftnlen)1);
- nbmin = max(i__1,i__2);
- }
- }
- }
-
- if (((nb >= nbmin && nb < *k) && nx < *k)) {
-
-/*
- Use blocked code after the last block.
- The first kk rows are handled by the block method.
-*/
-
- ki = (*k - nx - 1) / nb * nb;
-/* Computing MIN */
- i__1 = *k, i__2 = ki + nb;
- kk = min(i__1,i__2);
-
-/* Set A(kk+1:m,1:kk) to zero. */
-
- i__1 = kk;
- for (j = 1; j <= i__1; ++j) {
- i__2 = *m;
- for (i__ = kk + 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = 0.;
-/* L10: */
- }
-/* L20: */
- }
- } else {
- kk = 0;
- }
-
-/* Use unblocked code for the last or only block. */
-
- if (kk < *m) {
- i__1 = *m - kk;
- i__2 = *n - kk;
- i__3 = *k - kk;
- dorgl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
- tau[kk + 1], &work[1], &iinfo);
- }
-
- if (kk > 0) {
-
-/* Use blocked code */
-
- i__1 = -nb;
- for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
-/* Computing MIN */
- i__2 = nb, i__3 = *k - i__ + 1;
- ib = min(i__2,i__3);
- if (i__ + ib <= *m) {
-
-/*
- Form the triangular factor of the block reflector
- H = H(i) H(i+1) . . . H(i+ib-1)
-*/
-
- i__2 = *n - i__ + 1;
- dlarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ *
- a_dim1], lda, &tau[i__], &work[1], &ldwork);
-
-/* Apply H' to A(i+ib:m,i:n) from the right */
-
- i__2 = *m - i__ - ib + 1;
- i__3 = *n - i__ + 1;
- dlarfb_("Right", "Transpose", "Forward", "Rowwise", &i__2, &
- i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
- ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
- 1], &ldwork);
- }
-
-/* Apply H' to columns i:n of current block */
-
- i__2 = *n - i__ + 1;
- dorgl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
- work[1], &iinfo);
-
-/* Set columns 1:i-1 of current block to zero */
-
- i__2 = i__ - 1;
- for (j = 1; j <= i__2; ++j) {
- i__3 = i__ + ib - 1;
- for (l = i__; l <= i__3; ++l) {
- a[l + j * a_dim1] = 0.;
-/* L30: */
- }
-/* L40: */
- }
-/* L50: */
- }
- }
-
- work[1] = (doublereal) iws;
- return 0;
-
-/* End of DORGLQ */
-
-} /* dorglq_ */
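-
-/*
-    Editor's note: a worked example of the KI/KK split used by the
-    blocked code above.  With k = 70 reflectors, block size nb = 32
-    and crossover point nx = 10, ki = (70 - 10 - 1) / 32 * 32 = 32
-    and kk = min(70, 32 + 32) = 64, so reflectors 1..64 are handled
-    in blocks of 32 and reflectors 65..70 by the unblocked DORGL2.
-*/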
-
-/* Subroutine */ int dorgqr_(integer *m, integer *n, integer *k, doublereal *
- a, integer *lda, doublereal *tau, doublereal *work, integer *lwork,
- integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
- extern /* Subroutine */ int dorg2r_(integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *),
- dlarfb_(char *, char *, char *, char *, integer *, integer *,
- integer *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static integer ldwork, lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DORGQR generates an M-by-N real matrix Q with orthonormal columns,
- which is defined as the first N columns of a product of K elementary
- reflectors of order M
-
- Q = H(1) H(2) . . . H(k)
-
- as returned by DGEQRF.
-
- Arguments
- =========
-
- M (input) INTEGER
- The number of rows of the matrix Q. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix Q. M >= N >= 0.
-
- K (input) INTEGER
- The number of elementary reflectors whose product defines the
- matrix Q. N >= K >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the i-th column must contain the vector which
- defines the elementary reflector H(i), for i = 1,2,...,k, as
- returned by DGEQRF in the first k columns of its array
- argument A.
- On exit, the M-by-N matrix Q.
-
- LDA (input) INTEGER
- The first dimension of the array A. LDA >= max(1,M).
-
- TAU (input) DOUBLE PRECISION array, dimension (K)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DGEQRF.
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= max(1,N).
- For optimum performance LWORK >= N*NB, where NB is the
- optimal blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument has an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
- lwkopt = max(1,*n) * nb;
- work[1] = (doublereal) lwkopt;
- lquery = *lwork == -1;
- if (*m < 0) {
- *info = -1;
- } else if (*n < 0 || *n > *m) {
- *info = -2;
- } else if (*k < 0 || *k > *n) {
- *info = -3;
- } else if (*lda < max(1,*m)) {
- *info = -5;
- } else if ((*lwork < max(1,*n) && ! lquery)) {
- *info = -8;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORGQR", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n <= 0) {
- work[1] = 1.;
- return 0;
- }
-
- nbmin = 2;
- nx = 0;
- iws = *n;
- if ((nb > 1 && nb < *k)) {
-
-/*
- Determine when to cross over from blocked to unblocked code.
-
- Computing MAX
-*/
- i__1 = 0, i__2 = ilaenv_(&c__3, "DORGQR", " ", m, n, k, &c_n1, (
- ftnlen)6, (ftnlen)1);
- nx = max(i__1,i__2);
- if (nx < *k) {
-
-/* Determine if workspace is large enough for blocked code. */
-
- ldwork = *n;
- iws = ldwork * nb;
- if (*lwork < iws) {
-
-/*
- Not enough workspace to use optimal NB: reduce NB and
- determine the minimum value of NB.
-*/
-
- nb = *lwork / ldwork;
-/* Computing MAX */
- i__1 = 2, i__2 = ilaenv_(&c__2, "DORGQR", " ", m, n, k, &c_n1,
- (ftnlen)6, (ftnlen)1);
- nbmin = max(i__1,i__2);
- }
- }
- }
-
- if (((nb >= nbmin && nb < *k) && nx < *k)) {
-
-/*
- Use blocked code after the last block.
- The first kk columns are handled by the block method.
-*/
-
- ki = (*k - nx - 1) / nb * nb;
-/* Computing MIN */
- i__1 = *k, i__2 = ki + nb;
- kk = min(i__1,i__2);
-
-/* Set A(1:kk,kk+1:n) to zero. */
-
- i__1 = *n;
- for (j = kk + 1; j <= i__1; ++j) {
- i__2 = kk;
- for (i__ = 1; i__ <= i__2; ++i__) {
- a[i__ + j * a_dim1] = 0.;
-/* L10: */
- }
-/* L20: */
- }
- } else {
- kk = 0;
- }
-
-/* Use unblocked code for the last or only block. */
-
- if (kk < *n) {
- i__1 = *m - kk;
- i__2 = *n - kk;
- i__3 = *k - kk;
- dorg2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
- tau[kk + 1], &work[1], &iinfo);
- }
-
- if (kk > 0) {
-
-/* Use blocked code */
-
- i__1 = -nb;
- for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
-/* Computing MIN */
- i__2 = nb, i__3 = *k - i__ + 1;
- ib = min(i__2,i__3);
- if (i__ + ib <= *n) {
-
-/*
- Form the triangular factor of the block reflector
- H = H(i) H(i+1) . . . H(i+ib-1)
-*/
-
- i__2 = *m - i__ + 1;
- dlarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ *
- a_dim1], lda, &tau[i__], &work[1], &ldwork);
-
-/* Apply H to A(i:m,i+ib:n) from the left */
-
- i__2 = *m - i__ + 1;
- i__3 = *n - i__ - ib + 1;
- dlarfb_("Left", "No transpose", "Forward", "Columnwise", &
- i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
- 1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &
- work[ib + 1], &ldwork);
- }
-
-/* Apply H to rows i:m of current block */
-
- i__2 = *m - i__ + 1;
- dorg2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
- work[1], &iinfo);
-
-/* Set rows 1:i-1 of current block to zero */
-
- i__2 = i__ + ib - 1;
- for (j = i__; j <= i__2; ++j) {
- i__3 = i__ - 1;
- for (l = 1; l <= i__3; ++l) {
- a[l + j * a_dim1] = 0.;
-/* L30: */
- }
-/* L40: */
- }
-/* L50: */
- }
- }
-
- work[1] = (doublereal) iws;
- return 0;
-
-/* End of DORGQR */
-
-} /* dorgqr_ */
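-
-/*
-    Editor's note: an illustrative sketch of the LWORK = -1 workspace
-    query documented above, not part of the LAPACK source.  It uses
-    the f2c typedefs (integer, doublereal) assumed in scope for this
-    translation unit; the helper name is hypothetical.
-*/
-#if 0   /* illustrative sketch, never compiled */
-#include <stdlib.h>
-
-static int form_q_sketch(integer m, integer n, integer k,
-                         doublereal *a, integer lda, doublereal *tau)
-{
-    doublereal query;
-    doublereal *work;
-    integer lwork = -1, info = 0;
-
-    /* First call: LWORK = -1 only computes the optimal size, returns
-       it in WORK(1), and raises no error for LWORK itself. */
-    dorgqr_(&m, &n, &k, a, &lda, tau, &query, &lwork, &info);
-    if (info != 0) {
-        return (int) info;
-    }
-    lwork = (integer) query;
-    work = (doublereal *) malloc(lwork * sizeof(doublereal));
-    if (work == NULL) {
-        return 1;       /* allocation failure, distinct from INFO < 0 */
-    }
-    /* Second call does the real work: A is overwritten with Q. */
-    dorgqr_(&m, &n, &k, a, &lda, tau, work, &lwork, &info);
-    free(work);
-    return (int) info;
-}
-#endif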
-
-/* Subroutine */ int dorm2l_(char *side, char *trans, integer *m, integer *n,
- integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
- c__, integer *ldc, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, i1, i2, i3, mi, ni, nq;
- static doublereal aii;
- static logical left;
- extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- doublereal *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical notran;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DORM2L overwrites the general real m by n matrix C with
-
- Q * C if SIDE = 'L' and TRANS = 'N', or
-
- Q'* C if SIDE = 'L' and TRANS = 'T', or
-
- C * Q if SIDE = 'R' and TRANS = 'N', or
-
- C * Q' if SIDE = 'R' and TRANS = 'T',
-
- where Q is a real orthogonal matrix defined as the product of k
- elementary reflectors
-
- Q = H(k) . . . H(2) H(1)
-
- as returned by DGEQLF. Q is of order m if SIDE = 'L' and of order n
- if SIDE = 'R'.
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- = 'L': apply Q or Q' from the Left
- = 'R': apply Q or Q' from the Right
-
- TRANS (input) CHARACTER*1
- = 'N': apply Q (No transpose)
- = 'T': apply Q' (Transpose)
-
- M (input) INTEGER
- The number of rows of the matrix C. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix C. N >= 0.
-
- K (input) INTEGER
- The number of elementary reflectors whose product defines
- the matrix Q.
- If SIDE = 'L', M >= K >= 0;
- if SIDE = 'R', N >= K >= 0.
-
- A (input) DOUBLE PRECISION array, dimension (LDA,K)
- The i-th column must contain the vector which defines the
- elementary reflector H(i), for i = 1,2,...,k, as returned by
- DGEQLF in the last k columns of its array argument A.
- A is modified by the routine but restored on exit.
-
- LDA (input) INTEGER
- The leading dimension of the array A.
- If SIDE = 'L', LDA >= max(1,M);
- if SIDE = 'R', LDA >= max(1,N).
-
- TAU (input) DOUBLE PRECISION array, dimension (K)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DGEQLF.
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
- On entry, the m by n matrix C.
- On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
-
- LDC (input) INTEGER
- The leading dimension of the array C. LDC >= max(1,M).
-
- WORK (workspace) DOUBLE PRECISION array, dimension
- (N) if SIDE = 'L',
- (M) if SIDE = 'R'
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- *info = 0;
- left = lsame_(side, "L");
- notran = lsame_(trans, "N");
-
-/* NQ is the order of Q */
-
- if (left) {
- nq = *m;
- } else {
- nq = *n;
- }
- if ((! left && ! lsame_(side, "R"))) {
- *info = -1;
- } else if ((! notran && ! lsame_(trans, "T"))) {
- *info = -2;
- } else if (*m < 0) {
- *info = -3;
- } else if (*n < 0) {
- *info = -4;
- } else if (*k < 0 || *k > nq) {
- *info = -5;
- } else if (*lda < max(1,nq)) {
- *info = -7;
- } else if (*ldc < max(1,*m)) {
- *info = -10;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORM2L", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0 || *k == 0) {
- return 0;
- }
-
- if ((left && notran) || (! left && ! notran)) {
- i1 = 1;
- i2 = *k;
- i3 = 1;
- } else {
- i1 = *k;
- i2 = 1;
- i3 = -1;
- }
-
- if (left) {
- ni = *n;
- } else {
- mi = *m;
- }
-
- i__1 = i2;
- i__2 = i3;
- for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
- if (left) {
-
-/* H(i) is applied to C(1:m-k+i,1:n) */
-
- mi = *m - *k + i__;
- } else {
-
-/* H(i) is applied to C(1:m,1:n-k+i) */
-
- ni = *n - *k + i__;
- }
-
-/* Apply H(i) */
-
- aii = a[nq - *k + i__ + i__ * a_dim1];
- a[nq - *k + i__ + i__ * a_dim1] = 1.;
- dlarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &tau[i__], &c__[
- c_offset], ldc, &work[1]);
- a[nq - *k + i__ + i__ * a_dim1] = aii;
-/* L10: */
- }
- return 0;
-
-/* End of DORM2L */
-
-} /* dorm2l_ */
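-
-/*
-    Editor's note: the AII save/restore around DLARF above is the
-    usual trick for reflectors stored inside A: the entry that
-    logically holds the unit element of v is temporarily set to 1 so
-    that DLARF can read the whole column as v, and the original
-    value (part of the factored matrix) is put back afterwards.
-*/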
-
-/* Subroutine */ int dorm2r_(char *side, char *trans, integer *m, integer *n,
- integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
- c__, integer *ldc, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
- static doublereal aii;
- static logical left;
- extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- doublereal *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical notran;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DORM2R overwrites the general real m by n matrix C with
-
- Q * C if SIDE = 'L' and TRANS = 'N', or
-
- Q'* C if SIDE = 'L' and TRANS = 'T', or
-
- C * Q if SIDE = 'R' and TRANS = 'N', or
-
- C * Q' if SIDE = 'R' and TRANS = 'T',
-
- where Q is a real orthogonal matrix defined as the product of k
- elementary reflectors
-
- Q = H(1) H(2) . . . H(k)
-
- as returned by DGEQRF. Q is of order m if SIDE = 'L' and of order n
- if SIDE = 'R'.
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- = 'L': apply Q or Q' from the Left
- = 'R': apply Q or Q' from the Right
-
- TRANS (input) CHARACTER*1
- = 'N': apply Q (No transpose)
- = 'T': apply Q' (Transpose)
-
- M (input) INTEGER
- The number of rows of the matrix C. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix C. N >= 0.
-
- K (input) INTEGER
- The number of elementary reflectors whose product defines
- the matrix Q.
- If SIDE = 'L', M >= K >= 0;
- if SIDE = 'R', N >= K >= 0.
-
- A (input) DOUBLE PRECISION array, dimension (LDA,K)
- The i-th column must contain the vector which defines the
- elementary reflector H(i), for i = 1,2,...,k, as returned by
- DGEQRF in the first k columns of its array argument A.
- A is modified by the routine but restored on exit.
-
- LDA (input) INTEGER
- The leading dimension of the array A.
- If SIDE = 'L', LDA >= max(1,M);
- if SIDE = 'R', LDA >= max(1,N).
-
- TAU (input) DOUBLE PRECISION array, dimension (K)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DGEQRF.
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
- On entry, the m by n matrix C.
- On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
-
- LDC (input) INTEGER
- The leading dimension of the array C. LDC >= max(1,M).
-
- WORK (workspace) DOUBLE PRECISION array, dimension
- (N) if SIDE = 'L',
- (M) if SIDE = 'R'
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- *info = 0;
- left = lsame_(side, "L");
- notran = lsame_(trans, "N");
-
-/* NQ is the order of Q */
-
- if (left) {
- nq = *m;
- } else {
- nq = *n;
- }
- if ((! left && ! lsame_(side, "R"))) {
- *info = -1;
- } else if ((! notran && ! lsame_(trans, "T"))) {
- *info = -2;
- } else if (*m < 0) {
- *info = -3;
- } else if (*n < 0) {
- *info = -4;
- } else if (*k < 0 || *k > nq) {
- *info = -5;
- } else if (*lda < max(1,nq)) {
- *info = -7;
- } else if (*ldc < max(1,*m)) {
- *info = -10;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORM2R", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0 || *k == 0) {
- return 0;
- }
-
- if ((left && ! notran) || (! left && notran)) {
- i1 = 1;
- i2 = *k;
- i3 = 1;
- } else {
- i1 = *k;
- i2 = 1;
- i3 = -1;
- }
-
- if (left) {
- ni = *n;
- jc = 1;
- } else {
- mi = *m;
- ic = 1;
- }
-
- i__1 = i2;
- i__2 = i3;
- for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
- if (left) {
-
-/* H(i) is applied to C(i:m,1:n) */
-
- mi = *m - i__ + 1;
- ic = i__;
- } else {
-
-/* H(i) is applied to C(1:m,i:n) */
-
- ni = *n - i__ + 1;
- jc = i__;
- }
-
-/* Apply H(i) */
-
- aii = a[i__ + i__ * a_dim1];
- a[i__ + i__ * a_dim1] = 1.;
- dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &tau[i__], &c__[
- ic + jc * c_dim1], ldc, &work[1]);
- a[i__ + i__ * a_dim1] = aii;
-/* L10: */
- }
- return 0;
-
-/* End of DORM2R */
-
-} /* dorm2r_ */
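-
-/*
-    Editor's note: the I1/I2/I3 bounds above encode the order in
-    which the reflectors must be applied.  For example, forming Q*C
-    with Q = H(1) H(2) . . . H(k) expands to H(1)(H(2)(...(H(k)*C))),
-    so H(k) must act first and the loop runs from k down to 1;
-    transposing Q, or switching sides, reverses the direction.
-*/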
-
-/* Subroutine */ int dormbr_(char *vect, char *side, char *trans, integer *m,
- integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau,
- doublereal *c__, integer *ldc, doublereal *work, integer *lwork,
- integer *info)
-{
- /* System generated locals */
- address a__1[2];
- integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2];
- char ch__1[2];
-
- /* Builtin functions */
- /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
-
- /* Local variables */
- static integer i1, i2, nb, mi, ni, nq, nw;
- static logical left;
- extern logical lsame_(char *, char *);
- static integer iinfo;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *,
- integer *, doublereal *, integer *, doublereal *, doublereal *,
- integer *, doublereal *, integer *, integer *);
- static logical notran;
- extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
- integer *, doublereal *, integer *, doublereal *, doublereal *,
- integer *, doublereal *, integer *, integer *);
- static logical applyq;
- static char transt[1];
- static integer lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- If VECT = 'Q', DORMBR overwrites the general real M-by-N matrix C
- with
- SIDE = 'L' SIDE = 'R'
- TRANS = 'N': Q * C C * Q
- TRANS = 'T': Q**T * C C * Q**T
-
- If VECT = 'P', DORMBR overwrites the general real M-by-N matrix C
- with
- SIDE = 'L' SIDE = 'R'
- TRANS = 'N': P * C C * P
- TRANS = 'T': P**T * C C * P**T
-
- Here Q and P**T are the orthogonal matrices determined by DGEBRD when
- reducing a real matrix A to bidiagonal form: A = Q * B * P**T. Q and
- P**T are defined as products of elementary reflectors H(i) and G(i)
- respectively.
-
- Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the
- order of the orthogonal matrix Q or P**T that is applied.
-
- If VECT = 'Q', A is assumed to have been an NQ-by-K matrix:
- if nq >= k, Q = H(1) H(2) . . . H(k);
- if nq < k, Q = H(1) H(2) . . . H(nq-1).
-
- If VECT = 'P', A is assumed to have been a K-by-NQ matrix:
- if k < nq, P = G(1) G(2) . . . G(k);
- if k >= nq, P = G(1) G(2) . . . G(nq-1).
-
- Arguments
- =========
-
- VECT (input) CHARACTER*1
- = 'Q': apply Q or Q**T;
- = 'P': apply P or P**T.
-
- SIDE (input) CHARACTER*1
- = 'L': apply Q, Q**T, P or P**T from the Left;
- = 'R': apply Q, Q**T, P or P**T from the Right.
-
- TRANS (input) CHARACTER*1
- = 'N': No transpose, apply Q or P;
- = 'T': Transpose, apply Q**T or P**T.
-
- M (input) INTEGER
- The number of rows of the matrix C. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix C. N >= 0.
-
- K (input) INTEGER
- If VECT = 'Q', the number of columns in the original
- matrix reduced by DGEBRD.
- If VECT = 'P', the number of rows in the original
- matrix reduced by DGEBRD.
- K >= 0.
-
- A (input) DOUBLE PRECISION array, dimension
- (LDA,min(nq,K)) if VECT = 'Q'
- (LDA,nq) if VECT = 'P'
- The vectors which define the elementary reflectors H(i) and
- G(i), whose products determine the matrices Q and P, as
- returned by DGEBRD.
-
- LDA (input) INTEGER
- The leading dimension of the array A.
- If VECT = 'Q', LDA >= max(1,nq);
- if VECT = 'P', LDA >= max(1,min(nq,K)).
-
- TAU (input) DOUBLE PRECISION array, dimension (min(nq,K))
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i) or G(i) which determines Q or P, as returned
- by DGEBRD in the array argument TAUQ or TAUP.
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
- On entry, the M-by-N matrix C.
- On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q
- or P*C or P**T*C or C*P or C*P**T.
-
- LDC (input) INTEGER
- The leading dimension of the array C. LDC >= max(1,M).
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK.
- If SIDE = 'L', LWORK >= max(1,N);
- if SIDE = 'R', LWORK >= max(1,M).
- For optimum performance LWORK >= N*NB if SIDE = 'L', and
- LWORK >= M*NB if SIDE = 'R', where NB is the optimal
- blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- *info = 0;
- applyq = lsame_(vect, "Q");
- left = lsame_(side, "L");
- notran = lsame_(trans, "N");
- lquery = *lwork == -1;
-
-/* NQ is the order of Q or P and NW is the minimum dimension of WORK */
-
- if (left) {
- nq = *m;
- nw = *n;
- } else {
- nq = *n;
- nw = *m;
- }
- if ((! applyq && ! lsame_(vect, "P"))) {
- *info = -1;
- } else if ((! left && ! lsame_(side, "R"))) {
- *info = -2;
- } else if ((! notran && ! lsame_(trans, "T"))) {
- *info = -3;
- } else if (*m < 0) {
- *info = -4;
- } else if (*n < 0) {
- *info = -5;
- } else if (*k < 0) {
- *info = -6;
- } else /* if(complicated condition) */ {
-/* Computing MAX */
- i__1 = 1, i__2 = min(nq,*k);
- if ((applyq && *lda < max(1,nq)) || (! applyq && *lda < max(i__1,i__2)
- )) {
- *info = -8;
- } else if (*ldc < max(1,*m)) {
- *info = -11;
- } else if ((*lwork < max(1,nw) && ! lquery)) {
- *info = -13;
- }
- }
-
- if (*info == 0) {
- if (applyq) {
- if (left) {
-/* Writing concatenation */
- i__3[0] = 1, a__1[0] = side;
- i__3[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- i__1 = *m - 1;
- i__2 = *m - 1;
- nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__1, n, &i__2, &c_n1, (
- ftnlen)6, (ftnlen)2);
- } else {
-/* Writing concatenation */
- i__3[0] = 1, a__1[0] = side;
- i__3[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- i__1 = *n - 1;
- i__2 = *n - 1;
- nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__1, &i__2, &c_n1, (
- ftnlen)6, (ftnlen)2);
- }
- } else {
- if (left) {
-/* Writing concatenation */
- i__3[0] = 1, a__1[0] = side;
- i__3[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- i__1 = *m - 1;
- i__2 = *m - 1;
- nb = ilaenv_(&c__1, "DORMLQ", ch__1, &i__1, n, &i__2, &c_n1, (
- ftnlen)6, (ftnlen)2);
- } else {
-/* Writing concatenation */
- i__3[0] = 1, a__1[0] = side;
- i__3[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- i__1 = *n - 1;
- i__2 = *n - 1;
- nb = ilaenv_(&c__1, "DORMLQ", ch__1, m, &i__1, &i__2, &c_n1, (
- ftnlen)6, (ftnlen)2);
- }
- }
- lwkopt = max(1,nw) * nb;
- work[1] = (doublereal) lwkopt;
- }
-
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORMBR", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- work[1] = 1.;
- if (*m == 0 || *n == 0) {
- return 0;
- }
-
- if (applyq) {
-
-/* Apply Q */
-
- if (nq >= *k) {
-
-/* Q was determined by a call to DGEBRD with nq >= k */
-
- dormqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
- c_offset], ldc, &work[1], lwork, &iinfo);
- } else if (nq > 1) {
-
-/* Q was determined by a call to DGEBRD with nq < k */
-
- if (left) {
- mi = *m - 1;
- ni = *n;
- i1 = 2;
- i2 = 1;
- } else {
- mi = *m;
- ni = *n - 1;
- i1 = 1;
- i2 = 2;
- }
- i__1 = nq - 1;
- dormqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1]
- , &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
- }
- } else {
-
-/* Apply P */
-
- if (notran) {
- *(unsigned char *)transt = 'T';
- } else {
- *(unsigned char *)transt = 'N';
- }
- if (nq > *k) {
-
-/* P was determined by a call to DGEBRD with nq > k */
-
- dormlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[
- c_offset], ldc, &work[1], lwork, &iinfo);
- } else if (nq > 1) {
-
-/* P was determined by a call to DGEBRD with nq <= k */
-
- if (left) {
- mi = *m - 1;
- ni = *n;
- i1 = 2;
- i2 = 1;
- } else {
- mi = *m;
- ni = *n - 1;
- i1 = 1;
- i2 = 2;
- }
- i__1 = nq - 1;
- dormlq_(side, transt, &mi, &ni, &i__1, &a[((a_dim1) << (1)) + 1],
- lda, &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1],
- lwork, &iinfo);
- }
- }
- work[1] = (doublereal) lwkopt;
- return 0;
-
-/* End of DORMBR */
-
-} /* dormbr_ */
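-
-/*
-   Illustration only, not part of the translated LAPACK source: a minimal
-   sketch of the LWORK = -1 workspace query honoured by dormbr_ (see the
-   lquery branch above).  It assumes dormbr_'s standard argument order
-   (VECT, SIDE, TRANS, M, N, K, A, LDA, TAU, C, LDC, WORK, LWORK, INFO)
-   and the integer/doublereal typedefs from f2c.h used throughout this
-   file.
-*/
-
-static integer dormbr_optimal_lwork(integer m, integer n, integer k,
-	doublereal *a, integer lda, doublereal *tau, doublereal *c__,
-	integer ldc)
-{
-    doublereal wkopt;
-    integer lwork = -1, info;
-
-    /* With LWORK = -1 the routine only reports the optimal size in
-       WORK(1) and returns; no multiplication is performed. */
-    dormbr_("Q", "L", "T", &m, &n, &k, a, &lda, tau, c__, &ldc,
-	    &wkopt, &lwork, &info);
-    return info == 0 ? (integer) wkopt : (integer) -1;
-}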
-
-/* Subroutine */ int dorml2_(char *side, char *trans, integer *m, integer *n,
- integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
- c__, integer *ldc, doublereal *work, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
-
- /* Local variables */
- static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
- static doublereal aii;
- static logical left;
- extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- doublereal *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static logical notran;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DORML2 overwrites the general real m by n matrix C with
-
- Q * C if SIDE = 'L' and TRANS = 'N', or
-
- Q'* C if SIDE = 'L' and TRANS = 'T', or
-
- C * Q if SIDE = 'R' and TRANS = 'N', or
-
- C * Q' if SIDE = 'R' and TRANS = 'T',
-
- where Q is a real orthogonal matrix defined as the product of k
- elementary reflectors
-
- Q = H(k) . . . H(2) H(1)
-
- as returned by DGELQF. Q is of order m if SIDE = 'L' and of order n
- if SIDE = 'R'.
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- = 'L': apply Q or Q' from the Left
- = 'R': apply Q or Q' from the Right
-
- TRANS (input) CHARACTER*1
- = 'N': apply Q (No transpose)
- = 'T': apply Q' (Transpose)
-
- M (input) INTEGER
- The number of rows of the matrix C. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix C. N >= 0.
-
- K (input) INTEGER
- The number of elementary reflectors whose product defines
- the matrix Q.
- If SIDE = 'L', M >= K >= 0;
- if SIDE = 'R', N >= K >= 0.
-
- A (input) DOUBLE PRECISION array, dimension
- (LDA,M) if SIDE = 'L',
- (LDA,N) if SIDE = 'R'
- The i-th row must contain the vector which defines the
- elementary reflector H(i), for i = 1,2,...,k, as returned by
- DGELQF in the first k rows of its array argument A.
- A is modified by the routine but restored on exit.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,K).
-
- TAU (input) DOUBLE PRECISION array, dimension (K)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DGELQF.
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
- On entry, the m by n matrix C.
- On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
-
- LDC (input) INTEGER
- The leading dimension of the array C. LDC >= max(1,M).
-
- WORK (workspace) DOUBLE PRECISION array, dimension
- (N) if SIDE = 'L',
- (M) if SIDE = 'R'
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- *info = 0;
- left = lsame_(side, "L");
- notran = lsame_(trans, "N");
-
-/* NQ is the order of Q */
-
- if (left) {
- nq = *m;
- } else {
- nq = *n;
- }
- if ((! left && ! lsame_(side, "R"))) {
- *info = -1;
- } else if ((! notran && ! lsame_(trans, "T"))) {
- *info = -2;
- } else if (*m < 0) {
- *info = -3;
- } else if (*n < 0) {
- *info = -4;
- } else if (*k < 0 || *k > nq) {
- *info = -5;
- } else if (*lda < max(1,*k)) {
- *info = -7;
- } else if (*ldc < max(1,*m)) {
- *info = -10;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORML2", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0 || *k == 0) {
- return 0;
- }
-
- if ((left && notran) || (! left && ! notran)) {
- i1 = 1;
- i2 = *k;
- i3 = 1;
- } else {
- i1 = *k;
- i2 = 1;
- i3 = -1;
- }
-
- if (left) {
- ni = *n;
- jc = 1;
- } else {
- mi = *m;
- ic = 1;
- }
-
- i__1 = i2;
- i__2 = i3;
- for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
- if (left) {
-
-/* H(i) is applied to C(i:m,1:n) */
-
- mi = *m - i__ + 1;
- ic = i__;
- } else {
-
-/* H(i) is applied to C(1:m,i:n) */
-
- ni = *n - i__ + 1;
- jc = i__;
- }
-
-/* Apply H(i) */
-
- aii = a[i__ + i__ * a_dim1];
- a[i__ + i__ * a_dim1] = 1.;
- dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &tau[i__], &c__[
- ic + jc * c_dim1], ldc, &work[1]);
- a[i__ + i__ * a_dim1] = aii;
-/* L10: */
- }
- return 0;
-
-/* End of DORML2 */
-
-} /* dorml2_ */
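-
-/*
-   Illustration only: the loop above applies one elementary reflector per
-   iteration through dlarf_.  The plain-C sketch below spells out that
-   rank-1 update, H*C = (I - tau*v*v')*C, on a column-major m-by-n block:
-   the same arithmetic, without the BLAS call.
-*/
-
-static void apply_reflector_left(int m, int n, const double *v, double tau,
-	double *c, int ldc)
-{
-    int i, j;
-
-    for (j = 0; j < n; ++j) {
-	/* w = v' * C(:,j) */
-	double w = 0.;
-	for (i = 0; i < m; ++i)
-	    w += v[i] * c[i + j * ldc];
-	/* C(:,j) := C(:,j) - tau * w * v */
-	for (i = 0; i < m; ++i)
-	    c[i + j * ldc] -= tau * w * v[i];
-    }
-}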
-
-/* Subroutine */ int dormlq_(char *side, char *trans, integer *m, integer *n,
- integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
- c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
-{
- /* System generated locals */
- address a__1[2];
- integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
- i__5;
- char ch__1[2];
-
- /* Builtin functions */
- /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
-
- /* Local variables */
- static integer i__;
- static doublereal t[4160] /* was [65][64] */;
- static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
- static logical left;
- extern logical lsame_(char *, char *);
- static integer nbmin, iinfo;
-    extern /* Subroutine */ int dorml2_(char *, char *, integer *, integer *,
-	    integer *, doublereal *, integer *, doublereal *, doublereal *,
-	    integer *, doublereal *, integer *);
-    extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *,
-	    integer *, integer *, integer *, doublereal *, integer *,
-	    doublereal *, integer *, doublereal *, integer *, doublereal *,
-	    integer *);
-    extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *,
-	    doublereal *, integer *, doublereal *, doublereal *, integer *);
-    extern /* Subroutine */ int xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static logical notran;
- static integer ldwork;
- static char transt[1];
- static integer lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DORMLQ overwrites the general real M-by-N matrix C with
-
- SIDE = 'L' SIDE = 'R'
- TRANS = 'N': Q * C C * Q
- TRANS = 'T': Q**T * C C * Q**T
-
- where Q is a real orthogonal matrix defined as the product of k
- elementary reflectors
-
- Q = H(k) . . . H(2) H(1)
-
- as returned by DGELQF. Q is of order M if SIDE = 'L' and of order N
- if SIDE = 'R'.
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- = 'L': apply Q or Q**T from the Left;
- = 'R': apply Q or Q**T from the Right.
-
- TRANS (input) CHARACTER*1
- = 'N': No transpose, apply Q;
- = 'T': Transpose, apply Q**T.
-
- M (input) INTEGER
- The number of rows of the matrix C. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix C. N >= 0.
-
- K (input) INTEGER
- The number of elementary reflectors whose product defines
- the matrix Q.
- If SIDE = 'L', M >= K >= 0;
- if SIDE = 'R', N >= K >= 0.
-
- A (input) DOUBLE PRECISION array, dimension
- (LDA,M) if SIDE = 'L',
- (LDA,N) if SIDE = 'R'
- The i-th row must contain the vector which defines the
- elementary reflector H(i), for i = 1,2,...,k, as returned by
- DGELQF in the first k rows of its array argument A.
- A is modified by the routine but restored on exit.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,K).
-
- TAU (input) DOUBLE PRECISION array, dimension (K)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DGELQF.
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
- On entry, the M-by-N matrix C.
- On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
-
- LDC (input) INTEGER
- The leading dimension of the array C. LDC >= max(1,M).
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK.
- If SIDE = 'L', LWORK >= max(1,N);
- if SIDE = 'R', LWORK >= max(1,M).
- For optimum performance LWORK >= N*NB if SIDE = 'L', and
- LWORK >= M*NB if SIDE = 'R', where NB is the optimal
- blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- *info = 0;
- left = lsame_(side, "L");
- notran = lsame_(trans, "N");
- lquery = *lwork == -1;
-
-/* NQ is the order of Q and NW is the minimum dimension of WORK */
-
- if (left) {
- nq = *m;
- nw = *n;
- } else {
- nq = *n;
- nw = *m;
- }
- if ((! left && ! lsame_(side, "R"))) {
- *info = -1;
- } else if ((! notran && ! lsame_(trans, "T"))) {
- *info = -2;
- } else if (*m < 0) {
- *info = -3;
- } else if (*n < 0) {
- *info = -4;
- } else if (*k < 0 || *k > nq) {
- *info = -5;
- } else if (*lda < max(1,*k)) {
- *info = -7;
- } else if (*ldc < max(1,*m)) {
- *info = -10;
- } else if ((*lwork < max(1,nw) && ! lquery)) {
- *info = -12;
- }
-
- if (*info == 0) {
-
-/*
- Determine the block size. NB may be at most NBMAX, where NBMAX
- is used to define the local array T.
-
- Computing MIN
- Writing concatenation
-*/
- i__3[0] = 1, a__1[0] = side;
- i__3[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- i__1 = 64, i__2 = ilaenv_(&c__1, "DORMLQ", ch__1, m, n, k, &c_n1, (
- ftnlen)6, (ftnlen)2);
- nb = min(i__1,i__2);
- lwkopt = max(1,nw) * nb;
- work[1] = (doublereal) lwkopt;
- }
-
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORMLQ", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0 || *k == 0) {
- work[1] = 1.;
- return 0;
- }
-
- nbmin = 2;
- ldwork = nw;
- if ((nb > 1 && nb < *k)) {
- iws = nw * nb;
- if (*lwork < iws) {
- nb = *lwork / ldwork;
-/*
- Computing MAX
- Writing concatenation
-*/
- i__3[0] = 1, a__1[0] = side;
- i__3[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- i__1 = 2, i__2 = ilaenv_(&c__2, "DORMLQ", ch__1, m, n, k, &c_n1, (
- ftnlen)6, (ftnlen)2);
- nbmin = max(i__1,i__2);
- }
- } else {
- iws = nw;
- }
-
- if (nb < nbmin || nb >= *k) {
-
-/* Use unblocked code */
-
- dorml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
- c_offset], ldc, &work[1], &iinfo);
- } else {
-
-/* Use blocked code */
-
- if ((left && notran) || (! left && ! notran)) {
- i1 = 1;
- i2 = *k;
- i3 = nb;
- } else {
- i1 = (*k - 1) / nb * nb + 1;
- i2 = 1;
- i3 = -nb;
- }
-
- if (left) {
- ni = *n;
- jc = 1;
- } else {
- mi = *m;
- ic = 1;
- }
-
- if (notran) {
- *(unsigned char *)transt = 'T';
- } else {
- *(unsigned char *)transt = 'N';
- }
-
- i__1 = i2;
- i__2 = i3;
- for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
-/* Computing MIN */
- i__4 = nb, i__5 = *k - i__ + 1;
- ib = min(i__4,i__5);
-
-/*
- Form the triangular factor of the block reflector
- H = H(i) H(i+1) . . . H(i+ib-1)
-*/
-
- i__4 = nq - i__ + 1;
- dlarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1],
- lda, &tau[i__], t, &c__65);
- if (left) {
-
-/* H or H' is applied to C(i:m,1:n) */
-
- mi = *m - i__ + 1;
- ic = i__;
- } else {
-
-/* H or H' is applied to C(1:m,i:n) */
-
- ni = *n - i__ + 1;
- jc = i__;
- }
-
-/* Apply H or H' */
-
- dlarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__
- + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1],
- ldc, &work[1], &ldwork);
-/* L10: */
- }
- }
- work[1] = (doublereal) lwkopt;
- return 0;
-
-/* End of DORMLQ */
-
-} /* dormlq_ */
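-
-/*
-   Illustration only: the calling sequence the docstring above implies.
-   Factor a 2-by-3 A as L*Q with dgelqf_ (assumed available with its
-   standard LAPACK signature; it is referenced by the docstring but not
-   defined here), then overwrite C with Q**T * C via dormlq_.  Everything
-   is column-major, per Fortran convention.
-*/
-
-extern int dgelqf_(integer *m, integer *n, doublereal *a, integer *lda,
-	doublereal *tau, doublereal *work, integer *lwork, integer *info);
-
-static int dormlq_example(void)
-{
-    integer m = 2, n = 3, nrhs = 1, lwork = 64, info;
-    doublereal a[2 * 3] = { 1., 4.,  2., 5.,  3., 6. };	/* 2x3, lda = 2 */
-    doublereal c__[3] = { 1., 1., 1. };			/* 3x1, ldc = 3 */
-    doublereal tau[2], work[64];
-
-    dgelqf_(&m, &n, a, &m, tau, work, &lwork, &info);	/* A = L * Q    */
-    if (info != 0) return (int) info;
-
-    /* SIDE = 'L': Q is 3-by-3, built from k = m = 2 reflectors. */
-    dormlq_("Left", "Transpose", &n, &nrhs, &m, a, &m, tau, c__, &n,
-	    work, &lwork, &info);
-    return (int) info;
-}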
-
-/* Subroutine */ int dormql_(char *side, char *trans, integer *m, integer *n,
- integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
- c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
-{
- /* System generated locals */
- address a__1[2];
- integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
- i__5;
- char ch__1[2];
-
- /* Builtin functions */
- /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
-
- /* Local variables */
- static integer i__;
- static doublereal t[4160] /* was [65][64] */;
- static integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
- static logical left;
- extern logical lsame_(char *, char *);
- static integer nbmin, iinfo;
-    extern /* Subroutine */ int dorm2l_(char *, char *, integer *, integer *,
-	    integer *, doublereal *, integer *, doublereal *, doublereal *,
-	    integer *, doublereal *, integer *);
-    extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *,
-	    integer *, integer *, integer *, doublereal *, integer *,
-	    doublereal *, integer *, doublereal *, integer *, doublereal *,
-	    integer *);
-    extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *,
-	    doublereal *, integer *, doublereal *, doublereal *, integer *);
-    extern /* Subroutine */ int xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static logical notran;
- static integer ldwork, lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DORMQL overwrites the general real M-by-N matrix C with
-
- SIDE = 'L' SIDE = 'R'
- TRANS = 'N': Q * C C * Q
- TRANS = 'T': Q**T * C C * Q**T
-
- where Q is a real orthogonal matrix defined as the product of k
- elementary reflectors
-
- Q = H(k) . . . H(2) H(1)
-
- as returned by DGEQLF. Q is of order M if SIDE = 'L' and of order N
- if SIDE = 'R'.
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- = 'L': apply Q or Q**T from the Left;
- = 'R': apply Q or Q**T from the Right.
-
- TRANS (input) CHARACTER*1
- = 'N': No transpose, apply Q;
- = 'T': Transpose, apply Q**T.
-
- M (input) INTEGER
- The number of rows of the matrix C. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix C. N >= 0.
-
- K (input) INTEGER
- The number of elementary reflectors whose product defines
- the matrix Q.
- If SIDE = 'L', M >= K >= 0;
- if SIDE = 'R', N >= K >= 0.
-
- A (input) DOUBLE PRECISION array, dimension (LDA,K)
- The i-th column must contain the vector which defines the
- elementary reflector H(i), for i = 1,2,...,k, as returned by
- DGEQLF in the last k columns of its array argument A.
- A is modified by the routine but restored on exit.
-
- LDA (input) INTEGER
- The leading dimension of the array A.
- If SIDE = 'L', LDA >= max(1,M);
- if SIDE = 'R', LDA >= max(1,N).
-
- TAU (input) DOUBLE PRECISION array, dimension (K)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DGEQLF.
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
- On entry, the M-by-N matrix C.
- On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
-
- LDC (input) INTEGER
- The leading dimension of the array C. LDC >= max(1,M).
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK.
- If SIDE = 'L', LWORK >= max(1,N);
- if SIDE = 'R', LWORK >= max(1,M).
- For optimum performance LWORK >= N*NB if SIDE = 'L', and
- LWORK >= M*NB if SIDE = 'R', where NB is the optimal
- blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- *info = 0;
- left = lsame_(side, "L");
- notran = lsame_(trans, "N");
- lquery = *lwork == -1;
-
-/* NQ is the order of Q and NW is the minimum dimension of WORK */
-
- if (left) {
- nq = *m;
- nw = *n;
- } else {
- nq = *n;
- nw = *m;
- }
- if ((! left && ! lsame_(side, "R"))) {
- *info = -1;
- } else if ((! notran && ! lsame_(trans, "T"))) {
- *info = -2;
- } else if (*m < 0) {
- *info = -3;
- } else if (*n < 0) {
- *info = -4;
- } else if (*k < 0 || *k > nq) {
- *info = -5;
- } else if (*lda < max(1,nq)) {
- *info = -7;
- } else if (*ldc < max(1,*m)) {
- *info = -10;
- } else if ((*lwork < max(1,nw) && ! lquery)) {
- *info = -12;
- }
-
- if (*info == 0) {
-
-/*
- Determine the block size. NB may be at most NBMAX, where NBMAX
- is used to define the local array T.
-
- Computing MIN
- Writing concatenation
-*/
- i__3[0] = 1, a__1[0] = side;
- i__3[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQL", ch__1, m, n, k, &c_n1, (
- ftnlen)6, (ftnlen)2);
- nb = min(i__1,i__2);
- lwkopt = max(1,nw) * nb;
- work[1] = (doublereal) lwkopt;
- }
-
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORMQL", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0 || *k == 0) {
- work[1] = 1.;
- return 0;
- }
-
- nbmin = 2;
- ldwork = nw;
- if ((nb > 1 && nb < *k)) {
- iws = nw * nb;
- if (*lwork < iws) {
- nb = *lwork / ldwork;
-/*
- Computing MAX
- Writing concatenation
-*/
- i__3[0] = 1, a__1[0] = side;
- i__3[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQL", ch__1, m, n, k, &c_n1, (
- ftnlen)6, (ftnlen)2);
- nbmin = max(i__1,i__2);
- }
- } else {
- iws = nw;
- }
-
- if (nb < nbmin || nb >= *k) {
-
-/* Use unblocked code */
-
- dorm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
- c_offset], ldc, &work[1], &iinfo);
- } else {
-
-/* Use blocked code */
-
- if ((left && notran) || (! left && ! notran)) {
- i1 = 1;
- i2 = *k;
- i3 = nb;
- } else {
- i1 = (*k - 1) / nb * nb + 1;
- i2 = 1;
- i3 = -nb;
- }
-
- if (left) {
- ni = *n;
- } else {
- mi = *m;
- }
-
- i__1 = i2;
- i__2 = i3;
- for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
-/* Computing MIN */
- i__4 = nb, i__5 = *k - i__ + 1;
- ib = min(i__4,i__5);
-
-/*
- Form the triangular factor of the block reflector
- H = H(i+ib-1) . . . H(i+1) H(i)
-*/
-
- i__4 = nq - *k + i__ + ib - 1;
- dlarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1]
- , lda, &tau[i__], t, &c__65);
- if (left) {
-
-/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
-
- mi = *m - *k + i__ + ib - 1;
- } else {
-
-/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
-
- ni = *n - *k + i__ + ib - 1;
- }
-
-/* Apply H or H' */
-
- dlarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[
- i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, &
- work[1], &ldwork);
-/* L10: */
- }
- }
- work[1] = (doublereal) lwkopt;
- return 0;
-
-/* End of DORMQL */
-
-} /* dormql_ */
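-
-/*
-   Illustration only: the two-call workspace pattern described in the
-   LWORK paragraph of the docstring above.  Query dormql_ with LWORK = -1,
-   size the array from WORK(1), then make the real call.  stdlib.h is
-   assumed for malloc/free; the arguments are whatever a DGEQLF-based
-   caller would hold.
-*/
-
-#include <stdlib.h>
-
-static int dormql_with_query(integer m, integer n, integer k, doublereal *a,
-	integer lda, doublereal *tau, doublereal *c__, integer ldc)
-{
-    doublereal wkopt, *work;
-    integer lwork = -1, info;
-
-    /* Query: the optimal LWORK comes back in WORK(1), nothing else
-       happens. */
-    dormql_("Left", "Transpose", &m, &n, &k, a, &lda, tau, c__, &ldc,
-	    &wkopt, &lwork, &info);
-    if (info != 0) return (int) info;
-
-    lwork = (integer) wkopt;
-    work = (doublereal *) malloc((size_t) lwork * sizeof(doublereal));
-    if (work == NULL) return -1;
-
-    dormql_("Left", "Transpose", &m, &n, &k, a, &lda, tau, c__, &ldc,
-	    work, &lwork, &info);
-    free(work);
-    return (int) info;
-}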
-
-/* Subroutine */ int dormqr_(char *side, char *trans, integer *m, integer *n,
- integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
- c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
-{
- /* System generated locals */
- address a__1[2];
- integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
- i__5;
- char ch__1[2];
-
- /* Builtin functions */
- /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
-
- /* Local variables */
- static integer i__;
- static doublereal t[4160] /* was [65][64] */;
- static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
- static logical left;
- extern logical lsame_(char *, char *);
- static integer nbmin, iinfo;
-    extern /* Subroutine */ int dorm2r_(char *, char *, integer *, integer *,
-	    integer *, doublereal *, integer *, doublereal *, doublereal *,
-	    integer *, doublereal *, integer *);
-    extern /* Subroutine */ int dlarfb_(char *, char *, char *, char *,
-	    integer *, integer *, integer *, doublereal *, integer *,
-	    doublereal *, integer *, doublereal *, integer *, doublereal *,
-	    integer *);
-    extern /* Subroutine */ int dlarft_(char *, char *, integer *, integer *,
-	    doublereal *, integer *, doublereal *, doublereal *, integer *);
-    extern /* Subroutine */ int xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static logical notran;
- static integer ldwork, lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DORMQR overwrites the general real M-by-N matrix C with
-
- SIDE = 'L' SIDE = 'R'
- TRANS = 'N': Q * C C * Q
- TRANS = 'T': Q**T * C C * Q**T
-
- where Q is a real orthogonal matrix defined as the product of k
- elementary reflectors
-
- Q = H(1) H(2) . . . H(k)
-
- as returned by DGEQRF. Q is of order M if SIDE = 'L' and of order N
- if SIDE = 'R'.
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- = 'L': apply Q or Q**T from the Left;
- = 'R': apply Q or Q**T from the Right.
-
- TRANS (input) CHARACTER*1
- = 'N': No transpose, apply Q;
- = 'T': Transpose, apply Q**T.
-
- M (input) INTEGER
- The number of rows of the matrix C. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix C. N >= 0.
-
- K (input) INTEGER
- The number of elementary reflectors whose product defines
- the matrix Q.
- If SIDE = 'L', M >= K >= 0;
- if SIDE = 'R', N >= K >= 0.
-
- A (input) DOUBLE PRECISION array, dimension (LDA,K)
- The i-th column must contain the vector which defines the
- elementary reflector H(i), for i = 1,2,...,k, as returned by
- DGEQRF in the first k columns of its array argument A.
- A is modified by the routine but restored on exit.
-
- LDA (input) INTEGER
- The leading dimension of the array A.
- If SIDE = 'L', LDA >= max(1,M);
- if SIDE = 'R', LDA >= max(1,N).
-
- TAU (input) DOUBLE PRECISION array, dimension (K)
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DGEQRF.
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
- On entry, the M-by-N matrix C.
- On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
-
- LDC (input) INTEGER
- The leading dimension of the array C. LDC >= max(1,M).
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK.
- If SIDE = 'L', LWORK >= max(1,N);
- if SIDE = 'R', LWORK >= max(1,M).
- For optimum performance LWORK >= N*NB if SIDE = 'L', and
- LWORK >= M*NB if SIDE = 'R', where NB is the optimal
- blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- *info = 0;
- left = lsame_(side, "L");
- notran = lsame_(trans, "N");
- lquery = *lwork == -1;
-
-/* NQ is the order of Q and NW is the minimum dimension of WORK */
-
- if (left) {
- nq = *m;
- nw = *n;
- } else {
- nq = *n;
- nw = *m;
- }
- if ((! left && ! lsame_(side, "R"))) {
- *info = -1;
- } else if ((! notran && ! lsame_(trans, "T"))) {
- *info = -2;
- } else if (*m < 0) {
- *info = -3;
- } else if (*n < 0) {
- *info = -4;
- } else if (*k < 0 || *k > nq) {
- *info = -5;
- } else if (*lda < max(1,nq)) {
- *info = -7;
- } else if (*ldc < max(1,*m)) {
- *info = -10;
- } else if ((*lwork < max(1,nw) && ! lquery)) {
- *info = -12;
- }
-
- if (*info == 0) {
-
-/*
- Determine the block size. NB may be at most NBMAX, where NBMAX
- is used to define the local array T.
-
- Computing MIN
- Writing concatenation
-*/
- i__3[0] = 1, a__1[0] = side;
- i__3[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQR", ch__1, m, n, k, &c_n1, (
- ftnlen)6, (ftnlen)2);
- nb = min(i__1,i__2);
- lwkopt = max(1,nw) * nb;
- work[1] = (doublereal) lwkopt;
- }
-
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DORMQR", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0 || *k == 0) {
- work[1] = 1.;
- return 0;
- }
-
- nbmin = 2;
- ldwork = nw;
- if ((nb > 1 && nb < *k)) {
- iws = nw * nb;
- if (*lwork < iws) {
- nb = *lwork / ldwork;
-/*
- Computing MAX
- Writing concatenation
-*/
- i__3[0] = 1, a__1[0] = side;
- i__3[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
- i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQR", ch__1, m, n, k, &c_n1, (
- ftnlen)6, (ftnlen)2);
- nbmin = max(i__1,i__2);
- }
- } else {
- iws = nw;
- }
-
- if (nb < nbmin || nb >= *k) {
-
-/* Use unblocked code */
-
- dorm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
- c_offset], ldc, &work[1], &iinfo);
- } else {
-
-/* Use blocked code */
-
- if ((left && ! notran) || (! left && notran)) {
- i1 = 1;
- i2 = *k;
- i3 = nb;
- } else {
- i1 = (*k - 1) / nb * nb + 1;
- i2 = 1;
- i3 = -nb;
- }
-
- if (left) {
- ni = *n;
- jc = 1;
- } else {
- mi = *m;
- ic = 1;
- }
-
- i__1 = i2;
- i__2 = i3;
- for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
-/* Computing MIN */
- i__4 = nb, i__5 = *k - i__ + 1;
- ib = min(i__4,i__5);
-
-/*
- Form the triangular factor of the block reflector
- H = H(i) H(i+1) . . . H(i+ib-1)
-*/
-
- i__4 = nq - i__ + 1;
-	    dlarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ *
-		    a_dim1], lda, &tau[i__], t, &c__65);
- if (left) {
-
-/* H or H' is applied to C(i:m,1:n) */
-
- mi = *m - i__ + 1;
- ic = i__;
- } else {
-
-/* H or H' is applied to C(1:m,i:n) */
-
- ni = *n - i__ + 1;
- jc = i__;
- }
-
-/* Apply H or H' */
-
- dlarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[
- i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc *
- c_dim1], ldc, &work[1], &ldwork);
-/* L10: */
- }
- }
- work[1] = (doublereal) lwkopt;
- return 0;
-
-/* End of DORMQR */
-
-} /* dormqr_ */
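-
-/*
-   Illustration only: the least-squares use the docstring above points at,
-   for an overdetermined system with m >= n.  dgeqrf_ is assumed available
-   with its standard LAPACK signature (it is referenced by the docstring,
-   not defined here), and dtrsm_ is the Level-3 BLAS routine this file
-   already declares elsewhere.  work/lwork are assumed large enough for
-   both calls (LWORK >= n suffices minimally); on return b(1:n) holds the
-   least-squares solution.
-*/
-
-extern int dgeqrf_(integer *m, integer *n, doublereal *a, integer *lda,
-	doublereal *tau, doublereal *work, integer *lwork, integer *info);
-extern int dtrsm_(char *side, char *uplo, char *transa, char *diag,
-	integer *m, integer *n, doublereal *alpha, doublereal *a,
-	integer *lda, doublereal *b, integer *ldb);
-
-static int qr_least_squares(integer m, integer n, doublereal *a, integer lda,
-	doublereal *b, doublereal *tau, doublereal *work, integer lwork)
-{
-    integer info, ione = 1;
-    doublereal one = 1.;
-
-    dgeqrf_(&m, &n, a, &lda, tau, work, &lwork, &info);	/* A = Q*R */
-    if (info != 0) return (int) info;
-
-    /* b := Q**T * b (SIDE = 'L', k = n reflectors). */
-    dormqr_("Left", "Transpose", &m, &ione, &n, a, &lda, tau, b, &m,
-	    work, &lwork, &info);
-    if (info != 0) return (int) info;
-
-    /* x = R \ b(1:n); R is the upper triangle of the factored A. */
-    dtrsm_("Left", "Upper", "No transpose", "Non-unit", &n, &ione, &one,
-	    a, &lda, b, &m);
-    return 0;
-}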
-
-/* Subroutine */ int dormtr_(char *side, char *uplo, char *trans, integer *m,
- integer *n, doublereal *a, integer *lda, doublereal *tau, doublereal *
- c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
-{
- /* System generated locals */
- address a__1[2];
- integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3;
- char ch__1[2];
-
- /* Builtin functions */
- /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
-
- /* Local variables */
- static integer i1, i2, nb, mi, ni, nq, nw;
- static logical left;
- extern logical lsame_(char *, char *);
- static integer iinfo;
- static logical upper;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- extern /* Subroutine */ int dormql_(char *, char *, integer *, integer *,
- integer *, doublereal *, integer *, doublereal *, doublereal *,
- integer *, doublereal *, integer *, integer *),
- dormqr_(char *, char *, integer *, integer *, integer *,
- doublereal *, integer *, doublereal *, doublereal *, integer *,
- doublereal *, integer *, integer *);
- static integer lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DORMTR overwrites the general real M-by-N matrix C with
-
- SIDE = 'L' SIDE = 'R'
- TRANS = 'N': Q * C C * Q
- TRANS = 'T': Q**T * C C * Q**T
-
- where Q is a real orthogonal matrix of order nq, with nq = m if
- SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of
- nq-1 elementary reflectors, as returned by DSYTRD:
-
- if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1);
-
- if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1).
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- = 'L': apply Q or Q**T from the Left;
- = 'R': apply Q or Q**T from the Right.
-
- UPLO (input) CHARACTER*1
- = 'U': Upper triangle of A contains elementary reflectors
- from DSYTRD;
- = 'L': Lower triangle of A contains elementary reflectors
- from DSYTRD.
-
- TRANS (input) CHARACTER*1
- = 'N': No transpose, apply Q;
- = 'T': Transpose, apply Q**T.
-
- M (input) INTEGER
- The number of rows of the matrix C. M >= 0.
-
- N (input) INTEGER
- The number of columns of the matrix C. N >= 0.
-
- A (input) DOUBLE PRECISION array, dimension
- (LDA,M) if SIDE = 'L'
- (LDA,N) if SIDE = 'R'
- The vectors which define the elementary reflectors, as
- returned by DSYTRD.
-
- LDA (input) INTEGER
- The leading dimension of the array A.
- LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'.
-
- TAU (input) DOUBLE PRECISION array, dimension
- (M-1) if SIDE = 'L'
- (N-1) if SIDE = 'R'
- TAU(i) must contain the scalar factor of the elementary
- reflector H(i), as returned by DSYTRD.
-
- C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
- On entry, the M-by-N matrix C.
- On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
-
- LDC (input) INTEGER
- The leading dimension of the array C. LDC >= max(1,M).
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK.
- If SIDE = 'L', LWORK >= max(1,N);
- if SIDE = 'R', LWORK >= max(1,M).
- For optimum performance LWORK >= N*NB if SIDE = 'L', and
- LWORK >= M*NB if SIDE = 'R', where NB is the optimal
- blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- =====================================================================
-
-
- Test the input arguments
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --tau;
- c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
- c__ -= c_offset;
- --work;
-
- /* Function Body */
- *info = 0;
- left = lsame_(side, "L");
- upper = lsame_(uplo, "U");
- lquery = *lwork == -1;
-
-/* NQ is the order of Q and NW is the minimum dimension of WORK */
-
- if (left) {
- nq = *m;
- nw = *n;
- } else {
- nq = *n;
- nw = *m;
- }
- if ((! left && ! lsame_(side, "R"))) {
- *info = -1;
- } else if ((! upper && ! lsame_(uplo, "L"))) {
- *info = -2;
- } else if ((! lsame_(trans, "N") && ! lsame_(trans,
- "T"))) {
- *info = -3;
- } else if (*m < 0) {
- *info = -4;
- } else if (*n < 0) {
- *info = -5;
- } else if (*lda < max(1,nq)) {
- *info = -7;
- } else if (*ldc < max(1,*m)) {
- *info = -10;
- } else if ((*lwork < max(1,nw) && ! lquery)) {
- *info = -12;
- }
-
- if (*info == 0) {
- if (upper) {
- if (left) {
-/* Writing concatenation */
- i__1[0] = 1, a__1[0] = side;
- i__1[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
- i__2 = *m - 1;
- i__3 = *m - 1;
- nb = ilaenv_(&c__1, "DORMQL", ch__1, &i__2, n, &i__3, &c_n1, (
- ftnlen)6, (ftnlen)2);
- } else {
-/* Writing concatenation */
- i__1[0] = 1, a__1[0] = side;
- i__1[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
- i__2 = *n - 1;
- i__3 = *n - 1;
- nb = ilaenv_(&c__1, "DORMQL", ch__1, m, &i__2, &i__3, &c_n1, (
- ftnlen)6, (ftnlen)2);
- }
- } else {
- if (left) {
-/* Writing concatenation */
- i__1[0] = 1, a__1[0] = side;
- i__1[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
- i__2 = *m - 1;
- i__3 = *m - 1;
- nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__2, n, &i__3, &c_n1, (
- ftnlen)6, (ftnlen)2);
- } else {
-/* Writing concatenation */
- i__1[0] = 1, a__1[0] = side;
- i__1[1] = 1, a__1[1] = trans;
- s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
- i__2 = *n - 1;
- i__3 = *n - 1;
- nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__2, &i__3, &c_n1, (
- ftnlen)6, (ftnlen)2);
- }
- }
- lwkopt = max(1,nw) * nb;
- work[1] = (doublereal) lwkopt;
- }
-
- if (*info != 0) {
- i__2 = -(*info);
- xerbla_("DORMTR", &i__2);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*m == 0 || *n == 0 || nq == 1) {
- work[1] = 1.;
- return 0;
- }
-
- if (left) {
- mi = *m - 1;
- ni = *n;
- } else {
- mi = *m;
- ni = *n - 1;
- }
-
- if (upper) {
-
-/* Q was determined by a call to DSYTRD with UPLO = 'U' */
-
- i__2 = nq - 1;
- dormql_(side, trans, &mi, &ni, &i__2, &a[((a_dim1) << (1)) + 1], lda,
- &tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo);
- } else {
-
-/* Q was determined by a call to DSYTRD with UPLO = 'L' */
-
- if (left) {
- i1 = 2;
- i2 = 1;
- } else {
- i1 = 1;
- i2 = 2;
- }
- i__2 = nq - 1;
- dormqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], &
- c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
- }
- work[1] = (doublereal) lwkopt;
- return 0;
-
-/* End of DORMTR */
-
-} /* dormtr_ */
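-
-/*
-   Illustration only: the role dormtr_ plays in a symmetric eigensolver.
-   After dsytrd_ (referenced in the docstring above; assumed standard
-   signature, not defined here) reduces A to tridiagonal T with
-   A = Q*T*Q**T, the eigenvectors Z of T are mapped back to eigenvectors
-   of A by the single call Z := Q*Z below.  Workspace sizing and the
-   tridiagonal eigensolve itself are elided.
-*/
-
-static void map_back_eigenvectors(char *uplo, integer n, doublereal *a,
-	integer lda, doublereal *tau, doublereal *z__, integer ldz,
-	doublereal *work, integer lwork, integer *info)
-{
-    /* Z (n-by-n, eigenvectors of T) is overwritten with Q*Z; the
-       reflectors defining Q were left in A and TAU by dsytrd_. */
-    dormtr_("Left", uplo, "No transpose", &n, &n, a, &lda, tau,
-	    z__, &ldz, work, &lwork, info);
-}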
-
-/* Subroutine */ int dpotf2_(char *uplo, integer *n, doublereal *a, integer *
- lda, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static integer j;
- static doublereal ajj;
- extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
- integer *);
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, integer *);
- static logical upper;
- extern /* Subroutine */ int xerbla_(char *, integer *);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- February 29, 1992
-
-
- Purpose
- =======
-
- DPOTF2 computes the Cholesky factorization of a real symmetric
- positive definite matrix A.
-
- The factorization has the form
- A = U' * U , if UPLO = 'U', or
- A = L * L', if UPLO = 'L',
- where U is an upper triangular matrix and L is lower triangular.
-
- This is the unblocked version of the algorithm, calling Level 2 BLAS.
-
- Arguments
- =========
-
- UPLO (input) CHARACTER*1
- Specifies whether the upper or lower triangular part of the
- symmetric matrix A is stored.
- = 'U': Upper triangular
- = 'L': Lower triangular
-
- N (input) INTEGER
- The order of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the symmetric matrix A. If UPLO = 'U', the leading
- n by n upper triangular part of A contains the upper
- triangular part of the matrix A, and the strictly lower
- triangular part of A is not referenced. If UPLO = 'L', the
- leading n by n lower triangular part of A contains the lower
- triangular part of the matrix A, and the strictly upper
- triangular part of A is not referenced.
-
- On exit, if INFO = 0, the factor U or L from the Cholesky
- factorization A = U'*U or A = L*L'.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
- positive definite, and the factorization could not be
- completed.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- *info = 0;
- upper = lsame_(uplo, "U");
- if ((! upper && ! lsame_(uplo, "L"))) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*n)) {
- *info = -4;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DPOTF2", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
-
- if (upper) {
-
-/* Compute the Cholesky factorization A = U'*U. */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
-
-/* Compute U(J,J) and test for non-positive-definiteness. */
-
- i__2 = j - 1;
- ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j * a_dim1 + 1], &c__1,
- &a[j * a_dim1 + 1], &c__1);
- if (ajj <= 0.) {
- a[j + j * a_dim1] = ajj;
- goto L30;
- }
- ajj = sqrt(ajj);
- a[j + j * a_dim1] = ajj;
-
-/* Compute elements J+1:N of row J. */
-
- if (j < *n) {
- i__2 = j - 1;
- i__3 = *n - j;
- dgemv_("Transpose", &i__2, &i__3, &c_b151, &a[(j + 1) *
- a_dim1 + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b15, &
- a[j + (j + 1) * a_dim1], lda);
- i__2 = *n - j;
- d__1 = 1. / ajj;
- dscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda);
- }
-/* L10: */
- }
- } else {
-
-/* Compute the Cholesky factorization A = L*L'. */
-
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
-
-/* Compute L(J,J) and test for non-positive-definiteness. */
-
- i__2 = j - 1;
- ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j + a_dim1], lda, &a[j
- + a_dim1], lda);
- if (ajj <= 0.) {
- a[j + j * a_dim1] = ajj;
- goto L30;
- }
- ajj = sqrt(ajj);
- a[j + j * a_dim1] = ajj;
-
-/* Compute elements J+1:N of column J. */
-
- if (j < *n) {
- i__2 = *n - j;
- i__3 = j - 1;
- dgemv_("No transpose", &i__2, &i__3, &c_b151, &a[j + 1 +
- a_dim1], lda, &a[j + a_dim1], lda, &c_b15, &a[j + 1 +
- j * a_dim1], &c__1);
- i__2 = *n - j;
- d__1 = 1. / ajj;
- dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
- }
-/* L20: */
- }
- }
- goto L40;
-
-L30:
- *info = j;
-
-L40:
- return 0;
-
-/* End of DPOTF2 */
-
-} /* dpotf2_ */
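-
-/*
-   Illustration only: a plain-C restatement of the lower-triangular branch
-   above for a small column-major matrix.  Same recurrence as dpotf2_,
-   namely L(j,j) = sqrt(a(j,j) - sum of squares of L(j,1:j-1)) followed by
-   forming and scaling column j, just without the ddot_/dgemv_/dscal_
-   calls.
-*/
-
-#include <math.h>
-
-static int cholesky_lower_unblocked(int n, double *a, int lda)
-{
-    int i, j, k;
-
-    for (j = 0; j < n; ++j) {
-	double ajj = a[j + j * lda];
-	for (k = 0; k < j; ++k)
-	    ajj -= a[j + k * lda] * a[j + k * lda];
-	if (ajj <= 0.)
-	    return j + 1;		/* mirrors INFO > 0 above */
-	ajj = sqrt(ajj);
-	a[j + j * lda] = ajj;
-	for (i = j + 1; i < n; ++i) {	/* elements J+1:N of column J */
-	    double s = a[i + j * lda];
-	    for (k = 0; k < j; ++k)
-		s -= a[i + k * lda] * a[j + k * lda];
-	    a[i + j * lda] = s / ajj;
-	}
-    }
-    return 0;
-}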
-
-/* Subroutine */ int dpotrf_(char *uplo, integer *n, doublereal *a, integer *
- lda, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
-
- /* Local variables */
- static integer j, jb, nb;
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
- integer *, integer *, doublereal *, doublereal *, integer *,
- doublereal *, integer *);
- static logical upper;
- extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *,
- doublereal *, doublereal *, integer *, doublereal *, doublereal *,
- integer *), dpotf2_(char *, integer *,
- doublereal *, integer *, integer *), xerbla_(char *,
- integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- March 31, 1993
-
-
- Purpose
- =======
-
- DPOTRF computes the Cholesky factorization of a real symmetric
- positive definite matrix A.
-
- The factorization has the form
- A = U**T * U, if UPLO = 'U', or
- A = L * L**T, if UPLO = 'L',
- where U is an upper triangular matrix and L is lower triangular.
-
- This is the block version of the algorithm, calling Level 3 BLAS.
-
- Arguments
- =========
-
- UPLO (input) CHARACTER*1
- = 'U': Upper triangle of A is stored;
- = 'L': Lower triangle of A is stored.
-
- N (input) INTEGER
- The order of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the symmetric matrix A. If UPLO = 'U', the leading
- N-by-N upper triangular part of A contains the upper
- triangular part of the matrix A, and the strictly lower
- triangular part of A is not referenced. If UPLO = 'L', the
- leading N-by-N lower triangular part of A contains the lower
- triangular part of the matrix A, and the strictly upper
- triangular part of A is not referenced.
-
- On exit, if INFO = 0, the factor U or L from the Cholesky
- factorization A = U**T*U or A = L*L**T.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
- > 0: if INFO = i, the leading minor of order i is not
- positive definite, and the factorization could not be
- completed.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
-
- /* Function Body */
- *info = 0;
- upper = lsame_(uplo, "U");
- if ((! upper && ! lsame_(uplo, "L"))) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*n)) {
- *info = -4;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DPOTRF", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
-
-/* Determine the block size for this environment. */
-
- nb = ilaenv_(&c__1, "DPOTRF", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
- ftnlen)1);
- if (nb <= 1 || nb >= *n) {
-
-/* Use unblocked code. */
-
- dpotf2_(uplo, n, &a[a_offset], lda, info);
- } else {
-
-/* Use blocked code. */
-
- if (upper) {
-
-/* Compute the Cholesky factorization A = U'*U. */
-
- i__1 = *n;
- i__2 = nb;
- for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
-
-/*
- Update and factorize the current diagonal block and test
- for non-positive-definiteness.
-
- Computing MIN
-*/
- i__3 = nb, i__4 = *n - j + 1;
- jb = min(i__3,i__4);
- i__3 = j - 1;
- dsyrk_("Upper", "Transpose", &jb, &i__3, &c_b151, &a[j *
- a_dim1 + 1], lda, &c_b15, &a[j + j * a_dim1], lda);
- dpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info);
- if (*info != 0) {
- goto L30;
- }
- if (j + jb <= *n) {
-
-/* Compute the current block row. */
-
- i__3 = *n - j - jb + 1;
- i__4 = j - 1;
- dgemm_("Transpose", "No transpose", &jb, &i__3, &i__4, &
- c_b151, &a[j * a_dim1 + 1], lda, &a[(j + jb) *
- a_dim1 + 1], lda, &c_b15, &a[j + (j + jb) *
- a_dim1], lda);
- i__3 = *n - j - jb + 1;
- dtrsm_("Left", "Upper", "Transpose", "Non-unit", &jb, &
- i__3, &c_b15, &a[j + j * a_dim1], lda, &a[j + (j
- + jb) * a_dim1], lda);
- }
-/* L10: */
- }
-
- } else {
-
-/* Compute the Cholesky factorization A = L*L'. */
-
- i__2 = *n;
- i__1 = nb;
- for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
-
-/*
- Update and factorize the current diagonal block and test
- for non-positive-definiteness.
-
- Computing MIN
-*/
- i__3 = nb, i__4 = *n - j + 1;
- jb = min(i__3,i__4);
- i__3 = j - 1;
- dsyrk_("Lower", "No transpose", &jb, &i__3, &c_b151, &a[j +
- a_dim1], lda, &c_b15, &a[j + j * a_dim1], lda);
- dpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info);
- if (*info != 0) {
- goto L30;
- }
- if (j + jb <= *n) {
-
-/* Compute the current block column. */
-
- i__3 = *n - j - jb + 1;
- i__4 = j - 1;
- dgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &
- c_b151, &a[j + jb + a_dim1], lda, &a[j + a_dim1],
- lda, &c_b15, &a[j + jb + j * a_dim1], lda);
- i__3 = *n - j - jb + 1;
- dtrsm_("Right", "Lower", "Transpose", "Non-unit", &i__3, &
- jb, &c_b15, &a[j + j * a_dim1], lda, &a[j + jb +
- j * a_dim1], lda);
- }
-/* L20: */
- }
- }
- }
- goto L40;
-
-L30:
- *info = *info + j - 1;
-
-L40:
- return 0;
-
-/* End of DPOTRF */
-
-} /* dpotrf_ */
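-
-/*
-   Illustration only: calling the blocked factorization above on a small
-   symmetric positive definite matrix.  For n this small, ilaenv_ returns
-   a block size nb >= n and dpotrf_ falls through to a single dpotf2_
-   call; the identical call drives the Level-3 blocked path for large n.
-*/
-
-static int dpotrf_example(void)
-{
-    /* Column-major 3x3 SPD matrix; only the lower triangle is read. */
-    integer n = 3, lda = 3, info;
-    doublereal a[9] = {
-	4., 2., 2.,	/* column 1 */
-	2., 5., 3.,	/* column 2 (strict upper part unreferenced) */
-	2., 3., 6.	/* column 3 */
-    };
-
-    dpotrf_("Lower", &n, a, &lda, &info);
-    /* info == 0: the lower triangle of a now holds L with A = L*L'. */
-    return (int) info;
-}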
-
-/* Subroutine */ int dstedc_(char *compz, integer *n, doublereal *d__,
- doublereal *e, doublereal *z__, integer *ldz, doublereal *work,
- integer *lwork, integer *iwork, integer *liwork, integer *info)
-{
- /* System generated locals */
- integer z_dim1, z_offset, i__1, i__2;
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double log(doublereal);
- integer pow_ii(integer *, integer *);
- double sqrt(doublereal);
-
- /* Local variables */
- static integer i__, j, k, m;
- static doublereal p;
- static integer ii, end, lgn;
- static doublereal eps, tiny;
- extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
- integer *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, integer *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static integer lwmin;
- extern /* Subroutine */ int dlaed0_(integer *, integer *, integer *,
- doublereal *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, doublereal *, integer *, integer *);
- static integer start;
-
- extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, integer *, doublereal *,
- integer *, integer *), dlacpy_(char *, integer *, integer
- *, doublereal *, integer *, doublereal *, integer *),
- dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
- doublereal *, integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- extern /* Subroutine */ int xerbla_(char *, integer *);
- extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
- extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
- integer *), dlasrt_(char *, integer *, doublereal *, integer *);
- static integer liwmin, icompz;
- extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
- doublereal *, doublereal *, integer *, doublereal *, integer *);
- static doublereal orgnrm;
- static logical lquery;
- static integer smlsiz, dtrtrw, storez;
-
-
-/*
- -- LAPACK driver routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DSTEDC computes all eigenvalues and, optionally, eigenvectors of a
- symmetric tridiagonal matrix using the divide and conquer method.
- The eigenvectors of a full or band real symmetric matrix can also be
- found if DSYTRD or DSPTRD or DSBTRD has been used to reduce this
- matrix to tridiagonal form.
-
- This code makes very mild assumptions about floating point
- arithmetic. It will work on machines with a guard digit in
- add/subtract, or on those binary machines without guard digits
- which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
- It could conceivably fail on hexadecimal or decimal machines
- without guard digits, but we know of none. See DLAED3 for details.
-
- Arguments
- =========
-
- COMPZ (input) CHARACTER*1
- = 'N': Compute eigenvalues only.
- = 'I': Compute eigenvectors of tridiagonal matrix also.
- = 'V': Compute eigenvectors of original dense symmetric
- matrix also. On entry, Z contains the orthogonal
- matrix used to reduce the original matrix to
- tridiagonal form.
-
- N (input) INTEGER
- The dimension of the symmetric tridiagonal matrix. N >= 0.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the diagonal elements of the tridiagonal matrix.
- On exit, if INFO = 0, the eigenvalues in ascending order.
-
- E (input/output) DOUBLE PRECISION array, dimension (N-1)
- On entry, the subdiagonal elements of the tridiagonal matrix.
- On exit, E has been destroyed.
-
- Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N)
- On entry, if COMPZ = 'V', then Z contains the orthogonal
- matrix used in the reduction to tridiagonal form.
- On exit, if INFO = 0, then if COMPZ = 'V', Z contains the
- orthonormal eigenvectors of the original symmetric matrix,
- and if COMPZ = 'I', Z contains the orthonormal eigenvectors
- of the symmetric tridiagonal matrix.
- If COMPZ = 'N', then Z is not referenced.
-
- LDZ (input) INTEGER
- The leading dimension of the array Z. LDZ >= 1.
- If eigenvectors are desired, then LDZ >= max(1,N).
-
- WORK (workspace/output) DOUBLE PRECISION array,
- dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK.
- If COMPZ = 'N' or N <= 1 then LWORK must be at least 1.
- If COMPZ = 'V' and N > 1 then LWORK must be at least
- ( 1 + 3*N + 2*N*lg N + 3*N**2 ),
- where lg( N ) = smallest integer k such
- that 2**k >= N.
- If COMPZ = 'I' and N > 1 then LWORK must be at least
- ( 1 + 4*N + N**2 ).
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- IWORK (workspace/output) INTEGER array, dimension (LIWORK)
- On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
-
- LIWORK (input) INTEGER
- The dimension of the array IWORK.
- If COMPZ = 'N' or N <= 1 then LIWORK must be at least 1.
- If COMPZ = 'V' and N > 1 then LIWORK must be at least
- ( 6 + 6*N + 5*N*lg N ).
- If COMPZ = 'I' and N > 1 then LIWORK must be at least
- ( 3 + 5*N ).
-
- If LIWORK = -1, then a workspace query is assumed; the
- routine only calculates the optimal size of the IWORK array,
- returns this value as the first entry of the IWORK array, and
- no error message related to LIWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value.
- > 0: The algorithm failed to compute an eigenvalue while
- working on the submatrix lying in rows and columns
- INFO/(N+1) through mod(INFO,N+1).
-
- Further Details
- ===============
-
- Based on contributions by
- Jeff Rutter, Computer Science Division, University of California
- at Berkeley, USA
- Modified by Francoise Tisseur, University of Tennessee.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --e;
- z_dim1 = *ldz;
- z_offset = 1 + z_dim1 * 1;
- z__ -= z_offset;
- --work;
- --iwork;
-
- /* Function Body */
- *info = 0;
- lquery = *lwork == -1 || *liwork == -1;
-
- if (lsame_(compz, "N")) {
- icompz = 0;
- } else if (lsame_(compz, "V")) {
- icompz = 1;
- } else if (lsame_(compz, "I")) {
- icompz = 2;
- } else {
- icompz = -1;
- }
- if (*n <= 1 || icompz <= 0) {
- liwmin = 1;
- lwmin = 1;
- } else {
- lgn = (integer) (log((doublereal) (*n)) / log(2.));
- if (pow_ii(&c__2, &lgn) < *n) {
- ++lgn;
- }
- if (pow_ii(&c__2, &lgn) < *n) {
- ++lgn;
- }
- if (icompz == 1) {
-/* Computing 2nd power */
- i__1 = *n;
- lwmin = *n * 3 + 1 + ((*n) << (1)) * lgn + i__1 * i__1 * 3;
- liwmin = *n * 6 + 6 + *n * 5 * lgn;
- } else if (icompz == 2) {
-/* Computing 2nd power */
- i__1 = *n;
- lwmin = ((*n) << (2)) + 1 + i__1 * i__1;
- liwmin = *n * 5 + 3;
- }
- }
- if (icompz < 0) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) {
- *info = -6;
- } else if ((*lwork < lwmin && ! lquery)) {
- *info = -8;
- } else if ((*liwork < liwmin && ! lquery)) {
- *info = -10;
- }
-
- if (*info == 0) {
- work[1] = (doublereal) lwmin;
- iwork[1] = liwmin;
- }
-
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DSTEDC", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
- if (*n == 1) {
- if (icompz != 0) {
- z__[z_dim1 + 1] = 1.;
- }
- return 0;
- }
-
- smlsiz = ilaenv_(&c__9, "DSTEDC", " ", &c__0, &c__0, &c__0, &c__0, (
- ftnlen)6, (ftnlen)1);
-
-/*
- If the following conditional clause is removed, then the routine
- will use the Divide and Conquer routine to compute only the
- eigenvalues, which requires (3N + 3N**2) real workspace and
- (2 + 5N + 2N lg(N)) integer workspace.
- Since on many architectures DSTERF is much faster than any other
- algorithm for finding eigenvalues only, it is used here
- as the default.
-
- If COMPZ = 'N', use DSTERF to compute the eigenvalues.
-*/
-
- if (icompz == 0) {
- dsterf_(n, &d__[1], &e[1], info);
- return 0;
- }
-
-/*
- If N is smaller than the minimum divide size (SMLSIZ+1), then
- solve the problem with another solver.
-*/
-
- if (*n <= smlsiz) {
- if (icompz == 0) {
- dsterf_(n, &d__[1], &e[1], info);
- return 0;
- } else if (icompz == 2) {
- dsteqr_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1],
- info);
- return 0;
- } else {
- dsteqr_("V", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1],
- info);
- return 0;
- }
- }
-
-/*
- If COMPZ = 'V', the Z matrix must be stored elsewhere for later
- use.
-*/
-
- if (icompz == 1) {
- storez = *n * *n + 1;
- } else {
- storez = 1;
- }
-
- if (icompz == 2) {
- dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz);
- }
-
-/* Scale. */
-
- orgnrm = dlanst_("M", n, &d__[1], &e[1]);
- if (orgnrm == 0.) {
- return 0;
- }
-
- eps = EPSILON;
-
- start = 1;
-
-/* while ( START <= N ) */
-
-L10:
- if (start <= *n) {
-
-/*
- Let END be the position of the next subdiagonal entry such that
- E( END ) <= TINY or END = N if no such subdiagonal exists. The
- matrix identified by the elements between START and END
- constitutes an independent sub-problem.
-*/
-
- end = start;
-L20:
- if (end < *n) {
- tiny = eps * sqrt((d__1 = d__[end], abs(d__1))) * sqrt((d__2 =
- d__[end + 1], abs(d__2)));
- if ((d__1 = e[end], abs(d__1)) > tiny) {
- ++end;
- goto L20;
- }
- }
-
-/* (Sub) Problem determined. Compute its size and solve it. */
-
- m = end - start + 1;
- if (m == 1) {
- start = end + 1;
- goto L10;
- }
- if (m > smlsiz) {
- *info = smlsiz;
-
-/* Scale. */
-
- orgnrm = dlanst_("M", &m, &d__[start], &e[start]);
- dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &m, &c__1, &d__[start]
- , &m, info);
- i__1 = m - 1;
- i__2 = m - 1;
- dlascl_("G", &c__0, &c__0, &orgnrm, &c_b15, &i__1, &c__1, &e[
- start], &i__2, info);
-
- if (icompz == 1) {
- dtrtrw = 1;
- } else {
- dtrtrw = start;
- }
- dlaed0_(&icompz, n, &m, &d__[start], &e[start], &z__[dtrtrw +
- start * z_dim1], ldz, &work[1], n, &work[storez], &iwork[
- 1], info);
- if (*info != 0) {
- *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info % (m
- + 1) + start - 1;
- return 0;
- }
-
-/* Scale back. */
-
- dlascl_("G", &c__0, &c__0, &c_b15, &orgnrm, &m, &c__1, &d__[start]
- , &m, info);
-
- } else {
- if (icompz == 1) {
-
-/*
- Since QR won't update a Z matrix which is larger than the
- length of D, we must solve the sub-problem in a workspace and
- then multiply back into Z.
-*/
-
- dsteqr_("I", &m, &d__[start], &e[start], &work[1], &m, &work[
- m * m + 1], info);
- dlacpy_("A", n, &m, &z__[start * z_dim1 + 1], ldz, &work[
- storez], n);
- dgemm_("N", "N", n, &m, &m, &c_b15, &work[storez], ldz, &work[
- 1], &m, &c_b29, &z__[start * z_dim1 + 1], ldz);
- } else if (icompz == 2) {
- dsteqr_("I", &m, &d__[start], &e[start], &z__[start + start *
- z_dim1], ldz, &work[1], info);
- } else {
- dsterf_(&m, &d__[start], &e[start], info);
- }
- if (*info != 0) {
- *info = start * (*n + 1) + end;
- return 0;
- }
- }
-
- start = end + 1;
- goto L10;
- }
-
-/*
- endwhile
-
- If the problem split any number of times, then the eigenvalues
- will not be properly ordered. Here we permute the eigenvalues
- (and the associated eigenvectors) into ascending order.
-*/
-
- if (m != *n) {
- if (icompz == 0) {
-
-/* Use Quick Sort */
-
- dlasrt_("I", n, &d__[1], info);
-
- } else {
-
-/* Use Selection Sort to minimize swaps of eigenvectors */
-
- i__1 = *n;
- for (ii = 2; ii <= i__1; ++ii) {
- i__ = ii - 1;
- k = i__;
- p = d__[i__];
- i__2 = *n;
- for (j = ii; j <= i__2; ++j) {
- if (d__[j] < p) {
- k = j;
- p = d__[j];
- }
-/* L30: */
- }
- if (k != i__) {
- d__[k] = d__[i__];
- d__[i__] = p;
- dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1
- + 1], &c__1);
- }
-/* L40: */
- }
- }
- }
-
- work[1] = (doublereal) lwmin;
- iwork[1] = liwmin;
-
- return 0;
-
-/* End of DSTEDC */
-
-} /* dstedc_ */
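-
-/*
-   Illustration only: driving dstedc_ with COMPZ = 'I' on a small
-   symmetric tridiagonal matrix.  The workspace follows the docstring
-   bounds for this mode (LWORK >= 1 + 4*N + N**2, LIWORK >= 3 + 5*N).
-   For n = 4 the code above falls through to dsteqr_ since n <= SMLSIZ,
-   but the sizing and calling pattern are identical for large n.
-*/
-
-static int dstedc_example(void)
-{
-    integer n = 4, ldz = 4, info;
-    integer lwork = 1 + 4 * 4 + 4 * 4;	/* 1 + 4n + n^2 = 33 */
-    integer liwork = 3 + 5 * 4;		/* 3 + 5n = 23       */
-    doublereal d__[4] = { 2., 2., 2., 2. };	/* diagonal    */
-    doublereal e[3] = { -1., -1., -1. };	/* subdiagonal */
-    doublereal z__[4 * 4], work[33];
-    integer iwork[23];
-
-    dstedc_("I", &n, d__, e, z__, &ldz, work, &lwork, iwork, &liwork,
-	    &info);
-    /* On success, d__ holds the eigenvalues in ascending order and the
-       columns of z__ the corresponding orthonormal eigenvectors. */
-    return (int) info;
-}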
-
-/* Subroutine */ int dsteqr_(char *compz, integer *n, doublereal *d__,
- doublereal *e, doublereal *z__, integer *ldz, doublereal *work,
- integer *info)
-{
- /* System generated locals */
- integer z_dim1, z_offset, i__1, i__2;
- doublereal d__1, d__2;
-
- /* Builtin functions */
- double sqrt(doublereal), d_sign(doublereal *, doublereal *);
-
- /* Local variables */
- static doublereal b, c__, f, g;
- static integer i__, j, k, l, m;
- static doublereal p, r__, s;
- static integer l1, ii, mm, lm1, mm1, nm1;
- static doublereal rt1, rt2, eps;
- static integer lsv;
- static doublereal tst, eps2;
- static integer lend, jtot;
- extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal
- *, doublereal *, doublereal *);
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
- integer *, doublereal *, doublereal *, doublereal *, integer *);
- static doublereal anorm;
- extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
- doublereal *, integer *), dlaev2_(doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *, doublereal *,
- doublereal *);
- static integer lendm1, lendp1;
-
- static integer iscale;
- extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, integer *, doublereal *,
- integer *, integer *), dlaset_(char *, integer *, integer
- *, doublereal *, doublereal *, doublereal *, integer *);
- static doublereal safmin;
- extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
- doublereal *, doublereal *, doublereal *);
- static doublereal safmax;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
- extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
- integer *);
- static integer lendsv;
- static doublereal ssfmin;
- static integer nmaxit, icompz;
- static doublereal ssfmax;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- September 30, 1994
-
-
- Purpose
- =======
-
- DSTEQR computes all eigenvalues and, optionally, eigenvectors of a
- symmetric tridiagonal matrix using the implicit QL or QR method.
- The eigenvectors of a full or band symmetric matrix can also be found
- if DSYTRD or DSPTRD or DSBTRD has been used to reduce this matrix to
- tridiagonal form.
-
- Arguments
- =========
-
- COMPZ (input) CHARACTER*1
- = 'N': Compute eigenvalues only.
- = 'V': Compute eigenvalues and eigenvectors of the original
- symmetric matrix. On entry, Z must contain the
- orthogonal matrix used to reduce the original matrix
- to tridiagonal form.
- = 'I': Compute eigenvalues and eigenvectors of the
- tridiagonal matrix. Z is initialized to the identity
- matrix.
-
- N (input) INTEGER
- The order of the matrix. N >= 0.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the diagonal elements of the tridiagonal matrix.
- On exit, if INFO = 0, the eigenvalues in ascending order.
-
- E (input/output) DOUBLE PRECISION array, dimension (N-1)
- On entry, the (n-1) subdiagonal elements of the tridiagonal
- matrix.
- On exit, E has been destroyed.
-
- Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N)
- On entry, if COMPZ = 'V', then Z contains the orthogonal
- matrix used in the reduction to tridiagonal form.
- On exit, if INFO = 0, then if COMPZ = 'V', Z contains the
- orthonormal eigenvectors of the original symmetric matrix,
- and if COMPZ = 'I', Z contains the orthonormal eigenvectors
- of the symmetric tridiagonal matrix.
- If COMPZ = 'N', then Z is not referenced.
-
- LDZ (input) INTEGER
- The leading dimension of the array Z. LDZ >= 1, and if
- eigenvectors are desired, then LDZ >= max(1,N).
-
- WORK (workspace) DOUBLE PRECISION array, dimension (max(1,2*N-2))
- If COMPZ = 'N', then WORK is not referenced.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
- > 0: the algorithm has failed to find all the eigenvalues in
- a total of 30*N iterations; if INFO = i, then i
- elements of E have not converged to zero; on exit, D
- and E contain the elements of a symmetric tridiagonal
- matrix which is orthogonally similar to the original
- matrix.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --d__;
- --e;
- z_dim1 = *ldz;
- z_offset = 1 + z_dim1 * 1;
- z__ -= z_offset;
- --work;
-
- /* Function Body */
- *info = 0;
-
- if (lsame_(compz, "N")) {
- icompz = 0;
- } else if (lsame_(compz, "V")) {
- icompz = 1;
- } else if (lsame_(compz, "I")) {
- icompz = 2;
- } else {
- icompz = -1;
- }
- if (icompz < 0) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) {
- *info = -6;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DSTEQR", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
-
- if (*n == 1) {
- if (icompz == 2) {
- z__[z_dim1 + 1] = 1.;
- }
- return 0;
- }
-
-/* Determine the unit roundoff and over/underflow thresholds. */
-
- eps = EPSILON;
-/* Computing 2nd power */
- d__1 = eps;
- eps2 = d__1 * d__1;
- safmin = SAFEMINIMUM;
- safmax = 1. / safmin;
- ssfmax = sqrt(safmax) / 3.;
- ssfmin = sqrt(safmin) / eps2;
-
-/*
- Compute the eigenvalues and eigenvectors of the tridiagonal
- matrix.
-*/
-
- if (icompz == 2) {
- dlaset_("Full", n, n, &c_b29, &c_b15, &z__[z_offset], ldz);
- }
-
- nmaxit = *n * 30;
- jtot = 0;
-
-/*
- Determine where the matrix splits and choose QL or QR iteration
- for each block, according to whether top or bottom diagonal
- element is smaller.
-*/
-
- l1 = 1;
- nm1 = *n - 1;
-
-L10:
- if (l1 > *n) {
- goto L160;
- }
- if (l1 > 1) {
- e[l1 - 1] = 0.;
- }
- if (l1 <= nm1) {
- i__1 = nm1;
- for (m = l1; m <= i__1; ++m) {
- tst = (d__1 = e[m], abs(d__1));
- if (tst == 0.) {
- goto L30;
- }
- if (tst <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m
- + 1], abs(d__2))) * eps) {
- e[m] = 0.;
- goto L30;
- }
-/* L20: */
- }
- }
- m = *n;
-
-L30:
- l = l1;
- lsv = l;
- lend = m;
- lendsv = lend;
- l1 = m + 1;
- if (lend == l) {
- goto L10;
- }
-
-/* Scale submatrix in rows and columns L to LEND */
-
- i__1 = lend - l + 1;
- anorm = dlanst_("I", &i__1, &d__[l], &e[l]);
- iscale = 0;
- if (anorm == 0.) {
- goto L10;
- }
- if (anorm > ssfmax) {
- iscale = 1;
- i__1 = lend - l + 1;
- dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
- info);
- i__1 = lend - l;
- dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
- info);
- } else if (anorm < ssfmin) {
- iscale = 2;
- i__1 = lend - l + 1;
- dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
- info);
- i__1 = lend - l;
- dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
- info);
- }
-
-/* Choose between QL and QR iteration */
-
- if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) {
- lend = lsv;
- l = lendsv;
- }
-
- if (lend > l) {
-
-/*
- QL Iteration
-
- Look for small subdiagonal element.
-*/
-
-L40:
- if (l != lend) {
- lendm1 = lend - 1;
- i__1 = lendm1;
- for (m = l; m <= i__1; ++m) {
-/* Computing 2nd power */
- d__2 = (d__1 = e[m], abs(d__1));
- tst = d__2 * d__2;
- if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
- + 1], abs(d__2)) + safmin) {
- goto L60;
- }
-/* L50: */
- }
- }
-
- m = lend;
-
-L60:
- if (m < lend) {
- e[m] = 0.;
- }
- p = d__[l];
- if (m == l) {
- goto L80;
- }
-
-/*
-    If remaining matrix is 2-by-2, use DLAE2 or DLAEV2
- to compute its eigensystem.
-*/
-
- if (m == l + 1) {
- if (icompz > 0) {
- dlaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
- work[l] = c__;
- work[*n - 1 + l] = s;
- dlasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
- z__[l * z_dim1 + 1], ldz);
- } else {
- dlae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
- }
- d__[l] = rt1;
- d__[l + 1] = rt2;
- e[l] = 0.;
- l += 2;
- if (l <= lend) {
- goto L40;
- }
- goto L140;
- }
-
- if (jtot == nmaxit) {
- goto L140;
- }
- ++jtot;
-
-/* Form shift. */
-
- g = (d__[l + 1] - p) / (e[l] * 2.);
- r__ = dlapy2_(&g, &c_b15);
- g = d__[m] - p + e[l] / (g + d_sign(&r__, &g));
-
- s = 1.;
- c__ = 1.;
- p = 0.;
-
-/* Inner loop */
-
- mm1 = m - 1;
- i__1 = l;
- for (i__ = mm1; i__ >= i__1; --i__) {
- f = s * e[i__];
- b = c__ * e[i__];
- dlartg_(&g, &f, &c__, &s, &r__);
- if (i__ != m - 1) {
- e[i__ + 1] = r__;
- }
- g = d__[i__ + 1] - p;
- r__ = (d__[i__] - g) * s + c__ * 2. * b;
- p = s * r__;
- d__[i__ + 1] = g + p;
- g = c__ * r__ - b;
-
-/* If eigenvectors are desired, then save rotations. */
-
- if (icompz > 0) {
- work[i__] = c__;
- work[*n - 1 + i__] = -s;
- }
-
-/* L70: */
- }
-
-/* If eigenvectors are desired, then apply saved rotations. */
-
- if (icompz > 0) {
- mm = m - l + 1;
- dlasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l
- * z_dim1 + 1], ldz);
- }
-
- d__[l] -= p;
- e[l] = g;
- goto L40;
-
-/* Eigenvalue found. */
-
-L80:
- d__[l] = p;
-
- ++l;
- if (l <= lend) {
- goto L40;
- }
- goto L140;
-
- } else {
-
-/*
- QR Iteration
-
- Look for small superdiagonal element.
-*/
-
-L90:
- if (l != lend) {
- lendp1 = lend + 1;
- i__1 = lendp1;
- for (m = l; m >= i__1; --m) {
-/* Computing 2nd power */
- d__2 = (d__1 = e[m - 1], abs(d__1));
- tst = d__2 * d__2;
- if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
- - 1], abs(d__2)) + safmin) {
- goto L110;
- }
-/* L100: */
- }
- }
-
- m = lend;
-
-L110:
- if (m > lend) {
- e[m - 1] = 0.;
- }
- p = d__[l];
- if (m == l) {
- goto L130;
- }
-
-/*
-    If remaining matrix is 2-by-2, use DLAE2 or DLAEV2
- to compute its eigensystem.
-*/
-
- if (m == l - 1) {
- if (icompz > 0) {
-                dlaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s);
- work[m] = c__;
- work[*n - 1 + m] = s;
- dlasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
- z__[(l - 1) * z_dim1 + 1], ldz);
- } else {
- dlae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
- }
- d__[l - 1] = rt1;
- d__[l] = rt2;
- e[l - 1] = 0.;
- l += -2;
- if (l >= lend) {
- goto L90;
- }
- goto L140;
- }
-
- if (jtot == nmaxit) {
- goto L140;
- }
- ++jtot;
-
-/* Form shift. */
-
- g = (d__[l - 1] - p) / (e[l - 1] * 2.);
- r__ = dlapy2_(&g, &c_b15);
- g = d__[m] - p + e[l - 1] / (g + d_sign(&r__, &g));
-
- s = 1.;
- c__ = 1.;
- p = 0.;
-
-/* Inner loop */
-
- lm1 = l - 1;
- i__1 = lm1;
- for (i__ = m; i__ <= i__1; ++i__) {
- f = s * e[i__];
- b = c__ * e[i__];
- dlartg_(&g, &f, &c__, &s, &r__);
- if (i__ != m) {
- e[i__ - 1] = r__;
- }
- g = d__[i__] - p;
- r__ = (d__[i__ + 1] - g) * s + c__ * 2. * b;
- p = s * r__;
- d__[i__] = g + p;
- g = c__ * r__ - b;
-
-/* If eigenvectors are desired, then save rotations. */
-
- if (icompz > 0) {
- work[i__] = c__;
- work[*n - 1 + i__] = s;
- }
-
-/* L120: */
- }
-
-/* If eigenvectors are desired, then apply saved rotations. */
-
- if (icompz > 0) {
- mm = l - m + 1;
- dlasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m
- * z_dim1 + 1], ldz);
- }
-
- d__[l] -= p;
- e[lm1] = g;
- goto L90;
-
-/* Eigenvalue found. */
-
-L130:
- d__[l] = p;
-
- --l;
- if (l >= lend) {
- goto L90;
- }
- goto L140;
-
- }
-
-/* Undo scaling if necessary */
-
-L140:
- if (iscale == 1) {
- i__1 = lendsv - lsv + 1;
- dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
- n, info);
- i__1 = lendsv - lsv;
- dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n,
- info);
- } else if (iscale == 2) {
- i__1 = lendsv - lsv + 1;
- dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
- n, info);
- i__1 = lendsv - lsv;
- dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n,
- info);
- }
-
-/*
- Check for no convergence to an eigenvalue after a total
- of N*MAXIT iterations.
-*/
-
- if (jtot < nmaxit) {
- goto L10;
- }
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if (e[i__] != 0.) {
- ++(*info);
- }
-/* L150: */
- }
- goto L190;
-
-/* Order eigenvalues and eigenvectors. */
-
-L160:
- if (icompz == 0) {
-
-/* Use Quick Sort */
-
- dlasrt_("I", n, &d__[1], info);
-
- } else {
-
-/* Use Selection Sort to minimize swaps of eigenvectors */
-
- i__1 = *n;
- for (ii = 2; ii <= i__1; ++ii) {
- i__ = ii - 1;
- k = i__;
- p = d__[i__];
- i__2 = *n;
- for (j = ii; j <= i__2; ++j) {
- if (d__[j] < p) {
- k = j;
- p = d__[j];
- }
-/* L170: */
- }
- if (k != i__) {
- d__[k] = d__[i__];
- d__[i__] = p;
- dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
- &c__1);
- }
-/* L180: */
- }
- }
-
-L190:
- return 0;
-
-/* End of DSTEQR */
-
-} /* dsteqr_ */
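-
-/*
-   Editor's sketch (not part of the generated source): a hypothetical call
-   to dsteqr_ above with COMPZ = 'I', computing both eigenvalues and
-   eigenvectors of a 3-by-3 tridiagonal matrix.  WORK is sized max(1,2*N-2)
-   as the documentation above requires; example_dsteqr_ is an invented name.
-*/
-static int example_dsteqr_(void)
-{
-    integer n = 3, ldz = 3, info = 0;
-    doublereal d[3] = { 2., 2., 2. };   /* diagonal */
-    doublereal e[2] = { 1., 1. };       /* subdiagonal */
-    doublereal z[9];                    /* receives the eigenvectors */
-    doublereal work[4];                 /* max(1, 2*N-2) */
-
-    dsteqr_("I", &n, d, e, z, &ldz, work, &info);
-    return (int) info;   /* 0 on success; d ascending, z column eigenvectors */
-}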
-
-/* Subroutine */ int dsterf_(integer *n, doublereal *d__, doublereal *e,
- integer *info)
-{
- /* System generated locals */
- integer i__1;
- doublereal d__1, d__2, d__3;
-
- /* Builtin functions */
- double sqrt(doublereal), d_sign(doublereal *, doublereal *);
-
- /* Local variables */
- static doublereal c__;
- static integer i__, l, m;
- static doublereal p, r__, s;
- static integer l1;
- static doublereal bb, rt1, rt2, eps, rte;
- static integer lsv;
- static doublereal eps2, oldc;
- static integer lend, jtot;
- extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal
- *, doublereal *, doublereal *);
- static doublereal gamma, alpha, sigma, anorm;
-
- static integer iscale;
- extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, integer *, doublereal *,
- integer *, integer *);
- static doublereal oldgam, safmin;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static doublereal safmax;
- extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
- extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
- integer *);
- static integer lendsv;
- static doublereal ssfmin;
- static integer nmaxit;
- static doublereal ssfmax;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DSTERF computes all eigenvalues of a symmetric tridiagonal matrix
- using the Pal-Walker-Kahan variant of the QL or QR algorithm.
-
- Arguments
- =========
-
- N (input) INTEGER
- The order of the matrix. N >= 0.
-
- D (input/output) DOUBLE PRECISION array, dimension (N)
- On entry, the n diagonal elements of the tridiagonal matrix.
- On exit, if INFO = 0, the eigenvalues in ascending order.
-
- E (input/output) DOUBLE PRECISION array, dimension (N-1)
- On entry, the (n-1) subdiagonal elements of the tridiagonal
- matrix.
- On exit, E has been destroyed.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
- > 0: the algorithm failed to find all of the eigenvalues in
- a total of 30*N iterations; if INFO = i, then i
- elements of E have not converged to zero.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- --e;
- --d__;
-
- /* Function Body */
- *info = 0;
-
-/* Quick return if possible */
-
- if (*n < 0) {
- *info = -1;
- i__1 = -(*info);
- xerbla_("DSTERF", &i__1);
- return 0;
- }
- if (*n <= 1) {
- return 0;
- }
-
-/* Determine the unit roundoff for this environment. */
-
- eps = EPSILON;
-/* Computing 2nd power */
- d__1 = eps;
- eps2 = d__1 * d__1;
- safmin = SAFEMINIMUM;
- safmax = 1. / safmin;
- ssfmax = sqrt(safmax) / 3.;
- ssfmin = sqrt(safmin) / eps2;
-
-/* Compute the eigenvalues of the tridiagonal matrix. */
-
- nmaxit = *n * 30;
- sigma = 0.;
- jtot = 0;
-
-/*
- Determine where the matrix splits and choose QL or QR iteration
- for each block, according to whether top or bottom diagonal
- element is smaller.
-*/
-
- l1 = 1;
-
-L10:
- if (l1 > *n) {
- goto L170;
- }
- if (l1 > 1) {
- e[l1 - 1] = 0.;
- }
- i__1 = *n - 1;
- for (m = l1; m <= i__1; ++m) {
- if ((d__3 = e[m], abs(d__3)) <= sqrt((d__1 = d__[m], abs(d__1))) *
- sqrt((d__2 = d__[m + 1], abs(d__2))) * eps) {
- e[m] = 0.;
- goto L30;
- }
-/* L20: */
- }
- m = *n;
-
-L30:
- l = l1;
- lsv = l;
- lend = m;
- lendsv = lend;
- l1 = m + 1;
- if (lend == l) {
- goto L10;
- }
-
-/* Scale submatrix in rows and columns L to LEND */
-
- i__1 = lend - l + 1;
- anorm = dlanst_("I", &i__1, &d__[l], &e[l]);
- iscale = 0;
- if (anorm > ssfmax) {
- iscale = 1;
- i__1 = lend - l + 1;
- dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
- info);
- i__1 = lend - l;
- dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
- info);
- } else if (anorm < ssfmin) {
- iscale = 2;
- i__1 = lend - l + 1;
- dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
- info);
- i__1 = lend - l;
- dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
- info);
- }
-
- i__1 = lend - 1;
- for (i__ = l; i__ <= i__1; ++i__) {
-/* Computing 2nd power */
- d__1 = e[i__];
- e[i__] = d__1 * d__1;
-/* L40: */
- }
-
-/* Choose between QL and QR iteration */
-
- if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) {
- lend = lsv;
- l = lendsv;
- }
-
- if (lend >= l) {
-
-/*
- QL Iteration
-
- Look for small subdiagonal element.
-*/
-
-L50:
- if (l != lend) {
- i__1 = lend - 1;
- for (m = l; m <= i__1; ++m) {
- if ((d__2 = e[m], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m
- + 1], abs(d__1))) {
- goto L70;
- }
-/* L60: */
- }
- }
- m = lend;
-
-L70:
- if (m < lend) {
- e[m] = 0.;
- }
- p = d__[l];
- if (m == l) {
- goto L90;
- }
-
-/*
- If remaining matrix is 2 by 2, use DLAE2 to compute its
- eigenvalues.
-*/
-
- if (m == l + 1) {
- rte = sqrt(e[l]);
- dlae2_(&d__[l], &rte, &d__[l + 1], &rt1, &rt2);
- d__[l] = rt1;
- d__[l + 1] = rt2;
- e[l] = 0.;
- l += 2;
- if (l <= lend) {
- goto L50;
- }
- goto L150;
- }
-
- if (jtot == nmaxit) {
- goto L150;
- }
- ++jtot;
-
-/* Form shift. */
-
- rte = sqrt(e[l]);
- sigma = (d__[l + 1] - p) / (rte * 2.);
- r__ = dlapy2_(&sigma, &c_b15);
- sigma = p - rte / (sigma + d_sign(&r__, &sigma));
-
- c__ = 1.;
- s = 0.;
- gamma = d__[m] - sigma;
- p = gamma * gamma;
-
-/* Inner loop */
-
- i__1 = l;
- for (i__ = m - 1; i__ >= i__1; --i__) {
- bb = e[i__];
- r__ = p + bb;
- if (i__ != m - 1) {
- e[i__ + 1] = s * r__;
- }
- oldc = c__;
- c__ = p / r__;
- s = bb / r__;
- oldgam = gamma;
- alpha = d__[i__];
- gamma = c__ * (alpha - sigma) - s * oldgam;
- d__[i__ + 1] = oldgam + (alpha - gamma);
- if (c__ != 0.) {
- p = gamma * gamma / c__;
- } else {
- p = oldc * bb;
- }
-/* L80: */
- }
-
- e[l] = s * p;
- d__[l] = sigma + gamma;
- goto L50;
-
-/* Eigenvalue found. */
-
-L90:
- d__[l] = p;
-
- ++l;
- if (l <= lend) {
- goto L50;
- }
- goto L150;
-
- } else {
-
-/*
- QR Iteration
-
- Look for small superdiagonal element.
-*/
-
-L100:
- i__1 = lend + 1;
- for (m = l; m >= i__1; --m) {
- if ((d__2 = e[m - 1], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m
- - 1], abs(d__1))) {
- goto L120;
- }
-/* L110: */
- }
- m = lend;
-
-L120:
- if (m > lend) {
- e[m - 1] = 0.;
- }
- p = d__[l];
- if (m == l) {
- goto L140;
- }
-
-/*
- If remaining matrix is 2 by 2, use DLAE2 to compute its
- eigenvalues.
-*/
-
- if (m == l - 1) {
- rte = sqrt(e[l - 1]);
- dlae2_(&d__[l], &rte, &d__[l - 1], &rt1, &rt2);
- d__[l] = rt1;
- d__[l - 1] = rt2;
- e[l - 1] = 0.;
- l += -2;
- if (l >= lend) {
- goto L100;
- }
- goto L150;
- }
-
- if (jtot == nmaxit) {
- goto L150;
- }
- ++jtot;
-
-/* Form shift. */
-
- rte = sqrt(e[l - 1]);
- sigma = (d__[l - 1] - p) / (rte * 2.);
- r__ = dlapy2_(&sigma, &c_b15);
- sigma = p - rte / (sigma + d_sign(&r__, &sigma));
-
- c__ = 1.;
- s = 0.;
- gamma = d__[m] - sigma;
- p = gamma * gamma;
-
-/* Inner loop */
-
- i__1 = l - 1;
- for (i__ = m; i__ <= i__1; ++i__) {
- bb = e[i__];
- r__ = p + bb;
- if (i__ != m) {
- e[i__ - 1] = s * r__;
- }
- oldc = c__;
- c__ = p / r__;
- s = bb / r__;
- oldgam = gamma;
- alpha = d__[i__ + 1];
- gamma = c__ * (alpha - sigma) - s * oldgam;
- d__[i__] = oldgam + (alpha - gamma);
- if (c__ != 0.) {
- p = gamma * gamma / c__;
- } else {
- p = oldc * bb;
- }
-/* L130: */
- }
-
- e[l - 1] = s * p;
- d__[l] = sigma + gamma;
- goto L100;
-
-/* Eigenvalue found. */
-
-L140:
- d__[l] = p;
-
- --l;
- if (l >= lend) {
- goto L100;
- }
- goto L150;
-
- }
-
-/* Undo scaling if necessary */
-
-L150:
- if (iscale == 1) {
- i__1 = lendsv - lsv + 1;
- dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
- n, info);
- }
- if (iscale == 2) {
- i__1 = lendsv - lsv + 1;
- dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
- n, info);
- }
-
-/*
- Check for no convergence to an eigenvalue after a total
- of N*MAXIT iterations.
-*/
-
- if (jtot < nmaxit) {
- goto L10;
- }
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
- if (e[i__] != 0.) {
- ++(*info);
- }
-/* L160: */
- }
- goto L180;
-
-/* Sort eigenvalues in increasing order. */
-
-L170:
- dlasrt_("I", n, &d__[1], info);
-
-L180:
- return 0;
-
-/* End of DSTERF */
-
-} /* dsterf_ */
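-
-/*
-   Editor's sketch (not part of the generated source): dsterf_ above needs
-   no workspace at all, which is the point of the Pal-Walker-Kahan variant;
-   a hypothetical eigenvalues-only call looks like this (example_dsterf_ is
-   an invented name).
-*/
-static int example_dsterf_(void)
-{
-    integer n = 4, info = 0;
-    doublereal d[4] = { 1., 2., 3., 4. };   /* diagonal */
-    doublereal e[3] = { .5, .5, .5 };       /* subdiagonal, destroyed on exit */
-
-    dsterf_(&n, d, e, &info);
-    return (int) info;   /* on success d holds the eigenvalues, ascending */
-}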
-
-/* Subroutine */ int dsyevd_(char *jobz, char *uplo, integer *n, doublereal *
- a, integer *lda, doublereal *w, doublereal *work, integer *lwork,
- integer *iwork, integer *liwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
- doublereal d__1;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static doublereal eps;
- static integer inde;
- static doublereal anrm, rmin, rmax;
- static integer lopt;
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *);
- static doublereal sigma;
- extern logical lsame_(char *, char *);
- static integer iinfo, lwmin, liopt;
- static logical lower, wantz;
- static integer indwk2, llwrk2;
-
- static integer iscale;
- extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, integer *, doublereal *,
- integer *, integer *), dstedc_(char *, integer *,
- doublereal *, doublereal *, doublereal *, integer *, doublereal *,
- integer *, integer *, integer *, integer *), dlacpy_(
- char *, integer *, integer *, doublereal *, integer *, doublereal
- *, integer *);
- static doublereal safmin;
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static doublereal bignum;
- static integer indtau;
- extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
- integer *);
- extern doublereal dlansy_(char *, char *, integer *, doublereal *,
- integer *, doublereal *);
- static integer indwrk, liwmin;
- extern /* Subroutine */ int dormtr_(char *, char *, char *, integer *,
- integer *, doublereal *, integer *, doublereal *, doublereal *,
- integer *, doublereal *, integer *, integer *), dsytrd_(char *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, doublereal *, doublereal *, integer *,
- integer *);
- static integer llwork;
- static doublereal smlnum;
- static logical lquery;
-
-
-/*
- -- LAPACK driver routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DSYEVD computes all eigenvalues and, optionally, eigenvectors of a
- real symmetric matrix A. If eigenvectors are desired, it uses a
- divide and conquer algorithm.
-
- The divide and conquer algorithm makes very mild assumptions about
- floating point arithmetic. It will work on machines with a guard
- digit in add/subtract, or on those binary machines without guard
- digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
- Cray-2. It could conceivably fail on hexadecimal or decimal machines
- without guard digits, but we know of none.
-
-    Because it makes heavy use of Level 3 BLAS, DSYEVD needs N**2 more
-    workspace than DSYEVX.
-
- Arguments
- =========
-
- JOBZ (input) CHARACTER*1
- = 'N': Compute eigenvalues only;
- = 'V': Compute eigenvalues and eigenvectors.
-
- UPLO (input) CHARACTER*1
- = 'U': Upper triangle of A is stored;
- = 'L': Lower triangle of A is stored.
-
- N (input) INTEGER
- The order of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA, N)
- On entry, the symmetric matrix A. If UPLO = 'U', the
- leading N-by-N upper triangular part of A contains the
- upper triangular part of the matrix A. If UPLO = 'L',
- the leading N-by-N lower triangular part of A contains
- the lower triangular part of the matrix A.
- On exit, if JOBZ = 'V', then if INFO = 0, A contains the
- orthonormal eigenvectors of the matrix A.
- If JOBZ = 'N', then on exit the lower triangle (if UPLO='L')
- or the upper triangle (if UPLO='U') of A, including the
- diagonal, is destroyed.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- W (output) DOUBLE PRECISION array, dimension (N)
- If INFO = 0, the eigenvalues in ascending order.
-
- WORK (workspace/output) DOUBLE PRECISION array,
- dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK.
- If N <= 1, LWORK must be at least 1.
- If JOBZ = 'N' and N > 1, LWORK must be at least 2*N+1.
- If JOBZ = 'V' and N > 1, LWORK must be at least
- 1 + 6*N + 2*N**2.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- IWORK (workspace/output) INTEGER array, dimension (LIWORK)
- On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
-
- LIWORK (input) INTEGER
- The dimension of the array IWORK.
- If N <= 1, LIWORK must be at least 1.
- If JOBZ = 'N' and N > 1, LIWORK must be at least 1.
- If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N.
-
- If LIWORK = -1, then a workspace query is assumed; the
- routine only calculates the optimal size of the IWORK array,
- returns this value as the first entry of the IWORK array, and
- no error message related to LIWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
- > 0: if INFO = i, the algorithm failed to converge; i
- off-diagonal elements of an intermediate tridiagonal
- form did not converge to zero.
-
- Further Details
- ===============
-
- Based on contributions by
- Jeff Rutter, Computer Science Division, University of California
- at Berkeley, USA
- Modified by Francoise Tisseur, University of Tennessee.
-
- =====================================================================
-
-
- Test the input parameters.
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --w;
- --work;
- --iwork;
-
- /* Function Body */
- wantz = lsame_(jobz, "V");
- lower = lsame_(uplo, "L");
- lquery = *lwork == -1 || *liwork == -1;
-
- *info = 0;
- if (*n <= 1) {
- liwmin = 1;
- lwmin = 1;
- lopt = lwmin;
- liopt = liwmin;
- } else {
- if (wantz) {
- liwmin = *n * 5 + 3;
-/* Computing 2nd power */
- i__1 = *n;
- lwmin = *n * 6 + 1 + ((i__1 * i__1) << (1));
- } else {
- liwmin = 1;
- lwmin = ((*n) << (1)) + 1;
- }
- lopt = lwmin;
- liopt = liwmin;
- }
- if (! (wantz || lsame_(jobz, "N"))) {
- *info = -1;
- } else if (! (lower || lsame_(uplo, "U"))) {
- *info = -2;
- } else if (*n < 0) {
- *info = -3;
- } else if (*lda < max(1,*n)) {
- *info = -5;
- } else if ((*lwork < lwmin && ! lquery)) {
- *info = -8;
- } else if ((*liwork < liwmin && ! lquery)) {
- *info = -10;
- }
-
- if (*info == 0) {
- work[1] = (doublereal) lopt;
- iwork[1] = liopt;
- }
-
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DSYEVD", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- return 0;
- }
-
- if (*n == 1) {
- w[1] = a[a_dim1 + 1];
- if (wantz) {
- a[a_dim1 + 1] = 1.;
- }
- return 0;
- }
-
-/* Get machine constants. */
-
- safmin = SAFEMINIMUM;
- eps = PRECISION;
- smlnum = safmin / eps;
- bignum = 1. / smlnum;
- rmin = sqrt(smlnum);
- rmax = sqrt(bignum);
-
-/* Scale matrix to allowable range, if necessary. */
-
- anrm = dlansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
- iscale = 0;
- if ((anrm > 0. && anrm < rmin)) {
- iscale = 1;
- sigma = rmin / anrm;
- } else if (anrm > rmax) {
- iscale = 1;
- sigma = rmax / anrm;
- }
- if (iscale == 1) {
- dlascl_(uplo, &c__0, &c__0, &c_b15, &sigma, n, n, &a[a_offset], lda,
- info);
- }
-
-/* Call DSYTRD to reduce symmetric matrix to tridiagonal form. */
-
- inde = 1;
- indtau = inde + *n;
- indwrk = indtau + *n;
- llwork = *lwork - indwrk + 1;
- indwk2 = indwrk + *n * *n;
- llwrk2 = *lwork - indwk2 + 1;
-
- dsytrd_(uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], &
- work[indwrk], &llwork, &iinfo);
- lopt = (integer) (((*n) << (1)) + work[indwrk]);
-
-/*
- For eigenvalues only, call DSTERF. For eigenvectors, first call
- DSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the
- tridiagonal matrix, then call DORMTR to multiply it by the
- Householder transformations stored in A.
-*/
-
- if (! wantz) {
- dsterf_(n, &w[1], &work[inde], info);
- } else {
- dstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], &
- llwrk2, &iwork[1], liwork, info);
- dormtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[
- indwrk], n, &work[indwk2], &llwrk2, &iinfo);
- dlacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda);
-/*
- Computing MAX
- Computing 2nd power
-*/
- i__3 = *n;
- i__1 = lopt, i__2 = *n * 6 + 1 + ((i__3 * i__3) << (1));
- lopt = max(i__1,i__2);
- }
-
-/* If matrix was scaled, then rescale eigenvalues appropriately. */
-
- if (iscale == 1) {
- d__1 = 1. / sigma;
- dscal_(n, &d__1, &w[1], &c__1);
- }
-
- work[1] = (doublereal) lopt;
- iwork[1] = liopt;
-
- return 0;
-
-/* End of DSYEVD */
-
-} /* dsyevd_ */
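-
-/*
-   Editor's sketch (not part of the generated source): the LWORK = -1 /
-   LIWORK = -1 workspace query documented above, followed by the real call.
-   Fixed-size buffers stand in for the allocations a real caller would make
-   from work[0] and iwork[0]; example_dsyevd_ is an invented name.
-*/
-static int example_dsyevd_(void)
-{
-    integer n = 2, lda = 2, info = 0;
-    integer lwork = -1, liwork = -1;        /* first pass: workspace query */
-    doublereal a[4] = { 2., 1., 1., 2. };   /* symmetric, column-major */
-    doublereal w[2];
-    doublereal work[32];
-    integer iwork[16];
-
-    /* Query pass: only the optimal sizes come back, in work[0]/iwork[0]. */
-    dsyevd_("V", "U", &n, a, &lda, w, work, &lwork, iwork, &liwork, &info);
-    if (info != 0) {
-        return (int) info;
-    }
-
-    /* Real pass; 32 and 16 exceed the documented minima for N = 2
-       (1 + 6*N + 2*N**2 = 21 and 3 + 5*N = 13). */
-    lwork = 32;
-    liwork = 16;
-    dsyevd_("V", "U", &n, a, &lda, w, work, &lwork, iwork, &liwork, &info);
-    return (int) info;   /* w: ascending eigenvalues; a: eigenvectors */
-}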
-
-/* Subroutine */ int dsytd2_(char *uplo, integer *n, doublereal *a, integer *
- lda, doublereal *d__, doublereal *e, doublereal *tau, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__;
- extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
- integer *);
- static doublereal taui;
- extern /* Subroutine */ int dsyr2_(char *, integer *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- integer *);
- static doublereal alpha;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
- integer *, doublereal *, integer *);
- static logical upper;
- extern /* Subroutine */ int dsymv_(char *, integer *, doublereal *,
- doublereal *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, integer *), dlarfg_(integer *, doublereal *,
- doublereal *, integer *, doublereal *), xerbla_(char *, integer *
- );
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- October 31, 1992
-
-
- Purpose
- =======
-
- DSYTD2 reduces a real symmetric matrix A to symmetric tridiagonal
- form T by an orthogonal similarity transformation: Q' * A * Q = T.
-
- Arguments
- =========
-
- UPLO (input) CHARACTER*1
- Specifies whether the upper or lower triangular part of the
- symmetric matrix A is stored:
- = 'U': Upper triangular
- = 'L': Lower triangular
-
- N (input) INTEGER
- The order of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the symmetric matrix A. If UPLO = 'U', the leading
- n-by-n upper triangular part of A contains the upper
- triangular part of the matrix A, and the strictly lower
- triangular part of A is not referenced. If UPLO = 'L', the
- leading n-by-n lower triangular part of A contains the lower
- triangular part of the matrix A, and the strictly upper
- triangular part of A is not referenced.
- On exit, if UPLO = 'U', the diagonal and first superdiagonal
- of A are overwritten by the corresponding elements of the
- tridiagonal matrix T, and the elements above the first
- superdiagonal, with the array TAU, represent the orthogonal
- matrix Q as a product of elementary reflectors; if UPLO
- = 'L', the diagonal and first subdiagonal of A are over-
- written by the corresponding elements of the tridiagonal
- matrix T, and the elements below the first subdiagonal, with
- the array TAU, represent the orthogonal matrix Q as a product
- of elementary reflectors. See Further Details.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- D (output) DOUBLE PRECISION array, dimension (N)
- The diagonal elements of the tridiagonal matrix T:
- D(i) = A(i,i).
-
- E (output) DOUBLE PRECISION array, dimension (N-1)
- The off-diagonal elements of the tridiagonal matrix T:
- E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
-
- TAU (output) DOUBLE PRECISION array, dimension (N-1)
- The scalar factors of the elementary reflectors (see Further
- Details).
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
-
- Further Details
- ===============
-
- If UPLO = 'U', the matrix Q is represented as a product of elementary
- reflectors
-
- Q = H(n-1) . . . H(2) H(1).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
- A(1:i-1,i+1), and tau in TAU(i).
-
- If UPLO = 'L', the matrix Q is represented as a product of elementary
- reflectors
-
- Q = H(1) H(2) . . . H(n-1).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
- and tau in TAU(i).
-
- The contents of A on exit are illustrated by the following examples
- with n = 5:
-
- if UPLO = 'U': if UPLO = 'L':
-
- ( d e v2 v3 v4 ) ( d )
- ( d e v3 v4 ) ( e d )
- ( d e v4 ) ( v1 e d )
- ( d e ) ( v1 v2 e d )
- ( d ) ( v1 v2 v3 e d )
-
- where d and e denote diagonal and off-diagonal elements of T, and vi
- denotes an element of the vector defining H(i).
-
- =====================================================================
-
-
- Test the input parameters
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --d__;
- --e;
- --tau;
-
- /* Function Body */
- *info = 0;
- upper = lsame_(uplo, "U");
- if ((! upper && ! lsame_(uplo, "L"))) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*n)) {
- *info = -4;
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DSYTD2", &i__1);
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n <= 0) {
- return 0;
- }
-
- if (upper) {
-
-/* Reduce the upper triangle of A */
-
- for (i__ = *n - 1; i__ >= 1; --i__) {
-
-/*
- Generate elementary reflector H(i) = I - tau * v * v'
- to annihilate A(1:i-1,i+1)
-*/
-
- dlarfg_(&i__, &a[i__ + (i__ + 1) * a_dim1], &a[(i__ + 1) * a_dim1
- + 1], &c__1, &taui);
- e[i__] = a[i__ + (i__ + 1) * a_dim1];
-
- if (taui != 0.) {
-
-/* Apply H(i) from both sides to A(1:i,1:i) */
-
- a[i__ + (i__ + 1) * a_dim1] = 1.;
-
-/* Compute x := tau * A * v storing x in TAU(1:i) */
-
-                dsymv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) *
-                        a_dim1 + 1], &c__1, &c_b29, &tau[1], &c__1);
-
-/* Compute w := x - 1/2 * tau * (x'*v) * v */
-
- alpha = taui * -.5 * ddot_(&i__, &tau[1], &c__1, &a[(i__ + 1)
- * a_dim1 + 1], &c__1);
- daxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[
- 1], &c__1);
-
-/*
- Apply the transformation as a rank-2 update:
- A := A - v * w' - w * v'
-*/
-
- dsyr2_(uplo, &i__, &c_b151, &a[(i__ + 1) * a_dim1 + 1], &c__1,
- &tau[1], &c__1, &a[a_offset], lda);
-
- a[i__ + (i__ + 1) * a_dim1] = e[i__];
- }
- d__[i__ + 1] = a[i__ + 1 + (i__ + 1) * a_dim1];
- tau[i__] = taui;
-/* L10: */
- }
- d__[1] = a[a_dim1 + 1];
- } else {
-
-/* Reduce the lower triangle of A */
-
- i__1 = *n - 1;
- for (i__ = 1; i__ <= i__1; ++i__) {
-
-/*
- Generate elementary reflector H(i) = I - tau * v * v'
- to annihilate A(i+2:n,i)
-*/
-
- i__2 = *n - i__;
-/* Computing MIN */
- i__3 = i__ + 2;
- dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ *
- a_dim1], &c__1, &taui);
- e[i__] = a[i__ + 1 + i__ * a_dim1];
-
- if (taui != 0.) {
-
-/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
-
- a[i__ + 1 + i__ * a_dim1] = 1.;
-
-/* Compute x := tau * A * v storing x in TAU(i:n-1) */
-
- i__2 = *n - i__;
- dsymv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1],
- lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b29, &tau[
- i__], &c__1);
-
-/* Compute w := x - 1/2 * tau * (x'*v) * v */
-
- i__2 = *n - i__;
- alpha = taui * -.5 * ddot_(&i__2, &tau[i__], &c__1, &a[i__ +
- 1 + i__ * a_dim1], &c__1);
- i__2 = *n - i__;
- daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
- i__], &c__1);
-
-/*
- Apply the transformation as a rank-2 update:
- A := A - v * w' - w * v'
-*/
-
- i__2 = *n - i__;
- dsyr2_(uplo, &i__2, &c_b151, &a[i__ + 1 + i__ * a_dim1], &
- c__1, &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) *
- a_dim1], lda);
-
- a[i__ + 1 + i__ * a_dim1] = e[i__];
- }
- d__[i__] = a[i__ + i__ * a_dim1];
- tau[i__] = taui;
-/* L20: */
- }
- d__[*n] = a[*n + *n * a_dim1];
- }
-
- return 0;
-
-/* End of DSYTD2 */
-
-} /* dsytd2_ */
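-
-/*
-   Editor's sketch (not part of the generated source): the elementary
-   reflector H = I - tau * v * v' from the Further Details section above,
-   applied to a vector x in place.  Plain C with no BLAS, purely to make
-   the algebra concrete; example_apply_reflector is an invented name.
-*/
-static void example_apply_reflector(integer n, doublereal tau,
-        const doublereal *v, doublereal *x)
-{
-    integer i;
-    doublereal vtx = 0.;
-
-    for (i = 0; i < n; ++i) {   /* vtx = v' * x */
-        vtx += v[i] * x[i];
-    }
-    for (i = 0; i < n; ++i) {   /* x := x - tau * (v'*x) * v = H*x */
-        x[i] -= tau * vtx * v[i];
-    }
-}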
-
-/* Subroutine */ int dsytrd_(char *uplo, integer *n, doublereal *a, integer *
- lda, doublereal *d__, doublereal *e, doublereal *tau, doublereal *
- work, integer *lwork, integer *info)
-{
- /* System generated locals */
- integer a_dim1, a_offset, i__1, i__2, i__3;
-
- /* Local variables */
- static integer i__, j, nb, kk, nx, iws;
- extern logical lsame_(char *, char *);
- static integer nbmin, iinfo;
- static logical upper;
- extern /* Subroutine */ int dsytd2_(char *, integer *, doublereal *,
- integer *, doublereal *, doublereal *, doublereal *, integer *), dsyr2k_(char *, char *, integer *, integer *, doublereal
- *, doublereal *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, integer *), dlatrd_(char *,
- integer *, integer *, doublereal *, integer *, doublereal *,
- doublereal *, doublereal *, integer *), xerbla_(char *,
- integer *);
- extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
- integer *, integer *, ftnlen, ftnlen);
- static integer ldwork, lwkopt;
- static logical lquery;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DSYTRD reduces a real symmetric matrix A to real symmetric
- tridiagonal form T by an orthogonal similarity transformation:
- Q**T * A * Q = T.
-
- Arguments
- =========
-
- UPLO (input) CHARACTER*1
- = 'U': Upper triangle of A is stored;
- = 'L': Lower triangle of A is stored.
-
- N (input) INTEGER
- The order of the matrix A. N >= 0.
-
- A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
- On entry, the symmetric matrix A. If UPLO = 'U', the leading
- N-by-N upper triangular part of A contains the upper
- triangular part of the matrix A, and the strictly lower
- triangular part of A is not referenced. If UPLO = 'L', the
- leading N-by-N lower triangular part of A contains the lower
- triangular part of the matrix A, and the strictly upper
- triangular part of A is not referenced.
- On exit, if UPLO = 'U', the diagonal and first superdiagonal
- of A are overwritten by the corresponding elements of the
- tridiagonal matrix T, and the elements above the first
- superdiagonal, with the array TAU, represent the orthogonal
- matrix Q as a product of elementary reflectors; if UPLO
- = 'L', the diagonal and first subdiagonal of A are over-
- written by the corresponding elements of the tridiagonal
- matrix T, and the elements below the first subdiagonal, with
- the array TAU, represent the orthogonal matrix Q as a product
- of elementary reflectors. See Further Details.
-
- LDA (input) INTEGER
- The leading dimension of the array A. LDA >= max(1,N).
-
- D (output) DOUBLE PRECISION array, dimension (N)
- The diagonal elements of the tridiagonal matrix T:
- D(i) = A(i,i).
-
- E (output) DOUBLE PRECISION array, dimension (N-1)
- The off-diagonal elements of the tridiagonal matrix T:
- E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
-
- TAU (output) DOUBLE PRECISION array, dimension (N-1)
- The scalar factors of the elementary reflectors (see Further
- Details).
-
- WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
- On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
-
- LWORK (input) INTEGER
- The dimension of the array WORK. LWORK >= 1.
- For optimum performance LWORK >= N*NB, where NB is the
- optimal blocksize.
-
- If LWORK = -1, then a workspace query is assumed; the routine
- only calculates the optimal size of the WORK array, returns
- this value as the first entry of the WORK array, and no error
- message related to LWORK is issued by XERBLA.
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- Further Details
- ===============
-
- If UPLO = 'U', the matrix Q is represented as a product of elementary
- reflectors
-
- Q = H(n-1) . . . H(2) H(1).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
- A(1:i-1,i+1), and tau in TAU(i).
-
- If UPLO = 'L', the matrix Q is represented as a product of elementary
- reflectors
-
- Q = H(1) H(2) . . . H(n-1).
-
- Each H(i) has the form
-
- H(i) = I - tau * v * v'
-
- where tau is a real scalar, and v is a real vector with
- v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
- and tau in TAU(i).
-
- The contents of A on exit are illustrated by the following examples
- with n = 5:
-
- if UPLO = 'U': if UPLO = 'L':
-
- ( d e v2 v3 v4 ) ( d )
- ( d e v3 v4 ) ( e d )
- ( d e v4 ) ( v1 e d )
- ( d e ) ( v1 v2 e d )
- ( d ) ( v1 v2 v3 e d )
-
- where d and e denote diagonal and off-diagonal elements of T, and vi
- denotes an element of the vector defining H(i).
-
- =====================================================================
-
-
- Test the input parameters
-*/
-
- /* Parameter adjustments */
- a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
- a -= a_offset;
- --d__;
- --e;
- --tau;
- --work;
-
- /* Function Body */
- *info = 0;
- upper = lsame_(uplo, "U");
- lquery = *lwork == -1;
- if ((! upper && ! lsame_(uplo, "L"))) {
- *info = -1;
- } else if (*n < 0) {
- *info = -2;
- } else if (*lda < max(1,*n)) {
- *info = -4;
- } else if ((*lwork < 1 && ! lquery)) {
- *info = -9;
- }
-
- if (*info == 0) {
-
-/* Determine the block size. */
-
- nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6,
- (ftnlen)1);
- lwkopt = *n * nb;
- work[1] = (doublereal) lwkopt;
- }
-
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DSYTRD", &i__1);
- return 0;
- } else if (lquery) {
- return 0;
- }
-
-/* Quick return if possible */
-
- if (*n == 0) {
- work[1] = 1.;
- return 0;
- }
-
- nx = *n;
- iws = 1;
- if ((nb > 1 && nb < *n)) {
-
-/*
- Determine when to cross over from blocked to unblocked code
- (last block is always handled by unblocked code).
-
- Computing MAX
-*/
- i__1 = nb, i__2 = ilaenv_(&c__3, "DSYTRD", uplo, n, &c_n1, &c_n1, &
- c_n1, (ftnlen)6, (ftnlen)1);
- nx = max(i__1,i__2);
- if (nx < *n) {
-
-/* Determine if workspace is large enough for blocked code. */
-
- ldwork = *n;
- iws = ldwork * nb;
- if (*lwork < iws) {
-
-/*
- Not enough workspace to use optimal NB: determine the
- minimum value of NB, and reduce NB or force use of
- unblocked code by setting NX = N.
-
- Computing MAX
-*/
- i__1 = *lwork / ldwork;
- nb = max(i__1,1);
- nbmin = ilaenv_(&c__2, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1,
- (ftnlen)6, (ftnlen)1);
- if (nb < nbmin) {
- nx = *n;
- }
- }
- } else {
- nx = *n;
- }
- } else {
- nb = 1;
- }
-
- if (upper) {
-
-/*
- Reduce the upper triangle of A.
- Columns 1:kk are handled by the unblocked method.
-*/
-
- kk = *n - (*n - nx + nb - 1) / nb * nb;
- i__1 = kk + 1;
- i__2 = -nb;
- for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
- i__2) {
-
-/*
- Reduce columns i:i+nb-1 to tridiagonal form and form the
- matrix W which is needed to update the unreduced part of
- the matrix
-*/
-
- i__3 = i__ + nb - 1;
- dlatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], &
- work[1], &ldwork);
-
-/*
- Update the unreduced submatrix A(1:i-1,1:i-1), using an
- update of the form: A := A - V*W' - W*V'
-*/
-
- i__3 = i__ - 1;
- dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b151, &a[i__ *
- a_dim1 + 1], lda, &work[1], &ldwork, &c_b15, &a[a_offset],
- lda);
-
-/*
- Copy superdiagonal elements back into A, and diagonal
- elements into D
-*/
-
- i__3 = i__ + nb - 1;
- for (j = i__; j <= i__3; ++j) {
- a[j - 1 + j * a_dim1] = e[j - 1];
- d__[j] = a[j + j * a_dim1];
-/* L10: */
- }
-/* L20: */
- }
-
-/* Use unblocked code to reduce the last or only block */
-
- dsytd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo);
- } else {
-
-/* Reduce the lower triangle of A */
-
- i__2 = *n - nx;
- i__1 = nb;
- for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
-
-/*
- Reduce columns i:i+nb-1 to tridiagonal form and form the
- matrix W which is needed to update the unreduced part of
- the matrix
-*/
-
- i__3 = *n - i__ + 1;
- dlatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], &
- tau[i__], &work[1], &ldwork);
-
-/*
- Update the unreduced submatrix A(i+ib:n,i+ib:n), using
- an update of the form: A := A - V*W' - W*V'
-*/
-
- i__3 = *n - i__ - nb + 1;
- dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b151, &a[i__ + nb +
- i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b15, &a[
- i__ + nb + (i__ + nb) * a_dim1], lda);
-
-/*
- Copy subdiagonal elements back into A, and diagonal
- elements into D
-*/
-
- i__3 = i__ + nb - 1;
- for (j = i__; j <= i__3; ++j) {
- a[j + 1 + j * a_dim1] = e[j];
- d__[j] = a[j + j * a_dim1];
-/* L30: */
- }
-/* L40: */
- }
-
-/* Use unblocked code to reduce the last or only block */
-
- i__1 = *n - i__ + 1;
- dsytd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__],
- &tau[i__], &iinfo);
- }
-
- work[1] = (doublereal) lwkopt;
- return 0;
-
-/* End of DSYTRD */
-
-} /* dsytrd_ */
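-
-/*
-   Editor's sketch (not part of the generated source): a hypothetical
-   two-step eigenvalue computation -- dsytrd_ above reduces a full
-   symmetric matrix to tridiagonal form, then dsterf_ finishes from the
-   resulting d/e pair.  LWORK is set to N * 64 on the assumption that 64
-   generously bounds the block size NB; example_full_eigenvalues_ is an
-   invented name.
-*/
-static int example_full_eigenvalues_(void)
-{
-    integer n = 3, lda = 3, lwork = 3 * 64, info = 0;
-    doublereal a[9] = { 4., 1., 2.,
-                        1., 5., 3.,
-                        2., 3., 6. };   /* symmetric, column-major */
-    doublereal d[3], e[2], tau[2];
-    doublereal work[3 * 64];
-
-    /* Reduce A to tridiagonal T: diagonal in d, off-diagonal in e. */
-    dsytrd_("L", &n, a, &lda, d, e, tau, work, &lwork, &info);
-    if (info != 0) {
-        return (int) info;
-    }
-
-    /* T is orthogonally similar to A, so its eigenvalues are A's. */
-    dsterf_(&n, d, e, &info);
-    return (int) info;
-}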
-
-/* Subroutine */ int dtrevc_(char *side, char *howmny, logical *select,
- integer *n, doublereal *t, integer *ldt, doublereal *vl, integer *
- ldvl, doublereal *vr, integer *ldvr, integer *mm, integer *m,
- doublereal *work, integer *info)
-{
- /* System generated locals */
- integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
- i__2, i__3;
- doublereal d__1, d__2, d__3, d__4;
-
- /* Builtin functions */
- double sqrt(doublereal);
-
- /* Local variables */
- static integer i__, j, k;
- static doublereal x[4] /* was [2][2] */;
- static integer j1, j2, n2, ii, ki, ip, is;
- static doublereal wi, wr, rec, ulp, beta, emax;
- static logical pair;
- extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
- integer *);
- static logical allv;
- static integer ierr;
- static doublereal unfl, ovfl, smin;
- static logical over;
- static doublereal vmax;
- static integer jnxt;
- extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
- integer *);
- static doublereal scale;
- extern logical lsame_(char *, char *);
- extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
- doublereal *, doublereal *, integer *, doublereal *, integer *,
- doublereal *, doublereal *, integer *);
- static doublereal remax;
- extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
- doublereal *, integer *);
- static logical leftv, bothv;
- extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
- integer *, doublereal *, integer *);
- static doublereal vcrit;
- static logical somev;
- static doublereal xnorm;
- extern /* Subroutine */ int dlaln2_(logical *, integer *, integer *,
- doublereal *, doublereal *, doublereal *, integer *, doublereal *,
- doublereal *, doublereal *, integer *, doublereal *, doublereal *
- , doublereal *, integer *, doublereal *, doublereal *, integer *),
- dlabad_(doublereal *, doublereal *);
-
- extern integer idamax_(integer *, doublereal *, integer *);
- extern /* Subroutine */ int xerbla_(char *, integer *);
- static doublereal bignum;
- static logical rightv;
- static doublereal smlnum;
-
-
-/*
- -- LAPACK routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- DTREVC computes some or all of the right and/or left eigenvectors of
- a real upper quasi-triangular matrix T.
-
- The right eigenvector x and the left eigenvector y of T corresponding
- to an eigenvalue w are defined by:
-
- T*x = w*x, y'*T = w*y'
-
- where y' denotes the conjugate transpose of the vector y.
-
- If all eigenvectors are requested, the routine may either return the
- matrices X and/or Y of right or left eigenvectors of T, or the
- products Q*X and/or Q*Y, where Q is an input orthogonal
-    matrix. If T was obtained from the real Schur factorization of an
- original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of
- right or left eigenvectors of A.
-
- T must be in Schur canonical form (as returned by DHSEQR), that is,
- block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each
- 2-by-2 diagonal block has its diagonal elements equal and its
- off-diagonal elements of opposite sign. Corresponding to each 2-by-2
- diagonal block is a complex conjugate pair of eigenvalues and
- eigenvectors; only one eigenvector of the pair is computed, namely
- the one corresponding to the eigenvalue with positive imaginary part.
-
- Arguments
- =========
-
- SIDE (input) CHARACTER*1
- = 'R': compute right eigenvectors only;
- = 'L': compute left eigenvectors only;
- = 'B': compute both right and left eigenvectors.
-
- HOWMNY (input) CHARACTER*1
- = 'A': compute all right and/or left eigenvectors;
- = 'B': compute all right and/or left eigenvectors,
- and backtransform them using the input matrices
- supplied in VR and/or VL;
- = 'S': compute selected right and/or left eigenvectors,
- specified by the logical array SELECT.
-
- SELECT (input/output) LOGICAL array, dimension (N)
- If HOWMNY = 'S', SELECT specifies the eigenvectors to be
- computed.
- If HOWMNY = 'A' or 'B', SELECT is not referenced.
- To select the real eigenvector corresponding to a real
- eigenvalue w(j), SELECT(j) must be set to .TRUE.. To select
- the complex eigenvector corresponding to a complex conjugate
- pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must be
- set to .TRUE.; then on exit SELECT(j) is .TRUE. and
- SELECT(j+1) is .FALSE..
-
- N (input) INTEGER
- The order of the matrix T. N >= 0.
-
- T (input) DOUBLE PRECISION array, dimension (LDT,N)
- The upper quasi-triangular matrix T in Schur canonical form.
-
- LDT (input) INTEGER
- The leading dimension of the array T. LDT >= max(1,N).
-
- VL (input/output) DOUBLE PRECISION array, dimension (LDVL,MM)
- On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must
- contain an N-by-N matrix Q (usually the orthogonal matrix Q
- of Schur vectors returned by DHSEQR).
- On exit, if SIDE = 'L' or 'B', VL contains:
- if HOWMNY = 'A', the matrix Y of left eigenvectors of T;
- VL has the same quasi-lower triangular form
- as T'. If T(i,i) is a real eigenvalue, then
- the i-th column VL(i) of VL is its
- corresponding eigenvector. If T(i:i+1,i:i+1)
- is a 2-by-2 block whose eigenvalues are
- complex-conjugate eigenvalues of T, then
- VL(i)+sqrt(-1)*VL(i+1) is the complex
- eigenvector corresponding to the eigenvalue
-                            with positive imaginary part.
- if HOWMNY = 'B', the matrix Q*Y;
- if HOWMNY = 'S', the left eigenvectors of T specified by
- SELECT, stored consecutively in the columns
- of VL, in the same order as their
- eigenvalues.
- A complex eigenvector corresponding to a complex eigenvalue
- is stored in two consecutive columns, the first holding the
- real part, and the second the imaginary part.
- If SIDE = 'R', VL is not referenced.
-
- LDVL (input) INTEGER
- The leading dimension of the array VL. LDVL >= max(1,N) if
- SIDE = 'L' or 'B'; LDVL >= 1 otherwise.
-
- VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM)
- On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must
- contain an N-by-N matrix Q (usually the orthogonal matrix Q
- of Schur vectors returned by DHSEQR).
- On exit, if SIDE = 'R' or 'B', VR contains:
- if HOWMNY = 'A', the matrix X of right eigenvectors of T;
- VR has the same quasi-upper triangular form
- as T. If T(i,i) is a real eigenvalue, then
- the i-th column VR(i) of VR is its
- corresponding eigenvector. If T(i:i+1,i:i+1)
- is a 2-by-2 block whose eigenvalues are
- complex-conjugate eigenvalues of T, then
- VR(i)+sqrt(-1)*VR(i+1) is the complex
- eigenvector corresponding to the eigenvalue
-                            with positive imaginary part.
- if HOWMNY = 'B', the matrix Q*X;
- if HOWMNY = 'S', the right eigenvectors of T specified by
- SELECT, stored consecutively in the columns
- of VR, in the same order as their
- eigenvalues.
- A complex eigenvector corresponding to a complex eigenvalue
- is stored in two consecutive columns, the first holding the
- real part and the second the imaginary part.
- If SIDE = 'L', VR is not referenced.
-
- LDVR (input) INTEGER
- The leading dimension of the array VR. LDVR >= max(1,N) if
- SIDE = 'R' or 'B'; LDVR >= 1 otherwise.
-
- MM (input) INTEGER
- The number of columns in the arrays VL and/or VR. MM >= M.
-
- M (output) INTEGER
- The number of columns in the arrays VL and/or VR actually
- used to store the eigenvectors.
- If HOWMNY = 'A' or 'B', M is set to N.
- Each selected real eigenvector occupies one column and each
- selected complex eigenvector occupies two columns.
-
- WORK (workspace) DOUBLE PRECISION array, dimension (3*N)
-
- INFO (output) INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
-
- Further Details
- ===============
-
- The algorithm used in this program is basically backward (forward)
-    substitution, with scaling to make the code robust against
- possible overflow.
-
- Each eigenvector is normalized so that the element of largest
- magnitude has magnitude 1; here the magnitude of a complex number
- (x,y) is taken to be |x| + |y|.
-
- =====================================================================
-
-
- Decode and test the input parameters
-*/
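-
-/*
-   Editor's note (a hedged sketch, not part of the routine): DTREVC is
-   normally called after DHSEQR has produced the Schur form T and the
-   Schur vectors Q.  With HOWMNY = 'B' the right eigenvectors come back
-   already back-transformed as Q*X, e.g.
-
-       dtrevc_("R", "B", select, &n, t, &ldt, vl, &ldvl, q, &ldq,
-               &mm, &m, work, &info);
-
-   where work has length 3*N.  The array names t, q, select and work are
-   illustrative only.
-*/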
-
- /* Parameter adjustments */
- --select;
- t_dim1 = *ldt;
- t_offset = 1 + t_dim1 * 1;
- t -= t_offset;
- vl_dim1 = *ldvl;
- vl_offset = 1 + vl_dim1 * 1;
- vl -= vl_offset;
- vr_dim1 = *ldvr;
- vr_offset = 1 + vr_dim1 * 1;
- vr -= vr_offset;
- --work;
-
- /* Function Body */
- bothv = lsame_(side, "B");
- rightv = lsame_(side, "R") || bothv;
- leftv = lsame_(side, "L") || bothv;
-
- allv = lsame_(howmny, "A");
- over = lsame_(howmny, "B");
- somev = lsame_(howmny, "S");
-
- *info = 0;
- if ((! rightv && ! leftv)) {
- *info = -1;
- } else if (((! allv && ! over) && ! somev)) {
- *info = -2;
- } else if (*n < 0) {
- *info = -4;
- } else if (*ldt < max(1,*n)) {
- *info = -6;
- } else if (*ldvl < 1 || (leftv && *ldvl < *n)) {
- *info = -8;
- } else if (*ldvr < 1 || (rightv && *ldvr < *n)) {
- *info = -10;
- } else {
-
-/*
- Set M to the number of columns required to store the selected
- eigenvectors, standardize the array SELECT if necessary, and
- test MM.
-*/
-
- if (somev) {
- *m = 0;
- pair = FALSE_;
- i__1 = *n;
- for (j = 1; j <= i__1; ++j) {
- if (pair) {
- pair = FALSE_;
- select[j] = FALSE_;
- } else {
- if (j < *n) {
- if (t[j + 1 + j * t_dim1] == 0.) {
- if (select[j]) {
- ++(*m);
- }
- } else {
- pair = TRUE_;
- if (select[j] || select[j + 1]) {
- select[j] = TRUE_;
- *m += 2;
- }
- }
- } else {
- if (select[*n]) {
- ++(*m);
- }
- }
- }
-/* L10: */
- }
- } else {
- *m = *n;
- }
-
- if (*mm < *m) {
- *info = -11;
- }
- }
- if (*info != 0) {
- i__1 = -(*info);
- xerbla_("DTREVC", &i__1);
- return 0;
- }
-
-/* Quick return if possible. */
-
- if (*n == 0) {
- return 0;
- }
-
-/* Set the constants to control overflow. */
-
- unfl = SAFEMINIMUM;
- ovfl = 1. / unfl;
- dlabad_(&unfl, &ovfl);
- ulp = PRECISION;
- smlnum = unfl * (*n / ulp);
- bignum = (1. - ulp) / smlnum;
-
-/*
- Compute 1-norm of each column of strictly upper triangular
- part of T to control overflow in triangular solver.
-*/
-
- work[1] = 0.;
- i__1 = *n;
- for (j = 2; j <= i__1; ++j) {
- work[j] = 0.;
- i__2 = j - 1;
- for (i__ = 1; i__ <= i__2; ++i__) {
- work[j] += (d__1 = t[i__ + j * t_dim1], abs(d__1));
-/* L20: */
- }
-/* L30: */
- }
-
-/*
- Index IP is used to specify the real or complex eigenvalue:
- IP = 0, real eigenvalue,
- 1, first of conjugate complex pair: (wr,wi)
- -1, second of conjugate complex pair: (wr,wi)
-*/
-
- n2 = (*n) << (1);
-
- if (rightv) {
-
-/* Compute right eigenvectors. */
-
- ip = 0;
- is = *m;
- for (ki = *n; ki >= 1; --ki) {
-
- if (ip == 1) {
- goto L130;
- }
- if (ki == 1) {
- goto L40;
- }
- if (t[ki + (ki - 1) * t_dim1] == 0.) {
- goto L40;
- }
- ip = -1;
-
-L40:
- if (somev) {
- if (ip == 0) {
- if (! select[ki]) {
- goto L130;
- }
- } else {
- if (! select[ki - 1]) {
- goto L130;
- }
- }
- }
-
-/* Compute the KI-th eigenvalue (WR,WI). */
-
- wr = t[ki + ki * t_dim1];
- wi = 0.;
- if (ip != 0) {
- wi = sqrt((d__1 = t[ki + (ki - 1) * t_dim1], abs(d__1))) *
- sqrt((d__2 = t[ki - 1 + ki * t_dim1], abs(d__2)));
- }
-/* Computing MAX */
- d__1 = ulp * (abs(wr) + abs(wi));
- smin = max(d__1,smlnum);
-
- if (ip == 0) {
-
-/* Real right eigenvector */
-
- work[ki + *n] = 1.;
-
-/* Form right-hand side */
-
- i__1 = ki - 1;
- for (k = 1; k <= i__1; ++k) {
- work[k + *n] = -t[k + ki * t_dim1];
-/* L50: */
- }
-
-/*
- Solve the upper quasi-triangular system:
- (T(1:KI-1,1:KI-1) - WR)*X = SCALE*WORK.
-*/
-
- jnxt = ki - 1;
- for (j = ki - 1; j >= 1; --j) {
- if (j > jnxt) {
- goto L60;
- }
- j1 = j;
- j2 = j;
- jnxt = j - 1;
- if (j > 1) {
- if (t[j + (j - 1) * t_dim1] != 0.) {
- j1 = j - 1;
- jnxt = j - 2;
- }
- }
-
- if (j1 == j2) {
-
-/* 1-by-1 diagonal block */
-
- dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b15, &t[j +
- j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
- n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm,
- &ierr);
-
-/*
- Scale X(1,1) to avoid overflow when updating
- the right-hand side.
-*/
-
- if (xnorm > 1.) {
- if (work[j] > bignum / xnorm) {
- x[0] /= xnorm;
- scale /= xnorm;
- }
- }
-
-/* Scale if necessary */
-
- if (scale != 1.) {
- dscal_(&ki, &scale, &work[*n + 1], &c__1);
- }
- work[j + *n] = x[0];
-
-/* Update right-hand side */
-
- i__1 = j - 1;
- d__1 = -x[0];
- daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
- *n + 1], &c__1);
-
- } else {
-
-/* 2-by-2 diagonal block */
-
- dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b15, &t[j -
- 1 + (j - 1) * t_dim1], ldt, &c_b15, &c_b15, &
- work[j - 1 + *n], n, &wr, &c_b29, x, &c__2, &
- scale, &xnorm, &ierr);
-
-/*
- Scale X(1,1) and X(2,1) to avoid overflow when
- updating the right-hand side.
-*/
-
- if (xnorm > 1.) {
-/* Computing MAX */
- d__1 = work[j - 1], d__2 = work[j];
- beta = max(d__1,d__2);
- if (beta > bignum / xnorm) {
- x[0] /= xnorm;
- x[1] /= xnorm;
- scale /= xnorm;
- }
- }
-
-/* Scale if necessary */
-
- if (scale != 1.) {
- dscal_(&ki, &scale, &work[*n + 1], &c__1);
- }
- work[j - 1 + *n] = x[0];
- work[j + *n] = x[1];
-
-/* Update right-hand side */
-
- i__1 = j - 2;
- d__1 = -x[0];
- daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
- &work[*n + 1], &c__1);
- i__1 = j - 2;
- d__1 = -x[1];
- daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
- *n + 1], &c__1);
- }
-L60:
- ;
- }
-
-/* Copy the vector x or Q*x to VR and normalize. */
-
- if (! over) {
- dcopy_(&ki, &work[*n + 1], &c__1, &vr[is * vr_dim1 + 1], &
- c__1);
-
- ii = idamax_(&ki, &vr[is * vr_dim1 + 1], &c__1);
- remax = 1. / (d__1 = vr[ii + is * vr_dim1], abs(d__1));
- dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
-
- i__1 = *n;
- for (k = ki + 1; k <= i__1; ++k) {
- vr[k + is * vr_dim1] = 0.;
-/* L70: */
- }
- } else {
- if (ki > 1) {
- i__1 = ki - 1;
- dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, &
- work[*n + 1], &c__1, &work[ki + *n], &vr[ki *
- vr_dim1 + 1], &c__1);
- }
-
- ii = idamax_(n, &vr[ki * vr_dim1 + 1], &c__1);
- remax = 1. / (d__1 = vr[ii + ki * vr_dim1], abs(d__1));
- dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
- }
-
- } else {
-
-/*
- Complex right eigenvector.
-
- Initial solve
- [ (T(KI-1,KI-1) T(KI-1,KI) ) - (WR + I* WI)]*X = 0.
- [ (T(KI,KI-1) T(KI,KI) ) ]
-*/
-
- if ((d__1 = t[ki - 1 + ki * t_dim1], abs(d__1)) >= (d__2 = t[
- ki + (ki - 1) * t_dim1], abs(d__2))) {
- work[ki - 1 + *n] = 1.;
- work[ki + n2] = wi / t[ki - 1 + ki * t_dim1];
- } else {
- work[ki - 1 + *n] = -wi / t[ki + (ki - 1) * t_dim1];
- work[ki + n2] = 1.;
- }
- work[ki + *n] = 0.;
- work[ki - 1 + n2] = 0.;
-
-/* Form right-hand side */
-
- i__1 = ki - 2;
- for (k = 1; k <= i__1; ++k) {
- work[k + *n] = -work[ki - 1 + *n] * t[k + (ki - 1) *
- t_dim1];
- work[k + n2] = -work[ki + n2] * t[k + ki * t_dim1];
-/* L80: */
- }
-
-/*
- Solve upper quasi-triangular system:
- (T(1:KI-2,1:KI-2) - (WR+i*WI))*X = SCALE*(WORK+i*WORK2)
-*/
-
- jnxt = ki - 2;
- for (j = ki - 2; j >= 1; --j) {
- if (j > jnxt) {
- goto L90;
- }
- j1 = j;
- j2 = j;
- jnxt = j - 1;
- if (j > 1) {
- if (t[j + (j - 1) * t_dim1] != 0.) {
- j1 = j - 1;
- jnxt = j - 2;
- }
- }
-
- if (j1 == j2) {
-
-/* 1-by-1 diagonal block */
-
- dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b15, &t[j +
- j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
- n], n, &wr, &wi, x, &c__2, &scale, &xnorm, &
- ierr);
-
-/*
- Scale X(1,1) and X(1,2) to avoid overflow when
- updating the right-hand side.
-*/
-
- if (xnorm > 1.) {
- if (work[j] > bignum / xnorm) {
- x[0] /= xnorm;
- x[2] /= xnorm;
- scale /= xnorm;
- }
- }
-
-/* Scale if necessary */
-
- if (scale != 1.) {
- dscal_(&ki, &scale, &work[*n + 1], &c__1);
- dscal_(&ki, &scale, &work[n2 + 1], &c__1);
- }
- work[j + *n] = x[0];
- work[j + n2] = x[2];
-
-/* Update the right-hand side */
-
- i__1 = j - 1;
- d__1 = -x[0];
- daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
- *n + 1], &c__1);
- i__1 = j - 1;
- d__1 = -x[2];
- daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
- n2 + 1], &c__1);
-
- } else {
-
-/* 2-by-2 diagonal block */
-
- dlaln2_(&c_false, &c__2, &c__2, &smin, &c_b15, &t[j -
- 1 + (j - 1) * t_dim1], ldt, &c_b15, &c_b15, &
- work[j - 1 + *n], n, &wr, &wi, x, &c__2, &
- scale, &xnorm, &ierr);
-
-/*
- Scale X to avoid overflow when updating
- the right-hand side.
-*/
-
- if (xnorm > 1.) {
-/* Computing MAX */
- d__1 = work[j - 1], d__2 = work[j];
- beta = max(d__1,d__2);
- if (beta > bignum / xnorm) {
- rec = 1. / xnorm;
- x[0] *= rec;
- x[2] *= rec;
- x[1] *= rec;
- x[3] *= rec;
- scale *= rec;
- }
- }
-
-/* Scale if necessary */
-
- if (scale != 1.) {
- dscal_(&ki, &scale, &work[*n + 1], &c__1);
- dscal_(&ki, &scale, &work[n2 + 1], &c__1);
- }
- work[j - 1 + *n] = x[0];
- work[j + *n] = x[1];
- work[j - 1 + n2] = x[2];
- work[j + n2] = x[3];
-
-/* Update the right-hand side */
-
- i__1 = j - 2;
- d__1 = -x[0];
- daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
- &work[*n + 1], &c__1);
- i__1 = j - 2;
- d__1 = -x[1];
- daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
- *n + 1], &c__1);
- i__1 = j - 2;
- d__1 = -x[2];
- daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
- &work[n2 + 1], &c__1);
- i__1 = j - 2;
- d__1 = -x[3];
- daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
- n2 + 1], &c__1);
- }
-L90:
- ;
- }
-
-/* Copy the vector x or Q*x to VR and normalize. */
-
- if (! over) {
- dcopy_(&ki, &work[*n + 1], &c__1, &vr[(is - 1) * vr_dim1
- + 1], &c__1);
- dcopy_(&ki, &work[n2 + 1], &c__1, &vr[is * vr_dim1 + 1], &
- c__1);
-
- emax = 0.;
- i__1 = ki;
- for (k = 1; k <= i__1; ++k) {
-/* Computing MAX */
- d__3 = emax, d__4 = (d__1 = vr[k + (is - 1) * vr_dim1]
- , abs(d__1)) + (d__2 = vr[k + is * vr_dim1],
- abs(d__2));
- emax = max(d__3,d__4);
-/* L100: */
- }
-
- remax = 1. / emax;
- dscal_(&ki, &remax, &vr[(is - 1) * vr_dim1 + 1], &c__1);
- dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
-
- i__1 = *n;
- for (k = ki + 1; k <= i__1; ++k) {
- vr[k + (is - 1) * vr_dim1] = 0.;
- vr[k + is * vr_dim1] = 0.;
-/* L110: */
- }
-
- } else {
-
- if (ki > 2) {
- i__1 = ki - 2;
- dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, &
- work[*n + 1], &c__1, &work[ki - 1 + *n], &vr[(
- ki - 1) * vr_dim1 + 1], &c__1);
- i__1 = ki - 2;
- dgemv_("N", n, &i__1, &c_b15, &vr[vr_offset], ldvr, &
- work[n2 + 1], &c__1, &work[ki + n2], &vr[ki *
- vr_dim1 + 1], &c__1);
- } else {
- dscal_(n, &work[ki - 1 + *n], &vr[(ki - 1) * vr_dim1
- + 1], &c__1);
- dscal_(n, &work[ki + n2], &vr[ki * vr_dim1 + 1], &
- c__1);
- }
-
- emax = 0.;
- i__1 = *n;
- for (k = 1; k <= i__1; ++k) {
-/* Computing MAX */
- d__3 = emax, d__4 = (d__1 = vr[k + (ki - 1) * vr_dim1]
- , abs(d__1)) + (d__2 = vr[k + ki * vr_dim1],
- abs(d__2));
- emax = max(d__3,d__4);
-/* L120: */
- }
- remax = 1. / emax;
- dscal_(n, &remax, &vr[(ki - 1) * vr_dim1 + 1], &c__1);
- dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
- }
- }
-
- --is;
- if (ip != 0) {
- --is;
- }
-L130:
- if (ip == 1) {
- ip = 0;
- }
- if (ip == -1) {
- ip = 1;
- }
-/* L140: */
- }
- }
-
- if (leftv) {
-
-/* Compute left eigenvectors. */
-
- ip = 0;
- is = 1;
- i__1 = *n;
- for (ki = 1; ki <= i__1; ++ki) {
-
- if (ip == -1) {
- goto L250;
- }
- if (ki == *n) {
- goto L150;
- }
- if (t[ki + 1 + ki * t_dim1] == 0.) {
- goto L150;
- }
- ip = 1;
-
-L150:
- if (somev) {
- if (! select[ki]) {
- goto L250;
- }
- }
-
-/* Compute the KI-th eigenvalue (WR,WI). */
-
- wr = t[ki + ki * t_dim1];
- wi = 0.;
- if (ip != 0) {
- wi = sqrt((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1))) *
- sqrt((d__2 = t[ki + 1 + ki * t_dim1], abs(d__2)));
- }
-/* Computing MAX */
- d__1 = ulp * (abs(wr) + abs(wi));
- smin = max(d__1,smlnum);
-
- if (ip == 0) {
-
-/* Real left eigenvector. */
-
- work[ki + *n] = 1.;
-
-/* Form right-hand side */
-
- i__2 = *n;
- for (k = ki + 1; k <= i__2; ++k) {
- work[k + *n] = -t[ki + k * t_dim1];
-/* L160: */
- }
-
-/*
- Solve the quasi-triangular system:
- (T(KI+1:N,KI+1:N) - WR)'*X = SCALE*WORK
-*/
-
- vmax = 1.;
- vcrit = bignum;
-
- jnxt = ki + 1;
- i__2 = *n;
- for (j = ki + 1; j <= i__2; ++j) {
- if (j < jnxt) {
- goto L170;
- }
- j1 = j;
- j2 = j;
- jnxt = j + 1;
- if (j < *n) {
- if (t[j + 1 + j * t_dim1] != 0.) {
- j2 = j + 1;
- jnxt = j + 2;
- }
- }
-
- if (j1 == j2) {
-
-/*
- 1-by-1 diagonal block
-
- Scale if necessary to avoid overflow when forming
- the right-hand side.
-*/
-
- if (work[j] > vcrit) {
- rec = 1. / vmax;
- i__3 = *n - ki + 1;
- dscal_(&i__3, &rec, &work[ki + *n], &c__1);
- vmax = 1.;
- vcrit = bignum;
- }
-
- i__3 = j - ki - 1;
- work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1],
- &c__1, &work[ki + 1 + *n], &c__1);
-
-/* Solve (T(J,J)-WR)'*X = WORK */
-
- dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b15, &t[j +
- j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
- n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm,
- &ierr);
-
-/* Scale if necessary */
-
- if (scale != 1.) {
- i__3 = *n - ki + 1;
- dscal_(&i__3, &scale, &work[ki + *n], &c__1);
- }
- work[j + *n] = x[0];
-/* Computing MAX */
- d__2 = (d__1 = work[j + *n], abs(d__1));
- vmax = max(d__2,vmax);
- vcrit = bignum / vmax;
-
- } else {
-
-/*
- 2-by-2 diagonal block
-
- Scale if necessary to avoid overflow when forming
- the right-hand side.
-
- Computing MAX
-*/
- d__1 = work[j], d__2 = work[j + 1];
- beta = max(d__1,d__2);
- if (beta > vcrit) {
- rec = 1. / vmax;
- i__3 = *n - ki + 1;
- dscal_(&i__3, &rec, &work[ki + *n], &c__1);
- vmax = 1.;
- vcrit = bignum;
- }
-
- i__3 = j - ki - 1;
- work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1],
- &c__1, &work[ki + 1 + *n], &c__1);
-
- i__3 = j - ki - 1;
- work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 1 + (j + 1) *
- t_dim1], &c__1, &work[ki + 1 + *n], &c__1);
-
-/*
- Solve
- [T(J,J)-WR T(J,J+1) ]'* X = SCALE*( WORK1 )
- [T(J+1,J) T(J+1,J+1)-WR] ( WORK2 )
-*/
-
- dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b15, &t[j +
- j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
- n], n, &wr, &c_b29, x, &c__2, &scale, &xnorm,
- &ierr);
-
-/* Scale if necessary */
-
- if (scale != 1.) {
- i__3 = *n - ki + 1;
- dscal_(&i__3, &scale, &work[ki + *n], &c__1);
- }
- work[j + *n] = x[0];
- work[j + 1 + *n] = x[1];
-
-/* Computing MAX */
- d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2
- = work[j + 1 + *n], abs(d__2)), d__3 = max(
- d__3,d__4);
- vmax = max(d__3,vmax);
- vcrit = bignum / vmax;
-
- }
-L170:
- ;
- }
-
-/* Copy the vector x or Q*x to VL and normalize. */
-
- if (! over) {
- i__2 = *n - ki + 1;
- dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
- vl_dim1], &c__1);
-
- i__2 = *n - ki + 1;
- ii = idamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki -
- 1;
- remax = 1. / (d__1 = vl[ii + is * vl_dim1], abs(d__1));
- i__2 = *n - ki + 1;
- dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
-
- i__2 = ki - 1;
- for (k = 1; k <= i__2; ++k) {
- vl[k + is * vl_dim1] = 0.;
-/* L180: */
- }
-
- } else {
-
- if (ki < *n) {
- i__2 = *n - ki;
- dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 1) * vl_dim1
- + 1], ldvl, &work[ki + 1 + *n], &c__1, &work[
- ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
- }
-
- ii = idamax_(n, &vl[ki * vl_dim1 + 1], &c__1);
- remax = 1. / (d__1 = vl[ii + ki * vl_dim1], abs(d__1));
- dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
-
- }
-
- } else {
-
-/*
- Complex left eigenvector.
-
- Initial solve:
- ((T(KI,KI) T(KI,KI+1) )' - (WR - I* WI))*X = 0.
- ((T(KI+1,KI) T(KI+1,KI+1)) )
-*/
-
- if ((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1)) >= (d__2 =
- t[ki + 1 + ki * t_dim1], abs(d__2))) {
- work[ki + *n] = wi / t[ki + (ki + 1) * t_dim1];
- work[ki + 1 + n2] = 1.;
- } else {
- work[ki + *n] = 1.;
- work[ki + 1 + n2] = -wi / t[ki + 1 + ki * t_dim1];
- }
- work[ki + 1 + *n] = 0.;
- work[ki + n2] = 0.;
-
-/* Form right-hand side */
-
- i__2 = *n;
- for (k = ki + 2; k <= i__2; ++k) {
- work[k + *n] = -work[ki + *n] * t[ki + k * t_dim1];
- work[k + n2] = -work[ki + 1 + n2] * t[ki + 1 + k * t_dim1]
- ;
-/* L190: */
- }
-
-/*
- Solve complex quasi-triangular system:
- ( T(KI+2,N:KI+2,N) - (WR-i*WI) )*X = WORK1+i*WORK2
-*/
-
- vmax = 1.;
- vcrit = bignum;
-
- jnxt = ki + 2;
- i__2 = *n;
- for (j = ki + 2; j <= i__2; ++j) {
- if (j < jnxt) {
- goto L200;
- }
- j1 = j;
- j2 = j;
- jnxt = j + 1;
- if (j < *n) {
- if (t[j + 1 + j * t_dim1] != 0.) {
- j2 = j + 1;
- jnxt = j + 2;
- }
- }
-
- if (j1 == j2) {
-
-/*
- 1-by-1 diagonal block
-
- Scale if necessary to avoid overflow when
- forming the right-hand side elements.
-*/
-
- if (work[j] > vcrit) {
- rec = 1. / vmax;
- i__3 = *n - ki + 1;
- dscal_(&i__3, &rec, &work[ki + *n], &c__1);
- i__3 = *n - ki + 1;
- dscal_(&i__3, &rec, &work[ki + n2], &c__1);
- vmax = 1.;
- vcrit = bignum;
- }
-
- i__3 = j - ki - 2;
- work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
- &c__1, &work[ki + 2 + *n], &c__1);
- i__3 = j - ki - 2;
- work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
- &c__1, &work[ki + 2 + n2], &c__1);
-
-/* Solve (T(J,J)-(WR-i*WI))*(X11+i*X12)= WK+I*WK2 */
-
- d__1 = -wi;
- dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b15, &t[j +
- j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
- n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, &
- ierr);
-
-/* Scale if necessary */
-
- if (scale != 1.) {
- i__3 = *n - ki + 1;
- dscal_(&i__3, &scale, &work[ki + *n], &c__1);
- i__3 = *n - ki + 1;
- dscal_(&i__3, &scale, &work[ki + n2], &c__1);
- }
- work[j + *n] = x[0];
- work[j + n2] = x[2];
-/* Computing MAX */
- d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2
- = work[j + n2], abs(d__2)), d__3 = max(d__3,
- d__4);
- vmax = max(d__3,vmax);
- vcrit = bignum / vmax;
-
- } else {
-
-/*
- 2-by-2 diagonal block
-
- Scale if necessary to avoid overflow when forming
- the right-hand side elements.
-
- Computing MAX
-*/
- d__1 = work[j], d__2 = work[j + 1];
- beta = max(d__1,d__2);
- if (beta > vcrit) {
- rec = 1. / vmax;
- i__3 = *n - ki + 1;
- dscal_(&i__3, &rec, &work[ki + *n], &c__1);
- i__3 = *n - ki + 1;
- dscal_(&i__3, &rec, &work[ki + n2], &c__1);
- vmax = 1.;
- vcrit = bignum;
- }
-
- i__3 = j - ki - 2;
- work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
- &c__1, &work[ki + 2 + *n], &c__1);
-
- i__3 = j - ki - 2;
- work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
- &c__1, &work[ki + 2 + n2], &c__1);
-
- i__3 = j - ki - 2;
- work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 2 + (j + 1) *
- t_dim1], &c__1, &work[ki + 2 + *n], &c__1);
-
- i__3 = j - ki - 2;
- work[j + 1 + n2] -= ddot_(&i__3, &t[ki + 2 + (j + 1) *
- t_dim1], &c__1, &work[ki + 2 + n2], &c__1);
-
-/*
- Solve 2-by-2 complex linear equation
- ([T(j,j) T(j,j+1) ]'-(wr-i*wi)*I)*X = SCALE*B
- ([T(j+1,j) T(j+1,j+1)] )
-*/
-
- d__1 = -wi;
- dlaln2_(&c_true, &c__2, &c__2, &smin, &c_b15, &t[j +
- j * t_dim1], ldt, &c_b15, &c_b15, &work[j + *
- n], n, &wr, &d__1, x, &c__2, &scale, &xnorm, &
- ierr);
-
-/* Scale if necessary */
-
- if (scale != 1.) {
- i__3 = *n - ki + 1;
- dscal_(&i__3, &scale, &work[ki + *n], &c__1);
- i__3 = *n - ki + 1;
- dscal_(&i__3, &scale, &work[ki + n2], &c__1);
- }
- work[j + *n] = x[0];
- work[j + n2] = x[2];
- work[j + 1 + *n] = x[1];
- work[j + 1 + n2] = x[3];
-/* Computing MAX */
- d__1 = abs(x[0]), d__2 = abs(x[2]), d__1 = max(d__1,
- d__2), d__2 = abs(x[1]), d__1 = max(d__1,d__2)
- , d__2 = abs(x[3]), d__1 = max(d__1,d__2);
- vmax = max(d__1,vmax);
- vcrit = bignum / vmax;
-
- }
-L200:
- ;
- }
-
-/*
- Copy the vector x or Q*x to VL and normalize.
-
- L210:
-*/
- if (! over) {
- i__2 = *n - ki + 1;
- dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
- vl_dim1], &c__1);
- i__2 = *n - ki + 1;
- dcopy_(&i__2, &work[ki + n2], &c__1, &vl[ki + (is + 1) *
- vl_dim1], &c__1);
-
- emax = 0.;
- i__2 = *n;
- for (k = ki; k <= i__2; ++k) {
-/* Computing MAX */
- d__3 = emax, d__4 = (d__1 = vl[k + is * vl_dim1], abs(
- d__1)) + (d__2 = vl[k + (is + 1) * vl_dim1],
- abs(d__2));
- emax = max(d__3,d__4);
-/* L220: */
- }
- remax = 1. / emax;
- i__2 = *n - ki + 1;
- dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
- i__2 = *n - ki + 1;
- dscal_(&i__2, &remax, &vl[ki + (is + 1) * vl_dim1], &c__1)
- ;
-
- i__2 = ki - 1;
- for (k = 1; k <= i__2; ++k) {
- vl[k + is * vl_dim1] = 0.;
- vl[k + (is + 1) * vl_dim1] = 0.;
-/* L230: */
- }
- } else {
- if (ki < *n - 1) {
- i__2 = *n - ki - 1;
- dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 2) * vl_dim1
- + 1], ldvl, &work[ki + 2 + *n], &c__1, &work[
- ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
- i__2 = *n - ki - 1;
- dgemv_("N", n, &i__2, &c_b15, &vl[(ki + 2) * vl_dim1
- + 1], ldvl, &work[ki + 2 + n2], &c__1, &work[
- ki + 1 + n2], &vl[(ki + 1) * vl_dim1 + 1], &
- c__1);
- } else {
- dscal_(n, &work[ki + *n], &vl[ki * vl_dim1 + 1], &
- c__1);
- dscal_(n, &work[ki + 1 + n2], &vl[(ki + 1) * vl_dim1
- + 1], &c__1);
- }
-
- emax = 0.;
- i__2 = *n;
- for (k = 1; k <= i__2; ++k) {
-/* Computing MAX */
- d__3 = emax, d__4 = (d__1 = vl[k + ki * vl_dim1], abs(
- d__1)) + (d__2 = vl[k + (ki + 1) * vl_dim1],
- abs(d__2));
- emax = max(d__3,d__4);
-/* L240: */
- }
- remax = 1. / emax;
- dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
- dscal_(n, &remax, &vl[(ki + 1) * vl_dim1 + 1], &c__1);
-
- }
-
- }
-
- ++is;
- if (ip != 0) {
- ++is;
- }
-L250:
- if (ip == -1) {
- ip = 0;
- }
- if (ip == 1) {
- ip = -1;
- }
-
-/* L260: */
- }
-
- }
-
- return 0;
-
-/* End of DTREVC */
-
-} /* dtrevc_ */
-
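The Further Details note in dtrevc_ above describes the normalization rule that the copy-and-normalize steps implement: each eigenvector is scaled so its largest entry has magnitude 1, with the magnitude of a complex (x,y) taken as |x| + |y|. A minimal standalone sketch of that rule in C, assuming the real and imaginary parts are held in two separate double arrays (normalize_pair is a hypothetical helper, not part of lapack_lite):

    #include <math.h>

    /* Scale a complex vector, stored as separate real/imaginary arrays,
       so that the entry of largest |re| + |im| magnitude ends up with
       magnitude 1 -- the same convention as EMAX/REMAX in dtrevc_. */
    static void normalize_pair(double *re, double *im, int n)
    {
        double emax = 0.0;
        int k;
        for (k = 0; k < n; ++k) {
            double m = fabs(re[k]) + fabs(im[k]);
            if (m > emax)
                emax = m;
        }
        if (emax > 0.0) {
            double remax = 1.0 / emax;
            for (k = 0; k < n; ++k) {
                re[k] *= remax;
                im[k] *= remax;
            }
        }
    }
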
-integer ieeeck_(integer *ispec, real *zero, real *one)
-{
- /* System generated locals */
- integer ret_val;
-
- /* Local variables */
- static real nan1, nan2, nan3, nan4, nan5, nan6, neginf, posinf, negzro,
- newzro;
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1998
-
-
- Purpose
- =======
-
- IEEECK is called from ILAENV to verify that Infinity and
- possibly NaN arithmetic is safe (i.e. will not trap).
-
- Arguments
- =========
-
- ISPEC (input) INTEGER
- Specifies whether to test just for infinity arithmetic
- or whether to test for infinity and NaN arithmetic.
- = 0: Verify infinity arithmetic only.
- = 1: Verify infinity and NaN arithmetic.
-
- ZERO (input) REAL
- Must contain the value 0.0
- This is passed to prevent the compiler from optimizing
- away this code.
-
- ONE (input) REAL
- Must contain the value 1.0
- This is passed to prevent the compiler from optimizing
- away this code.
-
- RETURN VALUE: INTEGER
- = 0: Arithmetic failed to produce the correct answers
- = 1: Arithmetic produced the correct answers
-*/
-
- ret_val = 1;
-
- posinf = *one / *zero;
- if (posinf <= *one) {
- ret_val = 0;
- return ret_val;
- }
-
- neginf = -(*one) / *zero;
- if (neginf >= *zero) {
- ret_val = 0;
- return ret_val;
- }
-
- negzro = *one / (neginf + *one);
- if (negzro != *zero) {
- ret_val = 0;
- return ret_val;
- }
-
- neginf = *one / negzro;
- if (neginf >= *zero) {
- ret_val = 0;
- return ret_val;
- }
-
- newzro = negzro + *zero;
- if (newzro != *zero) {
- ret_val = 0;
- return ret_val;
- }
-
- posinf = *one / newzro;
- if (posinf <= *one) {
- ret_val = 0;
- return ret_val;
- }
-
- neginf *= posinf;
- if (neginf >= *zero) {
- ret_val = 0;
- return ret_val;
- }
-
- posinf *= posinf;
- if (posinf <= *one) {
- ret_val = 0;
- return ret_val;
- }
-
-
-/* Return if we were only asked to check infinity arithmetic */
-
- if (*ispec == 0) {
- return ret_val;
- }
-
- nan1 = posinf + neginf;
-
- nan2 = posinf / neginf;
-
- nan3 = posinf / posinf;
-
- nan4 = posinf * *zero;
-
- nan5 = neginf * negzro;
-
- nan6 = nan5 * 0.f;
-
- if (nan1 == nan1) {
- ret_val = 0;
- return ret_val;
- }
-
- if (nan2 == nan2) {
- ret_val = 0;
- return ret_val;
- }
-
- if (nan3 == nan3) {
- ret_val = 0;
- return ret_val;
- }
-
- if (nan4 == nan4) {
- ret_val = 0;
- return ret_val;
- }
-
- if (nan5 == nan5) {
- ret_val = 0;
- return ret_val;
- }
-
- if (nan6 == nan6) {
- ret_val = 0;
- return ret_val;
- }
-
- return ret_val;
-} /* ieeeck_ */
-
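Each NaN test in ieeeck_ relies on the same IEEE 754 property: a NaN compares unequal to everything, itself included, so nan1 == nan1 can only hold when the arithmetic failed to produce a genuine NaN. A minimal sketch of one such probe, assuming IEEE semantics; volatile plays the role the ZERO and ONE arguments play above, keeping the compiler from folding the expressions away at compile time:

    #include <stdio.h>

    int main(void)
    {
        volatile float zero = 0.0f;
        volatile float one = 1.0f;
        float posinf = one / zero;           /* +Inf under IEEE 754  */
        float nan1 = posinf + (-posinf);     /* Inf - Inf yields NaN */

        /* NaN != NaN under IEEE 754, so equality here means failure. */
        printf("%s\n", (nan1 == nan1) ? "arithmetic broken" : "arithmetic ok");
        return 0;
    }
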
-integer ilaenv_(integer *ispec, char *name__, char *opts, integer *n1,
- integer *n2, integer *n3, integer *n4, ftnlen name_len, ftnlen
- opts_len)
-{
- /* System generated locals */
- integer ret_val;
-
- /* Builtin functions */
- /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen);
- integer s_cmp(char *, char *, ftnlen, ftnlen);
-
- /* Local variables */
- static integer i__;
- static char c1[1], c2[2], c3[3], c4[2];
- static integer ic, nb, iz, nx;
- static logical cname, sname;
- static integer nbmin;
- extern integer ieeeck_(integer *, real *, real *);
- static char subnam[6];
-
-
-/*
- -- LAPACK auxiliary routine (version 3.0) --
- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
- Courant Institute, Argonne National Lab, and Rice University
- June 30, 1999
-
-
- Purpose
- =======
-
- ILAENV is called from the LAPACK routines to choose problem-dependent
- parameters for the local environment. See ISPEC for a description of
- the parameters.
-
- This version provides a set of parameters which should give good,
- but not optimal, performance on many of the currently available
- computers. Users are encouraged to modify this subroutine to set
- the tuning parameters for their particular machine using the option
- and problem size information in the arguments.
-
- This routine will not function correctly if it is converted to all
- lower case. Converting it to all upper case is allowed.
-
- Arguments
- =========
-
- ISPEC (input) INTEGER
- Specifies the parameter to be returned as the value of
- ILAENV.
- = 1: the optimal blocksize; if this value is 1, an unblocked
- algorithm will give the best performance.
- = 2: the minimum block size for which the block routine
- should be used; if the usable block size is less than
- this value, an unblocked routine should be used.
- = 3: the crossover point (in a block routine, for N less
- than this value, an unblocked routine should be used)
- = 4: the number of shifts, used in the nonsymmetric
- eigenvalue routines
- = 5: the minimum column dimension for blocking to be used;
- rectangular blocks must have dimension at least k by m,
- where k is given by ILAENV(2,...) and m by ILAENV(5,...)
- = 6: the crossover point for the SVD (when reducing an m by n
- matrix to bidiagonal form, if max(m,n)/min(m,n) exceeds
- this value, a QR factorization is used first to reduce
- the matrix to a triangular form.)
- = 7: the number of processors
- = 8: the crossover point for the multishift QR and QZ methods
- for nonsymmetric eigenvalue problems.
- = 9: maximum size of the subproblems at the bottom of the
- computation tree in the divide-and-conquer algorithm
- (used by xGELSD and xGESDD)
- =10: ieee NaN arithmetic can be trusted not to trap
- =11: infinity arithmetic can be trusted not to trap
-
- NAME (input) CHARACTER*(*)
- The name of the calling subroutine, in either upper case or
- lower case.
-
- OPTS (input) CHARACTER*(*)
- The character options to the subroutine NAME, concatenated
- into a single character string. For example, UPLO = 'U',
- TRANS = 'T', and DIAG = 'N' for a triangular routine would
- be specified as OPTS = 'UTN'.
-
- N1 (input) INTEGER
- N2 (input) INTEGER
- N3 (input) INTEGER
- N4 (input) INTEGER
- Problem dimensions for the subroutine NAME; these may not all
- be required.
-
- (ILAENV) (output) INTEGER
- >= 0: the value of the parameter specified by ISPEC
- < 0: if ILAENV = -k, the k-th argument had an illegal value.
-
- Further Details
- ===============
-
- The following conventions have been used when calling ILAENV from the
- LAPACK routines:
- 1) OPTS is a concatenation of all of the character options to
- subroutine NAME, in the same order that they appear in the
- argument list for NAME, even if they are not used in determining
- the value of the parameter specified by ISPEC.
- 2) The problem dimensions N1, N2, N3, N4 are specified in the order
- that they appear in the argument list for NAME. N1 is used
- first, N2 second, and so on, and unused problem dimensions are
- passed a value of -1.
- 3) The parameter value returned by ILAENV is checked for validity in
- the calling subroutine. For example, ILAENV is used to retrieve
- the optimal blocksize for STRTRI as follows:
-
- NB = ILAENV( 1, 'STRTRI', UPLO // DIAG, N, -1, -1, -1 )
- IF( NB.LE.1 ) NB = MAX( 1, N )
-
- =====================================================================
-*/
-
-
- switch (*ispec) {
- case 1: goto L100;
- case 2: goto L100;
- case 3: goto L100;
- case 4: goto L400;
- case 5: goto L500;
- case 6: goto L600;
- case 7: goto L700;
- case 8: goto L800;
- case 9: goto L900;
- case 10: goto L1000;
- case 11: goto L1100;
- }
-
-/* Invalid value for ISPEC */
-
- ret_val = -1;
- return ret_val;
-
-L100:
-
-/* Convert NAME to upper case if the first character is lower case. */
-
- ret_val = 1;
- s_copy(subnam, name__, (ftnlen)6, name_len);
- ic = *(unsigned char *)subnam;
- iz = 'Z';
- if (iz == 90 || iz == 122) {
-
-/* ASCII character set */
-
- if ((ic >= 97 && ic <= 122)) {
- *(unsigned char *)subnam = (char) (ic - 32);
- for (i__ = 2; i__ <= 6; ++i__) {
- ic = *(unsigned char *)&subnam[i__ - 1];
- if ((ic >= 97 && ic <= 122)) {
- *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32);
- }
-/* L10: */
- }
- }
-
- } else if (iz == 233 || iz == 169) {
-
-/* EBCDIC character set */
-
- if ((ic >= 129 && ic <= 137) || (ic >= 145 && ic <= 153) || (ic >=
- 162 && ic <= 169)) {
- *(unsigned char *)subnam = (char) (ic + 64);
- for (i__ = 2; i__ <= 6; ++i__) {
- ic = *(unsigned char *)&subnam[i__ - 1];
- if ((ic >= 129 && ic <= 137) || (ic >= 145 && ic <= 153) || (
- ic >= 162 && ic <= 169)) {
- *(unsigned char *)&subnam[i__ - 1] = (char) (ic + 64);
- }
-/* L20: */
- }
- }
-
- } else if (iz == 218 || iz == 250) {
-
-/* Prime machines: ASCII+128 */
-
- if ((ic >= 225 && ic <= 250)) {
- *(unsigned char *)subnam = (char) (ic - 32);
- for (i__ = 2; i__ <= 6; ++i__) {
- ic = *(unsigned char *)&subnam[i__ - 1];
- if ((ic >= 225 && ic <= 250)) {
- *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32);
- }
-/* L30: */
- }
- }
- }
-
- *(unsigned char *)c1 = *(unsigned char *)subnam;
- sname = *(unsigned char *)c1 == 'S' || *(unsigned char *)c1 == 'D';
- cname = *(unsigned char *)c1 == 'C' || *(unsigned char *)c1 == 'Z';
- if (! (cname || sname)) {
- return ret_val;
- }
- s_copy(c2, subnam + 1, (ftnlen)2, (ftnlen)2);
- s_copy(c3, subnam + 3, (ftnlen)3, (ftnlen)3);
- s_copy(c4, c3 + 1, (ftnlen)2, (ftnlen)2);
-
- switch (*ispec) {
- case 1: goto L110;
- case 2: goto L200;
- case 3: goto L300;
- }
-
-L110:
-
-/*
- ISPEC = 1: block size
-
- In these examples, separate code is provided for setting NB for
- real and complex. We assume that NB will take the same value in
- single or double precision.
-*/
-
- nb = 1;
-
- if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) {
- if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nb = 64;
- } else {
- nb = 64;
- }
- } else if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3,
- "RQF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)
- 3, (ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3)
- == 0) {
- if (sname) {
- nb = 32;
- } else {
- nb = 32;
- }
- } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nb = 32;
- } else {
- nb = 32;
- }
- } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nb = 32;
- } else {
- nb = 32;
- }
- } else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nb = 64;
- } else {
- nb = 64;
- }
- }
- } else if (s_cmp(c2, "PO", (ftnlen)2, (ftnlen)2) == 0) {
- if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nb = 64;
- } else {
- nb = 64;
- }
- }
- } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) {
- if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nb = 64;
- } else {
- nb = 64;
- }
- } else if ((sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0)) {
- nb = 32;
- } else if ((sname && s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0)) {
- nb = 64;
- }
- } else if ((cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0)) {
- if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
- nb = 64;
- } else if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
- nb = 32;
- } else if (s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0) {
- nb = 64;
- }
- } else if ((sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0)) {
- if (*(unsigned char *)c3 == 'G') {
- if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
- (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
- ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
- 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
- c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
- ftnlen)2, (ftnlen)2) == 0) {
- nb = 32;
- }
- } else if (*(unsigned char *)c3 == 'M') {
- if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
- (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
- ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
- 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
- c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
- ftnlen)2, (ftnlen)2) == 0) {
- nb = 32;
- }
- }
- } else if ((cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0)) {
- if (*(unsigned char *)c3 == 'G') {
- if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
- (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
- ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
- 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
- c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
- ftnlen)2, (ftnlen)2) == 0) {
- nb = 32;
- }
- } else if (*(unsigned char *)c3 == 'M') {
- if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
- (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
- ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
- 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
- c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
- ftnlen)2, (ftnlen)2) == 0) {
- nb = 32;
- }
- }
- } else if (s_cmp(c2, "GB", (ftnlen)2, (ftnlen)2) == 0) {
- if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- if (*n4 <= 64) {
- nb = 1;
- } else {
- nb = 32;
- }
- } else {
- if (*n4 <= 64) {
- nb = 1;
- } else {
- nb = 32;
- }
- }
- }
- } else if (s_cmp(c2, "PB", (ftnlen)2, (ftnlen)2) == 0) {
- if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- if (*n2 <= 64) {
- nb = 1;
- } else {
- nb = 32;
- }
- } else {
- if (*n2 <= 64) {
- nb = 1;
- } else {
- nb = 32;
- }
- }
- }
- } else if (s_cmp(c2, "TR", (ftnlen)2, (ftnlen)2) == 0) {
- if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nb = 64;
- } else {
- nb = 64;
- }
- }
- } else if (s_cmp(c2, "LA", (ftnlen)2, (ftnlen)2) == 0) {
- if (s_cmp(c3, "UUM", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nb = 64;
- } else {
- nb = 64;
- }
- }
- } else if ((sname && s_cmp(c2, "ST", (ftnlen)2, (ftnlen)2) == 0)) {
- if (s_cmp(c3, "EBZ", (ftnlen)3, (ftnlen)3) == 0) {
- nb = 1;
- }
- }
- ret_val = nb;
- return ret_val;
-
-L200:
-
-/* ISPEC = 2: minimum block size */
-
- nbmin = 2;
- if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) {
- if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "RQF", (
- ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)3, (
- ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0)
- {
- if (sname) {
- nbmin = 2;
- } else {
- nbmin = 2;
- }
- } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nbmin = 2;
- } else {
- nbmin = 2;
- }
- } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nbmin = 2;
- } else {
- nbmin = 2;
- }
- } else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nbmin = 2;
- } else {
- nbmin = 2;
- }
- }
- } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) {
- if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nbmin = 8;
- } else {
- nbmin = 8;
- }
- } else if ((sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0)) {
- nbmin = 2;
- }
- } else if ((cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0)) {
- if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
- nbmin = 2;
- }
- } else if ((sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0)) {
- if (*(unsigned char *)c3 == 'G') {
- if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
- (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
- ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
- 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
- c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
- ftnlen)2, (ftnlen)2) == 0) {
- nbmin = 2;
- }
- } else if (*(unsigned char *)c3 == 'M') {
- if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
- (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
- ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
- 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
- c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
- ftnlen)2, (ftnlen)2) == 0) {
- nbmin = 2;
- }
- }
- } else if ((cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0)) {
- if (*(unsigned char *)c3 == 'G') {
- if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
- (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
- ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
- 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
- c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
- ftnlen)2, (ftnlen)2) == 0) {
- nbmin = 2;
- }
- } else if (*(unsigned char *)c3 == 'M') {
- if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
- (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
- ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
- 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
- c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
- ftnlen)2, (ftnlen)2) == 0) {
- nbmin = 2;
- }
- }
- }
- ret_val = nbmin;
- return ret_val;
-
-L300:
-
-/* ISPEC = 3: crossover point */
-
- nx = 0;
- if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) {
- if (s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "RQF", (
- ftnlen)3, (ftnlen)3) == 0 || s_cmp(c3, "LQF", (ftnlen)3, (
- ftnlen)3) == 0 || s_cmp(c3, "QLF", (ftnlen)3, (ftnlen)3) == 0)
- {
- if (sname) {
- nx = 128;
- } else {
- nx = 128;
- }
- } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nx = 128;
- } else {
- nx = 128;
- }
- } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) {
- if (sname) {
- nx = 128;
- } else {
- nx = 128;
- }
- }
- } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) {
- if ((sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0)) {
- nx = 32;
- }
- } else if ((cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0)) {
- if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
- nx = 32;
- }
- } else if ((sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0)) {
- if (*(unsigned char *)c3 == 'G') {
- if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
- (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
- ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
- 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
- c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
- ftnlen)2, (ftnlen)2) == 0) {
- nx = 128;
- }
- }
- } else if ((cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0)) {
- if (*(unsigned char *)c3 == 'G') {
- if (s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "RQ",
- (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "LQ", (ftnlen)2, (
- ftnlen)2) == 0 || s_cmp(c4, "QL", (ftnlen)2, (ftnlen)2) ==
- 0 || s_cmp(c4, "HR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(
- c4, "TR", (ftnlen)2, (ftnlen)2) == 0 || s_cmp(c4, "BR", (
- ftnlen)2, (ftnlen)2) == 0) {
- nx = 128;
- }
- }
- }
- ret_val = nx;
- return ret_val;
-
-L400:
-
-/* ISPEC = 4: number of shifts (used by xHSEQR) */
-
- ret_val = 6;
- return ret_val;
-
-L500:
-
-/* ISPEC = 5: minimum column dimension (not used) */
-
- ret_val = 2;
- return ret_val;
-
-L600:
-
-/* ISPEC = 6: crossover point for SVD (used by xGELSS and xGESVD) */
-
- ret_val = (integer) ((real) min(*n1,*n2) * 1.6f);
- return ret_val;
-
-L700:
-
-/* ISPEC = 7: number of processors (not used) */
-
- ret_val = 1;
- return ret_val;
-
-L800:
-
-/* ISPEC = 8: crossover point for multishift (used by xHSEQR) */
-
- ret_val = 50;
- return ret_val;
-
-L900:
-
-/*
- ISPEC = 9: maximum size of the subproblems at the bottom of the
- computation tree in the divide-and-conquer algorithm
- (used by xGELSD and xGESDD)
-*/
-
- ret_val = 25;
- return ret_val;
-
-L1000:
-
-/*
- ISPEC = 10: ieee NaN arithmetic can be trusted not to trap
-
- ILAENV = 0
-*/
- ret_val = 1;
- if (ret_val == 1) {
- ret_val = ieeeck_(&c__0, &c_b3825, &c_b3826);
- }
- return ret_val;
-
-L1100:
-
-/*
- ISPEC = 11: infinity arithmetic can be trusted not to trap
-
- ILAENV = 0
-*/
- ret_val = 1;
- if (ret_val == 1) {
- ret_val = ieeeck_(&c__1, &c_b3825, &c_b3826);
- }
- return ret_val;
-
-/* End of ILAENV */
-
-} /* ilaenv_ */
-
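Because this is f2c output, every ILAENV argument is passed by pointer and each character argument carries a trailing ftnlen length. A hedged sketch of the STRTRI block-size query from the Further Details example above, expressed against this C translation (strtri_blocksize is a hypothetical wrapper; integer, ftnlen, and the max macro are assumed to come from f2c.h, as elsewhere in this file):

    #include "f2c.h"

    extern integer ilaenv_(integer *ispec, char *name__, char *opts,
                           integer *n1, integer *n2, integer *n3, integer *n4,
                           ftnlen name_len, ftnlen opts_len);

    integer strtri_blocksize(integer n)
    {
        /* NB = ILAENV( 1, 'STRTRI', UPLO // DIAG, N, -1, -1, -1 )
           with UPLO = 'U' and DIAG = 'N'; unused dimensions get -1. */
        integer ispec = 1, unused = -1;
        integer nb = ilaenv_(&ispec, "STRTRI", "UN", &n,
                             &unused, &unused, &unused, (ftnlen)6, (ftnlen)2);
        if (nb <= 1)
            nb = max(1, n);   /* fall back to the unblocked algorithm */
        return nb;
    }
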
diff --git a/numpy/linalg/info.py b/numpy/linalg/info.py
index 235822dfa..646ecda04 100644
--- a/numpy/linalg/info.py
+++ b/numpy/linalg/info.py
@@ -32,5 +32,6 @@ Exceptions:
- LinAlgError Indicates a failed linear algebra operation
"""
+from __future__ import division, absolute_import, print_function
depends = ['core']
diff --git a/numpy/linalg/lapack_lite/blas_lite.c b/numpy/linalg/lapack_lite/blas_lite.c
new file mode 100644
index 000000000..bd24768c3
--- /dev/null
+++ b/numpy/linalg/lapack_lite/blas_lite.c
@@ -0,0 +1,21135 @@
+/*
+NOTE: This is generated code. Look in Misc/lapack_lite for information on
+ remaking this file.
+*/
+#include "f2c.h"
+
+#ifdef HAVE_CONFIG
+#include "config.h"
+#else
+extern doublereal dlamch_(char *);
+#define EPSILON dlamch_("Epsilon")
+#define SAFEMINIMUM dlamch_("Safe minimum")
+#define PRECISION dlamch_("Precision")
+#define BASE dlamch_("Base")
+#endif
+
+extern doublereal dlapy2_(doublereal *x, doublereal *y);
+
+
+
+/* Table of constant values */
+
+static complex c_b21 = {1.f,0.f};
+static integer c__1 = 1;
+static doublecomplex c_b1077 = {1.,0.};
+
+/* Subroutine */ int caxpy_(integer *n, complex *ca, complex *cx, integer *
+ incx, complex *cy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ real r__1, r__2;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ static integer i__, ix, iy;
+
+
+/*
+ constant times a vector plus a vector.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if ((r__1 = ca->r, dabs(r__1)) + (r__2 = r_imag(ca), dabs(r__2)) == 0.f) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ i__4 = ix;
+ q__2.r = ca->r * cx[i__4].r - ca->i * cx[i__4].i, q__2.i = ca->r * cx[
+ i__4].i + ca->i * cx[i__4].r;
+ q__1.r = cy[i__3].r + q__2.r, q__1.i = cy[i__3].i + q__2.i;
+ cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = i__;
+ q__2.r = ca->r * cx[i__4].r - ca->i * cx[i__4].i, q__2.i = ca->r * cx[
+ i__4].i + ca->i * cx[i__4].r;
+ q__1.r = cy[i__3].r + q__2.r, q__1.i = cy[i__3].i + q__2.i;
+ cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+/* L30: */
+ }
+ return 0;
+} /* caxpy_ */
+
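caxpy_ computes cy := ca*cx + cy over n complex entries with the given strides. A small driver sketch, assuming the f2c complex struct with .r/.i float members used throughout this file (the values are illustrative):

    #include <stdio.h>
    #include "f2c.h"

    extern int caxpy_(integer *n, complex *ca, complex *cx, integer *incx,
                      complex *cy, integer *incy);

    int main(void)
    {
        integer n = 2, inc = 1;
        complex ca = {2.f, 0.f};                  /* scalar multiplier        */
        complex cx[2] = {{1.f, 1.f}, {0.f, 3.f}}; /* x vector                 */
        complex cy[2] = {{5.f, 0.f}, {0.f, 0.f}}; /* y vector, updated in place */

        caxpy_(&n, &ca, cx, &inc, cy, &inc);      /* cy := ca*cx + cy */

        /* Expected: (7, 2) and (0, 6). */
        printf("(%g, %g) (%g, %g)\n", cy[0].r, cy[0].i, cy[1].r, cy[1].i);
        return 0;
    }
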
+/* Subroutine */ int ccopy_(integer *n, complex *cx, integer *incx, complex *
+ cy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, ix, iy;
+
+
+/*
+ copies a vector, x, to a vector, y.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = ix;
+ cy[i__2].r = cx[i__3].r, cy[i__2].i = cx[i__3].i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ cy[i__2].r = cx[i__3].r, cy[i__2].i = cx[i__3].i;
+/* L30: */
+ }
+ return 0;
+} /* ccopy_ */
+
+/* Complex */ VOID cdotc_(complex * ret_val, integer *n, complex *cx, integer
+ *incx, complex *cy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, ix, iy;
+ static complex ctemp;
+
+
+/*
+ forms the dot product of two vectors, conjugating the first
+ vector.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ ctemp.r = 0.f, ctemp.i = 0.f;
+ ret_val->r = 0.f, ret_val->i = 0.f;
+ if (*n <= 0) {
+ return ;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r_cnjg(&q__3, &cx[ix]);
+ i__2 = iy;
+ q__2.r = q__3.r * cy[i__2].r - q__3.i * cy[i__2].i, q__2.i = q__3.r *
+ cy[i__2].i + q__3.i * cy[i__2].r;
+ q__1.r = ctemp.r + q__2.r, q__1.i = ctemp.i + q__2.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ ret_val->r = ctemp.r, ret_val->i = ctemp.i;
+ return ;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r_cnjg(&q__3, &cx[i__]);
+ i__2 = i__;
+ q__2.r = q__3.r * cy[i__2].r - q__3.i * cy[i__2].i, q__2.i = q__3.r *
+ cy[i__2].i + q__3.i * cy[i__2].r;
+ q__1.r = ctemp.r + q__2.r, q__1.i = ctemp.i + q__2.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+/* L30: */
+ }
+ ret_val->r = ctemp.r, ret_val->i = ctemp.i;
+ return ;
+} /* cdotc_ */
+
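cdotc_ returns the sum of conj(cx[i]) * cy[i]; because f2c turns COMPLEX-valued Fortran functions into subroutines, the result comes back through the first pointer argument rather than as a return value. A sketch under the same f2c.h assumptions as above:

    #include <stdio.h>
    #include "f2c.h"

    extern VOID cdotc_(complex *ret_val, integer *n, complex *cx,
                       integer *incx, complex *cy, integer *incy);

    int main(void)
    {
        integer n = 2, inc = 1;
        complex cx[2] = {{0.f, 1.f}, {2.f, 0.f}};  /* x = (i, 2) */
        complex cy[2] = {{0.f, 1.f}, {0.f, 1.f}};  /* y = (i, i) */
        complex dot;

        cdotc_(&dot, &n, cx, &inc, cy, &inc);      /* dot = conj(x)^T y */

        /* conj(i)*i + conj(2)*i = 1 + 2i, so this prints (1, 2). */
        printf("(%g, %g)\n", dot.r, dot.i);
        return 0;
    }

cdotu_, defined next, is identical except that it skips the conjugation of the first vector.
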
+/* Complex */ VOID cdotu_(complex * ret_val, integer *n, complex *cx, integer
+ *incx, complex *cy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ complex q__1, q__2;
+
+ /* Local variables */
+ static integer i__, ix, iy;
+ static complex ctemp;
+
+
+/*
+ forms the dot product of two vectors.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ ctemp.r = 0.f, ctemp.i = 0.f;
+ ret_val->r = 0.f, ret_val->i = 0.f;
+ if (*n <= 0) {
+ return ;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ i__3 = iy;
+ q__2.r = cx[i__2].r * cy[i__3].r - cx[i__2].i * cy[i__3].i, q__2.i =
+ cx[i__2].r * cy[i__3].i + cx[i__2].i * cy[i__3].r;
+ q__1.r = ctemp.r + q__2.r, q__1.i = ctemp.i + q__2.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ ret_val->r = ctemp.r, ret_val->i = ctemp.i;
+ return ;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ q__2.r = cx[i__2].r * cy[i__3].r - cx[i__2].i * cy[i__3].i, q__2.i =
+ cx[i__2].r * cy[i__3].i + cx[i__2].i * cy[i__3].r;
+ q__1.r = ctemp.r + q__2.r, q__1.i = ctemp.i + q__2.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+/* L30: */
+ }
+ ret_val->r = ctemp.r, ret_val->i = ctemp.i;
+ return ;
+} /* cdotu_ */
+
+/* Subroutine */ int cgemm_(char *transa, char *transb, integer *m, integer *
+ n, integer *k, complex *alpha, complex *a, integer *lda, complex *b,
+ integer *ldb, complex *beta, complex *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static logical nota, notb;
+ static complex temp;
+ static logical conja, conjb;
+ static integer ncola;
+ extern logical lsame_(char *, char *);
+ static integer nrowa, nrowb;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ CGEMM performs one of the matrix-matrix operations
+
+ C := alpha*op( A )*op( B ) + beta*C,
+
+ where op( X ) is one of
+
+ op( X ) = X or op( X ) = X' or op( X ) = conjg( X' ),
+
+ alpha and beta are scalars, and A, B and C are matrices, with op( A )
+ an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
+
+ Parameters
+ ==========
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n', op( A ) = A.
+
+ TRANSA = 'T' or 't', op( A ) = A'.
+
+ TRANSA = 'C' or 'c', op( A ) = conjg( A' ).
+
+ Unchanged on exit.
+
+ TRANSB - CHARACTER*1.
+ On entry, TRANSB specifies the form of op( B ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSB = 'N' or 'n', op( B ) = B.
+
+ TRANSB = 'T' or 't', op( B ) = B'.
+
+ TRANSB = 'C' or 'c', op( B ) = conjg( B' ).
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix
+ op( A ) and of the matrix C. M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix
+ op( B ) and the number of columns of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry, K specifies the number of columns of the matrix
+ op( A ) and the number of rows of the matrix op( B ). K must
+ be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANSA = 'N' or 'n', and is m otherwise.
+ Before entry with TRANSA = 'N' or 'n', the leading m by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by m part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANSA = 'N' or 'n' then
+ LDA must be at least max( 1, m ), otherwise LDA must be at
+ least max( 1, k ).
+ Unchanged on exit.
+
+ B - COMPLEX array of DIMENSION ( LDB, kb ), where kb is
+ n when TRANSB = 'N' or 'n', and is k otherwise.
+ Before entry with TRANSB = 'N' or 'n', the leading k by n
+ part of the array B must contain the matrix B, otherwise
+ the leading n by k part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANSB = 'N' or 'n' then
+ LDB must be at least max( 1, k ), otherwise LDB must be at
+ least max( 1, n ).
+ Unchanged on exit.
+
+ BETA - COMPLEX .
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then C need not be set on input.
+ Unchanged on exit.
+
+ C - COMPLEX array of DIMENSION ( LDC, n ).
+ Before entry, the leading m by n part of the array C must
+ contain the matrix C, except when beta is zero, in which
+ case C need not be set on entry.
+ On exit, the array C is overwritten by the m by n matrix
+ ( alpha*op( A )*op( B ) + beta*C ).
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Set NOTA and NOTB as true if A and B respectively are not
+ conjugated or transposed, set CONJA and CONJB as true if A and
+ B respectively are to be transposed but not conjugated and set
+ NROWA, NCOLA and NROWB as the number of rows and columns of A
+ and the number of rows of B respectively.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ nota = lsame_(transa, "N");
+ notb = lsame_(transb, "N");
+ conja = lsame_(transa, "C");
+ conjb = lsame_(transb, "C");
+ if (nota) {
+ nrowa = *m;
+ ncola = *k;
+ } else {
+ nrowa = *k;
+ ncola = *m;
+ }
+ if (notb) {
+ nrowb = *k;
+ } else {
+ nrowb = *n;
+ }
+
+/* Test the input parameters. */
+
+ info = 0;
+ if (! nota && ! conja && ! lsame_(transa, "T")) {
+ info = 1;
+ } else if (! notb && ! conjb && ! lsame_(transb, "T")) {
+ info = 2;
+ } else if (*m < 0) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < max(1,nrowa)) {
+ info = 8;
+ } else if (*ldb < max(1,nrowb)) {
+ info = 10;
+ } else if (*ldc < max(1,*m)) {
+ info = 13;
+ }
+ if (info != 0) {
+ xerbla_("CGEMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (((alpha->r == 0.f && alpha->i == 0.f) ||
+ (*k == 0)) && (beta->r == 1.f && beta->i == 0.f))) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+ q__1.i = beta->r * c__[i__4].i + beta->i * c__[
+ i__4].r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (notb) {
+ if (nota) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L50: */
+ }
+ } else if ((beta->r != 1.f) || (beta->i != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L60: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = l + j * b_dim1;
+ if ((b[i__3].r != 0.f) || (b[i__3].i != 0.f)) {
+ i__3 = l + j * b_dim1;
+ q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ q__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ q__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+ .i + q__2.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ } else if (conja) {
+
+/* Form C := alpha*conjg( A' )*B + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0.f, temp.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4].i,
+ q__2.i = q__3.r * b[i__4].i + q__3.i * b[i__4]
+ .r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+ }
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+/* L110: */
+ }
+/* L120: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0.f, temp.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = l + j * b_dim1;
+ q__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+ .i, q__2.i = a[i__4].r * b[i__5].i + a[i__4]
+ .i * b[i__5].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L130: */
+ }
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+/* L140: */
+ }
+/* L150: */
+ }
+ }
+ } else if (nota) {
+ if (conjb) {
+
+/* Form C := alpha*A*conjg( B' ) + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L160: */
+ }
+ } else if ((beta->r != 1.f) || (beta->i != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L170: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * b_dim1;
+ if ((b[i__3].r != 0.f) || (b[i__3].i != 0.f)) {
+ r_cnjg(&q__2, &b[j + l * b_dim1]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i,
+ q__1.i = alpha->r * q__2.i + alpha->i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ q__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+ .i + q__2.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ } else {
+
+/* Form C := alpha*A*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L210: */
+ }
+ } else if ((beta->r != 1.f) || (beta->i != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L220: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * b_dim1;
+ if ((b[i__3].r != 0.f) || (b[i__3].i != 0.f)) {
+ i__3 = j + l * b_dim1;
+ q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ q__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ q__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+ .i + q__2.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+/* L250: */
+ }
+ }
+ } else if (conja) {
+ if (conjb) {
+
+/* Form C := alpha*conjg( A' )*conjg( B' ) + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0.f, temp.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+ r_cnjg(&q__4, &b[j + l * b_dim1]);
+ q__2.r = q__3.r * q__4.r - q__3.i * q__4.i, q__2.i =
+ q__3.r * q__4.i + q__3.i * q__4.r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L260: */
+ }
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+/* L270: */
+ }
+/* L280: */
+ }
+ } else {
+
+/* Form C := alpha*conjg( A' )*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0.f, temp.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+ i__4 = j + l * b_dim1;
+ q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4].i,
+ q__2.i = q__3.r * b[i__4].i + q__3.i * b[i__4]
+ .r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L290: */
+ }
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+/* L300: */
+ }
+/* L310: */
+ }
+ }
+ } else {
+ if (conjb) {
+
+/* Form C := alpha*A'*conjg( B' ) + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0.f, temp.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ r_cnjg(&q__3, &b[j + l * b_dim1]);
+ q__2.r = a[i__4].r * q__3.r - a[i__4].i * q__3.i,
+ q__2.i = a[i__4].r * q__3.i + a[i__4].i *
+ q__3.r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L320: */
+ }
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+/* L330: */
+ }
+/* L340: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0.f, temp.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = j + l * b_dim1;
+ q__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+ .i, q__2.i = a[i__4].r * b[i__5].i + a[i__4]
+ .i * b[i__5].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L350: */
+ }
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, q__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+/* L360: */
+ }
+/* L370: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CGEMM . */
+
+} /* cgemm_ */
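+
+/*
+   Illustrative usage sketch (editor's addition, not part of the f2c
+   translation; excluded from the build with #if 0). It assumes the
+   f2c `integer' and `complex' typedefs used throughout this file and
+   Fortran column-major storage.
+*/
+#if 0
+static void cgemm_example(void)
+{
+    /* C := 1*A*B + 0*C with A the 2x2 identity, so C ends up equal
+       to B. */
+    static complex a[4] = {{1.f,0.f}, {0.f,0.f}, {0.f,0.f}, {1.f,0.f}};
+    static complex b[4] = {{1.f,2.f}, {3.f,4.f}, {5.f,6.f}, {7.f,8.f}};
+    complex c[4];
+    complex one = {1.f,0.f}, zero = {0.f,0.f};
+    integer two = 2;
+
+    cgemm_("N", "N", &two, &two, &two, &one, a, &two, b, &two, &zero,
+            c, &two);
+    /* c[0..3] now holds a copy of b[0..3]. */
+}
+#endif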
+
+/* Subroutine */ int cgemv_(char *trans, integer *m, integer *n, complex *
+ alpha, complex *a, integer *lda, complex *x, integer *incx, complex *
+ beta, complex *y, integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static complex temp;
+ static integer lenx, leny;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj;
+
+
+/*
+ Purpose
+ =======
+
+ CGEMV performs one of the matrix-vector operations
+
+ y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, or
+
+ y := alpha*conjg( A' )*x + beta*y,
+
+ where alpha and beta are scalars, x and y are vectors and A is an
+ m by n matrix.
+
+ Parameters
+ ==========
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' y := alpha*A*x + beta*y.
+
+ TRANS = 'T' or 't' y := alpha*A'*x + beta*y.
+
+ TRANS = 'C' or 'c' y := alpha*conjg( A' )*x + beta*y.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix A.
+ M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, n ).
+ Before entry, the leading m by n part of the array A must
+ contain the matrix of coefficients.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+ X - COMPLEX array of DIMENSION at least
+ ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'
+ and at least
+ ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.
+ Before entry, the incremented array X must contain the
+ vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ BETA - COMPLEX .
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then Y need not be set on input.
+ Unchanged on exit.
+
+ Y - COMPLEX array of DIMENSION at least
+ ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'
+ and at least
+ ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.
+ Before entry with BETA non-zero, the incremented array Y
+ must contain the vector y. On exit, Y is overwritten by the
+ updated vector y.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+ ) {
+ info = 1;
+ } else if (*m < 0) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*lda < max(1,*m)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ } else if (*incy == 0) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("CGEMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (alpha->r == 0.f && alpha->i == 0.f && (
+ beta->r == 1.f && beta->i == 0.f))) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+
+/*
+ Set LENX and LENY, the lengths of the vectors x and y, and set
+ up the start points in X and Y.
+*/
+
+ if (lsame_(trans, "N")) {
+ lenx = *n;
+ leny = *m;
+ } else {
+ lenx = *m;
+ leny = *n;
+ }
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (lenx - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (leny - 1) * *incy;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+
+ First form y := beta*y.
+*/
+
+ if ((beta->r != 1.f) || (beta->i != 0.f)) {
+ if (*incy == 1) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+/* L10: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ return 0;
+ }
+ if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+ jx = kx;
+ if (*incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if ((x[i__2].r != 0.f) || (x[i__2].i != 0.f)) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i +
+ q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+/* L50: */
+ }
+ }
+ jx += *incx;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if ((x[i__2].r != 0.f) || (x[i__2].i != 0.f)) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ iy = ky;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i +
+ q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ iy += *incy;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y := alpha*A'*x + y or y := alpha*conjg( A' )*x + y. */
+
+ jy = ky;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = 0.f, temp.i = 0.f;
+ if (noconj) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+ .i, q__2.i = a[i__3].r * x[i__4].i + a[i__3]
+ .i * x[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L90: */
+ }
+ } else {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[i__3]
+ .r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+ }
+ }
+ i__2 = jy;
+ i__3 = jy;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i, q__2.i =
+ alpha->r * temp.i + alpha->i * temp.r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ jy += *incy;
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = 0.f, temp.i = 0.f;
+ ix = kx;
+ if (noconj) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+ .i, q__2.i = a[i__3].r * x[i__4].i + a[i__3]
+ .i * x[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ } else {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[i__3]
+ .r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ }
+ i__2 = jy;
+ i__3 = jy;
+ q__2.r = alpha->r * temp.r - alpha->i * temp.i, q__2.i =
+ alpha->r * temp.i + alpha->i * temp.r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ jy += *incy;
+/* L140: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CGEMV . */
+
+} /* cgemv_ */
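+
+/*
+   Illustrative usage sketch (editor's addition, #if 0'd out of the
+   build): y := alpha*A*x + beta*y with TRANS = 'N'; same typedef
+   assumptions as the cgemm_ sketch above.
+*/
+#if 0
+static void cgemv_example(void)
+{
+    /* Column-major A = [[0,-1],[1,0]], a 90-degree rotation. */
+    static complex a[4] = {{0.f,0.f}, {1.f,0.f}, {-1.f,0.f}, {0.f,0.f}};
+    static complex x[2] = {{1.f,0.f}, {0.f,1.f}};   /* x = (1, i) */
+    complex y[2];
+    complex one = {1.f,0.f}, zero = {0.f,0.f};
+    integer n2 = 2, inc1 = 1;
+
+    cgemv_("N", &n2, &n2, &one, a, &n2, x, &inc1, &zero, y, &inc1);
+    /* y == (-i, 1), i.e. y[0] = {0,-1}, y[1] = {1,0}. */
+}
+#endif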
+
+/* Subroutine */ int cgerc_(integer *m, integer *n, complex *alpha, complex *
+ x, integer *incx, complex *y, integer *incy, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, ix, jy, kx, info;
+ static complex temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ CGERC performs the rank 1 operation
+
+ A := alpha*x*conjg( y' ) + A,
+
+ where alpha is a scalar, x is an m element vector, y is an n element
+ vector and A is an m by n matrix.
+
+ Parameters
+ ==========
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix A.
+ M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ X - COMPLEX array of dimension at least
+ ( 1 + ( m - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the m
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ Y - COMPLEX array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y.
+ Unchanged on exit.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, n ).
+ Before entry, the leading m by n part of the array A must
+ contain the matrix of coefficients. On exit, A is
+ overwritten by the updated matrix.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (*m < 0) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("CGERC ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (alpha->r == 0.f && alpha->i == 0.f)) {
+ return 0;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (*incy > 0) {
+ jy = 1;
+ } else {
+ jy = 1 - (*n - 1) * *incy;
+ }
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if ((y[i__2].r != 0.f) || (y[i__2].i != 0.f)) {
+ r_cnjg(&q__2, &y[jy]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, q__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i + q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+ }
+ jy += *incy;
+/* L20: */
+ }
+ } else {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*m - 1) * *incx;
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if ((y[i__2].r != 0.f) || (y[i__2].i != 0.f)) {
+ r_cnjg(&q__2, &y[jy]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix = kx;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, q__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i + q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ }
+ jy += *incy;
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of CGERC . */
+
+} /* cgerc_ */
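+
+/*
+   Illustrative usage sketch (editor's addition, #if 0'd out of the
+   build): the rank 1 update with y = x builds the hermitian outer
+   product x*conjg( x' ).
+*/
+#if 0
+static void cgerc_example(void)
+{
+    static complex x[2] = {{1.f,0.f}, {0.f,1.f}};   /* x = (1, i) */
+    complex a[4] = {{0.f,0.f}, {0.f,0.f}, {0.f,0.f}, {0.f,0.f}};
+    complex one = {1.f,0.f};
+    integer n2 = 2, inc1 = 1;
+
+    cgerc_(&n2, &n2, &one, x, &inc1, x, &inc1, a, &n2);
+    /* a (column-major) == [[1, -i], [i, 1]]:
+       a[0]={1,0}, a[1]={0,1}, a[2]={0,-1}, a[3]={1,0}. */
+}
+#endif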
+
+/* Subroutine */ int cgeru_(integer *m, integer *n, complex *alpha, complex *
+ x, integer *incx, complex *y, integer *incy, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2;
+
+ /* Local variables */
+ static integer i__, j, ix, jy, kx, info;
+ static complex temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ CGERU performs the rank 1 operation
+
+ A := alpha*x*y' + A,
+
+ where alpha is a scalar, x is an m element vector, y is an n element
+ vector and A is an m by n matrix.
+
+ Parameters
+ ==========
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix A.
+ M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ X - COMPLEX array of dimension at least
+ ( 1 + ( m - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the m
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ Y - COMPLEX array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y.
+ Unchanged on exit.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, n ).
+ Before entry, the leading m by n part of the array A must
+ contain the matrix of coefficients. On exit, A is
+ overwritten by the updated matrix.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (*m < 0) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("CGERU ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (alpha->r == 0.f && alpha->i == 0.f)) {
+ return 0;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (*incy > 0) {
+ jy = 1;
+ } else {
+ jy = 1 - (*n - 1) * *incy;
+ }
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if ((y[i__2].r != 0.f) || (y[i__2].i != 0.f)) {
+ i__2 = jy;
+ q__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, q__1.i =
+ alpha->r * y[i__2].i + alpha->i * y[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, q__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i + q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+ }
+ jy += *incy;
+/* L20: */
+ }
+ } else {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*m - 1) * *incx;
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if ((y[i__2].r != 0.f) || (y[i__2].i != 0.f)) {
+ i__2 = jy;
+ q__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, q__1.i =
+ alpha->r * y[i__2].i + alpha->i * y[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix = kx;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, q__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ q__1.r = a[i__4].r + q__2.r, q__1.i = a[i__4].i + q__2.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ }
+ jy += *incy;
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of CGERU . */
+
+} /* cgeru_ */
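+
+/*
+   Editor's note: cgeru_ is the unconjugated counterpart of cgerc_
+   above; the only difference is y' versus conjg( y' ). For the 1x1
+   case with alpha = 1, x = (i), y = (i):
+       cgeru_ adds x*y'          = i*i        = -1,
+       cgerc_ adds x*conjg( y' ) = i*conjg(i) =  1.
+*/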
+
+/* Subroutine */ int chemv_(char *uplo, integer *n, complex *alpha, complex *
+ a, integer *lda, complex *x, integer *incx, complex *beta, complex *y,
+ integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static complex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ CHEMV performs the matrix-vector operation
+
+ y := alpha*A*x + beta*y,
+
+ where alpha and beta are scalars, x and y are n element vectors and
+ A is an n by n hermitian matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array A is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of A
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of A
+ is to be referenced.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular part of the hermitian matrix and the strictly
+ lower triangular part of A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular part of the hermitian matrix and the strictly
+ upper triangular part of A is not referenced.
+ Note that the imaginary parts of the diagonal elements need
+ not be set and are assumed to be zero.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - COMPLEX array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ BETA - COMPLEX .
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then Y need not be set on input.
+ Unchanged on exit.
+
+ Y - COMPLEX array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y. On exit, Y is overwritten by the updated
+ vector y.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*lda < max(1,*n)) {
+ info = 5;
+ } else if (*incx == 0) {
+ info = 7;
+ } else if (*incy == 0) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("CHEMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (alpha->r == 0.f && alpha->i == 0.f && (beta->r == 1.f &&
+ beta->i == 0.f))) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through the triangular part
+ of A.
+
+ First form y := beta*y.
+*/
+
+ if ((beta->r != 1.f) || (beta->i != 0.f)) {
+ if (*incy == 1) {
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0.f && beta->i == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0.f, y[i__2].i = 0.f;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ q__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ return 0;
+ }
+ if (lsame_(uplo, "U")) {
+
+/* Form y when A is stored in upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+ q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L50: */
+ }
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ r__1 = a[i__4].r;
+ q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i;
+ q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i;
+ q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ ix = kx;
+ iy = ky;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+ q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = j + j * a_dim1;
+ r__1 = a[i__4].r;
+ q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i;
+ q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i;
+ q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when A is stored in lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ r__1 = a[i__4].r;
+ q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+ q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L90: */
+ }
+ i__2 = j;
+ i__3 = j;
+ q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = j + j * a_dim1;
+ r__1 = a[i__4].r;
+ q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i;
+ y[i__3].r = q__1.r, y[i__3].i = q__1.i;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i =
+ q__3.r * x[i__3].i + q__3.i * x[i__3].r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L110: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i;
+ y[i__2].r = q__1.r, y[i__2].i = q__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CHEMV . */
+
+} /* chemv_ */
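+
+/*
+   Illustrative usage sketch (editor's addition, #if 0'd out of the
+   build). With UPLO = "U" only the upper triangle of A is read; the
+   strictly lower triangle and the imaginary parts of the diagonal
+   may hold anything.
+*/
+#if 0
+static void chemv_example(void)
+{
+    /* Hermitian A = [[2, i], [-i, 3]], stored column-major with only
+       the upper triangle supplied; the {99,99} entry below sits in
+       the strictly lower triangle and is never referenced. */
+    static complex a[4] = {{2.f,0.f}, {99.f,99.f},
+                           {0.f,1.f}, {3.f,0.f}};
+    static complex x[2] = {{1.f,0.f}, {1.f,0.f}};
+    complex y[2];
+    complex one = {1.f,0.f}, zero = {0.f,0.f};
+    integer n2 = 2, inc1 = 1;
+
+    chemv_("U", &n2, &one, a, &n2, x, &inc1, &zero, y, &inc1);
+    /* y == (2+i, 3-i). */
+}
+#endif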
+
+/* Subroutine */ int cher2_(char *uplo, integer *n, complex *alpha, complex *
+ x, integer *incx, complex *y, integer *incy, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static complex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ CHER2 performs the hermitian rank 2 operation
+
+ A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A,
+
+ where alpha is a scalar, x and y are n element vectors and A is an n
+ by n hermitian matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array A is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of A
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of A
+ is to be referenced.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ X - COMPLEX array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ Y - COMPLEX array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y.
+ Unchanged on exit.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular part of the hermitian matrix and the strictly
+ lower triangular part of A is not referenced. On exit, the
+ upper triangular part of the array A is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular part of the hermitian matrix and the strictly
+ upper triangular part of A is not referenced. On exit, the
+ lower triangular part of the array A is overwritten by the
+ lower triangular part of the updated matrix.
+ Note that the imaginary parts of the diagonal elements need
+ not be set, they are assumed to be zero, and on exit they
+ are set to zero.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*n)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("CHER2 ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (alpha->r == 0.f && alpha->i == 0.f)) {
+ return 0;
+ }
+
+/*
+ Set up the start points in X and Y if the increments are not both
+ unity.
+*/
+
+ if ((*incx != 1) || (*incy != 1)) {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+ jx = kx;
+ jy = ky;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through the triangular part
+ of A.
+*/
+
+ if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in the upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (((x[i__2].r != 0.f) || (x[i__2].i != 0.f)) || (((y[i__3]
+ .r != 0.f) || (y[i__3].i != 0.f)))) {
+ r_cnjg(&q__2, &y[j]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = j;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = a[i__4].r + q__3.r, q__2.i = a[i__4].i +
+ q__3.i;
+ i__6 = i__;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (((x[i__2].r != 0.f) || (x[i__2].i != 0.f)) || (((y[i__3]
+ .r != 0.f) || (y[i__3].i != 0.f)))) {
+ r_cnjg(&q__2, &y[jy]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = jx;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ ix = kx;
+ iy = ky;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = a[i__4].r + q__3.r, q__2.i = a[i__4].i +
+ q__3.i;
+ i__6 = iy;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L30: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+ jx += *incx;
+ jy += *incy;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when A is stored in the lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (((x[i__2].r != 0.f) || (x[i__2].i != 0.f)) || (((y[i__3]
+ .r != 0.f) || (y[i__3].i != 0.f)))) {
+ r_cnjg(&q__2, &y[j]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = j;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = a[i__4].r + q__3.r, q__2.i = a[i__4].i +
+ q__3.i;
+ i__6 = i__;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L50: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (((x[i__2].r != 0.f) || (x[i__2].i != 0.f)) || (((y[i__3]
+ .r != 0.f) || (y[i__3].i != 0.f)))) {
+ r_cnjg(&q__2, &y[jy]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i, q__1.i =
+ alpha->r * q__2.i + alpha->i * q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__2 = jx;
+ q__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ q__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ q__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ q__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ q__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ q__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = a[i__3].r + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ q__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ q__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ q__2.r = a[i__4].r + q__3.r, q__2.i = a[i__4].i +
+ q__3.i;
+ i__6 = iy;
+ q__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ q__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L70: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CHER2 . */
+
+} /* cher2_ */
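+
+/*
+   Illustrative usage sketch (editor's addition, #if 0'd out of the
+   build): in the 1x1 case the hermitian rank 2 update reduces to
+   a := 2*Re( alpha*x*conjg(y) ) + a, which is why the diagonal
+   stays real.
+*/
+#if 0
+static void cher2_example(void)
+{
+    complex a[1] = {{5.f, 0.f}};
+    static complex x[1] = {{1.f, 0.f}};            /* x = (1) */
+    static complex y[1] = {{0.f, 1.f}};            /* y = (i) */
+    complex alpha = {0.f, 1.f};                    /* alpha = i */
+    integer n1 = 1, inc1 = 1;
+
+    cher2_("U", &n1, &alpha, x, &inc1, y, &inc1, a, &n1);
+    /* Update: i*1*(-i) + (-i)*i*1 = 1 + 1 = 2, so a[0] == {7.f, 0.f}. */
+}
+#endif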
+
+/* Subroutine */ int cher2k_(char *uplo, char *trans, integer *n, integer *k,
+ complex *alpha, complex *a, integer *lda, complex *b, integer *ldb,
+ real *beta, complex *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6, i__7;
+ real r__1;
+ complex q__1, q__2, q__3, q__4, q__5, q__6;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static complex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ CHER2K performs one of the hermitian rank 2k operations
+
+ C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) + beta*C,
+
+ or
+
+ C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A + beta*C,
+
+ where alpha and beta are scalars with beta real, C is an n by n
+ hermitian matrix and A and B are n by k matrices in the first case
+ and k by n matrices in the second case.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array C is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of C
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of C
+ is to be referenced.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' C := alpha*A*conjg( B' ) +
+ conjg( alpha )*B*conjg( A' ) +
+ beta*C.
+
+ TRANS = 'C' or 'c' C := alpha*conjg( A' )*B +
+ conjg( alpha )*conjg( B' )*A +
+ beta*C.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry with TRANS = 'N' or 'n', K specifies the number
+ of columns of the matrices A and B, and on entry with
+ TRANS = 'C' or 'c', K specifies the number of rows of the
+ matrices A and B. K must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by n part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDA must be at least max( 1, n ), otherwise LDA must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ B - COMPLEX array of DIMENSION ( LDB, kb ), where kb is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array B must contain the matrix B, otherwise
+ the leading k by n part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDB must be at least max( 1, n ), otherwise LDB must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ BETA - REAL .
+ On entry, BETA specifies the scalar beta.
+ Unchanged on exit.
+
+ C - COMPLEX array of DIMENSION ( LDC, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array C must contain the upper
+ triangular part of the hermitian matrix and the strictly
+ lower triangular part of C is not referenced. On exit, the
+ upper triangular part of the array C is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array C must contain the lower
+ triangular part of the hermitian matrix and the strictly
+ upper triangular part of C is not referenced. On exit, the
+ lower triangular part of the array C is overwritten by the
+ lower triangular part of the updated matrix.
+ Note that the imaginary parts of the diagonal elements need
+ not be set, they are assumed to be zero, and on exit they
+ are set to zero.
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+ -- Modified 8-Nov-93 to set C(J,J) to REAL( C(J,J) ) when BETA = 1.
+ Ed Anderson, Cray Research Inc.
+
+
+ Test the input parameters.
+*/
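+
+/*
+   Editor's note: BETA is REAL here (unlike the COMPLEX beta of
+   cgemm_) because both rank 2k forms produce a hermitian update --
+   e.g. for n = k = 1 the first form adds
+       alpha*a*conjg(b) + conjg(alpha)*b*conjg(a)
+         = 2*Re( alpha*a*conjg(b) ),
+   which is real, and only a real beta keeps beta*C hermitian.
+*/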
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldc < max(1,*n)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("CHER2K", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (((alpha->r == 0.f && alpha->i == 0.f) || (*k == 0)) && *
+ beta == 1.f)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ if (upper) {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L30: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/*
+ Form C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) +
+ C.
+*/
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L90: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L100: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (((a[i__3].r != 0.f) || (a[i__3].i != 0.f)) || (((b[
+ i__4].r != 0.f) || (b[i__4].i != 0.f)))) {
+ r_cnjg(&q__2, &b[j + l * b_dim1]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i,
+ q__1.i = alpha->r * q__2.i + alpha->i *
+ q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__3 = j + l * a_dim1;
+ q__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ q__2.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ q__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, q__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ q__2.r = c__[i__5].r + q__3.r, q__2.i = c__[i__5]
+ .i + q__3.i;
+ i__7 = i__ + l * b_dim1;
+ q__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, q__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i +
+ q__4.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L110: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ q__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
+ q__2.i = a[i__5].r * temp1.i + a[i__5].i *
+ temp1.r;
+ i__6 = j + l * b_dim1;
+ q__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
+ q__3.i = b[i__6].r * temp2.i + b[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = c__[i__4].r + q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L140: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L150: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (((a[i__3].r != 0.f) || (a[i__3].i != 0.f)) || (((b[
+ i__4].r != 0.f) || (b[i__4].i != 0.f)))) {
+ r_cnjg(&q__2, &b[j + l * b_dim1]);
+ q__1.r = alpha->r * q__2.r - alpha->i * q__2.i,
+ q__1.i = alpha->r * q__2.i + alpha->i *
+ q__2.r;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ i__3 = j + l * a_dim1;
+ q__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ q__2.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ r_cnjg(&q__1, &q__2);
+ temp2.r = q__1.r, temp2.i = q__1.i;
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ q__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, q__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ q__2.r = c__[i__5].r + q__3.r, q__2.i = c__[i__5]
+ .i + q__3.i;
+ i__7 = i__ + l * b_dim1;
+ q__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, q__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i +
+ q__4.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L160: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ q__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
+ q__2.i = a[i__5].r * temp1.i + a[i__5].i *
+ temp1.r;
+ i__6 = j + l * b_dim1;
+ q__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
+ q__3.i = b[i__6].r * temp2.i + b[i__6].i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ r__1 = c__[i__4].r + q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/*
+ Form C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A +
+ C.
+*/
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp1.r = 0.f, temp1.i = 0.f;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4].i,
+ q__2.i = q__3.r * b[i__4].i + q__3.i * b[i__4]
+ .r;
+ q__1.r = temp1.r + q__2.r, q__1.i = temp1.i + q__2.i;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ r_cnjg(&q__3, &b[l + i__ * b_dim1]);
+ i__4 = l + j * a_dim1;
+ q__2.r = q__3.r * a[i__4].r - q__3.i * a[i__4].i,
+ q__2.i = q__3.r * a[i__4].i + q__3.i * a[i__4]
+ .r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L190: */
+ }
+ if (i__ == j) {
+ if (*beta == 0.f) {
+ i__3 = j + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ r__1 = q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ } else {
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ r__1 = *beta * c__[i__4].r + q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ }
+ } else {
+ if (*beta == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = *beta * c__[i__4].r, q__3.i = *beta *
+ c__[i__4].i;
+ q__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ q__2.r = q__3.r + q__4.r, q__2.i = q__3.i +
+ q__4.i;
+ r_cnjg(&q__6, alpha);
+ q__5.r = q__6.r * temp2.r - q__6.i * temp2.i,
+ q__5.i = q__6.r * temp2.i + q__6.i *
+ temp2.r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i +
+ q__5.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp1.r = 0.f, temp1.i = 0.f;
+ temp2.r = 0.f, temp2.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4].i,
+ q__2.i = q__3.r * b[i__4].i + q__3.i * b[i__4]
+ .r;
+ q__1.r = temp1.r + q__2.r, q__1.i = temp1.i + q__2.i;
+ temp1.r = q__1.r, temp1.i = q__1.i;
+ r_cnjg(&q__3, &b[l + i__ * b_dim1]);
+ i__4 = l + j * a_dim1;
+ q__2.r = q__3.r * a[i__4].r - q__3.i * a[i__4].i,
+ q__2.i = q__3.r * a[i__4].i + q__3.i * a[i__4]
+ .r;
+ q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i;
+ temp2.r = q__1.r, temp2.i = q__1.i;
+/* L220: */
+ }
+ if (i__ == j) {
+ if (*beta == 0.f) {
+ i__3 = j + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ r__1 = q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ } else {
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ r__1 = *beta * c__[i__4].r + q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ }
+ } else {
+ if (*beta == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ r_cnjg(&q__4, alpha);
+ q__3.r = q__4.r * temp2.r - q__4.i * temp2.i,
+ q__3.i = q__4.r * temp2.i + q__4.i *
+ temp2.r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = *beta * c__[i__4].r, q__3.i = *beta *
+ c__[i__4].i;
+ q__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ q__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ q__2.r = q__3.r + q__4.r, q__2.i = q__3.i +
+ q__4.i;
+ r_cnjg(&q__6, alpha);
+ q__5.r = q__6.r * temp2.r - q__6.i * temp2.i,
+ q__5.i = q__6.r * temp2.i + q__6.i *
+ temp2.r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i +
+ q__5.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CHER2K. */
+
+} /* cher2k_ */
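+
+/*
+   Illustrative sketch (added commentary, not part of the f2c output):
+   one way a caller might drive cher2k_ above, assuming the reference
+   BLAS argument order CHER2K( UPLO, TRANS, N, K, ALPHA, A, LDA, B,
+   LDB, BETA, C, LDC ) -- the prototype itself appears earlier in the
+   file -- and the f2c typedefs (integer, real, complex) this file
+   already uses.  Guarded by #if 0 so it is never compiled.
+*/
+#if 0
+static void cher2k_usage_sketch(void)
+{
+    complex alpha = {1.f, 0.f};
+    real beta = 0.f;
+    complex a[2] = {{1.f, 1.f}, {2.f, -1.f}};   /* A: 2x1, column-major */
+    complex b[2] = {{0.f, 1.f}, {1.f, 0.f}};    /* B: 2x1               */
+    complex c[4];                               /* C: 2x2 hermitian     */
+    integer n = 2, k = 1, lda = 2, ldb = 2, ldc = 2;
+
+    /* With "U", only the upper triangle of c is referenced/updated;
+       beta == 0, so c need not be set on entry. */
+    cher2k_("U", "N", &n, &k, &alpha, a, &lda, b, &ldb, &beta, c, &ldc);
+}
+#endif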
+
+/* Subroutine */ int cherk_(char *uplo, char *trans, integer *n, integer *k,
+ real *alpha, complex *a, integer *lda, real *beta, complex *c__,
+ integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ real r__1;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static complex temp;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static real rtemp;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ CHERK performs one of the hermitian rank k operations
+
+ C := alpha*A*conjg( A' ) + beta*C,
+
+ or
+
+ C := alpha*conjg( A' )*A + beta*C,
+
+ where alpha and beta are real scalars, C is an n by n hermitian
+ matrix and A is an n by k matrix in the first case and a k by n
+ matrix in the second case.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array C is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of C
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of C
+ is to be referenced.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' C := alpha*A*conjg( A' ) + beta*C.
+
+ TRANS = 'C' or 'c' C := alpha*conjg( A' )*A + beta*C.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry with TRANS = 'N' or 'n', K specifies the number
+ of columns of the matrix A, and on entry with
+ TRANS = 'C' or 'c', K specifies the number of rows of the
+ matrix A. K must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - REAL .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by n part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDA must be at least max( 1, n ), otherwise LDA must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ BETA - REAL .
+ On entry, BETA specifies the scalar beta.
+ Unchanged on exit.
+
+ C - COMPLEX array of DIMENSION ( LDC, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array C must contain the upper
+ triangular part of the hermitian matrix and the strictly
+ lower triangular part of C is not referenced. On exit, the
+ upper triangular part of the array C is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array C must contain the lower
+ triangular part of the hermitian matrix and the strictly
+ upper triangular part of C is not referenced. On exit, the
+ lower triangular part of the array C is overwritten by the
+ lower triangular part of the updated matrix.
+           Note that the imaginary parts of the diagonal elements need
+           not be set; they are assumed to be zero, and on exit they
+           are set to zero.
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+ -- Modified 8-Nov-93 to set C(J,J) to REAL( C(J,J) ) when BETA = 1.
+ Ed Anderson, Cray Research Inc.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
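+
+/*
+   Added commentary: the adjustments above emulate Fortran's 1-based,
+   column-major indexing in C.  After "a -= a_offset" with
+   a_offset = 1 + a_dim1, the Fortran element A(i,j) is addressed as
+   a[i + j * a_dim1], which is why every access below first builds an
+   index such as "i__ + j * c_dim1".  The same stanza opens each
+   routine in this file.
+*/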
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldc < max(1,*n)) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("CHERK ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (((*alpha == 0.f) || (*k == 0)) && *beta == 1.f)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.f) {
+ if (upper) {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L30: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*conjg( A' ) + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L90: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L100: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ if ((a[i__3].r != 0.f) || (a[i__3].i != 0.f)) {
+ r_cnjg(&q__2, &a[j + l * a_dim1]);
+ q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ q__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+ .i + q__2.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L110: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = i__ + l * a_dim1;
+ q__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ r__1 = c__[i__4].r + q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0.f, c__[i__3].i = 0.f;
+/* L140: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ q__1.r = *beta * c__[i__4].r, q__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L150: */
+ }
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ if ((a[i__3].r != 0.f) || (a[i__3].i != 0.f)) {
+ r_cnjg(&q__2, &a[j + l * a_dim1]);
+ q__1.r = *alpha * q__2.r, q__1.i = *alpha * q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ q__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ r__1 = c__[i__4].r + q__1.r;
+ c__[i__3].r = r__1, c__[i__3].i = 0.f;
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ q__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ q__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ q__1.r = c__[i__5].r + q__2.r, q__1.i = c__[i__5]
+ .i + q__2.i;
+ c__[i__4].r = q__1.r, c__[i__4].i = q__1.i;
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*conjg( A' )*A + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0.f, temp.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * a_dim1;
+ q__2.r = q__3.r * a[i__4].r - q__3.i * a[i__4].i,
+ q__2.i = q__3.r * a[i__4].i + q__3.i * a[i__4]
+ .r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L190: */
+ }
+ if (*beta == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__1.r = *alpha * temp.r, q__1.i = *alpha * temp.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = *alpha * temp.r, q__2.i = *alpha * temp.i;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = *beta * c__[i__4].r, q__3.i = *beta * c__[
+ i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+/* L200: */
+ }
+ rtemp = 0.f;
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ r_cnjg(&q__3, &a[l + j * a_dim1]);
+ i__3 = l + j * a_dim1;
+ q__2.r = q__3.r * a[i__3].r - q__3.i * a[i__3].i, q__2.i =
+ q__3.r * a[i__3].i + q__3.i * a[i__3].r;
+ q__1.r = rtemp + q__2.r, q__1.i = q__2.i;
+ rtemp = q__1.r;
+/* L210: */
+ }
+ if (*beta == 0.f) {
+ i__2 = j + j * c_dim1;
+ r__1 = *alpha * rtemp;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *alpha * rtemp + *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ }
+/* L220: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ rtemp = 0.f;
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ r_cnjg(&q__3, &a[l + j * a_dim1]);
+ i__3 = l + j * a_dim1;
+ q__2.r = q__3.r * a[i__3].r - q__3.i * a[i__3].i, q__2.i =
+ q__3.r * a[i__3].i + q__3.i * a[i__3].r;
+ q__1.r = rtemp + q__2.r, q__1.i = q__2.i;
+ rtemp = q__1.r;
+/* L230: */
+ }
+ if (*beta == 0.f) {
+ i__2 = j + j * c_dim1;
+ r__1 = *alpha * rtemp;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ r__1 = *alpha * rtemp + *beta * c__[i__3].r;
+ c__[i__2].r = r__1, c__[i__2].i = 0.f;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ temp.r = 0.f, temp.i = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ r_cnjg(&q__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * a_dim1;
+ q__2.r = q__3.r * a[i__4].r - q__3.i * a[i__4].i,
+ q__2.i = q__3.r * a[i__4].i + q__3.i * a[i__4]
+ .r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i + q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L240: */
+ }
+ if (*beta == 0.f) {
+ i__3 = i__ + j * c_dim1;
+ q__1.r = *alpha * temp.r, q__1.i = *alpha * temp.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ q__2.r = *alpha * temp.r, q__2.i = *alpha * temp.i;
+ i__4 = i__ + j * c_dim1;
+ q__3.r = *beta * c__[i__4].r, q__3.i = *beta * c__[
+ i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+ }
+/* L250: */
+ }
+/* L260: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CHERK . */
+
+} /* cherk_ */
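+
+/*
+   Illustrative sketch (added commentary, not part of the f2c output):
+   a minimal call to cherk_ above, forming C := alpha*A*conjg( A' )
+   for a 2x1 column A, using the f2c typedefs this file already uses.
+   Guarded by #if 0 so it is never compiled.
+*/
+#if 0
+static void cherk_usage_sketch(void)
+{
+    complex a[2] = {{3.f, 4.f}, {1.f, -2.f}};   /* A: 2x1, column-major */
+    complex c[4];                               /* C: 2x2 result        */
+    integer n = 2, k = 1, lda = 2, ldc = 2;
+    real alpha = 1.f, beta = 0.f;               /* real scalars         */
+
+    /* beta == 0, so c need not be set on entry. */
+    cherk_("U", "N", &n, &k, &alpha, a, &lda, &beta, c, &ldc);
+    /* The upper triangle now holds C(1,1) = 25, C(1,2) = -5 + 10i,
+       C(2,2) = 5; the diagonal imaginary parts are set to zero, as
+       the documentation above promises. */
+}
+#endif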
+
+/* Subroutine */ int cscal_(integer *n, complex *ca, complex *cx, integer *
+ incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__, nincx;
+
+
+/*
+ scales a vector by a constant.
+ jack dongarra, linpack, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --cx;
+
+ /* Function Body */
+ if ((*n <= 0) || (*incx <= 0)) {
+ return 0;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ nincx = *n * *incx;
+ i__1 = nincx;
+ i__2 = *incx;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ i__3 = i__;
+ i__4 = i__;
+ q__1.r = ca->r * cx[i__4].r - ca->i * cx[i__4].i, q__1.i = ca->r * cx[
+ i__4].i + ca->i * cx[i__4].r;
+ cx[i__3].r = q__1.r, cx[i__3].i = q__1.i;
+/* L10: */
+ }
+ return 0;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__1 = i__;
+ i__3 = i__;
+ q__1.r = ca->r * cx[i__3].r - ca->i * cx[i__3].i, q__1.i = ca->r * cx[
+ i__3].i + ca->i * cx[i__3].r;
+ cx[i__1].r = q__1.r, cx[i__1].i = q__1.i;
+/* L30: */
+ }
+ return 0;
+} /* cscal_ */
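+
+/*
+   Illustrative sketch (added commentary): scaling a complex vector in
+   place with cscal_ above; storage is contiguous, so incx = 1.
+   Guarded by #if 0 so it is never compiled.
+*/
+#if 0
+static void cscal_usage_sketch(void)
+{
+    complex ca = {0.f, 1.f};    /* multiply every element by i */
+    complex cx[3] = {{1.f, 0.f}, {0.f, 1.f}, {2.f, 2.f}};
+    integer n = 3, incx = 1;
+
+    cscal_(&n, &ca, cx, &incx); /* cx[0] becomes 0 + 1i, etc. */
+}
+#endif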
+
+/* Subroutine */ int csscal_(integer *n, real *sa, complex *cx, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ real r__1, r__2;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ static integer i__, nincx;
+
+
+/*
+ scales a complex vector by a real constant.
+ jack dongarra, linpack, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --cx;
+
+ /* Function Body */
+ if ((*n <= 0) || (*incx <= 0)) {
+ return 0;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ nincx = *n * *incx;
+ i__1 = nincx;
+ i__2 = *incx;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ i__3 = i__;
+ i__4 = i__;
+ r__1 = *sa * cx[i__4].r;
+ r__2 = *sa * r_imag(&cx[i__]);
+ q__1.r = r__1, q__1.i = r__2;
+ cx[i__3].r = q__1.r, cx[i__3].i = q__1.i;
+/* L10: */
+ }
+ return 0;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__1 = i__;
+ i__3 = i__;
+ r__1 = *sa * cx[i__3].r;
+ r__2 = *sa * r_imag(&cx[i__]);
+ q__1.r = r__1, q__1.i = r__2;
+ cx[i__1].r = q__1.r, cx[i__1].i = q__1.i;
+/* L30: */
+ }
+ return 0;
+} /* csscal_ */
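+
+/*
+   Illustrative sketch (added commentary): csscal_ above is the
+   real-scalar counterpart of cscal_; *sa multiplies both the real
+   and imaginary parts of each element.  Guarded by #if 0.
+*/
+#if 0
+static void csscal_usage_sketch(void)
+{
+    real sa = 0.5f;
+    complex cx[2] = {{4.f, -2.f}, {6.f, 8.f}};
+    integer n = 2, incx = 1;
+
+    csscal_(&n, &sa, cx, &incx);    /* cx[0] becomes 2 - 1i */
+}
+#endif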
+
+/* Subroutine */ int cswap_(integer *n, complex *cx, integer *incx, complex *
+ cy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, ix, iy;
+ static complex ctemp;
+
+
+/*
+ interchanges two vectors.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments not equal
+ to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ ctemp.r = cx[i__2].r, ctemp.i = cx[i__2].i;
+ i__2 = ix;
+ i__3 = iy;
+ cx[i__2].r = cy[i__3].r, cx[i__2].i = cy[i__3].i;
+ i__2 = iy;
+ cy[i__2].r = ctemp.r, cy[i__2].i = ctemp.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ ctemp.r = cx[i__2].r, ctemp.i = cx[i__2].i;
+ i__2 = i__;
+ i__3 = i__;
+ cx[i__2].r = cy[i__3].r, cx[i__2].i = cy[i__3].i;
+ i__2 = i__;
+ cy[i__2].r = ctemp.r, cy[i__2].i = ctemp.i;
+/* L30: */
+ }
+ return 0;
+} /* cswap_ */
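+
+/*
+   Illustrative sketch (added commentary): exchanging two complex
+   vectors with cswap_ above.  Negative increments walk a vector from
+   the far end, as in the reference BLAS.  Guarded by #if 0.
+*/
+#if 0
+static void cswap_usage_sketch(void)
+{
+    complex cx[2] = {{1.f, 0.f}, {2.f, 0.f}};
+    complex cy[2] = {{9.f, 9.f}, {8.f, 8.f}};
+    integer n = 2, incx = 1, incy = 1;
+
+    cswap_(&n, cx, &incx, cy, &incy);   /* cx now holds 9+9i, 8+8i */
+}
+#endif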
+
+/* Subroutine */ int ctrmm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, complex *alpha, complex *a, integer *lda,
+ complex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, k, info;
+ static complex temp;
+ extern logical lsame_(char *, char *);
+ static logical lside;
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj, nounit;
+
+
+/*
+ Purpose
+ =======
+
+ CTRMM performs one of the matrix-matrix operations
+
+ B := alpha*op( A )*B, or B := alpha*B*op( A )
+
+ where alpha is a scalar, B is an m by n matrix, A is a unit, or
+ non-unit, upper or lower triangular matrix and op( A ) is one of
+
+ op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ).
+
+ Parameters
+ ==========
+
+ SIDE - CHARACTER*1.
+ On entry, SIDE specifies whether op( A ) multiplies B from
+ the left or right as follows:
+
+ SIDE = 'L' or 'l' B := alpha*op( A )*B.
+
+ SIDE = 'R' or 'r' B := alpha*B*op( A ).
+
+ Unchanged on exit.
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix A is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n' op( A ) = A.
+
+ TRANSA = 'T' or 't' op( A ) = A'.
+
+ TRANSA = 'C' or 'c' op( A ) = conjg( A' ).
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit triangular
+ as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of B. M must be at
+ least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of B. N must be
+ at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX .
+ On entry, ALPHA specifies the scalar alpha. When alpha is
+            zero, A is not referenced and B need not be set before
+ entry.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, k ), where k is m
+ when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
+ Before entry with UPLO = 'U' or 'u', the leading k by k
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading k by k
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When SIDE = 'L' or 'l' then
+ LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
+ then LDA must be at least max( 1, n ).
+ Unchanged on exit.
+
+ B - COMPLEX array of DIMENSION ( LDB, n ).
+ Before entry, the leading m by n part of the array B must
+ contain the matrix B, and on exit is overwritten by the
+ transformed matrix.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. LDB must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ noconj = lsame_(transa, "T");
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("CTRMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*A*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * b_dim1;
+ if ((b[i__3].r != 0.f) || (b[i__3].i != 0.f)) {
+ i__3 = k + j * b_dim1;
+ q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, q__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * a_dim1;
+ q__2.r = temp.r * a[i__6].r - temp.i * a[i__6]
+ .i, q__2.i = temp.r * a[i__6].i +
+ temp.i * a[i__6].r;
+ q__1.r = b[i__5].r + q__2.r, q__1.i = b[i__5]
+ .i + q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L30: */
+ }
+ if (nounit) {
+ i__3 = k + k * a_dim1;
+ q__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
+ .i, q__1.i = temp.r * a[i__3].i +
+ temp.i * a[i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__3 = k + j * b_dim1;
+ b[i__3].r = temp.r, b[i__3].i = temp.i;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (k = *m; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ if ((b[i__2].r != 0.f) || (b[i__2].i != 0.f)) {
+ i__2 = k + j * b_dim1;
+ q__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2]
+ .i, q__1.i = alpha->r * b[i__2].i +
+ alpha->i * b[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = k + j * b_dim1;
+ b[i__2].r = temp.r, b[i__2].i = temp.i;
+ if (nounit) {
+ i__2 = k + j * b_dim1;
+ i__3 = k + j * b_dim1;
+ i__4 = k + k * a_dim1;
+ q__1.r = b[i__3].r * a[i__4].r - b[i__3].i *
+ a[i__4].i, q__1.i = b[i__3].r * a[
+ i__4].i + b[i__3].i * a[i__4].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ }
+ i__2 = *m;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5]
+ .i, q__2.i = temp.r * a[i__5].i +
+ temp.i * a[i__5].r;
+ q__1.r = b[i__4].r + q__2.r, q__1.i = b[i__4]
+ .i + q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L60: */
+ }
+ }
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*A'*B or B := alpha*conjg( A' )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ temp.r = b[i__2].r, temp.i = b[i__2].i;
+ if (noconj) {
+ if (nounit) {
+ i__2 = i__ + i__ * a_dim1;
+ q__1.r = temp.r * a[i__2].r - temp.i * a[i__2]
+ .i, q__1.i = temp.r * a[i__2].i +
+ temp.i * a[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + i__ * a_dim1;
+ i__4 = k + j * b_dim1;
+ q__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
+ b[i__4].i, q__2.i = a[i__3].r * b[
+ i__4].i + a[i__3].i * b[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[i__ + i__ * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ r_cnjg(&q__3, &a[k + i__ * a_dim1]);
+ i__3 = k + j * b_dim1;
+ q__2.r = q__3.r * b[i__3].r - q__3.i * b[i__3]
+ .i, q__2.i = q__3.r * b[i__3].i +
+ q__3.i * b[i__3].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+ }
+ }
+ i__2 = i__ + j * b_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ temp.r = b[i__3].r, temp.i = b[i__3].i;
+ if (noconj) {
+ if (nounit) {
+ i__3 = i__ + i__ * a_dim1;
+ q__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
+ .i, q__1.i = temp.r * a[i__3].i +
+ temp.i * a[i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ i__4 = k + i__ * a_dim1;
+ i__5 = k + j * b_dim1;
+ q__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
+ b[i__5].i, q__2.i = a[i__4].r * b[
+ i__5].i + a[i__4].i * b[i__5].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L130: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[i__ + i__ * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ r_cnjg(&q__3, &a[k + i__ * a_dim1]);
+ i__4 = k + j * b_dim1;
+ q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4]
+ .i, q__2.i = q__3.r * b[i__4].i +
+ q__3.i * b[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L140: */
+ }
+ }
+ i__3 = i__ + j * b_dim1;
+ q__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ q__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L150: */
+ }
+/* L160: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*A. */
+
+ if (upper) {
+ for (j = *n; j >= 1; --j) {
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ q__1.i = temp.r * a[i__1].i + temp.i * a[i__1]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ q__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ q__1.i = temp.r * b[i__3].i + temp.i * b[i__3]
+ .r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L170: */
+ }
+ i__1 = j - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + j * a_dim1;
+ if ((a[i__2].r != 0.f) || (a[i__2].i != 0.f)) {
+ i__2 = k + j * a_dim1;
+ q__1.r = alpha->r * a[i__2].r - alpha->i * a[i__2]
+ .i, q__1.i = alpha->r * a[i__2].i +
+ alpha->i * a[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, q__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ q__1.r = b[i__4].r + q__2.r, q__1.i = b[i__4]
+ .i + q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ q__1.i = temp.r * a[i__2].i + temp.i * a[i__2]
+ .r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ q__1.i = temp.r * b[i__4].i + temp.i * b[i__4]
+ .r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L210: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ i__3 = k + j * a_dim1;
+ if ((a[i__3].r != 0.f) || (a[i__3].i != 0.f)) {
+ i__3 = k + j * a_dim1;
+ q__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3]
+ .i, q__1.i = alpha->r * a[i__3].i +
+ alpha->i * a[i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, q__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ q__1.r = b[i__5].r + q__2.r, q__1.i = b[i__5]
+ .i + q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L220: */
+ }
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*A' or B := alpha*B*conjg( A' ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = j + k * a_dim1;
+ if ((a[i__3].r != 0.f) || (a[i__3].i != 0.f)) {
+ if (noconj) {
+ i__3 = j + k * a_dim1;
+ q__1.r = alpha->r * a[i__3].r - alpha->i * a[
+ i__3].i, q__1.i = alpha->r * a[i__3]
+ .i + alpha->i * a[i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[j + k * a_dim1]);
+ q__1.r = alpha->r * q__2.r - alpha->i *
+ q__2.i, q__1.i = alpha->r * q__2.i +
+ alpha->i * q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, q__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ q__1.r = b[i__5].r + q__2.r, q__1.i = b[i__5]
+ .i + q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ if (noconj) {
+ i__2 = k + k * a_dim1;
+ q__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ q__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[k + k * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ if ((temp.r != 1.f) || (temp.i != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ q__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ q__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ } else {
+ for (k = *n; k >= 1; --k) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ if ((a[i__2].r != 0.f) || (a[i__2].i != 0.f)) {
+ if (noconj) {
+ i__2 = j + k * a_dim1;
+ q__1.r = alpha->r * a[i__2].r - alpha->i * a[
+ i__2].i, q__1.i = alpha->r * a[i__2]
+ .i + alpha->i * a[i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[j + k * a_dim1]);
+ q__1.r = alpha->r * q__2.r - alpha->i *
+ q__2.i, q__1.i = alpha->r * q__2.i +
+ alpha->i * q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, q__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ q__1.r = b[i__4].r + q__2.r, q__1.i = b[i__4]
+ .i + q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L290: */
+ }
+ }
+/* L300: */
+ }
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ if (noconj) {
+ i__1 = k + k * a_dim1;
+ q__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ q__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[k + k * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ if ((temp.r != 1.f) || (temp.i != 0.f)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ q__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ q__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L310: */
+ }
+ }
+/* L320: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTRMM . */
+
+} /* ctrmm_ */
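+
+/*
+   Illustrative sketch (added commentary): the triangular product
+   B := alpha*A*B via ctrmm_ above, with A a 2x2 non-unit upper
+   triangular matrix and B a single column.  Guarded by #if 0.
+*/
+#if 0
+static void ctrmm_usage_sketch(void)
+{
+    complex alpha = {1.f, 0.f};
+    /* Column-major A = [ 2  1+i ; 0  3 ]; the strictly lower entry
+       a[1] is never referenced with UPLO = "U". */
+    complex a[4] = {{2.f, 0.f}, {0.f, 0.f}, {1.f, 1.f}, {3.f, 0.f}};
+    complex b[2] = {{1.f, 0.f}, {1.f, 0.f}};
+    integer m = 2, n = 1, lda = 2, ldb = 2;
+
+    ctrmm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);
+    /* b now holds A*b = { 3 + 1i, 3 }. */
+}
+#endif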
+
+/* Subroutine */ int ctrmv_(char *uplo, char *trans, char *diag, integer *n,
+ complex *a, integer *lda, complex *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, ix, jx, kx, info;
+ static complex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj, nounit;
+
+
+/*
+ Purpose
+ =======
+
+ CTRMV performs one of the matrix-vector operations
+
+ x := A*x, or x := A'*x, or x := conjg( A' )*x,
+
+ where x is an n element vector and A is an n by n unit, or non-unit,
+ upper or lower triangular matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' x := A*x.
+
+ TRANS = 'T' or 't' x := A'*x.
+
+ TRANS = 'C' or 'c' x := conjg( A' )*x.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit
+ triangular as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - COMPLEX array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x. On exit, X is overwritten with the
+             transformed vector x.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("CTRMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/*
+ Set up the start point in X if the increment is not unity. This
+ will be ( N - 1 )*INCX too small for descending loops.
+*/
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if ((x[i__2].r != 0.f) || (x[i__2].i != 0.f)) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ q__1.r = x[i__4].r + q__2.r, q__1.i = x[i__4].i +
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L10: */
+ }
+ if (nounit) {
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ q__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
+ i__4].i, q__1.i = x[i__3].r * a[i__4].i +
+ x[i__3].i * a[i__4].r;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if ((x[i__2].r != 0.f) || (x[i__2].i != 0.f)) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ q__1.r = x[i__4].r + q__2.r, q__1.i = x[i__4].i +
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ i__2 = jx;
+ i__3 = jx;
+ i__4 = j + j * a_dim1;
+ q__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
+ i__4].i, q__1.i = x[i__3].r * a[i__4].i +
+ x[i__3].i * a[i__4].r;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if ((x[i__1].r != 0.f) || (x[i__1].i != 0.f)) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ q__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ q__1.r = x[i__3].r + q__2.r, q__1.i = x[i__3].i +
+ q__2.i;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+/* L50: */
+ }
+ if (nounit) {
+ i__1 = j;
+ i__2 = j;
+ i__3 = j + j * a_dim1;
+ q__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+ i__3].i, q__1.i = x[i__2].r * a[i__3].i +
+ x[i__2].i * a[i__3].r;
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if ((x[i__1].r != 0.f) || (x[i__1].i != 0.f)) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = kx;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = ix;
+ i__3 = ix;
+ i__4 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ q__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ q__1.r = x[i__3].r + q__2.r, q__1.i = x[i__3].i +
+ q__2.i;
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ i__1 = jx;
+ i__2 = jx;
+ i__3 = j + j * a_dim1;
+ q__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+ i__3].i, q__1.i = x[i__2].r * a[i__3].i +
+ x[i__2].i * a[i__3].r;
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ }
+ jx -= *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x or x := conjg( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ q__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = i__ + j * a_dim1;
+ i__2 = i__;
+ q__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, q__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__1 = i__;
+ q__2.r = q__3.r * x[i__1].r - q__3.i * x[i__1].i,
+ q__2.i = q__3.r * x[i__1].i + q__3.i * x[
+ i__1].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+/* L110: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ q__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ i__1 = i__ + j * a_dim1;
+ i__2 = ix;
+ q__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, q__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L120: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__1 = ix;
+ q__2.r = q__3.r * x[i__1].r - q__3.i * x[i__1].i,
+ q__2.i = q__3.r * x[i__1].i + q__3.i * x[
+ i__1].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L130: */
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ q__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, q__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L150: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+ i__3].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L160: */
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+/* L170: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ q__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ q__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, q__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L180: */
+ }
+ } else {
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = temp.r * q__2.r - temp.i * q__2.i,
+ q__1.i = temp.r * q__2.i + temp.i *
+ q__2.r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+ i__3].r;
+ q__1.r = temp.r + q__2.r, q__1.i = temp.i +
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L190: */
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTRMV . */
+
+} /* ctrmv_ */
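+
+/*
+   Illustrative sketch (added commentary): the matrix-vector analogue
+   x := A*x via ctrmv_ above, reusing the 2x2 upper triangular A from
+   the ctrmm_ sketch.  Guarded by #if 0.
+*/
+#if 0
+static void ctrmv_usage_sketch(void)
+{
+    complex a[4] = {{2.f, 0.f}, {0.f, 0.f}, {1.f, 1.f}, {3.f, 0.f}};
+    complex x[2] = {{1.f, 0.f}, {1.f, 0.f}};
+    integer n = 2, lda = 2, incx = 1;
+
+    ctrmv_("U", "N", "N", &n, a, &lda, x, &incx);
+    /* x now holds { 3 + 1i, 3 }. */
+}
+#endif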
+
+/* Subroutine */ int ctrsm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, complex *alpha, complex *a, integer *lda,
+ complex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6, i__7;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, k, info;
+ static complex temp;
+ extern logical lsame_(char *, char *);
+ static logical lside;
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj, nounit;
+
+
+/*
+ Purpose
+ =======
+
+ CTRSM solves one of the matrix equations
+
+ op( A )*X = alpha*B, or X*op( A ) = alpha*B,
+
+ where alpha is a scalar, X and B are m by n matrices, A is a unit, or
+ non-unit, upper or lower triangular matrix and op( A ) is one of
+
+ op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ).
+
+ The matrix X is overwritten on B.
+
+ Parameters
+ ==========
+
+ SIDE - CHARACTER*1.
+ On entry, SIDE specifies whether op( A ) appears on the left
+ or right of X as follows:
+
+ SIDE = 'L' or 'l' op( A )*X = alpha*B.
+
+ SIDE = 'R' or 'r' X*op( A ) = alpha*B.
+
+ Unchanged on exit.
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix A is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n' op( A ) = A.
+
+ TRANSA = 'T' or 't' op( A ) = A'.
+
+ TRANSA = 'C' or 'c' op( A ) = conjg( A' ).
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit triangular
+ as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of B. M must be at
+ least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of B. N must be
+ at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX .
+ On entry, ALPHA specifies the scalar alpha. When alpha is
+            zero, A is not referenced and B need not be set before
+ entry.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, k ), where k is m
+ when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
+ Before entry with UPLO = 'U' or 'u', the leading k by k
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading k by k
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When SIDE = 'L' or 'l' then
+ LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
+ then LDA must be at least max( 1, n ).
+ Unchanged on exit.
+
+ B - COMPLEX array of DIMENSION ( LDB, n ).
+ Before entry, the leading m by n part of the array B must
+ contain the right-hand side matrix B, and on exit is
+ overwritten by the solution matrix X.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. LDB must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
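+
+/*
+   Added commentary: the solve below never forms inv( A ) explicitly.
+   Each right-hand-side column of B is updated in place by forward or
+   back substitution, and when DIAG = 'N' the diagonal divisions go
+   through the f2c helper c_div() declared above.
+*/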
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ noconj = lsame_(transa, "T");
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("CTRSM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0.f && alpha->i == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0.f, b[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*inv( A )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((alpha->r != 1.f) || (alpha->i != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, q__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L30: */
+ }
+ }
+ for (k = *m; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ if ((b[i__2].r != 0.f) || (b[i__2].i != 0.f)) {
+ if (nounit) {
+ i__2 = k + j * b_dim1;
+ c_div(&q__1, &b[k + j * b_dim1], &a[k + k *
+ a_dim1]);
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+ }
+ i__2 = k - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = k + j * b_dim1;
+ i__6 = i__ + k * a_dim1;
+ q__2.r = b[i__5].r * a[i__6].r - b[i__5].i *
+ a[i__6].i, q__2.i = b[i__5].r * a[
+ i__6].i + b[i__5].i * a[i__6].r;
+ q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4]
+ .i - q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L40: */
+ }
+ }
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((alpha->r != 1.f) || (alpha->i != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, q__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L70: */
+ }
+ }
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * b_dim1;
+ if ((b[i__3].r != 0.f) || (b[i__3].i != 0.f)) {
+ if (nounit) {
+ i__3 = k + j * b_dim1;
+ c_div(&q__1, &b[k + j * b_dim1], &a[k + k *
+ a_dim1]);
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+ }
+ i__3 = *m;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = k + j * b_dim1;
+ i__7 = i__ + k * a_dim1;
+ q__2.r = b[i__6].r * a[i__7].r - b[i__6].i *
+ a[i__7].i, q__2.i = b[i__6].r * a[
+ i__7].i + b[i__6].i * a[i__7].r;
+ q__1.r = b[i__5].r - q__2.r, q__1.i = b[i__5]
+ .i - q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L80: */
+ }
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/*
+ Form B := alpha*inv( A' )*B
+ or B := alpha*inv( conjg( A' ) )*B.
+*/
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ q__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ if (noconj) {
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ i__4 = k + i__ * a_dim1;
+ i__5 = k + j * b_dim1;
+ q__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
+ b[i__5].i, q__2.i = a[i__4].r * b[
+ i__5].i + a[i__4].i * b[i__5].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L110: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[i__ + i__ * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ r_cnjg(&q__3, &a[k + i__ * a_dim1]);
+ i__4 = k + j * b_dim1;
+ q__2.r = q__3.r * b[i__4].r - q__3.i * b[i__4]
+ .i, q__2.i = q__3.r * b[i__4].i +
+ q__3.i * b[i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L120: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[i__ + i__ * a_dim1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = temp.r, b[i__3].i = temp.i;
+/* L130: */
+ }
+/* L140: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2].i,
+ q__1.i = alpha->r * b[i__2].i + alpha->i * b[
+ i__2].r;
+ temp.r = q__1.r, temp.i = q__1.i;
+ if (noconj) {
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ i__3 = k + i__ * a_dim1;
+ i__4 = k + j * b_dim1;
+ q__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
+ b[i__4].i, q__2.i = a[i__3].r * b[
+ i__4].i + a[i__3].i * b[i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L150: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[i__ + i__ * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ r_cnjg(&q__3, &a[k + i__ * a_dim1]);
+ i__3 = k + j * b_dim1;
+ q__2.r = q__3.r * b[i__3].r - q__3.i * b[i__3]
+ .i, q__2.i = q__3.r * b[i__3].i +
+ q__3.i * b[i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L160: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[i__ + i__ * a_dim1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__2 = i__ + j * b_dim1;
+ b[i__2].r = temp.r, b[i__2].i = temp.i;
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*inv( A ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((alpha->r != 1.f) || (alpha->i != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, q__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L190: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * a_dim1;
+ if ((a[i__3].r != 0.f) || (a[i__3].i != 0.f)) {
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = k + j * a_dim1;
+ i__7 = i__ + k * b_dim1;
+ q__2.r = a[i__6].r * b[i__7].r - a[i__6].i *
+ b[i__7].i, q__2.i = a[i__6].r * b[
+ i__7].i + a[i__6].i * b[i__7].r;
+ q__1.r = b[i__5].r - q__2.r, q__1.i = b[i__5]
+ .i - q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+ if (nounit) {
+ c_div(&q__1, &c_b21, &a[j + j * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ q__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ q__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L220: */
+ }
+ }
+/* L230: */
+ }
+ } else {
+ for (j = *n; j >= 1; --j) {
+ if ((alpha->r != 1.f) || (alpha->i != 0.f)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, q__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L240: */
+ }
+ }
+ i__1 = *n;
+ for (k = j + 1; k <= i__1; ++k) {
+ i__2 = k + j * a_dim1;
+ if ((a[i__2].r != 0.f) || (a[i__2].i != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = k + j * a_dim1;
+ i__6 = i__ + k * b_dim1;
+ q__2.r = a[i__5].r * b[i__6].r - a[i__5].i *
+ b[i__6].i, q__2.i = a[i__5].r * b[
+ i__6].i + a[i__5].i * b[i__6].r;
+ q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4]
+ .i - q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ if (nounit) {
+ c_div(&q__1, &c_b21, &a[j + j * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ q__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ q__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ }
+ } else {
+
+/*
+ Form B := alpha*B*inv( A' )
+ or B := alpha*B*inv( conjg( A' ) ).
+*/
+
+ if (upper) {
+ for (k = *n; k >= 1; --k) {
+ if (nounit) {
+ if (noconj) {
+ c_div(&q__1, &c_b21, &a[k + k * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[k + k * a_dim1]);
+ c_div(&q__1, &c_b21, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ q__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ q__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L290: */
+ }
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ if ((a[i__2].r != 0.f) || (a[i__2].i != 0.f)) {
+ if (noconj) {
+ i__2 = j + k * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ } else {
+ r_cnjg(&q__1, &a[j + k * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, q__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ q__1.r = b[i__4].r - q__2.r, q__1.i = b[i__4]
+ .i - q__2.i;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L300: */
+ }
+ }
+/* L310: */
+ }
+ if ((alpha->r != 1.f) || (alpha->i != 0.f)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ q__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, q__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ b[i__2].r = q__1.r, b[i__2].i = q__1.i;
+/* L320: */
+ }
+ }
+/* L330: */
+ }
+ } else {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (nounit) {
+ if (noconj) {
+ c_div(&q__1, &c_b21, &a[k + k * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ } else {
+ r_cnjg(&q__2, &a[k + k * a_dim1]);
+ c_div(&q__1, &c_b21, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ q__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ q__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L340: */
+ }
+ }
+ i__2 = *n;
+ for (j = k + 1; j <= i__2; ++j) {
+ i__3 = j + k * a_dim1;
+ if ((a[i__3].r != 0.f) || (a[i__3].i != 0.f)) {
+ if (noconj) {
+ i__3 = j + k * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ } else {
+ r_cnjg(&q__1, &a[j + k * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ q__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, q__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ q__1.r = b[i__5].r - q__2.r, q__1.i = b[i__5]
+ .i - q__2.i;
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+ if ((alpha->r != 1.f) || (alpha->i != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ q__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, q__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L370: */
+ }
+ }
+/* L380: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTRSM . */
+
+} /* ctrsm_ */
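+
+/*
+ Usage sketch for ctrsm_, left as a comment so the translation unit is
+ unchanged. The f2c-style typedefs below are assumptions standing in for
+ the real f2c.h definitions, and the prototype is assumed to follow the
+ pointer-argument convention used throughout this file. It solves the
+ 2 by 2 upper-triangular system A*X = B in place.
+
+   typedef long int integer;                 // assumed f2c integer
+   typedef struct { float r, i; } complex;   // assumed f2c complex
+   extern int ctrsm_(char *side, char *uplo, char *transa, char *diag,
+                     integer *m, integer *n, complex *alpha, complex *a,
+                     integer *lda, complex *b, integer *ldb);
+   int main(void) {
+       // Column-major A = [ 1 1 ; 0 2 ]; the strictly lower part is
+       // not referenced when UPLO = 'U'.
+       complex a[4] = {{1.f,0.f},{0.f,0.f},{1.f,0.f},{2.f,0.f}};
+       complex b[2] = {{3.f,0.f},{4.f,0.f}};   // right-hand side B
+       complex alpha = {1.f,0.f};
+       integer m = 2, n = 1, lda = 2, ldb = 2;
+       ctrsm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);
+       // b now holds X = (1, 2): x2 = 4/2, then x1 = 3 - 1*x2.
+       return 0;
+   }
+*/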
+
+/* Subroutine */ int ctrsv_(char *uplo, char *trans, char *diag, integer *n,
+ complex *a, integer *lda, complex *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2, q__3;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, ix, jx, kx, info;
+ static complex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj, nounit;
+
+
+/*
+ Purpose
+ =======
+
+ CTRSV solves one of the systems of equations
+
+ A*x = b, or A'*x = b, or conjg( A' )*x = b,
+
+ where b and x are n element vectors and A is an n by n unit, or
+ non-unit, upper or lower triangular matrix.
+
+ No test for singularity or near-singularity is included in this
+ routine. Such tests must be performed before calling this routine.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the equations to be solved as
+ follows:
+
+ TRANS = 'N' or 'n' A*x = b.
+
+ TRANS = 'T' or 't' A'*x = b.
+
+ TRANS = 'C' or 'c' conjg( A' )*x = b.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit
+ triangular as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ A - COMPLEX array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - COMPLEX array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element right-hand side vector b. On exit, X is overwritten
+ with the solution vector x.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("CTRSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/*
+ Set up the start point in X if the increment is not unity. This
+ will be ( N - 1 )*INCX too small for descending loops.
+*/
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if ((x[i__1].r != 0.f) || (x[i__1].i != 0.f)) {
+ if (nounit) {
+ i__1 = j;
+ c_div(&q__1, &x[j], &a[j + j * a_dim1]);
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = i__;
+ i__2 = i__;
+ i__3 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ q__2.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ q__1.r = x[i__2].r - q__2.r, q__1.i = x[i__2].i -
+ q__2.i;
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if ((x[i__1].r != 0.f) || (x[i__1].i != 0.f)) {
+ if (nounit) {
+ i__1 = jx;
+ c_div(&q__1, &x[jx], &a[j + j * a_dim1]);
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+ }
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ i__1 = ix;
+ i__2 = ix;
+ i__3 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ q__2.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ q__1.r = x[i__2].r - q__2.r, q__1.i = x[i__2].i -
+ q__2.i;
+ x[i__1].r = q__1.r, x[i__1].i = q__1.i;
+/* L30: */
+ }
+ }
+ jx -= *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if ((x[i__2].r != 0.f) || (x[i__2].i != 0.f)) {
+ if (nounit) {
+ i__2 = j;
+ c_div(&q__1, &x[j], &a[j + j * a_dim1]);
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ q__1.r = x[i__4].r - q__2.r, q__1.i = x[i__4].i -
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if ((x[i__2].r != 0.f) || (x[i__2].i != 0.f)) {
+ if (nounit) {
+ i__2 = jx;
+ c_div(&q__1, &x[jx], &a[j + j * a_dim1]);
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ }
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = i__ + j * a_dim1;
+ q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ q__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ q__1.r = x[i__4].r - q__2.r, q__1.i = x[i__4].i -
+ q__2.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x or x := inv( conjg( A' ) )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, q__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L90: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[j + j * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+ i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L100: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+/* L110: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ ix = kx;
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ q__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, q__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[j + j * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i,
+ q__2.i = q__3.r * x[i__3].i + q__3.i * x[
+ i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__;
+ q__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+ i__3].i, q__2.i = a[i__2].r * x[i__3].i +
+ a[i__2].i * x[i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L150: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[j + j * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__2 = i__;
+ q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i,
+ q__2.i = q__3.r * x[i__2].i + q__3.i * x[
+ i__2].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+/* L160: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+/* L170: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ ix = kx;
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = ix;
+ q__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+ i__3].i, q__2.i = a[i__2].r * x[i__3].i +
+ a[i__2].i * x[i__3].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix -= *incx;
+/* L180: */
+ }
+ if (nounit) {
+ c_div(&q__1, &temp, &a[j + j * a_dim1]);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ } else {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ r_cnjg(&q__3, &a[i__ + j * a_dim1]);
+ i__2 = ix;
+ q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i,
+ q__2.i = q__3.r * x[i__2].i + q__3.i * x[
+ i__2].r;
+ q__1.r = temp.r - q__2.r, q__1.i = temp.i -
+ q__2.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ ix -= *incx;
+/* L190: */
+ }
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ c_div(&q__1, &temp, &q__2);
+ temp.r = q__1.r, temp.i = q__1.i;
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTRSV . */
+
+} /* ctrsv_ */
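+
+/*
+ Usage sketch for ctrsv_ (comment only): the single-vector analogue of
+ the ctrsm_ example above, under the same assumed f2c typedefs.
+
+   typedef long int integer;
+   typedef struct { float r, i; } complex;
+   extern int ctrsv_(char *uplo, char *trans, char *diag, integer *n,
+                     complex *a, integer *lda, complex *x, integer *incx);
+   int main(void) {
+       complex a[4] = {{1.f,0.f},{0.f,0.f},{1.f,0.f},{2.f,0.f}};
+       complex x[2] = {{3.f,0.f},{4.f,0.f}};   // b on entry, x on exit
+       integer n = 2, lda = 2, incx = 1;
+       ctrsv_("U", "N", "N", &n, a, &lda, x, &incx);
+       // x now holds the solution (1, 2) of A*x = b.
+       return 0;
+   }
+*/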
+
+/* Subroutine */ int daxpy_(integer *n, doublereal *da, doublereal *dx,
+ integer *incx, doublereal *dy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, m, ix, iy, mp1;
+
+
+/*
+ constant times a vector plus a vector.
+ uses unrolled loops for increments equal to one.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --dy;
+ --dx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*da == 0.) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dy[iy] += *da * dx[ix];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/*
+ code for both increments equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 4;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dy[i__] += *da * dx[i__];
+/* L30: */
+ }
+ if (*n < 4) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 4) {
+ dy[i__] += *da * dx[i__];
+ dy[i__ + 1] += *da * dx[i__ + 1];
+ dy[i__ + 2] += *da * dx[i__ + 2];
+ dy[i__ + 3] += *da * dx[i__ + 3];
+/* L50: */
+ }
+ return 0;
+} /* daxpy_ */
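+
+/*
+ Usage sketch for daxpy_ (comment only), assuming the usual f2c
+ typedefs; every argument is passed by pointer, Fortran style.
+
+   #include <stdio.h>
+   typedef long int integer;      // assumed f2c integer
+   typedef double doublereal;     // assumed f2c doublereal
+   extern int daxpy_(integer *n, doublereal *da, doublereal *dx,
+                     integer *incx, doublereal *dy, integer *incy);
+   int main(void) {
+       doublereal x[3] = {1., 2., 3.}, y[3] = {10., 20., 30.};
+       doublereal a = 2.;
+       integer n = 3, inc = 1;
+       daxpy_(&n, &a, x, &inc, y, &inc);          // y := 2*x + y
+       printf("%g %g %g\n", y[0], y[1], y[2]);    // 12 24 36
+       return 0;
+   }
+*/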
+
+doublereal dcabs1_(doublecomplex *z__)
+{
+ /* System generated locals */
+ doublereal ret_val;
+ static doublecomplex equiv_0[1];
+
+ /* Local variables */
+#define t ((doublereal *)equiv_0)
+#define zz (equiv_0)
+
+ zz->r = z__->r, zz->i = z__->i;
+ ret_val = abs(t[0]) + abs(t[1]);
+ return ret_val;
+} /* dcabs1_ */
+
+#undef zz
+#undef t
+
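+/*
+ The #define pair above reproduces a Fortran EQUIVALENCE: t views the
+ doublecomplex scratch as two doublereals, so the result is
+ |Re(z)| + |Im(z)|. A comment-only sketch, assuming f2c typedefs:
+
+   #include <stdio.h>
+   typedef double doublereal;                         // assumed f2c type
+   typedef struct { doublereal r, i; } doublecomplex; // assumed f2c type
+   extern doublereal dcabs1_(doublecomplex *z);
+   int main(void) {
+       doublecomplex z = {3., -4.};
+       printf("%g\n", dcabs1_(&z));   // prints 7: |3| + |-4|
+       return 0;
+   }
+*/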
+
+/* Subroutine */ int dcopy_(integer *n, doublereal *dx, integer *incx,
+ doublereal *dy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, m, ix, iy, mp1;
+
+
+/*
+ copies a vector, x, to a vector, y.
+ uses unrolled loops for increments equal to one.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --dy;
+ --dx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dy[iy] = dx[ix];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/*
+ code for both increments equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 7;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dy[i__] = dx[i__];
+/* L30: */
+ }
+ if (*n < 7) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 7) {
+ dy[i__] = dx[i__];
+ dy[i__ + 1] = dx[i__ + 1];
+ dy[i__ + 2] = dx[i__ + 2];
+ dy[i__ + 3] = dx[i__ + 3];
+ dy[i__ + 4] = dx[i__ + 4];
+ dy[i__ + 5] = dx[i__ + 5];
+ dy[i__ + 6] = dx[i__ + 6];
+/* L50: */
+ }
+ return 0;
+} /* dcopy_ */
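+
+/*
+ Usage sketch for dcopy_ (comment only, assumed f2c typedefs); with
+ unit increments it simply duplicates x into y.
+
+   typedef long int integer;
+   typedef double doublereal;
+   extern int dcopy_(integer *n, doublereal *dx, integer *incx,
+                     doublereal *dy, integer *incy);
+   int main(void) {
+       doublereal x[4] = {1., 2., 3., 4.}, y[4] = {0., 0., 0., 0.};
+       integer n = 4, inc = 1;
+       dcopy_(&n, x, &inc, y, &inc);   // y now equals x
+       return 0;
+   }
+*/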
+
+doublereal ddot_(integer *n, doublereal *dx, integer *incx, doublereal *dy,
+ integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal ret_val;
+
+ /* Local variables */
+ static integer i__, m, ix, iy, mp1;
+ static doublereal dtemp;
+
+
+/*
+ forms the dot product of two vectors.
+ uses unrolled loops for increments equal to one.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --dy;
+ --dx;
+
+ /* Function Body */
+ ret_val = 0.;
+ dtemp = 0.;
+ if (*n <= 0) {
+ return ret_val;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dtemp += dx[ix] * dy[iy];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ ret_val = dtemp;
+ return ret_val;
+
+/*
+ code for both increments equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 5;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dtemp += dx[i__] * dy[i__];
+/* L30: */
+ }
+ if (*n < 5) {
+ goto L60;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 5) {
+ dtemp = dtemp + dx[i__] * dy[i__] + dx[i__ + 1] * dy[i__ + 1] + dx[
+ i__ + 2] * dy[i__ + 2] + dx[i__ + 3] * dy[i__ + 3] + dx[i__ +
+ 4] * dy[i__ + 4];
+/* L50: */
+ }
+L60:
+ ret_val = dtemp;
+ return ret_val;
+} /* ddot_ */
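+
+/*
+ Usage sketch for ddot_ (comment only, assumed f2c typedefs):
+
+   #include <stdio.h>
+   typedef long int integer;
+   typedef double doublereal;
+   extern doublereal ddot_(integer *n, doublereal *dx, integer *incx,
+                           doublereal *dy, integer *incy);
+   int main(void) {
+       doublereal x[3] = {1., 2., 3.}, y[3] = {4., 5., 6.};
+       integer n = 3, inc = 1;
+       printf("%g\n", ddot_(&n, x, &inc, y, &inc));  // 1*4+2*5+3*6 = 32
+       return 0;
+   }
+*/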
+
+/* Subroutine */ int dgemm_(char *transa, char *transb, integer *m, integer *
+ n, integer *k, doublereal *alpha, doublereal *a, integer *lda,
+ doublereal *b, integer *ldb, doublereal *beta, doublereal *c__,
+ integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static logical nota, notb;
+ static doublereal temp;
+ static integer ncola;
+ extern logical lsame_(char *, char *);
+ static integer nrowa, nrowb;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ DGEMM performs one of the matrix-matrix operations
+
+ C := alpha*op( A )*op( B ) + beta*C,
+
+ where op( X ) is one of
+
+ op( X ) = X or op( X ) = X',
+
+ alpha and beta are scalars, and A, B and C are matrices, with op( A )
+ an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
+
+ Parameters
+ ==========
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n', op( A ) = A.
+
+ TRANSA = 'T' or 't', op( A ) = A'.
+
+ TRANSA = 'C' or 'c', op( A ) = A'.
+
+ Unchanged on exit.
+
+ TRANSB - CHARACTER*1.
+ On entry, TRANSB specifies the form of op( B ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSB = 'N' or 'n', op( B ) = B.
+
+ TRANSB = 'T' or 't', op( B ) = B'.
+
+ TRANSB = 'C' or 'c', op( B ) = B'.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix
+ op( A ) and of the matrix C. M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix
+ op( B ) and the number of columns of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry, K specifies the number of columns of the matrix
+ op( A ) and the number of rows of the matrix op( B ). K must
+ be at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANSA = 'N' or 'n', and is m otherwise.
+ Before entry with TRANSA = 'N' or 'n', the leading m by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by m part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANSA = 'N' or 'n' then
+ LDA must be at least max( 1, m ), otherwise LDA must be at
+ least max( 1, k ).
+ Unchanged on exit.
+
+ B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
+ n when TRANSB = 'N' or 'n', and is k otherwise.
+ Before entry with TRANSB = 'N' or 'n', the leading k by n
+ part of the array B must contain the matrix B, otherwise
+ the leading n by k part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANSB = 'N' or 'n' then
+ LDB must be at least max( 1, k ), otherwise LDB must be at
+ least max( 1, n ).
+ Unchanged on exit.
+
+ BETA - DOUBLE PRECISION.
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then C need not be set on input.
+ Unchanged on exit.
+
+ C - DOUBLE PRECISION array of DIMENSION ( LDC, n ).
+ Before entry, the leading m by n part of the array C must
+ contain the matrix C, except when beta is zero, in which
+ case C need not be set on entry.
+ On exit, the array C is overwritten by the m by n matrix
+ ( alpha*op( A )*op( B ) + beta*C ).
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Set NOTA and NOTB as true if A and B respectively are not
+ transposed and set NROWA, NCOLA and NROWB as the number of rows
+ and columns of A and the number of rows of B respectively.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ nota = lsame_(transa, "N");
+ notb = lsame_(transb, "N");
+ if (nota) {
+ nrowa = *m;
+ ncola = *k;
+ } else {
+ nrowa = *k;
+ ncola = *m;
+ }
+ if (notb) {
+ nrowb = *k;
+ } else {
+ nrowb = *n;
+ }
+
+/* Test the input parameters. */
+
+ info = 0;
+ if (! nota && ! lsame_(transa, "C") && ! lsame_(
+ transa, "T")) {
+ info = 1;
+ } else if (! notb && ! lsame_(transb, "C") && !
+ lsame_(transb, "T")) {
+ info = 2;
+ } else if (*m < 0) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < max(1,nrowa)) {
+ info = 8;
+ } else if (*ldb < max(1,nrowb)) {
+ info = 10;
+ } else if (*ldc < max(1,*m)) {
+ info = 13;
+ }
+ if (info != 0) {
+ xerbla_("DGEMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (((*alpha == 0.) || (*k == 0)) && *beta ==
+ 1.)) {
+ return 0;
+ }
+
+/* And if alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (notb) {
+ if (nota) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L50: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L60: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (b[l + j * b_dim1] != 0.) {
+ temp = *alpha * b[l + j * b_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+/* L100: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L110: */
+ }
+/* L120: */
+ }
+ }
+ } else {
+ if (nota) {
+
+/* Form C := alpha*A*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L130: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L140: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (b[j + l * b_dim1] != 0.) {
+ temp = *alpha * b[j + l * b_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+/* L170: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * b[j + l * b_dim1];
+/* L180: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DGEMM . */
+
+} /* dgemm_ */
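+
+/*
+ Usage sketch for dgemm_ (comment only): C := alpha*A*B + beta*C for
+ two column-major 2 by 2 matrices, under the assumed f2c typedefs.
+
+   typedef long int integer;
+   typedef double doublereal;
+   extern int dgemm_(char *transa, char *transb, integer *m, integer *n,
+                     integer *k, doublereal *alpha, doublereal *a,
+                     integer *lda, doublereal *b, integer *ldb,
+                     doublereal *beta, doublereal *c, integer *ldc);
+   int main(void) {
+       doublereal a[4] = {1., 3., 2., 4.};   // A = [ 1 2 ; 3 4 ]
+       doublereal b[4] = {5., 7., 6., 8.};   // B = [ 5 6 ; 7 8 ]
+       doublereal c[4] = {0., 0., 0., 0.};
+       doublereal alpha = 1., beta = 0.;
+       integer m = 2, n = 2, k = 2, ld = 2;
+       dgemm_("N", "N", &m, &n, &k, &alpha, a, &ld, b, &ld, &beta,
+              c, &ld);
+       // c = {19, 43, 22, 50}, i.e. A*B = [ 19 22 ; 43 50 ]
+       return 0;
+   }
+*/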
+
+/* Subroutine */ int dgemv_(char *trans, integer *m, integer *n, doublereal *
+ alpha, doublereal *a, integer *lda, doublereal *x, integer *incx,
+ doublereal *beta, doublereal *y, integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static doublereal temp;
+ static integer lenx, leny;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ DGEMV performs one of the matrix-vector operations
+
+ y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y,
+
+ where alpha and beta are scalars, x and y are vectors and A is an
+ m by n matrix.
+
+ Parameters
+ ==========
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' y := alpha*A*x + beta*y.
+
+ TRANS = 'T' or 't' y := alpha*A'*x + beta*y.
+
+ TRANS = 'C' or 'c' y := alpha*A'*x + beta*y.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix A.
+ M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
+ Before entry, the leading m by n part of the array A must
+ contain the matrix of coefficients.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+ X - DOUBLE PRECISION array of DIMENSION at least
+ ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'
+ and at least
+ ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.
+ Before entry, the incremented array X must contain the
+ vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ BETA - DOUBLE PRECISION.
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then Y need not be set on input.
+ Unchanged on exit.
+
+ Y - DOUBLE PRECISION array of DIMENSION at least
+ ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'
+ and at least
+ ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.
+ Before entry with BETA non-zero, the incremented array Y
+ must contain the vector y. On exit, Y is overwritten by the
+ updated vector y.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+ ) {
+ info = 1;
+ } else if (*m < 0) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*lda < max(1,*m)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ } else if (*incy == 0) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("DGEMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (*alpha == 0. && *beta == 1.)) {
+ return 0;
+ }
+
+/*
+ Set LENX and LENY, the lengths of the vectors x and y, and set
+ up the start points in X and Y.
+*/
+
+ if (lsame_(trans, "N")) {
+ lenx = *n;
+ leny = *m;
+ } else {
+ lenx = *m;
+ leny = *n;
+ }
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (lenx - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (leny - 1) * *incy;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+
+ First form y := beta*y.
+*/
+
+ if (*beta != 1.) {
+ if (*incy == 1) {
+ if (*beta == 0.) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = 0.;
+/* L10: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = *beta * y[i__];
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (*beta == 0.) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = 0.;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = *beta * y[iy];
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (*alpha == 0.) {
+ return 0;
+ }
+ if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+ jx = kx;
+ if (*incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.) {
+ temp = *alpha * x[jx];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ y[i__] += temp * a[i__ + j * a_dim1];
+/* L50: */
+ }
+ }
+ jx += *incx;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.) {
+ temp = *alpha * x[jx];
+ iy = ky;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ y[iy] += temp * a[i__ + j * a_dim1];
+ iy += *incy;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y := alpha*A'*x + y. */
+
+ jy = ky;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = 0.;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ y[jy] += *alpha * temp;
+ jy += *incy;
+/* L100: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = 0.;
+ ix = kx;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp += a[i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+/* L110: */
+ }
+ y[jy] += *alpha * temp;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DGEMV . */
+
+} /* dgemv_ */
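+
+/*
+ Usage sketch for dgemv_ (comment only, assumed f2c typedefs):
+ y := A*x with alpha = 1 and beta = 0.
+
+   typedef long int integer;
+   typedef double doublereal;
+   extern int dgemv_(char *trans, integer *m, integer *n,
+                     doublereal *alpha, doublereal *a, integer *lda,
+                     doublereal *x, integer *incx, doublereal *beta,
+                     doublereal *y, integer *incy);
+   int main(void) {
+       doublereal a[4] = {1., 3., 2., 4.};   // A = [ 1 2 ; 3 4 ]
+       doublereal x[2] = {1., 1.}, y[2] = {0., 0.};
+       doublereal alpha = 1., beta = 0.;
+       integer m = 2, n = 2, lda = 2, inc = 1;
+       dgemv_("N", &m, &n, &alpha, a, &lda, x, &inc, &beta, y, &inc);
+       // y = (3, 7), the row sums of A
+       return 0;
+   }
+*/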
+
+/* Subroutine */ int dger_(integer *m, integer *n, doublereal *alpha,
+ doublereal *x, integer *incx, doublereal *y, integer *incy,
+ doublereal *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, ix, jy, kx, info;
+ static doublereal temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ DGER performs the rank 1 operation
+
+ A := alpha*x*y' + A,
+
+ where alpha is a scalar, x is an m element vector, y is an n element
+ vector and A is an m by n matrix.
+
+ Parameters
+ ==========
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix A.
+ M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ X - DOUBLE PRECISION array of dimension at least
+ ( 1 + ( m - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the m
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ Y - DOUBLE PRECISION array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y.
+ Unchanged on exit.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
+ Before entry, the leading m by n part of the array A must
+ contain the matrix of coefficients. On exit, A is
+ overwritten by the updated matrix.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (*m < 0) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("DGER ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (*alpha == 0.)) {
+ return 0;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (*incy > 0) {
+ jy = 1;
+ } else {
+ jy = 1 - (*n - 1) * *incy;
+ }
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (y[jy] != 0.) {
+ temp = *alpha * y[jy];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] += x[i__] * temp;
+/* L10: */
+ }
+ }
+ jy += *incy;
+/* L20: */
+ }
+ } else {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*m - 1) * *incx;
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (y[jy] != 0.) {
+ temp = *alpha * y[jy];
+ ix = kx;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] += x[ix] * temp;
+ ix += *incx;
+/* L30: */
+ }
+ }
+ jy += *incy;
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of DGER . */
+
+} /* dger_ */
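+
+/*
+ Usage sketch for dger_ (comment only, assumed f2c typedefs): the
+ rank-1 update A := x*y' starting from A = 0.
+
+   typedef long int integer;
+   typedef double doublereal;
+   extern int dger_(integer *m, integer *n, doublereal *alpha,
+                    doublereal *x, integer *incx, doublereal *y,
+                    integer *incy, doublereal *a, integer *lda);
+   int main(void) {
+       doublereal x[2] = {1., 2.}, y[2] = {3., 4.};
+       doublereal a[4] = {0., 0., 0., 0.};
+       doublereal alpha = 1.;
+       integer m = 2, n = 2, lda = 2, inc = 1;
+       dger_(&m, &n, &alpha, x, &inc, y, &inc, a, &lda);
+       // a = {3, 6, 4, 8}, i.e. A(i,j) = x(i)*y(j)
+       return 0;
+   }
+*/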
+
+doublereal dnrm2_(integer *n, doublereal *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal ret_val, d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer ix;
+ static doublereal ssq, norm, scale, absxi;
+
+
+/*
+ DNRM2 returns the euclidean norm of a vector via the function
+ name, so that
+
+ DNRM2 := sqrt( x'*x )
+
+
+ -- This version written on 25-October-1982.
+ Modified on 14-October-1993 to inline the call to DLASSQ.
+ Sven Hammarling, Nag Ltd.
+*/
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if ((*n < 1) || (*incx < 1)) {
+ norm = 0.;
+ } else if (*n == 1) {
+ norm = abs(x[1]);
+ } else {
+ scale = 0.;
+ ssq = 1.;
+/*
+ The following loop is equivalent to this call to the LAPACK
+ auxiliary routine:
+ CALL DLASSQ( N, X, INCX, SCALE, SSQ )
+*/
+
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ if (x[ix] != 0.) {
+ absxi = (d__1 = x[ix], abs(d__1));
+ if (scale < absxi) {
+/* Computing 2nd power */
+ d__1 = scale / absxi;
+ ssq = ssq * (d__1 * d__1) + 1.;
+ scale = absxi;
+ } else {
+/* Computing 2nd power */
+ d__1 = absxi / scale;
+ ssq += d__1 * d__1;
+ }
+ }
+/* L10: */
+ }
+ norm = scale * sqrt(ssq);
+ }
+
+ ret_val = norm;
+ return ret_val;
+
+/* End of DNRM2. */
+
+} /* dnrm2_ */
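+
+/*
+ Usage sketch for dnrm2_ (comment only, assumed f2c typedefs). The
+ scale/ssq recurrence above avoids the overflow a naive sqrt of x'*x
+ would hit for large entries; the call itself is just:
+
+   #include <stdio.h>
+   typedef long int integer;
+   typedef double doublereal;
+   extern doublereal dnrm2_(integer *n, doublereal *x, integer *incx);
+   int main(void) {
+       doublereal x[2] = {3., 4.};
+       integer n = 2, incx = 1;
+       printf("%g\n", dnrm2_(&n, x, &incx));   // prints 5
+       return 0;
+   }
+*/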
+
+/* Subroutine */ int drot_(integer *n, doublereal *dx, integer *incx,
+ doublereal *dy, integer *incy, doublereal *c__, doublereal *s)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, ix, iy;
+ static doublereal dtemp;
+
+
+/*
+ applies a plane rotation.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --dy;
+ --dx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments not equal
+ to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dtemp = *c__ * dx[ix] + *s * dy[iy];
+ dy[iy] = *c__ * dy[iy] - *s * dx[ix];
+ dx[ix] = dtemp;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dtemp = *c__ * dx[i__] + *s * dy[i__];
+ dy[i__] = *c__ * dy[i__] - *s * dx[i__];
+ dx[i__] = dtemp;
+/* L30: */
+ }
+ return 0;
+} /* drot_ */
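+
+/*
+ Usage sketch for drot_ (comment only, assumed f2c typedefs): applies
+ the Givens rotation [ c s ; -s c ] to the vector pair in place.
+
+   typedef long int integer;
+   typedef double doublereal;
+   extern int drot_(integer *n, doublereal *dx, integer *incx,
+                    doublereal *dy, integer *incy, doublereal *c,
+                    doublereal *s);
+   int main(void) {
+       doublereal x[2] = {1., 0.}, y[2] = {0., 1.};
+       doublereal c = .6, s = .8;              // c*c + s*s = 1
+       integer n = 2, inc = 1;
+       drot_(&n, x, &inc, y, &inc, &c, &s);
+       // x = (0.6, 0.8), y = (-0.8, 0.6)
+       return 0;
+   }
+*/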
+
+/* Subroutine */ int dscal_(integer *n, doublereal *da, doublereal *dx,
+ integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ static integer i__, m, mp1, nincx;
+
+
+/*
+ scales a vector by a constant.
+ uses unrolled loops for increment equal to one.
+ jack dongarra, linpack, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --dx;
+
+ /* Function Body */
+ if ((*n <= 0) || (*incx <= 0)) {
+ return 0;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ nincx = *n * *incx;
+ i__1 = nincx;
+ i__2 = *incx;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ dx[i__] = *da * dx[i__];
+/* L10: */
+ }
+ return 0;
+
+/*
+ code for increment equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 5;
+ if (m == 0) {
+ goto L40;
+ }
+ i__2 = m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ dx[i__] = *da * dx[i__];
+/* L30: */
+ }
+ if (*n < 5) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__2 = *n;
+ for (i__ = mp1; i__ <= i__2; i__ += 5) {
+ dx[i__] = *da * dx[i__];
+ dx[i__ + 1] = *da * dx[i__ + 1];
+ dx[i__ + 2] = *da * dx[i__ + 2];
+ dx[i__ + 3] = *da * dx[i__ + 3];
+ dx[i__ + 4] = *da * dx[i__ + 4];
+/* L50: */
+ }
+ return 0;
+} /* dscal_ */
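+
+/*
+ Usage sketch for dscal_ (comment only, assumed f2c typedefs):
+
+   typedef long int integer;
+   typedef double doublereal;
+   extern int dscal_(integer *n, doublereal *da, doublereal *dx,
+                     integer *incx);
+   int main(void) {
+       doublereal x[3] = {1., 2., 3.};
+       doublereal a = 2.;
+       integer n = 3, incx = 1;
+       dscal_(&n, &a, x, &incx);   // x = (2, 4, 6)
+       return 0;
+   }
+*/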
+
+/* Subroutine */ int dswap_(integer *n, doublereal *dx, integer *incx,
+ doublereal *dy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, m, ix, iy, mp1;
+ static doublereal dtemp;
+
+
+/*
+ interchanges two vectors.
+ uses unrolled loops for increments equal one.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --dy;
+ --dx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments not equal
+ to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dtemp = dx[ix];
+ dx[ix] = dy[iy];
+ dy[iy] = dtemp;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/*
+ code for both increments equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 3;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dtemp = dx[i__];
+ dx[i__] = dy[i__];
+ dy[i__] = dtemp;
+/* L30: */
+ }
+ if (*n < 3) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 3) {
+ dtemp = dx[i__];
+ dx[i__] = dy[i__];
+ dy[i__] = dtemp;
+ dtemp = dx[i__ + 1];
+ dx[i__ + 1] = dy[i__ + 1];
+ dy[i__ + 1] = dtemp;
+ dtemp = dx[i__ + 2];
+ dx[i__ + 2] = dy[i__ + 2];
+ dy[i__ + 2] = dtemp;
+/* L50: */
+ }
+ return 0;
+} /* dswap_ */
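+
+/*
+ Usage sketch for dswap_ (comment only, assumed f2c typedefs):
+
+   typedef long int integer;
+   typedef double doublereal;
+   extern int dswap_(integer *n, doublereal *dx, integer *incx,
+                     doublereal *dy, integer *incy);
+   int main(void) {
+       doublereal x[3] = {1., 2., 3.}, y[3] = {4., 5., 6.};
+       integer n = 3, inc = 1;
+       dswap_(&n, x, &inc, y, &inc);   // x = (4,5,6), y = (1,2,3)
+       return 0;
+   }
+*/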
+
+/* Subroutine */ int dsymv_(char *uplo, integer *n, doublereal *alpha,
+ doublereal *a, integer *lda, doublereal *x, integer *incx, doublereal
+ *beta, doublereal *y, integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static doublereal temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ DSYMV performs the matrix-vector operation
+
+ y := alpha*A*x + beta*y,
+
+ where alpha and beta are scalars, x and y are n element vectors and
+ A is an n by n symmetric matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array A is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of A
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of A
+ is to be referenced.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular part of the symmetric matrix and the strictly
+ lower triangular part of A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular part of the symmetric matrix and the strictly
+ upper triangular part of A is not referenced.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - DOUBLE PRECISION array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ BETA - DOUBLE PRECISION.
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then Y need not be set on input.
+ Unchanged on exit.
+
+ Y - DOUBLE PRECISION array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y. On exit, Y is overwritten by the updated
+ vector y.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*lda < max(1,*n)) {
+ info = 5;
+ } else if (*incx == 0) {
+ info = 7;
+ } else if (*incy == 0) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("DSYMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (*alpha == 0. && *beta == 1.)) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through the triangular part
+ of A.
+
+ First form y := beta*y.
+*/
+
+ if (*beta != 1.) {
+ if (*incy == 1) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = 0.;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = *beta * y[i__];
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = 0.;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = *beta * y[iy];
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (*alpha == 0.) {
+ return 0;
+ }
+ if (lsame_(uplo, "U")) {
+
+/* Form y when A is stored in upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[j];
+ temp2 = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ y[i__] += temp1 * a[i__ + j * a_dim1];
+ temp2 += a[i__ + j * a_dim1] * x[i__];
+/* L50: */
+ }
+ y[j] = y[j] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[jx];
+ temp2 = 0.;
+ ix = kx;
+ iy = ky;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ y[iy] += temp1 * a[i__ + j * a_dim1];
+ temp2 += a[i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ y[jy] = y[jy] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when A is stored in lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[j];
+ temp2 = 0.;
+ y[j] += temp1 * a[j + j * a_dim1];
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ y[i__] += temp1 * a[i__ + j * a_dim1];
+ temp2 += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ y[j] += *alpha * temp2;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[jx];
+ temp2 = 0.;
+ y[jy] += temp1 * a[j + j * a_dim1];
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ y[iy] += temp1 * a[i__ + j * a_dim1];
+ temp2 += a[i__ + j * a_dim1] * x[ix];
+/* L110: */
+ }
+ y[jy] += *alpha * temp2;
+ jx += *incx;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DSYMV . */
+
+} /* dsymv_ */
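+
+/*
+   Illustrative usage sketch for DSYMV (hypothetical, fenced off with
+   #if 0 so it does not affect compilation; assumes the f2c typedefs
+   integer and doublereal used throughout this file). It computes
+   y := alpha*A*x + beta*y for a 2 by 2 symmetric A stored column-major
+   with only its upper triangle referenced.
+*/
+#if 0
+static void dsymv_usage_sketch(void)
+{
+    integer n = 2, lda = 2, incx = 1, incy = 1;
+    doublereal alpha = 1., beta = 0.;
+    /* Column-major storage; a[1] lies in the strictly lower triangle
+       and is never referenced when UPLO = 'U'. */
+    doublereal a[4] = {2., 0., 1., 3.};
+    doublereal x[2] = {1., 1.};
+    doublereal y[2];
+
+    dsymv_("U", &n, &alpha, a, &lda, x, &incx, &beta, y, &incy);
+    /* A = [[2,1],[1,3]], so y now holds {3., 4.}. */
+}
+#endif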
+
+/* Subroutine */ int dsyr2_(char *uplo, integer *n, doublereal *alpha,
+ doublereal *x, integer *incx, doublereal *y, integer *incy,
+ doublereal *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static doublereal temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ DSYR2 performs the symmetric rank 2 operation
+
+ A := alpha*x*y' + alpha*y*x' + A,
+
+ where alpha is a scalar, x and y are n element vectors and A is an n
+ by n symmetric matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array A is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of A
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of A
+ is to be referenced.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ X - DOUBLE PRECISION array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ Y - DOUBLE PRECISION array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y.
+ Unchanged on exit.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular part of the symmetric matrix and the strictly
+ lower triangular part of A is not referenced. On exit, the
+ upper triangular part of the array A is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular part of the symmetric matrix and the strictly
+ upper triangular part of A is not referenced. On exit, the
+ lower triangular part of the array A is overwritten by the
+ lower triangular part of the updated matrix.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*n)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("DSYR2 ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (*alpha == 0.)) {
+ return 0;
+ }
+
+/*
+ Set up the start points in X and Y if the increments are not both
+ unity.
+*/
+
+ if ((*incx != 1) || (*incy != 1)) {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+ jx = kx;
+ jy = ky;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through the triangular part
+ of A.
+*/
+
+ if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in the upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((x[j] != 0.) || (y[j] != 0.)) {
+ temp1 = *alpha * y[j];
+ temp2 = *alpha * x[j];
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
+ temp1 + y[i__] * temp2;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((x[jx] != 0.) || (y[jy] != 0.)) {
+ temp1 = *alpha * y[jy];
+ temp2 = *alpha * x[jx];
+ ix = kx;
+ iy = ky;
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
+ temp1 + y[iy] * temp2;
+ ix += *incx;
+ iy += *incy;
+/* L30: */
+ }
+ }
+ jx += *incx;
+ jy += *incy;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when A is stored in the lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((x[j] != 0.) || (y[j] != 0.)) {
+ temp1 = *alpha * y[j];
+ temp2 = *alpha * x[j];
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
+ temp1 + y[i__] * temp2;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((x[jx] != 0.) || (y[jy] != 0.)) {
+ temp1 = *alpha * y[jy];
+ temp2 = *alpha * x[jx];
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
+ temp1 + y[iy] * temp2;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ }
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DSYR2 . */
+
+} /* dsyr2_ */
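+
+/*
+   Illustrative usage sketch for DSYR2 (hypothetical, excluded from the
+   build with #if 0): a rank 2 update of the identity with x = e1 and
+   y = e2, which adds alpha*( x*y' + y*x' ) = [[0,1],[1,0]].
+*/
+#if 0
+static void dsyr2_usage_sketch(void)
+{
+    integer n = 2, lda = 2, incx = 1, incy = 1;
+    doublereal alpha = 1.;
+    doublereal x[2] = {1., 0.};
+    doublereal y[2] = {0., 1.};
+    doublereal a[4] = {1., 0., 0., 1.};  /* identity, column-major */
+
+    dsyr2_("U", &n, &alpha, x, &incx, y, &incy, a, &lda);
+    /* Only the upper triangle is updated: a(1,2) gains
+       alpha*(x[0]*y[1] + y[0]*x[1]) = 1, giving a = {1., 0., 1., 1.}. */
+}
+#endif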
+
+/* Subroutine */ int dsyr2k_(char *uplo, char *trans, integer *n, integer *k,
+ doublereal *alpha, doublereal *a, integer *lda, doublereal *b,
+ integer *ldb, doublereal *beta, doublereal *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static doublereal temp1, temp2;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ DSYR2K performs one of the symmetric rank 2k operations
+
+ C := alpha*A*B' + alpha*B*A' + beta*C,
+
+ or
+
+ C := alpha*A'*B + alpha*B'*A + beta*C,
+
+ where alpha and beta are scalars, C is an n by n symmetric matrix
+ and A and B are n by k matrices in the first case and k by n
+ matrices in the second case.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array C is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of C
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of C
+ is to be referenced.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' C := alpha*A*B' + alpha*B*A' +
+ beta*C.
+
+ TRANS = 'T' or 't' C := alpha*A'*B + alpha*B'*A +
+ beta*C.
+
+ TRANS = 'C' or 'c' C := alpha*A'*B + alpha*B'*A +
+ beta*C.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry with TRANS = 'N' or 'n', K specifies the number
+ of columns of the matrices A and B, and on entry with
+ TRANS = 'T' or 't' or 'C' or 'c', K specifies the number
+ of rows of the matrices A and B. K must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by n part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDA must be at least max( 1, n ), otherwise LDA must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array B must contain the matrix B, otherwise
+ the leading k by n part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDB must be at least max( 1, n ), otherwise LDB must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ BETA - DOUBLE PRECISION.
+ On entry, BETA specifies the scalar beta.
+ Unchanged on exit.
+
+ C - DOUBLE PRECISION array of DIMENSION ( LDC, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array C must contain the upper
+ triangular part of the symmetric matrix and the strictly
+ lower triangular part of C is not referenced. On exit, the
+ upper triangular part of the array C is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array C must contain the lower
+ triangular part of the symmetric matrix and the strictly
+ upper triangular part of C is not referenced. On exit, the
+ lower triangular part of the array C is overwritten by the
+ lower triangular part of the updated matrix.
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldc < max(1,*n)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("DSYR2K", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (((*alpha == 0.) || (*k == 0)) && *beta == 1.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ if (upper) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*B' + alpha*B*A' + C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L90: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L100: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if ((a[j + l * a_dim1] != 0.) || (b[j + l * b_dim1] != 0.)
+ ) {
+ temp1 = *alpha * b[j + l * b_dim1];
+ temp2 = *alpha * a[j + l * a_dim1];
+ i__3 = j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
+ i__ + l * a_dim1] * temp1 + b[i__ + l *
+ b_dim1] * temp2;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L140: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L150: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if ((a[j + l * a_dim1] != 0.) || (b[j + l * b_dim1] != 0.)
+ ) {
+ temp1 = *alpha * b[j + l * b_dim1];
+ temp2 = *alpha * a[j + l * a_dim1];
+ i__3 = *n;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
+ i__ + l * a_dim1] * temp1 + b[i__ + l *
+ b_dim1] * temp2;
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*A'*B + alpha*B'*A + C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp1 = 0.;
+ temp2 = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+ temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
+/* L190: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
+ temp2;
+ } else {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+ + *alpha * temp1 + *alpha * temp2;
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp1 = 0.;
+ temp2 = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+ temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
+/* L220: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
+ temp2;
+ } else {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+ + *alpha * temp1 + *alpha * temp2;
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DSYR2K. */
+
+} /* dsyr2k_ */
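+
+/*
+   Illustrative usage sketch for DSYR2K (hypothetical, excluded from the
+   build with #if 0): with n = 2, k = 1, A = e1 and B = e2,
+   alpha*A*B' + alpha*B*A' = [[0,1],[1,0]]. Since BETA is zero, C need
+   not be initialised; only its upper triangle is written.
+*/
+#if 0
+static void dsyr2k_usage_sketch(void)
+{
+    integer n = 2, k = 1, lda = 2, ldb = 2, ldc = 2;
+    doublereal alpha = 1., beta = 0.;
+    doublereal a[2] = {1., 0.};  /* 2 by 1 */
+    doublereal b[2] = {0., 1.};  /* 2 by 1 */
+    doublereal c[4];
+
+    dsyr2k_("U", "N", &n, &k, &alpha, a, &lda, b, &ldb, &beta, c, &ldc);
+    /* Upper triangle written: c[0] = 0., c[2] = 1., c[3] = 0.;
+       c[1] (strictly lower) is untouched. */
+}
+#endif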
+
+/* Subroutine */ int dsyrk_(char *uplo, char *trans, integer *n, integer *k,
+ doublereal *alpha, doublereal *a, integer *lda, doublereal *beta,
+ doublereal *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static doublereal temp;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ DSYRK performs one of the symmetric rank k operations
+
+ C := alpha*A*A' + beta*C,
+
+ or
+
+ C := alpha*A'*A + beta*C,
+
+ where alpha and beta are scalars, C is an n by n symmetric matrix
+ and A is an n by k matrix in the first case and a k by n matrix
+ in the second case.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array C is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of C
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of C
+ is to be referenced.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' C := alpha*A*A' + beta*C.
+
+ TRANS = 'T' or 't' C := alpha*A'*A + beta*C.
+
+ TRANS = 'C' or 'c' C := alpha*A'*A + beta*C.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry with TRANS = 'N' or 'n', K specifies the number
+ of columns of the matrix A, and on entry with
+ TRANS = 'T' or 't' or 'C' or 'c', K specifies the number
+ of rows of the matrix A. K must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by n part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDA must be at least max( 1, n ), otherwise LDA must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ BETA - DOUBLE PRECISION.
+ On entry, BETA specifies the scalar beta.
+ Unchanged on exit.
+
+ C - DOUBLE PRECISION array of DIMENSION ( LDC, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array C must contain the upper
+ triangular part of the symmetric matrix and the strictly
+ lower triangular part of C is not referenced. On exit, the
+ upper triangular part of the array C is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array C must contain the lower
+ triangular part of the symmetric matrix and the strictly
+ upper triangular part of C is not referenced. On exit, the
+ lower triangular part of the array C is overwritten by the
+ lower triangular part of the updated matrix.
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldc < max(1,*n)) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("DSYRK ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (((*alpha == 0.) || (*k == 0)) && *beta == 1.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ if (upper) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*A' + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L90: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L100: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0.) {
+ temp = *alpha * a[j + l * a_dim1];
+ i__3 = j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.;
+/* L140: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L150: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0.) {
+ temp = *alpha * a[j + l * a_dim1];
+ i__3 = *n;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*A'*A + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
+/* L190: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
+/* L220: */
+ }
+ if (*beta == 0.) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DSYRK . */
+
+} /* dsyrk_ */
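+
+/*
+   Illustrative usage sketch for DSYRK (hypothetical, excluded from the
+   build with #if 0): C := alpha*A*A' with a single column A = (1, 2)',
+   so A*A' = [[1,2],[2,4]].
+*/
+#if 0
+static void dsyrk_usage_sketch(void)
+{
+    integer n = 2, k = 1, lda = 2, ldc = 2;
+    doublereal alpha = 1., beta = 0.;
+    doublereal a[2] = {1., 2.};  /* 2 by 1 */
+    doublereal c[4];             /* BETA = 0, so C need not be set */
+
+    dsyrk_("U", "N", &n, &k, &alpha, a, &lda, &beta, c, &ldc);
+    /* Upper triangle written: c[0] = 1., c[2] = 2., c[3] = 4. */
+}
+#endif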
+
+/* Subroutine */ int dtrmm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, doublereal *alpha, doublereal *a, integer *
+ lda, doublereal *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, k, info;
+ static doublereal temp;
+ static logical lside;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+ Purpose
+ =======
+
+ DTRMM performs one of the matrix-matrix operations
+
+ B := alpha*op( A )*B, or B := alpha*B*op( A ),
+
+ where alpha is a scalar, B is an m by n matrix, A is a unit, or
+ non-unit, upper or lower triangular matrix and op( A ) is one of
+
+ op( A ) = A or op( A ) = A'.
+
+ Parameters
+ ==========
+
+ SIDE - CHARACTER*1.
+ On entry, SIDE specifies whether op( A ) multiplies B from
+ the left or right as follows:
+
+ SIDE = 'L' or 'l' B := alpha*op( A )*B.
+
+ SIDE = 'R' or 'r' B := alpha*B*op( A ).
+
+ Unchanged on exit.
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix A is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n' op( A ) = A.
+
+ TRANSA = 'T' or 't' op( A ) = A'.
+
+ TRANSA = 'C' or 'c' op( A ) = A'.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit triangular
+ as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of B. M must be at
+ least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of B. N must be
+ at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha. When alpha is
+ zero then A is not referenced and B need not be set before
+ entry.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m
+ when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
+ Before entry with UPLO = 'U' or 'u', the leading k by k
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading k by k
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When SIDE = 'L' or 'l' then
+ LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
+ then LDA must be at least max( 1, n ).
+ Unchanged on exit.
+
+ B - DOUBLE PRECISION array of DIMENSION ( LDB, n ).
+ Before entry, the leading m by n part of the array B must
+ contain the matrix B, and on exit is overwritten by the
+ transformed matrix.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. LDB must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("DTRMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*A*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ if (b[k + j * b_dim1] != 0.) {
+ temp = *alpha * b[k + j * b_dim1];
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * a[i__ + k *
+ a_dim1];
+/* L30: */
+ }
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ b[k + j * b_dim1] = temp;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (k = *m; k >= 1; --k) {
+ if (b[k + j * b_dim1] != 0.) {
+ temp = *alpha * b[k + j * b_dim1];
+ b[k + j * b_dim1] = temp;
+ if (nounit) {
+ b[k + j * b_dim1] *= a[k + k * a_dim1];
+ }
+ i__2 = *m;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * a[i__ + k *
+ a_dim1];
+/* L60: */
+ }
+ }
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*A'*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ temp = b[i__ + j * b_dim1];
+ if (nounit) {
+ temp *= a[i__ + i__ * a_dim1];
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L90: */
+ }
+ b[i__ + j * b_dim1] = *alpha * temp;
+/* L100: */
+ }
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = b[i__ + j * b_dim1];
+ if (nounit) {
+ temp *= a[i__ + i__ * a_dim1];
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L120: */
+ }
+ b[i__ + j * b_dim1] = *alpha * temp;
+/* L130: */
+ }
+/* L140: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*A. */
+
+ if (upper) {
+ for (j = *n; j >= 1; --j) {
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L150: */
+ }
+ i__1 = j - 1;
+ for (k = 1; k <= i__1; ++k) {
+ if (a[k + j * a_dim1] != 0.) {
+ temp = *alpha * a[k + j * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L190: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ if (a[k + j * a_dim1] != 0.) {
+ temp = *alpha * a[k + j * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+/* L220: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*A'. */
+
+ if (upper) {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k - 1;
+ for (j = 1; j <= i__2; ++j) {
+ if (a[j + k * a_dim1] != 0.) {
+ temp = *alpha * a[j + k * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ if (temp != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ } else {
+ for (k = *n; k >= 1; --k) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ if (a[j + k * a_dim1] != 0.) {
+ temp = *alpha * a[j + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ if (temp != 1.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L290: */
+ }
+ }
+/* L300: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTRMM . */
+
+} /* dtrmm_ */
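+
+/*
+   Illustrative usage sketch for DTRMM (hypothetical, excluded from the
+   build with #if 0): B := alpha*A*B from the left with the upper
+   triangular A = [[1,2],[0,3]] and a single right-hand column.
+*/
+#if 0
+static void dtrmm_usage_sketch(void)
+{
+    integer m = 2, n = 1, lda = 2, ldb = 2;
+    doublereal alpha = 1.;
+    /* Column-major; a[1] is strictly lower and never referenced. */
+    doublereal a[4] = {1., 0., 2., 3.};
+    doublereal b[2] = {1., 1.};
+
+    dtrmm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);
+    /* b is overwritten with A*b = {3., 3.}. */
+}
+#endif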
+
+/* Subroutine */ int dtrmv_(char *uplo, char *trans, char *diag, integer *n,
+ doublereal *a, integer *lda, doublereal *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, ix, jx, kx, info;
+ static doublereal temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+ Purpose
+ =======
+
+ DTRMV performs one of the matrix-vector operations
+
+ x := A*x, or x := A'*x,
+
+ where x is an n element vector and A is an n by n unit, or non-unit,
+ upper or lower triangular matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' x := A*x.
+
+ TRANS = 'T' or 't' x := A'*x.
+
+ TRANS = 'C' or 'c' x := A'*x.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit
+ triangular as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - DOUBLE PRECISION array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x. On exit, X is overwritten with the
+             transformed vector x.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("DTRMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/*
+ Set up the start point in X if the increment is not unity. This
+ will be ( N - 1 )*INCX too small for descending loops.
+*/
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.) {
+ temp = x[j];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__] += temp * a[i__ + j * a_dim1];
+/* L10: */
+ }
+ if (nounit) {
+ x[j] *= a[j + j * a_dim1];
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.) {
+ temp = x[jx];
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[ix] += temp * a[i__ + j * a_dim1];
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ x[jx] *= a[j + j * a_dim1];
+ }
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.) {
+ temp = x[j];
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[i__] += temp * a[i__ + j * a_dim1];
+/* L50: */
+ }
+ if (nounit) {
+ x[j] *= a[j + j * a_dim1];
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.) {
+ temp = x[jx];
+ ix = kx;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[ix] += temp * a[i__ + j * a_dim1];
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ x[jx] *= a[j + j * a_dim1];
+ }
+ }
+ jx -= *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ x[j] = temp;
+/* L100: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ temp += a[i__ + j * a_dim1] * x[ix];
+/* L110: */
+ }
+ x[jx] = temp;
+ jx -= *incx;
+/* L120: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L130: */
+ }
+ x[j] = temp;
+/* L140: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ temp += a[i__ + j * a_dim1] * x[ix];
+/* L150: */
+ }
+ x[jx] = temp;
+ jx += *incx;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTRMV . */
+
+} /* dtrmv_ */
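+
+/*
+   Illustrative usage sketch for DTRMV (hypothetical, excluded from the
+   build with #if 0): the vector form of the DTRMM example above,
+   x := A*x with the upper triangular A = [[1,2],[0,3]].
+*/
+#if 0
+static void dtrmv_usage_sketch(void)
+{
+    integer n = 2, lda = 2, incx = 1;
+    doublereal a[4] = {1., 0., 2., 3.};  /* column-major, upper triangular */
+    doublereal x[2] = {1., 1.};
+
+    dtrmv_("U", "N", "N", &n, a, &lda, x, &incx);
+    /* x is overwritten with A*x = {3., 3.}. */
+}
+#endif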
+
+/* Subroutine */ int dtrsm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, doublereal *alpha, doublereal *a, integer *
+ lda, doublereal *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, k, info;
+ static doublereal temp;
+ static logical lside;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+ Purpose
+ =======
+
+ DTRSM solves one of the matrix equations
+
+ op( A )*X = alpha*B, or X*op( A ) = alpha*B,
+
+ where alpha is a scalar, X and B are m by n matrices, A is a unit, or
+ non-unit, upper or lower triangular matrix and op( A ) is one of
+
+ op( A ) = A or op( A ) = A'.
+
+ The matrix X is overwritten on B.
+
+ Parameters
+ ==========
+
+ SIDE - CHARACTER*1.
+ On entry, SIDE specifies whether op( A ) appears on the left
+ or right of X as follows:
+
+ SIDE = 'L' or 'l' op( A )*X = alpha*B.
+
+ SIDE = 'R' or 'r' X*op( A ) = alpha*B.
+
+ Unchanged on exit.
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix A is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n' op( A ) = A.
+
+ TRANSA = 'T' or 't' op( A ) = A'.
+
+ TRANSA = 'C' or 'c' op( A ) = A'.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit triangular
+ as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of B. M must be at
+ least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of B. N must be
+ at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION.
+ On entry, ALPHA specifies the scalar alpha. When alpha is
+ zero then A is not referenced and B need not be set before
+ entry.
+ Unchanged on exit.
+
+ A - DOUBLE PRECISION array of DIMENSION ( LDA, k ), where k is m
+ when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
+ Before entry with UPLO = 'U' or 'u', the leading k by k
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading k by k
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When SIDE = 'L' or 'l' then
+ LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
+ then LDA must be at least max( 1, n ).
+ Unchanged on exit.
+
+ B - DOUBLE PRECISION array of DIMENSION ( LDB, n ).
+ Before entry, the leading m by n part of the array B must
+ contain the right-hand side matrix B, and on exit is
+ overwritten by the solution matrix X.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. LDB must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("DTRSM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*inv( A )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L30: */
+ }
+ }
+ for (k = *m; k >= 1; --k) {
+ if (b[k + j * b_dim1] != 0.) {
+ if (nounit) {
+ b[k + j * b_dim1] /= a[k + k * a_dim1];
+ }
+ i__2 = k - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
+ i__ + k * a_dim1];
+/* L40: */
+ }
+ }
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L70: */
+ }
+ }
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ if (b[k + j * b_dim1] != 0.) {
+ if (nounit) {
+ b[k + j * b_dim1] /= a[k + k * a_dim1];
+ }
+ i__3 = *m;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
+ i__ + k * a_dim1];
+/* L80: */
+ }
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*inv( A' )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = *alpha * b[i__ + j * b_dim1];
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L110: */
+ }
+ if (nounit) {
+ temp /= a[i__ + i__ * a_dim1];
+ }
+ b[i__ + j * b_dim1] = temp;
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ temp = *alpha * b[i__ + j * b_dim1];
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L140: */
+ }
+ if (nounit) {
+ temp /= a[i__ + i__ * a_dim1];
+ }
+ b[i__ + j * b_dim1] = temp;
+/* L150: */
+ }
+/* L160: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*inv( A ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L170: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ if (a[k + j * a_dim1] != 0.) {
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
+ i__ + k * b_dim1];
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+ if (nounit) {
+ temp = 1. / a[j + j * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+ } else {
+ for (j = *n; j >= 1; --j) {
+ if (*alpha != 1.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L220: */
+ }
+ }
+ i__1 = *n;
+ for (k = j + 1; k <= i__1; ++k) {
+ if (a[k + j * a_dim1] != 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
+ i__ + k * b_dim1];
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ if (nounit) {
+ temp = 1. / a[j + j * a_dim1];
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*inv( A' ). */
+
+ if (upper) {
+ for (k = *n; k >= 1; --k) {
+ if (nounit) {
+ temp = 1. / a[k + k * a_dim1];
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L270: */
+ }
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ if (a[j + k * a_dim1] != 0.) {
+ temp = a[j + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= temp * b[i__ + k *
+ b_dim1];
+/* L280: */
+ }
+ }
+/* L290: */
+ }
+ if (*alpha != 1.) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
+ ;
+/* L300: */
+ }
+ }
+/* L310: */
+ }
+ } else {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (nounit) {
+ temp = 1. / a[k + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L320: */
+ }
+ }
+ i__2 = *n;
+ for (j = k + 1; j <= i__2; ++j) {
+ if (a[j + k * a_dim1] != 0.) {
+ temp = a[j + k * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= temp * b[i__ + k *
+ b_dim1];
+/* L330: */
+ }
+ }
+/* L340: */
+ }
+ if (*alpha != 1.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
+ ;
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTRSM . */
+
+} /* dtrsm_ */
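+
+/*
+   Illustrative usage sketch for DTRSM (hypothetical, excluded from the
+   build with #if 0): solving A*X = B with the same upper triangular
+   A = [[1,2],[0,3]] as the DTRMM example, inverting that product.
+*/
+#if 0
+static void dtrsm_usage_sketch(void)
+{
+    integer m = 2, n = 1, lda = 2, ldb = 2;
+    doublereal alpha = 1.;
+    doublereal a[4] = {1., 0., 2., 3.};
+    doublereal b[2] = {3., 3.};
+
+    dtrsm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);
+    /* b is overwritten with the solution X = {1., 1.}. */
+}
+#endif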
+
+doublereal dzasum_(integer *n, doublecomplex *zx, integer *incx)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal ret_val;
+
+ /* Local variables */
+ static integer i__, ix;
+ static doublereal stemp;
+ extern doublereal dcabs1_(doublecomplex *);
+
+
+/*
+ takes the sum of the absolute values.
+ jack dongarra, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --zx;
+
+ /* Function Body */
+ ret_val = 0.;
+ stemp = 0.;
+ if ((*n <= 0) || (*incx <= 0)) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp += dcabs1_(&zx[ix]);
+ ix += *incx;
+/* L10: */
+ }
+ ret_val = stemp;
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp += dcabs1_(&zx[i__]);
+/* L30: */
+ }
+ ret_val = stemp;
+ return ret_val;
+} /* dzasum_ */
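+
+/*
+   Illustrative usage sketch for dzasum (hypothetical, excluded from the
+   build with #if 0). Note that each element contributes |Re| + |Im|
+   (via dcabs1), not the complex modulus.
+*/
+#if 0
+static void dzasum_usage_sketch(void)
+{
+    integer n = 2, incx = 1;
+    doublecomplex zx[2] = {{3., -4.}, {0., 1.}};
+    doublereal s;
+
+    s = dzasum_(&n, zx, &incx);
+    /* s = (3 + 4) + (0 + 1) = 8., not |3-4i| + |i| = 6. */
+}
+#endif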
+
+doublereal dznrm2_(integer *n, doublecomplex *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublereal ret_val, d__1;
+
+ /* Builtin functions */
+ double d_imag(doublecomplex *), sqrt(doublereal);
+
+ /* Local variables */
+ static integer ix;
+ static doublereal ssq, temp, norm, scale;
+
+
+/*
+    DZNRM2 returns the Euclidean norm of a vector via the function
+ name, so that
+
+ DZNRM2 := sqrt( conjg( x' )*x )
+
+
+ -- This version written on 25-October-1982.
+ Modified on 14-October-1993 to inline the call to ZLASSQ.
+ Sven Hammarling, Nag Ltd.
+*/
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if ((*n < 1) || (*incx < 1)) {
+ norm = 0.;
+ } else {
+ scale = 0.;
+ ssq = 1.;
+/*
+ The following loop is equivalent to this call to the LAPACK
+ auxiliary routine:
+ CALL ZLASSQ( N, X, INCX, SCALE, SSQ )
+*/
+
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ i__3 = ix;
+ if (x[i__3].r != 0.) {
+ i__3 = ix;
+ temp = (d__1 = x[i__3].r, abs(d__1));
+ if (scale < temp) {
+/* Computing 2nd power */
+ d__1 = scale / temp;
+ ssq = ssq * (d__1 * d__1) + 1.;
+ scale = temp;
+ } else {
+/* Computing 2nd power */
+ d__1 = temp / scale;
+ ssq += d__1 * d__1;
+ }
+ }
+ if (d_imag(&x[ix]) != 0.) {
+ temp = (d__1 = d_imag(&x[ix]), abs(d__1));
+ if (scale < temp) {
+/* Computing 2nd power */
+ d__1 = scale / temp;
+ ssq = ssq * (d__1 * d__1) + 1.;
+ scale = temp;
+ } else {
+/* Computing 2nd power */
+ d__1 = temp / scale;
+ ssq += d__1 * d__1;
+ }
+ }
+/* L10: */
+ }
+ norm = scale * sqrt(ssq);
+ }
+
+ ret_val = norm;
+ return ret_val;
+
+/* End of DZNRM2. */
+
+} /* dznrm2_ */
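+
+/*
+   Illustrative usage sketch for DZNRM2 (hypothetical, excluded from the
+   build with #if 0): the scaled sum-of-squares loop above returns the
+   true Euclidean norm without overflowing on large components.
+*/
+#if 0
+static void dznrm2_usage_sketch(void)
+{
+    integer n = 2, incx = 1;
+    doublecomplex x[2] = {{3., 0.}, {0., 4.}};
+    doublereal nrm;
+
+    nrm = dznrm2_(&n, x, &incx);
+    /* nrm = sqrt(9 + 16) = 5. */
+}
+#endif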
+
+integer icamax_(integer *n, complex *cx, integer *incx)
+{
+ /* System generated locals */
+ integer ret_val, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ static integer i__, ix;
+ static real smax;
+
+
+/*
+ finds the index of element having max. absolute value.
+ jack dongarra, linpack, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --cx;
+
+ /* Function Body */
+ ret_val = 0;
+ if ((*n < 1) || (*incx <= 0)) {
+ return ret_val;
+ }
+ ret_val = 1;
+ if (*n == 1) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ smax = (r__1 = cx[1].r, dabs(r__1)) + (r__2 = r_imag(&cx[1]), dabs(r__2));
+ ix += *incx;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ if ((r__1 = cx[i__2].r, dabs(r__1)) + (r__2 = r_imag(&cx[ix]), dabs(
+ r__2)) <= smax) {
+ goto L5;
+ }
+ ret_val = i__;
+ i__2 = ix;
+ smax = (r__1 = cx[i__2].r, dabs(r__1)) + (r__2 = r_imag(&cx[ix]),
+ dabs(r__2));
+L5:
+ ix += *incx;
+/* L10: */
+ }
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ smax = (r__1 = cx[1].r, dabs(r__1)) + (r__2 = r_imag(&cx[1]), dabs(r__2));
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ if ((r__1 = cx[i__2].r, dabs(r__1)) + (r__2 = r_imag(&cx[i__]), dabs(
+ r__2)) <= smax) {
+ goto L30;
+ }
+ ret_val = i__;
+ i__2 = i__;
+ smax = (r__1 = cx[i__2].r, dabs(r__1)) + (r__2 = r_imag(&cx[i__]),
+ dabs(r__2));
+L30:
+ ;
+ }
+ return ret_val;
+} /* icamax_ */
+
+integer idamax_(integer *n, doublereal *dx, integer *incx)
+{
+ /* System generated locals */
+ integer ret_val, i__1;
+ doublereal d__1;
+
+ /* Local variables */
+ static integer i__, ix;
+ static doublereal dmax__;
+
+
+/*
+ finds the index of element having max. absolute value.
+ jack dongarra, linpack, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --dx;
+
+ /* Function Body */
+ ret_val = 0;
+ if ((*n < 1) || (*incx <= 0)) {
+ return ret_val;
+ }
+ ret_val = 1;
+ if (*n == 1) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ dmax__ = abs(dx[1]);
+ ix += *incx;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if ((d__1 = dx[ix], abs(d__1)) <= dmax__) {
+ goto L5;
+ }
+ ret_val = i__;
+ dmax__ = (d__1 = dx[ix], abs(d__1));
+L5:
+ ix += *incx;
+/* L10: */
+ }
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ dmax__ = abs(dx[1]);
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if ((d__1 = dx[i__], abs(d__1)) <= dmax__) {
+ goto L30;
+ }
+ ret_val = i__;
+ dmax__ = (d__1 = dx[i__], abs(d__1));
+L30:
+ ;
+ }
+ return ret_val;
+} /* idamax_ */
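+
+/*
+   Illustrative usage sketch for idamax (hypothetical, excluded from the
+   build with #if 0). The return value is a 1-based Fortran-style index;
+   icamax/isamax follow the same pattern for the other element types.
+*/
+#if 0
+static void idamax_usage_sketch(void)
+{
+    integer n = 3, incx = 1;
+    doublereal dx[3] = {1., -7., 3.};
+    integer imax;
+
+    imax = idamax_(&n, dx, &incx);
+    /* imax = 2: the first element of largest absolute value. */
+}
+#endif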
+
+integer isamax_(integer *n, real *sx, integer *incx)
+{
+ /* System generated locals */
+ integer ret_val, i__1;
+ real r__1;
+
+ /* Local variables */
+ static integer i__, ix;
+ static real smax;
+
+
+/*
+ finds the index of element having max. absolute value.
+ jack dongarra, linpack, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --sx;
+
+ /* Function Body */
+ ret_val = 0;
+ if ((*n < 1) || (*incx <= 0)) {
+ return ret_val;
+ }
+ ret_val = 1;
+ if (*n == 1) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ smax = dabs(sx[1]);
+ ix += *incx;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if ((r__1 = sx[ix], dabs(r__1)) <= smax) {
+ goto L5;
+ }
+ ret_val = i__;
+ smax = (r__1 = sx[ix], dabs(r__1));
+L5:
+ ix += *incx;
+/* L10: */
+ }
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ smax = dabs(sx[1]);
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if ((r__1 = sx[i__], dabs(r__1)) <= smax) {
+ goto L30;
+ }
+ ret_val = i__;
+ smax = (r__1 = sx[i__], dabs(r__1));
+L30:
+ ;
+ }
+ return ret_val;
+} /* isamax_ */
+
+integer izamax_(integer *n, doublecomplex *zx, integer *incx)
+{
+ /* System generated locals */
+ integer ret_val, i__1;
+
+ /* Local variables */
+ static integer i__, ix;
+ static doublereal smax;
+ extern doublereal dcabs1_(doublecomplex *);
+
+
+/*
+ finds the index of element having max. absolute value.
+ jack dongarra, 1/15/85.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --zx;
+
+ /* Function Body */
+ ret_val = 0;
+ if ((*n < 1) || (*incx <= 0)) {
+ return ret_val;
+ }
+ ret_val = 1;
+ if (*n == 1) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ smax = dcabs1_(&zx[1]);
+ ix += *incx;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if (dcabs1_(&zx[ix]) <= smax) {
+ goto L5;
+ }
+ ret_val = i__;
+ smax = dcabs1_(&zx[ix]);
+L5:
+ ix += *incx;
+/* L10: */
+ }
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ smax = dcabs1_(&zx[1]);
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ if (dcabs1_(&zx[i__]) <= smax) {
+ goto L30;
+ }
+ ret_val = i__;
+ smax = dcabs1_(&zx[i__]);
+L30:
+ ;
+ }
+ return ret_val;
+} /* izamax_ */
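+
+/*
+   Illustrative usage sketch, not part of the f2c output: the four
+   i?amax_ routines above return a 1-based (Fortran-style) index, or 0
+   when n < 1 or incx <= 0, and ties go to the earliest element.  The
+   complex variants (icamax_, izamax_) rank elements by |Re| + |Im|
+   rather than by the true modulus.  Note also that f2c's `static'
+   locals make all of these translations non-reentrant.  The
+   hypothetical wrapper below assumes the f2c typedefs declared
+   earlier in this file.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_idamax(void)
+{
+    integer n = 4, inc = 1;
+    doublereal x[4] = {1., -7., 3., 7.};
+    /* yields 2: the first element of largest absolute value */
+    integer i = idamax_(&n, x, &inc);
+}
+#endif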
+
+logical lsame_(char *ca, char *cb)
+{
+ /* System generated locals */
+ logical ret_val;
+
+ /* Local variables */
+ static integer inta, intb, zcode;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ LSAME returns .TRUE. if CA is the same letter as CB regardless of
+ case.
+
+ Arguments
+ =========
+
+ CA (input) CHARACTER*1
+ CB (input) CHARACTER*1
+ CA and CB specify the single characters to be compared.
+
+ =====================================================================
+
+
+ Test if the characters are equal
+*/
+
+ ret_val = *(unsigned char *)ca == *(unsigned char *)cb;
+ if (ret_val) {
+ return ret_val;
+ }
+
+/* Now test for equivalence if both characters are alphabetic. */
+
+ zcode = 'Z';
+
+/*
+ Use 'Z' rather than 'A' so that ASCII can be detected on Prime
+ machines, on which ICHAR returns a value with bit 8 set.
+ ICHAR('A') on Prime machines returns 193 which is the same as
+ ICHAR('A') on an EBCDIC machine.
+*/
+
+ inta = *(unsigned char *)ca;
+ intb = *(unsigned char *)cb;
+
+ if ((zcode == 90) || (zcode == 122)) {
+
+/*
+ ASCII is assumed - ZCODE is the ASCII code of either lower or
+ upper case 'Z'.
+*/
+
+ if (inta >= 97 && inta <= 122) {
+ inta += -32;
+ }
+ if (intb >= 97 && intb <= 122) {
+ intb += -32;
+ }
+
+ } else if ((zcode == 233) || (zcode == 169)) {
+
+/*
+ EBCDIC is assumed - ZCODE is the EBCDIC code of either lower or
+ upper case 'Z'.
+*/
+
+ if (((inta >= 129 && inta <= 137) || (inta >= 145 && inta <= 153)) ||
+ (inta >= 162 && inta <= 169)) {
+ inta += 64;
+ }
+ if (((intb >= 129 && intb <= 137) || (intb >= 145 && intb <= 153)) ||
+ (intb >= 162 && intb <= 169)) {
+ intb += 64;
+ }
+
+ } else if ((zcode == 218) || (zcode == 250)) {
+
+/*
+ ASCII is assumed, on Prime machines - ZCODE is the ASCII code
+ plus 128 of either lower or upper case 'Z'.
+*/
+
+ if (inta >= 225 && inta <= 250) {
+ inta += -32;
+ }
+ if (intb >= 225 && intb <= 250) {
+ intb += -32;
+ }
+ }
+ ret_val = inta == intb;
+
+/*
+ RETURN
+
+ End of LSAME
+*/
+
+ return ret_val;
+} /* lsame_ */
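+
+/*
+   Illustrative usage sketch (hypothetical, not part of the f2c
+   output): lsame_ compares two single characters without regard to
+   case, using the ASCII/EBCDIC/Prime detection described above.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_lsame(void)
+{
+    logical t1 = lsame_("N", "n");   /* true: same letter */
+    logical t2 = lsame_("t", "T");   /* true: same letter */
+    logical f1 = lsame_("N", "T");   /* false: different letters */
+}
+#endif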
+
+/* Subroutine */ int saxpy_(integer *n, real *sa, real *sx, integer *incx,
+ real *sy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, m, ix, iy, mp1;
+
+
+/*
+ constant times a vector plus a vector.
+ uses unrolled loop for increments equal to one.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --sy;
+ --sx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*sa == 0.f) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sy[iy] += *sa * sx[ix];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/*
+ code for both increments equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 4;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sy[i__] += *sa * sx[i__];
+/* L30: */
+ }
+ if (*n < 4) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 4) {
+ sy[i__] += *sa * sx[i__];
+ sy[i__ + 1] += *sa * sx[i__ + 1];
+ sy[i__ + 2] += *sa * sx[i__ + 2];
+ sy[i__ + 3] += *sa * sx[i__ + 3];
+/* L50: */
+ }
+ return 0;
+} /* saxpy_ */
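+
+/*
+   Illustrative usage sketch, not part of the f2c output: saxpy_
+   overwrites y with sa*x + y.  The m = n % 4 "clean-up" loop at L20
+   handles the leftover elements before the unrolled-by-4 main loop.
+   The wrapper below is hypothetical.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_saxpy(void)
+{
+    integer n = 3, inc = 1;
+    real a = 2.f;
+    real x[3] = {1.f, 2.f, 3.f};
+    real y[3] = {10.f, 10.f, 10.f};
+    saxpy_(&n, &a, x, &inc, y, &inc);   /* y becomes {12, 14, 16} */
+}
+#endif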
+
+doublereal scasum_(integer *n, complex *cx, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real ret_val, r__1, r__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ static integer i__, nincx;
+ static real stemp;
+
+
+/*
+ takes the sum of the absolute values of a complex vector and
+ returns a single precision result.
+ jack dongarra, linpack, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --cx;
+
+ /* Function Body */
+ ret_val = 0.f;
+ stemp = 0.f;
+ if ((*n <= 0) || (*incx <= 0)) {
+ return ret_val;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ nincx = *n * *incx;
+ i__1 = nincx;
+ i__2 = *incx;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ i__3 = i__;
+ stemp = stemp + (r__1 = cx[i__3].r, dabs(r__1)) + (r__2 = r_imag(&cx[
+ i__]), dabs(r__2));
+/* L10: */
+ }
+ ret_val = stemp;
+ return ret_val;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__1 = i__;
+ stemp = stemp + (r__1 = cx[i__1].r, dabs(r__1)) + (r__2 = r_imag(&cx[
+ i__]), dabs(r__2));
+/* L30: */
+ }
+ ret_val = stemp;
+ return ret_val;
+} /* scasum_ */
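+
+/*
+   Clarifying note and hypothetical sketch, not part of the f2c
+   output: like icamax_ above, scasum_ measures each element as
+   |Re| + |Im|, so it returns the sum of taxicab magnitudes, not of
+   complex moduli.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_scasum(void)
+{
+    integer n = 1, inc = 1;
+    complex x[1];
+    x[0].r = 3.f;
+    x[0].i = 4.f;
+    /* yields |3| + |4| = 7, not sqrt(3*3 + 4*4) = 5 */
+    doublereal s = scasum_(&n, x, &inc);
+}
+#endif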
+
+doublereal scnrm2_(integer *n, complex *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real ret_val, r__1;
+
+ /* Builtin functions */
+ double r_imag(complex *), sqrt(doublereal);
+
+ /* Local variables */
+ static integer ix;
+ static real ssq, temp, norm, scale;
+
+
+/*
+ SCNRM2 returns the euclidean norm of a vector via the function
+ name, so that
+
+ SCNRM2 := sqrt( conjg( x' )*x )
+
+
+ -- This version written on 25-October-1982.
+ Modified on 14-October-1993 to inline the call to CLASSQ.
+ Sven Hammarling, Nag Ltd.
+*/
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if ((*n < 1) || (*incx < 1)) {
+ norm = 0.f;
+ } else {
+ scale = 0.f;
+ ssq = 1.f;
+/*
+ The following loop is equivalent to this call to the LAPACK
+ auxiliary routine:
+ CALL CLASSQ( N, X, INCX, SCALE, SSQ )
+*/
+
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ i__3 = ix;
+ if (x[i__3].r != 0.f) {
+ i__3 = ix;
+ temp = (r__1 = x[i__3].r, dabs(r__1));
+ if (scale < temp) {
+/* Computing 2nd power */
+ r__1 = scale / temp;
+ ssq = ssq * (r__1 * r__1) + 1.f;
+ scale = temp;
+ } else {
+/* Computing 2nd power */
+ r__1 = temp / scale;
+ ssq += r__1 * r__1;
+ }
+ }
+ if (r_imag(&x[ix]) != 0.f) {
+ temp = (r__1 = r_imag(&x[ix]), dabs(r__1));
+ if (scale < temp) {
+/* Computing 2nd power */
+ r__1 = scale / temp;
+ ssq = ssq * (r__1 * r__1) + 1.f;
+ scale = temp;
+ } else {
+/* Computing 2nd power */
+ r__1 = temp / scale;
+ ssq += r__1 * r__1;
+ }
+ }
+/* L10: */
+ }
+ norm = scale * sqrt(ssq);
+ }
+
+ ret_val = norm;
+ return ret_val;
+
+/* End of SCNRM2. */
+
+} /* scnrm2_ */
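+
+/*
+   Commentary added for clarity, not part of the f2c output: the loop
+   above maintains the invariant
+
+       norm = scale * sqrt(ssq),   scale = largest |component| so far,
+
+   rescaling ssq by (scale/temp)^2 whenever a larger component
+   appears.  Squaring components only after dividing by scale is what
+   lets the routine avoid the overflow and underflow a naive sum of
+   squares would suffer; snrm2_ below uses the identical technique.
+*/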
+
+/* Subroutine */ int scopy_(integer *n, real *sx, integer *incx, real *sy,
+ integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, m, ix, iy, mp1;
+
+
+/*
+ copies a vector, x, to a vector, y.
+ uses unrolled loops for increments equal to 1.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --sy;
+ --sx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sy[iy] = sx[ix];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/*
+ code for both increments equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 7;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ sy[i__] = sx[i__];
+/* L30: */
+ }
+ if (*n < 7) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 7) {
+ sy[i__] = sx[i__];
+ sy[i__ + 1] = sx[i__ + 1];
+ sy[i__ + 2] = sx[i__ + 2];
+ sy[i__ + 3] = sx[i__ + 3];
+ sy[i__ + 4] = sx[i__ + 4];
+ sy[i__ + 5] = sx[i__ + 5];
+ sy[i__ + 6] = sx[i__ + 6];
+/* L50: */
+ }
+ return 0;
+} /* scopy_ */
+
+doublereal sdot_(integer *n, real *sx, integer *incx, real *sy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+ real ret_val;
+
+ /* Local variables */
+ static integer i__, m, ix, iy, mp1;
+ static real stemp;
+
+
+/*
+ forms the dot product of two vectors.
+ uses unrolled loops for increments equal to one.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --sy;
+ --sx;
+
+ /* Function Body */
+ stemp = 0.f;
+ ret_val = 0.f;
+ if (*n <= 0) {
+ return ret_val;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp += sx[ix] * sy[iy];
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ ret_val = stemp;
+ return ret_val;
+
+/*
+ code for both increments equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 5;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp += sx[i__] * sy[i__];
+/* L30: */
+ }
+ if (*n < 5) {
+ goto L60;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 5) {
+ stemp = stemp + sx[i__] * sy[i__] + sx[i__ + 1] * sy[i__ + 1] + sx[
+ i__ + 2] * sy[i__ + 2] + sx[i__ + 3] * sy[i__ + 3] + sx[i__ +
+ 4] * sy[i__ + 4];
+/* L50: */
+ }
+L60:
+ ret_val = stemp;
+ return ret_val;
+} /* sdot_ */
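+
+/*
+   Illustrative usage sketch (hypothetical, not part of the f2c
+   output): sdot_ forms the dot product.  Note that f2c promotes the
+   Fortran REAL function result to doublereal.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_sdot(void)
+{
+    integer n = 3, inc = 1;
+    real x[3] = {1.f, 2.f, 3.f};
+    real y[3] = {4.f, 5.f, 6.f};
+    /* 1*4 + 2*5 + 3*6 = 32 */
+    doublereal d = sdot_(&n, x, &inc, y, &inc);
+}
+#endif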
+
+/* Subroutine */ int sgemm_(char *transa, char *transb, integer *m, integer *
+ n, integer *k, real *alpha, real *a, integer *lda, real *b, integer *
+ ldb, real *beta, real *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static logical nota, notb;
+ static real temp;
+ static integer ncola;
+ extern logical lsame_(char *, char *);
+ static integer nrowa, nrowb;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ SGEMM performs one of the matrix-matrix operations
+
+ C := alpha*op( A )*op( B ) + beta*C,
+
+ where op( X ) is one of
+
+ op( X ) = X or op( X ) = X',
+
+ alpha and beta are scalars, and A, B and C are matrices, with op( A )
+ an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
+
+ Parameters
+ ==========
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n', op( A ) = A.
+
+ TRANSA = 'T' or 't', op( A ) = A'.
+
+ TRANSA = 'C' or 'c', op( A ) = A'.
+
+ Unchanged on exit.
+
+ TRANSB - CHARACTER*1.
+ On entry, TRANSB specifies the form of op( B ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSB = 'N' or 'n', op( B ) = B.
+
+ TRANSB = 'T' or 't', op( B ) = B'.
+
+ TRANSB = 'C' or 'c', op( B ) = B'.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix
+ op( A ) and of the matrix C. M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix
+ op( B ) and the number of columns of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry, K specifies the number of columns of the matrix
+ op( A ) and the number of rows of the matrix op( B ). K must
+ be at least zero.
+ Unchanged on exit.
+
+ ALPHA - REAL .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - REAL array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANSA = 'N' or 'n', and is m otherwise.
+ Before entry with TRANSA = 'N' or 'n', the leading m by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by m part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANSA = 'N' or 'n' then
+ LDA must be at least max( 1, m ), otherwise LDA must be at
+ least max( 1, k ).
+ Unchanged on exit.
+
+ B - REAL array of DIMENSION ( LDB, kb ), where kb is
+ n when TRANSB = 'N' or 'n', and is k otherwise.
+ Before entry with TRANSB = 'N' or 'n', the leading k by n
+ part of the array B must contain the matrix B, otherwise
+ the leading n by k part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANSB = 'N' or 'n' then
+ LDB must be at least max( 1, k ), otherwise LDB must be at
+ least max( 1, n ).
+ Unchanged on exit.
+
+ BETA - REAL .
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then C need not be set on input.
+ Unchanged on exit.
+
+ C - REAL array of DIMENSION ( LDC, n ).
+ Before entry, the leading m by n part of the array C must
+ contain the matrix C, except when beta is zero, in which
+ case C need not be set on entry.
+ On exit, the array C is overwritten by the m by n matrix
+ ( alpha*op( A )*op( B ) + beta*C ).
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Set NOTA and NOTB as true if A and B respectively are not
+ transposed and set NROWA, NCOLA and NROWB as the number of rows
+ and columns of A and the number of rows of B respectively.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ nota = lsame_(transa, "N");
+ notb = lsame_(transb, "N");
+ if (nota) {
+ nrowa = *m;
+ ncola = *k;
+ } else {
+ nrowa = *k;
+ ncola = *m;
+ }
+ if (notb) {
+ nrowb = *k;
+ } else {
+ nrowb = *n;
+ }
+
+/* Test the input parameters. */
+
+ info = 0;
+ if (! nota && ! lsame_(transa, "C") && ! lsame_(
+ transa, "T")) {
+ info = 1;
+ } else if (! notb && ! lsame_(transb, "C") && !
+ lsame_(transb, "T")) {
+ info = 2;
+ } else if (*m < 0) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < max(1,nrowa)) {
+ info = 8;
+ } else if (*ldb < max(1,nrowb)) {
+ info = 10;
+ } else if (*ldc < max(1,*m)) {
+ info = 13;
+ }
+ if (info != 0) {
+ xerbla_("SGEMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (((*alpha == 0.f) || (*k == 0)) && *beta
+ == 1.f)) {
+ return 0;
+ }
+
+/* And if alpha.eq.zero. */
+
+ if (*alpha == 0.f) {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (notb) {
+ if (nota) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.f;
+/* L50: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L60: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (b[l + j * b_dim1] != 0.f) {
+ temp = *alpha * b[l + j * b_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+/* L100: */
+ }
+ if (*beta == 0.f) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L110: */
+ }
+/* L120: */
+ }
+ }
+ } else {
+ if (nota) {
+
+/* Form C := alpha*A*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.f;
+/* L130: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L140: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (b[j + l * b_dim1] != 0.f) {
+ temp = *alpha * b[j + l * b_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+/* L170: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * b[j + l * b_dim1];
+/* L180: */
+ }
+ if (*beta == 0.f) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SGEMM . */
+
+} /* sgemm_ */
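+
+/*
+   Illustrative usage sketch, not part of the f2c output: like every
+   routine in this translation, sgemm_ assumes Fortran column-major
+   storage, so a row-major C matrix is effectively transposed, and all
+   arguments, scalars included, are passed by pointer.  The wrapper
+   below is hypothetical and assumes the f2c typedefs from earlier in
+   this file.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_sgemm(void)
+{
+    integer m = 2, n = 2, k = 2, ld = 2;
+    real alpha = 1.f, beta = 0.f;
+    real a[4] = {1.f, 2.f, 3.f, 4.f};   /* columns (1,2), (3,4) */
+    real b[4] = {1.f, 0.f, 0.f, 1.f};   /* identity */
+    real c[4];                          /* beta = 0: no init needed */
+    /* c := 1*a*b + 0*c, i.e. c becomes a copy of a */
+    sgemm_("N", "N", &m, &n, &k, &alpha, a, &ld, b, &ld, &beta, c, &ld);
+}
+#endif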
+
+/* Subroutine */ int sgemv_(char *trans, integer *m, integer *n, real *alpha,
+ real *a, integer *lda, real *x, integer *incx, real *beta, real *y,
+ integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static real temp;
+ static integer lenx, leny;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ SGEMV performs one of the matrix-vector operations
+
+ y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y,
+
+ where alpha and beta are scalars, x and y are vectors and A is an
+ m by n matrix.
+
+ Parameters
+ ==========
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' y := alpha*A*x + beta*y.
+
+ TRANS = 'T' or 't' y := alpha*A'*x + beta*y.
+
+ TRANS = 'C' or 'c' y := alpha*A'*x + beta*y.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix A.
+ M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - REAL .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - REAL array of DIMENSION ( LDA, n ).
+ Before entry, the leading m by n part of the array A must
+ contain the matrix of coefficients.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+ X - REAL array of DIMENSION at least
+ ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'
+ and at least
+ ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.
+ Before entry, the incremented array X must contain the
+ vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ BETA - REAL .
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then Y need not be set on input.
+ Unchanged on exit.
+
+ Y - REAL array of DIMENSION at least
+ ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'
+ and at least
+ ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.
+ Before entry with BETA non-zero, the incremented array Y
+ must contain the vector y. On exit, Y is overwritten by the
+ updated vector y.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+ ) {
+ info = 1;
+ } else if (*m < 0) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*lda < max(1,*m)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ } else if (*incy == 0) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("SGEMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (*alpha == 0.f && *beta == 1.f)) {
+ return 0;
+ }
+
+/*
+ Set LENX and LENY, the lengths of the vectors x and y, and set
+ up the start points in X and Y.
+*/
+
+ if (lsame_(trans, "N")) {
+ lenx = *n;
+ leny = *m;
+ } else {
+ lenx = *m;
+ leny = *n;
+ }
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (lenx - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (leny - 1) * *incy;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+
+ First form y := beta*y.
+*/
+
+ if (*beta != 1.f) {
+ if (*incy == 1) {
+ if (*beta == 0.f) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = 0.f;
+/* L10: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = *beta * y[i__];
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (*beta == 0.f) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = 0.f;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = *beta * y[iy];
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (*alpha == 0.f) {
+ return 0;
+ }
+ if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+ jx = kx;
+ if (*incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ temp = *alpha * x[jx];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ y[i__] += temp * a[i__ + j * a_dim1];
+/* L50: */
+ }
+ }
+ jx += *incx;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ temp = *alpha * x[jx];
+ iy = ky;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ y[iy] += temp * a[i__ + j * a_dim1];
+ iy += *incy;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y := alpha*A'*x + y. */
+
+ jy = ky;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = 0.f;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ y[jy] += *alpha * temp;
+ jy += *incy;
+/* L100: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = 0.f;
+ ix = kx;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp += a[i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+/* L110: */
+ }
+ y[jy] += *alpha * temp;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SGEMV . */
+
+} /* sgemv_ */
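+
+/*
+   Illustrative usage sketch (hypothetical, not part of the f2c
+   output): y := alpha*A*x + beta*y for the column-major 2x2 matrix
+   A = [1 3; 2 4].  With beta = 0, y need not be initialised.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_sgemv(void)
+{
+    integer m = 2, n = 2, lda = 2, inc = 1;
+    real alpha = 1.f, beta = 0.f;
+    real a[4] = {1.f, 2.f, 3.f, 4.f};   /* columns (1,2), (3,4) */
+    real x[2] = {1.f, 1.f};
+    real y[2];
+    /* y := A*x = {1+3, 2+4} = {4, 6} */
+    sgemv_("N", &m, &n, &alpha, a, &lda, x, &inc, &beta, y, &inc);
+}
+#endif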
+
+/* Subroutine */ int sger_(integer *m, integer *n, real *alpha, real *x,
+ integer *incx, real *y, integer *incy, real *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, ix, jy, kx, info;
+ static real temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ SGER performs the rank 1 operation
+
+ A := alpha*x*y' + A,
+
+ where alpha is a scalar, x is an m element vector, y is an n element
+ vector and A is an m by n matrix.
+
+ Parameters
+ ==========
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix A.
+ M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - REAL .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ X - REAL array of dimension at least
+ ( 1 + ( m - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the m
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ Y - REAL array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y.
+ Unchanged on exit.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+ A - REAL array of DIMENSION ( LDA, n ).
+ Before entry, the leading m by n part of the array A must
+ contain the matrix of coefficients. On exit, A is
+ overwritten by the updated matrix.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (*m < 0) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("SGER ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (*alpha == 0.f)) {
+ return 0;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (*incy > 0) {
+ jy = 1;
+ } else {
+ jy = 1 - (*n - 1) * *incy;
+ }
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (y[jy] != 0.f) {
+ temp = *alpha * y[jy];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] += x[i__] * temp;
+/* L10: */
+ }
+ }
+ jy += *incy;
+/* L20: */
+ }
+ } else {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*m - 1) * *incx;
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (y[jy] != 0.f) {
+ temp = *alpha * y[jy];
+ ix = kx;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] += x[ix] * temp;
+ ix += *incx;
+/* L30: */
+ }
+ }
+ jy += *incy;
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of SGER . */
+
+} /* sger_ */
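+
+/*
+   Illustrative usage sketch (hypothetical, not part of the f2c
+   output): a rank-1 update A := alpha*x*y' + A.  Starting from the
+   zero matrix, the result is the outer product of x and y.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_sger(void)
+{
+    integer m = 2, n = 2, lda = 2, inc = 1;
+    real alpha = 1.f;
+    real x[2] = {1.f, 2.f};
+    real y[2] = {3.f, 4.f};
+    real a[4] = {0.f, 0.f, 0.f, 0.f};
+    /* a becomes {3, 6, 4, 8}: column-major [3 4; 6 8] */
+    sger_(&m, &n, &alpha, x, &inc, y, &inc, a, &lda);
+}
+#endif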
+
+doublereal snrm2_(integer *n, real *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real ret_val, r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer ix;
+ static real ssq, norm, scale, absxi;
+
+
+/*
+ SNRM2 returns the euclidean norm of a vector via the function
+ name, so that
+
+ SNRM2 := sqrt( x'*x )
+
+
+ -- This version written on 25-October-1982.
+ Modified on 14-October-1993 to inline the call to SLASSQ.
+ Sven Hammarling, Nag Ltd.
+*/
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if ((*n < 1) || (*incx < 1)) {
+ norm = 0.f;
+ } else if (*n == 1) {
+ norm = dabs(x[1]);
+ } else {
+ scale = 0.f;
+ ssq = 1.f;
+/*
+ The following loop is equivalent to this call to the LAPACK
+ auxiliary routine:
+ CALL SLASSQ( N, X, INCX, SCALE, SSQ )
+*/
+
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ if (x[ix] != 0.f) {
+ absxi = (r__1 = x[ix], dabs(r__1));
+ if (scale < absxi) {
+/* Computing 2nd power */
+ r__1 = scale / absxi;
+ ssq = ssq * (r__1 * r__1) + 1.f;
+ scale = absxi;
+ } else {
+/* Computing 2nd power */
+ r__1 = absxi / scale;
+ ssq += r__1 * r__1;
+ }
+ }
+/* L10: */
+ }
+ norm = scale * sqrt(ssq);
+ }
+
+ ret_val = norm;
+ return ret_val;
+
+/* End of SNRM2. */
+
+} /* snrm2_ */
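+
+/*
+   Illustrative usage sketch (hypothetical, not part of the f2c
+   output): thanks to the scale/ssq rescaling described at scnrm2_
+   above, snrm2_ stays accurate even when squaring a component would
+   overflow single precision.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_snrm2(void)
+{
+    integer n = 2, inc = 1;
+    real x[2] = {3e19f, 4e19f};
+    /* (3e19)^2 would overflow a float, yet the result is 5e19 */
+    doublereal nrm = snrm2_(&n, x, &inc);
+}
+#endif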
+
+/* Subroutine */ int srot_(integer *n, real *sx, integer *incx, real *sy,
+ integer *incy, real *c__, real *s)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, ix, iy;
+ static real stemp;
+
+
+/*
+ applies a plane rotation.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --sy;
+ --sx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments not equal
+ to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp = *c__ * sx[ix] + *s * sy[iy];
+ sy[iy] = *c__ * sy[iy] - *s * sx[ix];
+ sx[ix] = stemp;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp = *c__ * sx[i__] + *s * sy[i__];
+ sy[i__] = *c__ * sy[i__] - *s * sx[i__];
+ sx[i__] = stemp;
+/* L30: */
+ }
+ return 0;
+} /* srot_ */
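+
+/*
+   Illustrative usage sketch (hypothetical, not part of the f2c
+   output): srot_ applies the Givens rotation [c s; -s c] to each pair
+   (sx[i], sy[i]).  A quarter turn (c = 0, s = 1) swaps the vectors,
+   negating the second.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_srot(void)
+{
+    integer n = 2, inc = 1;
+    real c = 0.f, s = 1.f;
+    real x[2] = {1.f, 2.f};
+    real y[2] = {3.f, 4.f};
+    srot_(&n, x, &inc, y, &inc, &c, &s);
+    /* now x = {3, 4} and y = {-1, -2} */
+}
+#endif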
+
+/* Subroutine */ int sscal_(integer *n, real *sa, real *sx, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ static integer i__, m, mp1, nincx;
+
+
+/*
+ scales a vector by a constant.
+ uses unrolled loops for increment equal to 1.
+ jack dongarra, linpack, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --sx;
+
+ /* Function Body */
+ if ((*n <= 0) || (*incx <= 0)) {
+ return 0;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ nincx = *n * *incx;
+ i__1 = nincx;
+ i__2 = *incx;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ sx[i__] = *sa * sx[i__];
+/* L10: */
+ }
+ return 0;
+
+/*
+ code for increment equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 5;
+ if (m == 0) {
+ goto L40;
+ }
+ i__2 = m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sx[i__] = *sa * sx[i__];
+/* L30: */
+ }
+ if (*n < 5) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__2 = *n;
+ for (i__ = mp1; i__ <= i__2; i__ += 5) {
+ sx[i__] = *sa * sx[i__];
+ sx[i__ + 1] = *sa * sx[i__ + 1];
+ sx[i__ + 2] = *sa * sx[i__ + 2];
+ sx[i__ + 3] = *sa * sx[i__ + 3];
+ sx[i__ + 4] = *sa * sx[i__ + 4];
+/* L50: */
+ }
+ return 0;
+} /* sscal_ */
+
+/* Subroutine */ int sswap_(integer *n, real *sx, integer *incx, real *sy,
+ integer *incy)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, m, ix, iy, mp1;
+ static real stemp;
+
+
+/*
+ interchanges two vectors.
+ uses unrolled loops for increments equal to 1.
+ jack dongarra, linpack, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --sy;
+ --sx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments not equal
+ to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp = sx[ix];
+ sx[ix] = sy[iy];
+ sy[iy] = stemp;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/*
+ code for both increments equal to 1
+
+
+ clean-up loop
+*/
+
+L20:
+ m = *n % 3;
+ if (m == 0) {
+ goto L40;
+ }
+ i__1 = m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ stemp = sx[i__];
+ sx[i__] = sy[i__];
+ sy[i__] = stemp;
+/* L30: */
+ }
+ if (*n < 3) {
+ return 0;
+ }
+L40:
+ mp1 = m + 1;
+ i__1 = *n;
+ for (i__ = mp1; i__ <= i__1; i__ += 3) {
+ stemp = sx[i__];
+ sx[i__] = sy[i__];
+ sy[i__] = stemp;
+ stemp = sx[i__ + 1];
+ sx[i__ + 1] = sy[i__ + 1];
+ sy[i__ + 1] = stemp;
+ stemp = sx[i__ + 2];
+ sx[i__ + 2] = sy[i__ + 2];
+ sy[i__ + 2] = stemp;
+/* L50: */
+ }
+ return 0;
+} /* sswap_ */
+
+/* Subroutine */ int ssymv_(char *uplo, integer *n, real *alpha, real *a,
+ integer *lda, real *x, integer *incx, real *beta, real *y, integer *
+ incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static real temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ SSYMV performs the matrix-vector operation
+
+ y := alpha*A*x + beta*y,
+
+ where alpha and beta are scalars, x and y are n element vectors and
+ A is an n by n symmetric matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array A is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of A
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of A
+ is to be referenced.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - REAL .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - REAL array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular part of the symmetric matrix and the strictly
+ lower triangular part of A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular part of the symmetric matrix and the strictly
+ upper triangular part of A is not referenced.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - REAL array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ BETA - REAL .
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then Y need not be set on input.
+ Unchanged on exit.
+
+ Y - REAL array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y. On exit, Y is overwritten by the updated
+ vector y.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*lda < max(1,*n)) {
+ info = 5;
+ } else if (*incx == 0) {
+ info = 7;
+ } else if (*incy == 0) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("SSYMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (*alpha == 0.f && *beta == 1.f)) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through the triangular part
+ of A.
+
+ First form y := beta*y.
+*/
+
+ if (*beta != 1.f) {
+ if (*incy == 1) {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = 0.f;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[i__] = *beta * y[i__];
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = 0.f;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ y[iy] = *beta * y[iy];
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (*alpha == 0.f) {
+ return 0;
+ }
+ if (lsame_(uplo, "U")) {
+
+/* Form y when A is stored in upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[j];
+ temp2 = 0.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ y[i__] += temp1 * a[i__ + j * a_dim1];
+ temp2 += a[i__ + j * a_dim1] * x[i__];
+/* L50: */
+ }
+ y[j] = y[j] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[jx];
+ temp2 = 0.f;
+ ix = kx;
+ iy = ky;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ y[iy] += temp1 * a[i__ + j * a_dim1];
+ temp2 += a[i__ + j * a_dim1] * x[ix];
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ y[jy] = y[jy] + temp1 * a[j + j * a_dim1] + *alpha * temp2;
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when A is stored in lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[j];
+ temp2 = 0.f;
+ y[j] += temp1 * a[j + j * a_dim1];
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ y[i__] += temp1 * a[i__ + j * a_dim1];
+ temp2 += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ y[j] += *alpha * temp2;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp1 = *alpha * x[jx];
+ temp2 = 0.f;
+ y[jy] += temp1 * a[j + j * a_dim1];
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ y[iy] += temp1 * a[i__ + j * a_dim1];
+ temp2 += a[i__ + j * a_dim1] * x[ix];
+/* L110: */
+ }
+ y[jy] += *alpha * temp2;
+ jx += *incx;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SSYMV . */
+
+} /* ssymv_ */
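+
+/*
+   Illustrative usage sketch (hypothetical, not part of the f2c
+   output): ssymv_ reads only the triangle selected by UPLO, so the
+   opposite triangle of the array may hold anything.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_ssymv(void)
+{
+    integer n = 2, lda = 2, inc = 1;
+    real alpha = 1.f, beta = 0.f;
+    /* upper triangle of the symmetric matrix [2 1; 1 2]; the strictly
+       lower entry (-99) is never referenced with UPLO = "U" */
+    real a[4] = {2.f, -99.f, 1.f, 2.f};
+    real x[2] = {1.f, 1.f};
+    real y[2];
+    ssymv_("U", &n, &alpha, a, &lda, x, &inc, &beta, y, &inc);
+    /* y = A*x = {3, 3} */
+}
+#endif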
+
+/* Subroutine */ int ssyr2_(char *uplo, integer *n, real *alpha, real *x,
+ integer *incx, real *y, integer *incy, real *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static real temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ SSYR2 performs the symmetric rank 2 operation
+
+ A := alpha*x*y' + alpha*y*x' + A,
+
+ where alpha is a scalar, x and y are n element vectors and A is an n
+ by n symmetric matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array A is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of A
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of A
+ is to be referenced.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - REAL .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ X - REAL array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ Y - REAL array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y.
+ Unchanged on exit.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+ A - REAL array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular part of the symmetric matrix and the strictly
+ lower triangular part of A is not referenced. On exit, the
+ upper triangular part of the array A is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular part of the symmetric matrix and the strictly
+ upper triangular part of A is not referenced. On exit, the
+ lower triangular part of the array A is overwritten by the
+ lower triangular part of the updated matrix.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*n)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("SSYR2 ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (*alpha == 0.f)) {
+ return 0;
+ }
+
+/*
+ Set up the start points in X and Y if the increments are not both
+ unity.
+*/
+
+ if ((*incx != 1) || (*incy != 1)) {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+ jx = kx;
+ jy = ky;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through the triangular part
+ of A.
+*/
+
+ if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in the upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((x[j] != 0.f) || (y[j] != 0.f)) {
+ temp1 = *alpha * y[j];
+ temp2 = *alpha * x[j];
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
+ temp1 + y[i__] * temp2;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((x[jx] != 0.f) || (y[jy] != 0.f)) {
+ temp1 = *alpha * y[jy];
+ temp2 = *alpha * x[jx];
+ ix = kx;
+ iy = ky;
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
+ temp1 + y[iy] * temp2;
+ ix += *incx;
+ iy += *incy;
+/* L30: */
+ }
+ }
+ jx += *incx;
+ jy += *incy;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when A is stored in the lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((x[j] != 0.f) || (y[j] != 0.f)) {
+ temp1 = *alpha * y[j];
+ temp2 = *alpha * x[j];
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[i__] *
+ temp1 + y[i__] * temp2;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((x[jx] != 0.f) || (y[jy] != 0.f)) {
+ temp1 = *alpha * y[jy];
+ temp2 = *alpha * x[jx];
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + j * a_dim1] + x[ix] *
+ temp1 + y[iy] * temp2;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ }
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SSYR2 . */
+
+} /* ssyr2_ */
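+
+/*
+   Clarifying note, not part of the f2c output: ssyr2_ computes
+   A := alpha*(x*y' + y*x') + A.  The update is symmetric, so only the
+   UPLO triangle is ever written; calling it mirrors the sger_ sketch
+   above, with an extra UPLO argument and a square n-by-n matrix.
+*/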
+
+/* Subroutine */ int ssyr2k_(char *uplo, char *trans, integer *n, integer *k,
+ real *alpha, real *a, integer *lda, real *b, integer *ldb, real *beta,
+ real *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static real temp1, temp2;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ SSYR2K performs one of the symmetric rank 2k operations
+
+ C := alpha*A*B' + alpha*B*A' + beta*C,
+
+ or
+
+ C := alpha*A'*B + alpha*B'*A + beta*C,
+
+ where alpha and beta are scalars, C is an n by n symmetric matrix
+ and A and B are n by k matrices in the first case and k by n
+ matrices in the second case.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array C is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of C
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of C
+ is to be referenced.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' C := alpha*A*B' + alpha*B*A' +
+ beta*C.
+
+ TRANS = 'T' or 't' C := alpha*A'*B + alpha*B'*A +
+ beta*C.
+
+ TRANS = 'C' or 'c' C := alpha*A'*B + alpha*B'*A +
+ beta*C.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry with TRANS = 'N' or 'n', K specifies the number
+ of columns of the matrices A and B, and on entry with
+ TRANS = 'T' or 't' or 'C' or 'c', K specifies the number
+ of rows of the matrices A and B. K must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - REAL .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - REAL array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by n part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDA must be at least max( 1, n ), otherwise LDA must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ B - REAL array of DIMENSION ( LDB, kb ), where kb is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array B must contain the matrix B, otherwise
+ the leading k by n part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDB must be at least max( 1, n ), otherwise LDB must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ BETA - REAL .
+ On entry, BETA specifies the scalar beta.
+ Unchanged on exit.
+
+ C - REAL array of DIMENSION ( LDC, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array C must contain the upper
+ triangular part of the symmetric matrix and the strictly
+ lower triangular part of C is not referenced. On exit, the
+ upper triangular part of the array C is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array C must contain the lower
+ triangular part of the symmetric matrix and the strictly
+ upper triangular part of C is not referenced. On exit, the
+ lower triangular part of the array C is overwritten by the
+ lower triangular part of the updated matrix.
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldc < max(1,*n)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("SSYR2K", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (((*alpha == 0.f) || (*k == 0)) && *beta == 1.f)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.f) {
+ if (upper) {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.f;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*B' + alpha*B*A' + C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.f;
+/* L90: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L100: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if ((a[j + l * a_dim1] != 0.f) || (b[j + l * b_dim1] !=
+ 0.f)) {
+ temp1 = *alpha * b[j + l * b_dim1];
+ temp2 = *alpha * a[j + l * a_dim1];
+ i__3 = j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
+ i__ + l * a_dim1] * temp1 + b[i__ + l *
+ b_dim1] * temp2;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.f;
+/* L140: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L150: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if ((a[j + l * a_dim1] != 0.f) || (b[j + l * b_dim1] !=
+ 0.f)) {
+ temp1 = *alpha * b[j + l * b_dim1];
+ temp2 = *alpha * a[j + l * a_dim1];
+ i__3 = *n;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] = c__[i__ + j * c_dim1] + a[
+ i__ + l * a_dim1] * temp1 + b[i__ + l *
+ b_dim1] * temp2;
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*A'*B + alpha*B'*A + C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp1 = 0.f;
+ temp2 = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+ temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
+/* L190: */
+ }
+ if (*beta == 0.f) {
+ c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
+ temp2;
+ } else {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+ + *alpha * temp1 + *alpha * temp2;
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp1 = 0.f;
+ temp2 = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp1 += a[l + i__ * a_dim1] * b[l + j * b_dim1];
+ temp2 += b[l + i__ * b_dim1] * a[l + j * a_dim1];
+/* L220: */
+ }
+ if (*beta == 0.f) {
+ c__[i__ + j * c_dim1] = *alpha * temp1 + *alpha *
+ temp2;
+ } else {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1]
+ + *alpha * temp1 + *alpha * temp2;
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SSYR2K. */
+
+} /* ssyr2k_ */
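+
+/*
+   Illustrative usage sketch (hypothetical, not part of the f2c
+   output): with n = k = 1 the rank-2k update
+   C := alpha*A*B' + alpha*B*A' + beta*C collapses to the scalar
+   identity c := 2*alpha*a*b + beta*c, which makes the data flow easy
+   to check.
+*/
+#if 0   /* hypothetical example, never compiled */
+static void example_ssyr2k(void)
+{
+    integer n = 1, k = 1, ld = 1;
+    real alpha = 1.f, beta = 1.f;
+    real a[1] = {2.f};
+    real b[1] = {3.f};
+    real c[1] = {1.f};
+    /* c becomes 1*2*3 + 1*3*2 + 1*1 = 13 */
+    ssyr2k_("U", "N", &n, &k, &alpha, a, &ld, b, &ld, &beta, c, &ld);
+}
+#endif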
+
+/* Subroutine */ int ssyrk_(char *uplo, char *trans, integer *n, integer *k,
+ real *alpha, real *a, integer *lda, real *beta, real *c__, integer *
+ ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static real temp;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ SSYRK performs one of the symmetric rank k operations
+
+ C := alpha*A*A' + beta*C,
+
+ or
+
+ C := alpha*A'*A + beta*C,
+
+ where alpha and beta are scalars, C is an n by n symmetric matrix
+ and A is an n by k matrix in the first case and a k by n matrix
+ in the second case.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array C is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of C
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of C
+ is to be referenced.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' C := alpha*A*A' + beta*C.
+
+ TRANS = 'T' or 't' C := alpha*A'*A + beta*C.
+
+ TRANS = 'C' or 'c' C := alpha*A'*A + beta*C.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry with TRANS = 'N' or 'n', K specifies the number
+ of columns of the matrix A, and on entry with
+ TRANS = 'T' or 't' or 'C' or 'c', K specifies the number
+ of rows of the matrix A. K must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - REAL .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - REAL array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by n part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDA must be at least max( 1, n ), otherwise LDA must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ BETA - REAL .
+ On entry, BETA specifies the scalar beta.
+ Unchanged on exit.
+
+ C - REAL array of DIMENSION ( LDC, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array C must contain the upper
+ triangular part of the symmetric matrix and the strictly
+ lower triangular part of C is not referenced. On exit, the
+ upper triangular part of the array C is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array C must contain the lower
+ triangular part of the symmetric matrix and the strictly
+ upper triangular part of C is not referenced. On exit, the
+ lower triangular part of the array C is overwritten by the
+ lower triangular part of the updated matrix.
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldc < max(1,*n)) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("SSYRK ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (((*alpha == 0.f) || (*k == 0)) && *beta == 1.f)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.f) {
+ if (upper) {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.f;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*A' + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.f;
+/* L90: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L100: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0.f) {
+ temp = *alpha * a[j + l * a_dim1];
+ i__3 = j;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.f) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = 0.f;
+/* L140: */
+ }
+ } else if (*beta != 1.f) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] = *beta * c__[i__ + j * c_dim1];
+/* L150: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ if (a[j + l * a_dim1] != 0.f) {
+ temp = *alpha * a[j + l * a_dim1];
+ i__3 = *n;
+ for (i__ = j; i__ <= i__3; ++i__) {
+ c__[i__ + j * c_dim1] += temp * a[i__ + l *
+ a_dim1];
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*A'*A + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
+/* L190: */
+ }
+ if (*beta == 0.f) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp = 0.f;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ temp += a[l + i__ * a_dim1] * a[l + j * a_dim1];
+/* L220: */
+ }
+ if (*beta == 0.f) {
+ c__[i__ + j * c_dim1] = *alpha * temp;
+ } else {
+ c__[i__ + j * c_dim1] = *alpha * temp + *beta * c__[
+ i__ + j * c_dim1];
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SSYRK . */
+
+} /* ssyrk_ */
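+
+/*
+   Hypothetical usage sketch, added for illustration only; it is not part
+   of the f2c-translated BLAS source.  It shows a minimal column-major
+   call of ssyrk_ above, with invented variable names, and is guarded by
+   #if 0 (the same convention used for the unused xerbla_ below) so that
+   it is never compiled.
+*/
+#if 0
+static void example_ssyrk(void)
+{
+    /* A is 2x3, stored column-major: A = [ 1 2 3 ; 4 5 6 ]. */
+    real a[6] = { 1.f, 4.f, 2.f, 5.f, 3.f, 6.f };
+    real c[4] = { 0.f, 0.f, 0.f, 0.f };    /* 2x2 result matrix C */
+    integer n = 2, k = 3, lda = 2, ldc = 2;
+    real alpha = 1.f, beta = 0.f;
+
+    /* Upper triangle of C := A*A'; here c receives { 14, ., 32, 77 }. */
+    ssyrk_("U", "N", &n, &k, &alpha, a, &lda, &beta, c, &ldc);
+}
+#endif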
+
+/* Subroutine */ int strmm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, real *alpha, real *a, integer *lda, real *b,
+ integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, k, info;
+ static real temp;
+ static logical lside;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+ Purpose
+ =======
+
+ STRMM performs one of the matrix-matrix operations
+
+ B := alpha*op( A )*B, or B := alpha*B*op( A ),
+
+ where alpha is a scalar, B is an m by n matrix, A is a unit, or
+ non-unit, upper or lower triangular matrix and op( A ) is one of
+
+ op( A ) = A or op( A ) = A'.
+
+ Parameters
+ ==========
+
+ SIDE - CHARACTER*1.
+ On entry, SIDE specifies whether op( A ) multiplies B from
+ the left or right as follows:
+
+ SIDE = 'L' or 'l' B := alpha*op( A )*B.
+
+ SIDE = 'R' or 'r' B := alpha*B*op( A ).
+
+ Unchanged on exit.
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix A is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n' op( A ) = A.
+
+ TRANSA = 'T' or 't' op( A ) = A'.
+
+ TRANSA = 'C' or 'c' op( A ) = A'.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit triangular
+ as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of B. M must be at
+ least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of B. N must be
+ at least zero.
+ Unchanged on exit.
+
+ ALPHA - REAL .
+ On entry, ALPHA specifies the scalar alpha. When alpha is
+ zero then A is not referenced and B need not be set before
+ entry.
+ Unchanged on exit.
+
+ A - REAL array of DIMENSION ( LDA, k ), where k is m
+ when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
+ Before entry with UPLO = 'U' or 'u', the leading k by k
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading k by k
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When SIDE = 'L' or 'l' then
+ LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
+ then LDA must be at least max( 1, n ).
+ Unchanged on exit.
+
+ B - REAL array of DIMENSION ( LDB, n ).
+ Before entry, the leading m by n part of the array B must
+ contain the matrix B, and on exit is overwritten by the
+ transformed matrix.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. LDB must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("STRMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*A*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ if (b[k + j * b_dim1] != 0.f) {
+ temp = *alpha * b[k + j * b_dim1];
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * a[i__ + k *
+ a_dim1];
+/* L30: */
+ }
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ b[k + j * b_dim1] = temp;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (k = *m; k >= 1; --k) {
+ if (b[k + j * b_dim1] != 0.f) {
+ temp = *alpha * b[k + j * b_dim1];
+ b[k + j * b_dim1] = temp;
+ if (nounit) {
+ b[k + j * b_dim1] *= a[k + k * a_dim1];
+ }
+ i__2 = *m;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * a[i__ + k *
+ a_dim1];
+/* L60: */
+ }
+ }
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*A'*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ temp = b[i__ + j * b_dim1];
+ if (nounit) {
+ temp *= a[i__ + i__ * a_dim1];
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L90: */
+ }
+ b[i__ + j * b_dim1] = *alpha * temp;
+/* L100: */
+ }
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = b[i__ + j * b_dim1];
+ if (nounit) {
+ temp *= a[i__ + i__ * a_dim1];
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ temp += a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L120: */
+ }
+ b[i__ + j * b_dim1] = *alpha * temp;
+/* L130: */
+ }
+/* L140: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*A. */
+
+ if (upper) {
+ for (j = *n; j >= 1; --j) {
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L150: */
+ }
+ i__1 = j - 1;
+ for (k = 1; k <= i__1; ++k) {
+ if (a[k + j * a_dim1] != 0.f) {
+ temp = *alpha * a[k + j * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L190: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ if (a[k + j * a_dim1] != 0.f) {
+ temp = *alpha * a[k + j * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+/* L220: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*A'. */
+
+ if (upper) {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k - 1;
+ for (j = 1; j <= i__2; ++j) {
+ if (a[j + k * a_dim1] != 0.f) {
+ temp = *alpha * a[j + k * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ if (temp != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ } else {
+ for (k = *n; k >= 1; --k) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ if (a[j + k * a_dim1] != 0.f) {
+ temp = *alpha * a[j + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] += temp * b[i__ + k *
+ b_dim1];
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ temp = *alpha;
+ if (nounit) {
+ temp *= a[k + k * a_dim1];
+ }
+ if (temp != 1.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L290: */
+ }
+ }
+/* L300: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STRMM . */
+
+} /* strmm_ */
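+
+/*
+   Hypothetical usage sketch (illustration only, not part of the f2c
+   translation): multiplying a 2x2 matrix B from the left by an upper
+   triangular A via strmm_ above.  Variable names are invented; the
+   #if 0 guard keeps the sketch out of the build.
+*/
+#if 0
+static void example_strmm(void)
+{
+    /* A = [ 1 2 ; 0 3 ] (upper triangular), B = [ 1 3 ; 2 4 ]. */
+    real a[4] = { 1.f, 0.f, 2.f, 3.f };
+    real b[4] = { 1.f, 2.f, 3.f, 4.f };
+    integer m = 2, n = 2, lda = 2, ldb = 2;
+    real alpha = 1.f;
+
+    /* B := A*B; afterwards b = { 5, 6, 11, 12 }, i.e. B = [ 5 11 ; 6 12 ]. */
+    strmm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);
+}
+#endif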
+
+/* Subroutine */ int strmv_(char *uplo, char *trans, char *diag, integer *n,
+ real *a, integer *lda, real *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, ix, jx, kx, info;
+ static real temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+ Purpose
+ =======
+
+ STRMV performs one of the matrix-vector operations
+
+ x := A*x, or x := A'*x,
+
+ where x is an n element vector and A is an n by n unit, or non-unit,
+ upper or lower triangular matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' x := A*x.
+
+ TRANS = 'T' or 't' x := A'*x.
+
+ TRANS = 'C' or 'c' x := A'*x.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit
+ triangular as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ A - REAL array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - REAL array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x. On exit, X is overwritten with the
+             transformed vector x.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("STRMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ nounit = lsame_(diag, "N");
+
+/*
+ Set up the start point in X if the increment is not unity. This
+ will be ( N - 1 )*INCX too small for descending loops.
+*/
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[j] != 0.f) {
+ temp = x[j];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[i__] += temp * a[i__ + j * a_dim1];
+/* L10: */
+ }
+ if (nounit) {
+ x[j] *= a[j + j * a_dim1];
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (x[jx] != 0.f) {
+ temp = x[jx];
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ x[ix] += temp * a[i__ + j * a_dim1];
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ x[jx] *= a[j + j * a_dim1];
+ }
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ if (x[j] != 0.f) {
+ temp = x[j];
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[i__] += temp * a[i__ + j * a_dim1];
+/* L50: */
+ }
+ if (nounit) {
+ x[j] *= a[j + j * a_dim1];
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ if (x[jx] != 0.f) {
+ temp = x[jx];
+ ix = kx;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ x[ix] += temp * a[i__ + j * a_dim1];
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ x[jx] *= a[j + j * a_dim1];
+ }
+ }
+ jx -= *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L90: */
+ }
+ x[j] = temp;
+/* L100: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ temp += a[i__ + j * a_dim1] * x[ix];
+/* L110: */
+ }
+ x[jx] = temp;
+ jx -= *incx;
+/* L120: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[j];
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ temp += a[i__ + j * a_dim1] * x[i__];
+/* L130: */
+ }
+ x[j] = temp;
+/* L140: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp = x[jx];
+ ix = jx;
+ if (nounit) {
+ temp *= a[j + j * a_dim1];
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ temp += a[i__ + j * a_dim1] * x[ix];
+/* L150: */
+ }
+ x[jx] = temp;
+ jx += *incx;
+/* L160: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STRMV . */
+
+} /* strmv_ */
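+
+/*
+   Hypothetical usage sketch (illustration only, not part of the f2c
+   translation): the matrix-vector product x := A*x with strmv_ above,
+   using contiguous storage (INCX = 1).  Names are invented; the #if 0
+   guard keeps the sketch out of the build.
+*/
+#if 0
+static void example_strmv(void)
+{
+    /* A = [ 2 1 ; 0 3 ] (upper triangular), x = ( 1, 1 )'. */
+    real a[4] = { 2.f, 0.f, 1.f, 3.f };
+    real x[2] = { 1.f, 1.f };
+    integer n = 2, lda = 2, incx = 1;
+
+    /* x := A*x; afterwards x = ( 3, 3 )'. */
+    strmv_("U", "N", "N", &n, a, &lda, x, &incx);
+}
+#endif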
+
+/* Subroutine */ int strsm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, real *alpha, real *a, integer *lda, real *b,
+ integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, k, info;
+ static real temp;
+ static logical lside;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+ Purpose
+ =======
+
+ STRSM solves one of the matrix equations
+
+ op( A )*X = alpha*B, or X*op( A ) = alpha*B,
+
+ where alpha is a scalar, X and B are m by n matrices, A is a unit, or
+ non-unit, upper or lower triangular matrix and op( A ) is one of
+
+ op( A ) = A or op( A ) = A'.
+
+ The matrix X is overwritten on B.
+
+ Parameters
+ ==========
+
+ SIDE - CHARACTER*1.
+ On entry, SIDE specifies whether op( A ) appears on the left
+ or right of X as follows:
+
+ SIDE = 'L' or 'l' op( A )*X = alpha*B.
+
+ SIDE = 'R' or 'r' X*op( A ) = alpha*B.
+
+ Unchanged on exit.
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix A is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n' op( A ) = A.
+
+ TRANSA = 'T' or 't' op( A ) = A'.
+
+ TRANSA = 'C' or 'c' op( A ) = A'.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit triangular
+ as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of B. M must be at
+ least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of B. N must be
+ at least zero.
+ Unchanged on exit.
+
+ ALPHA - REAL .
+ On entry, ALPHA specifies the scalar alpha. When alpha is
+ zero then A is not referenced and B need not be set before
+ entry.
+ Unchanged on exit.
+
+ A - REAL array of DIMENSION ( LDA, k ), where k is m
+ when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
+ Before entry with UPLO = 'U' or 'u', the leading k by k
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading k by k
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When SIDE = 'L' or 'l' then
+ LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
+ then LDA must be at least max( 1, n ).
+ Unchanged on exit.
+
+ B - REAL array of DIMENSION ( LDB, n ).
+ Before entry, the leading m by n part of the array B must
+ contain the right-hand side matrix B, and on exit is
+ overwritten by the solution matrix X.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. LDB must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("STRSM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.f) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*inv( A )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L30: */
+ }
+ }
+ for (k = *m; k >= 1; --k) {
+ if (b[k + j * b_dim1] != 0.f) {
+ if (nounit) {
+ b[k + j * b_dim1] /= a[k + k * a_dim1];
+ }
+ i__2 = k - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
+ i__ + k * a_dim1];
+/* L40: */
+ }
+ }
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L70: */
+ }
+ }
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ if (b[k + j * b_dim1] != 0.f) {
+ if (nounit) {
+ b[k + j * b_dim1] /= a[k + k * a_dim1];
+ }
+ i__3 = *m;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= b[k + j * b_dim1] * a[
+ i__ + k * a_dim1];
+/* L80: */
+ }
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*inv( A' )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = *alpha * b[i__ + j * b_dim1];
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L110: */
+ }
+ if (nounit) {
+ temp /= a[i__ + i__ * a_dim1];
+ }
+ b[i__ + j * b_dim1] = temp;
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ temp = *alpha * b[i__ + j * b_dim1];
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ temp -= a[k + i__ * a_dim1] * b[k + j * b_dim1];
+/* L140: */
+ }
+ if (nounit) {
+ temp /= a[i__ + i__ * a_dim1];
+ }
+ b[i__ + j * b_dim1] = temp;
+/* L150: */
+ }
+/* L160: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*inv( A ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*alpha != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L170: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ if (a[k + j * a_dim1] != 0.f) {
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
+ i__ + k * b_dim1];
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+ if (nounit) {
+ temp = 1.f / a[j + j * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+ } else {
+ for (j = *n; j >= 1; --j) {
+ if (*alpha != 1.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = *alpha * b[i__ + j * b_dim1]
+ ;
+/* L220: */
+ }
+ }
+ i__1 = *n;
+ for (k = j + 1; k <= i__1; ++k) {
+ if (a[k + j * a_dim1] != 0.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= a[k + j * a_dim1] * b[
+ i__ + k * b_dim1];
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ if (nounit) {
+ temp = 1.f / a[j + j * a_dim1];
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + j * b_dim1] = temp * b[i__ + j * b_dim1];
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*inv( A' ). */
+
+ if (upper) {
+ for (k = *n; k >= 1; --k) {
+ if (nounit) {
+ temp = 1.f / a[k + k * a_dim1];
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L270: */
+ }
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ if (a[j + k * a_dim1] != 0.f) {
+ temp = a[j + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] -= temp * b[i__ + k *
+ b_dim1];
+/* L280: */
+ }
+ }
+/* L290: */
+ }
+ if (*alpha != 1.f) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
+ ;
+/* L300: */
+ }
+ }
+/* L310: */
+ }
+ } else {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (nounit) {
+ temp = 1.f / a[k + k * a_dim1];
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = temp * b[i__ + k * b_dim1];
+/* L320: */
+ }
+ }
+ i__2 = *n;
+ for (j = k + 1; j <= i__2; ++j) {
+ if (a[j + k * a_dim1] != 0.f) {
+ temp = a[j + k * a_dim1];
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ b[i__ + j * b_dim1] -= temp * b[i__ + k *
+ b_dim1];
+/* L330: */
+ }
+ }
+/* L340: */
+ }
+ if (*alpha != 1.f) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + k * b_dim1] = *alpha * b[i__ + k * b_dim1]
+ ;
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STRSM . */
+
+} /* strsm_ */
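+
+/*
+   Hypothetical usage sketch (illustration only, not part of the f2c
+   translation): solving the triangular system A*X = B in place with
+   strsm_ above.  Names are invented; the #if 0 guard keeps the sketch
+   out of the build.
+*/
+#if 0
+static void example_strsm(void)
+{
+    /* A = [ 2 1 ; 0 4 ] (upper triangular), B = ( 3, 4 )' (one column). */
+    real a[4] = { 2.f, 0.f, 1.f, 4.f };
+    real b[2] = { 3.f, 4.f };
+    integer m = 2, n = 1, lda = 2, ldb = 2;
+    real alpha = 1.f;
+
+    /* Solve A*X = B; b is overwritten with X = ( 1, 1 )'. */
+    strsm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);
+}
+#endif
+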
+#if 0
+/* Subroutine */ int xerbla_(char *srname, integer *info)
+{
+ /* Format strings */
+ static char fmt_9999[] = "(\002 ** On entry to \002,a6,\002 parameter nu"
+ "mber \002,i2,\002 had \002,\002an illegal value\002)";
+
+ /* Builtin functions */
+ integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void);
+ /* Subroutine */ int s_stop(char *, ftnlen);
+
+ /* Fortran I/O blocks */
+ static cilist io___425 = { 0, 6, 0, fmt_9999, 0 };
+
+
+/*
+ -- LAPACK auxiliary routine (preliminary version) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ XERBLA is an error handler for the LAPACK routines.
+ It is called by an LAPACK routine if an input parameter has an
+ invalid value. A message is printed and execution stops.
+
+ Installers may consider modifying the STOP statement in order to
+ call system-specific exception-handling facilities.
+
+ Arguments
+ =========
+
+ SRNAME (input) CHARACTER*6
+ The name of the routine which called XERBLA.
+
+ INFO (input) INTEGER
+ The position of the invalid parameter in the parameter list
+ of the calling routine.
+*/
+
+
+ s_wsfe(&io___425);
+ do_fio(&c__1, srname, (ftnlen)6);
+ do_fio(&c__1, (char *)&(*info), (ftnlen)sizeof(integer));
+ e_wsfe();
+
+ s_stop("", (ftnlen)0);
+
+
+/* End of XERBLA */
+
+ return 0;
+} /* xerbla_ */
+#endif
+
+/* Subroutine */ int zaxpy_(integer *n, doublecomplex *za, doublecomplex *zx,
+ integer *incx, doublecomplex *zy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ doublecomplex z__1, z__2;
+
+ /* Local variables */
+ static integer i__, ix, iy;
+ extern doublereal dcabs1_(doublecomplex *);
+
+
+/*
+ constant times a vector plus a vector.
+ jack dongarra, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+ /* Parameter adjustments */
+ --zy;
+ --zx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (dcabs1_(za) == 0.) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ i__4 = ix;
+ z__2.r = za->r * zx[i__4].r - za->i * zx[i__4].i, z__2.i = za->r * zx[
+ i__4].i + za->i * zx[i__4].r;
+ z__1.r = zy[i__3].r + z__2.r, z__1.i = zy[i__3].i + z__2.i;
+ zy[i__2].r = z__1.r, zy[i__2].i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = i__;
+ z__2.r = za->r * zx[i__4].r - za->i * zx[i__4].i, z__2.i = za->r * zx[
+ i__4].i + za->i * zx[i__4].r;
+ z__1.r = zy[i__3].r + z__2.r, z__1.i = zy[i__3].i + z__2.i;
+ zy[i__2].r = z__1.r, zy[i__2].i = z__1.i;
+/* L30: */
+ }
+ return 0;
+} /* zaxpy_ */
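+
+/*
+   Hypothetical usage sketch (illustration only, not part of the f2c
+   translation): the complex axpy update zy := za*zx + zy with unit
+   strides.  f2c represents COMPLEX*16 values as the doublecomplex
+   struct with .r and .i members.  Names are invented; the #if 0 guard
+   keeps the sketch out of the build.
+*/
+#if 0
+static void example_zaxpy(void)
+{
+    doublecomplex za = { 2., 0. };                      /* scalar 2+0i */
+    doublecomplex zx[2] = { { 1., 1. }, { 0., 1. } };   /* 1+i, i      */
+    doublecomplex zy[2] = { { 0., 0. }, { 1., 0. } };   /* 0,   1      */
+    integer n = 2, incx = 1, incy = 1;
+
+    /* zy := za*zx + zy; afterwards zy = { 2+2i, 1+2i }. */
+    zaxpy_(&n, &za, zx, &incx, zy, &incy);
+}
+#endif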
+
+/* Subroutine */ int zcopy_(integer *n, doublecomplex *zx, integer *incx,
+ doublecomplex *zy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, ix, iy;
+
+
+/*
+ copies a vector, x, to a vector, y.
+ jack dongarra, linpack, 4/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --zy;
+ --zx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = ix;
+ zy[i__2].r = zx[i__3].r, zy[i__2].i = zx[i__3].i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ zy[i__2].r = zx[i__3].r, zy[i__2].i = zx[i__3].i;
+/* L30: */
+ }
+ return 0;
+} /* zcopy_ */
+
+/* Double Complex */ VOID zdotc_(doublecomplex * ret_val, integer *n,
+ doublecomplex *zx, integer *incx, doublecomplex *zy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, ix, iy;
+ static doublecomplex ztemp;
+
+
+/*
+    forms the dot product of two vectors, conjugating the first vector.
+ jack dongarra, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+ /* Parameter adjustments */
+ --zy;
+ --zx;
+
+ /* Function Body */
+ ztemp.r = 0., ztemp.i = 0.;
+ ret_val->r = 0., ret_val->i = 0.;
+ if (*n <= 0) {
+ return ;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d_cnjg(&z__3, &zx[ix]);
+ i__2 = iy;
+ z__2.r = z__3.r * zy[i__2].r - z__3.i * zy[i__2].i, z__2.i = z__3.r *
+ zy[i__2].i + z__3.i * zy[i__2].r;
+ z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ ret_val->r = ztemp.r, ret_val->i = ztemp.i;
+ return ;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d_cnjg(&z__3, &zx[i__]);
+ i__2 = i__;
+ z__2.r = z__3.r * zy[i__2].r - z__3.i * zy[i__2].i, z__2.i = z__3.r *
+ zy[i__2].i + z__3.i * zy[i__2].r;
+ z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+/* L30: */
+ }
+ ret_val->r = ztemp.r, ret_val->i = ztemp.i;
+ return ;
+} /* zdotc_ */
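+
+/*
+   Hypothetical usage sketch (illustration only, not part of the f2c
+   translation): under f2c's calling convention a COMPLEX*16 function
+   returns its value through a hidden first argument, so zdotc_ is
+   called with a result pointer rather than used as an expression.
+   Names are invented; the #if 0 guard keeps the sketch out of the
+   build.
+*/
+#if 0
+static void example_zdotc(void)
+{
+    doublecomplex zx[2] = { { 1., 1. }, { 2., 0. } };   /* 1+i, 2 */
+    doublecomplex zy[2] = { { 1., 0. }, { 0., 1. } };   /* 1,   i */
+    doublecomplex result;
+    integer n = 2, incx = 1, incy = 1;
+
+    /* result := conjg(zx)'*zy = (1-i)*1 + 2*i = 1+i.  (zdotu_ is the
+       unconjugated variant.) */
+    zdotc_(&result, &n, zx, &incx, zy, &incy);
+}
+#endif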
+
+/* Double Complex */ VOID zdotu_(doublecomplex * ret_val, integer *n,
+ doublecomplex *zx, integer *incx, doublecomplex *zy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublecomplex z__1, z__2;
+
+ /* Local variables */
+ static integer i__, ix, iy;
+ static doublecomplex ztemp;
+
+
+/*
+ forms the dot product of two vectors.
+ jack dongarra, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+ /* Parameter adjustments */
+ --zy;
+ --zx;
+
+ /* Function Body */
+ ztemp.r = 0., ztemp.i = 0.;
+ ret_val->r = 0., ret_val->i = 0.;
+ if (*n <= 0) {
+ return ;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments
+ not equal to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ i__3 = iy;
+ z__2.r = zx[i__2].r * zy[i__3].r - zx[i__2].i * zy[i__3].i, z__2.i =
+ zx[i__2].r * zy[i__3].i + zx[i__2].i * zy[i__3].r;
+ z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ ret_val->r = ztemp.r, ret_val->i = ztemp.i;
+ return ;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__2.r = zx[i__2].r * zy[i__3].r - zx[i__2].i * zy[i__3].i, z__2.i =
+ zx[i__2].r * zy[i__3].i + zx[i__2].i * zy[i__3].r;
+ z__1.r = ztemp.r + z__2.r, z__1.i = ztemp.i + z__2.i;
+ ztemp.r = z__1.r, ztemp.i = z__1.i;
+/* L30: */
+ }
+ ret_val->r = ztemp.r, ret_val->i = ztemp.i;
+ return ;
+} /* zdotu_ */
+
+/* Subroutine */ int zdscal_(integer *n, doublereal *da, doublecomplex *zx,
+ integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublecomplex z__1, z__2;
+
+ /* Local variables */
+ static integer i__, ix;
+
+
+/*
+ scales a vector by a constant.
+ jack dongarra, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --zx;
+
+ /* Function Body */
+ if ((*n <= 0) || (*incx <= 0)) {
+ return 0;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ z__2.r = *da, z__2.i = 0.;
+ i__3 = ix;
+ z__1.r = z__2.r * zx[i__3].r - z__2.i * zx[i__3].i, z__1.i = z__2.r *
+ zx[i__3].i + z__2.i * zx[i__3].r;
+ zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
+ ix += *incx;
+/* L10: */
+ }
+ return 0;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ z__2.r = *da, z__2.i = 0.;
+ i__3 = i__;
+ z__1.r = z__2.r * zx[i__3].r - z__2.i * zx[i__3].i, z__1.i = z__2.r *
+ zx[i__3].i + z__2.i * zx[i__3].r;
+ zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
+/* L30: */
+ }
+ return 0;
+} /* zdscal_ */
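+
+/*
+   Hypothetical usage sketch (illustration only, not part of the f2c
+   translation): scaling a complex vector by a real constant with
+   zdscal_ above.  Names are invented; the #if 0 guard keeps the
+   sketch out of the build.
+*/
+#if 0
+static void example_zdscal(void)
+{
+    doublecomplex zx[2] = { { 1., 2. }, { 3., -1. } };  /* 1+2i, 3-i */
+    doublereal da = .5;
+    integer n = 2, incx = 1;
+
+    /* zx := da*zx; afterwards zx = { 0.5+i, 1.5-0.5i }. */
+    zdscal_(&n, &da, zx, &incx);
+}
+#endif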
+
+/* Subroutine */ int zgemm_(char *transa, char *transb, integer *m, integer *
+ n, integer *k, doublecomplex *alpha, doublecomplex *a, integer *lda,
+ doublecomplex *b, integer *ldb, doublecomplex *beta, doublecomplex *
+ c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static logical nota, notb;
+ static doublecomplex temp;
+ static logical conja, conjb;
+ static integer ncola;
+ extern logical lsame_(char *, char *);
+ static integer nrowa, nrowb;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ ZGEMM performs one of the matrix-matrix operations
+
+ C := alpha*op( A )*op( B ) + beta*C,
+
+ where op( X ) is one of
+
+ op( X ) = X or op( X ) = X' or op( X ) = conjg( X' ),
+
+ alpha and beta are scalars, and A, B and C are matrices, with op( A )
+ an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
+
+ Parameters
+ ==========
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n', op( A ) = A.
+
+ TRANSA = 'T' or 't', op( A ) = A'.
+
+ TRANSA = 'C' or 'c', op( A ) = conjg( A' ).
+
+ Unchanged on exit.
+
+ TRANSB - CHARACTER*1.
+ On entry, TRANSB specifies the form of op( B ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSB = 'N' or 'n', op( B ) = B.
+
+ TRANSB = 'T' or 't', op( B ) = B'.
+
+ TRANSB = 'C' or 'c', op( B ) = conjg( B' ).
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix
+ op( A ) and of the matrix C. M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix
+ op( B ) and the number of columns of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry, K specifies the number of columns of the matrix
+ op( A ) and the number of rows of the matrix op( B ). K must
+ be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANSA = 'N' or 'n', and is m otherwise.
+ Before entry with TRANSA = 'N' or 'n', the leading m by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by m part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANSA = 'N' or 'n' then
+ LDA must be at least max( 1, m ), otherwise LDA must be at
+ least max( 1, k ).
+ Unchanged on exit.
+
+ B - COMPLEX*16 array of DIMENSION ( LDB, kb ), where kb is
+ n when TRANSB = 'N' or 'n', and is k otherwise.
+ Before entry with TRANSB = 'N' or 'n', the leading k by n
+ part of the array B must contain the matrix B, otherwise
+ the leading n by k part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANSB = 'N' or 'n' then
+ LDB must be at least max( 1, k ), otherwise LDB must be at
+ least max( 1, n ).
+ Unchanged on exit.
+
+ BETA - COMPLEX*16 .
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then C need not be set on input.
+ Unchanged on exit.
+
+ C - COMPLEX*16 array of DIMENSION ( LDC, n ).
+ Before entry, the leading m by n part of the array C must
+ contain the matrix C, except when beta is zero, in which
+ case C need not be set on entry.
+ On exit, the array C is overwritten by the m by n matrix
+ ( alpha*op( A )*op( B ) + beta*C ).
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Set NOTA and NOTB as true if A and B respectively are not
+ conjugated or transposed, set CONJA and CONJB as true if A and
+ B respectively are to be transposed but not conjugated and set
+ NROWA, NCOLA and NROWB as the number of rows and columns of A
+ and the number of rows of B respectively.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ nota = lsame_(transa, "N");
+ notb = lsame_(transb, "N");
+ conja = lsame_(transa, "C");
+ conjb = lsame_(transb, "C");
+ if (nota) {
+ nrowa = *m;
+ ncola = *k;
+ } else {
+ nrowa = *k;
+ ncola = *m;
+ }
+ if (notb) {
+ nrowb = *k;
+ } else {
+ nrowb = *n;
+ }
+
+/* Test the input parameters. */
+
+ info = 0;
+ if (! nota && ! conja && ! lsame_(transa, "T")) {
+ info = 1;
+ } else if (! notb && ! conjb && ! lsame_(transb, "T")) {
+ info = 2;
+ } else if (*m < 0) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*k < 0) {
+ info = 5;
+ } else if (*lda < max(1,nrowa)) {
+ info = 8;
+ } else if (*ldb < max(1,nrowb)) {
+ info = 10;
+ } else if (*ldc < max(1,*m)) {
+ info = 13;
+ }
+ if (info != 0) {
+ xerbla_("ZGEMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (((alpha->r == 0. && alpha->i == 0.) || (*
+ k == 0)) && (beta->r == 1. && beta->i == 0.))) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4].i,
+ z__1.i = beta->r * c__[i__4].i + beta->i * c__[
+ i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (notb) {
+ if (nota) {
+
+/* Form C := alpha*A*B + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L50: */
+ }
+ } else if ((beta->r != 1.) || (beta->i != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L60: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = l + j * b_dim1;
+ if ((b[i__3].r != 0.) || (b[i__3].i != 0.)) {
+ i__3 = l + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+/* L90: */
+ }
+ } else if (conja) {
+
+/* Form C := alpha*conjg( A' )*B + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L110: */
+ }
+/* L120: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = l + j * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+ .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4]
+ .i * b[i__5].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L130: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L140: */
+ }
+/* L150: */
+ }
+ }
+ } else if (nota) {
+ if (conjb) {
+
+/* Form C := alpha*A*conjg( B' ) + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L160: */
+ }
+ } else if ((beta->r != 1.) || (beta->i != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L170: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * b_dim1;
+ if ((b[i__3].r != 0.) || (b[i__3].i != 0.)) {
+ d_cnjg(&z__2, &b[j + l * b_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
+ z__1.i = alpha->r * z__2.i + alpha->i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ } else {
+
+/* Form C := alpha*A*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L210: */
+ }
+ } else if ((beta->r != 1.) || (beta->i != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__1.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L220: */
+ }
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * b_dim1;
+ if ((b[i__3].r != 0.) || (b[i__3].i != 0.)) {
+ i__3 = j + l * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+/* L250: */
+ }
+ }
+ } else if (conja) {
+ if (conjb) {
+
+/* Form C := alpha*conjg( A' )*conjg( B' ) + beta*C. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ d_cnjg(&z__4, &b[j + l * b_dim1]);
+ z__2.r = z__3.r * z__4.r - z__3.i * z__4.i, z__2.i =
+ z__3.r * z__4.i + z__3.i * z__4.r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L260: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L270: */
+ }
+/* L280: */
+ }
+ } else {
+
+/* Form C := alpha*conjg( A' )*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = j + l * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L290: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L300: */
+ }
+/* L310: */
+ }
+ }
+ } else {
+ if (conjb) {
+
+/* Form C := alpha*A'*conjg( B' ) + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ d_cnjg(&z__3, &b[j + l * b_dim1]);
+ z__2.r = a[i__4].r * z__3.r - a[i__4].i * z__3.i,
+ z__2.i = a[i__4].r * z__3.i + a[i__4].i *
+ z__3.r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L320: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L330: */
+ }
+/* L340: */
+ }
+ } else {
+
+/* Form C := alpha*A'*B' + beta*C */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + i__ * a_dim1;
+ i__5 = j + l * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i * b[i__5]
+ .i, z__2.i = a[i__4].r * b[i__5].i + a[i__4]
+ .i * b[i__5].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L350: */
+ }
+ if (beta->r == 0. && beta->i == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__2.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = beta->r * c__[i__4].r - beta->i * c__[i__4]
+ .i, z__3.i = beta->r * c__[i__4].i + beta->i *
+ c__[i__4].r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L360: */
+ }
+/* L370: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZGEMM . */
+
+} /* zgemm_ */
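+
+/*
+   Hypothetical usage sketch (illustration only, not part of the f2c
+   translation): a plain (untransposed) complex matrix product through
+   zgemm_ above.  With A the identity and beta = 0, C simply receives a
+   copy of B, which makes the call easy to verify by hand.  Names are
+   invented; the #if 0 guard keeps the sketch out of the build.
+*/
+#if 0
+static void example_zgemm(void)
+{
+    /* A = 2x2 identity, B = [ 1+i 3i ; 2 4-i ], both column-major. */
+    doublecomplex a[4] = { { 1., 0. }, { 0., 0. }, { 0., 0. }, { 1., 0. } };
+    doublecomplex b[4] = { { 1., 1. }, { 2., 0. }, { 0., 3. }, { 4., -1. } };
+    doublecomplex c[4];
+    doublecomplex alpha = { 1., 0. }, beta = { 0., 0. };
+    integer m = 2, n = 2, k = 2, lda = 2, ldb = 2, ldc = 2;
+
+    /* C := alpha*A*B + beta*C; since beta = 0, c need not be set first. */
+    zgemm_("N", "N", &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c, &ldc);
+}
+#endif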
+
+/* Subroutine */ int zgemv_(char *trans, integer *m, integer *n,
+ doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
+ x, integer *incx, doublecomplex *beta, doublecomplex *y, integer *
+ incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static doublecomplex temp;
+ static integer lenx, leny;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj;
+
+
+/*
+ Purpose
+ =======
+
+ ZGEMV performs one of the matrix-vector operations
+
+ y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y, or
+
+ y := alpha*conjg( A' )*x + beta*y,
+
+ where alpha and beta are scalars, x and y are vectors and A is an
+ m by n matrix.
+
+ Parameters
+ ==========
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' y := alpha*A*x + beta*y.
+
+ TRANS = 'T' or 't' y := alpha*A'*x + beta*y.
+
+ TRANS = 'C' or 'c' y := alpha*conjg( A' )*x + beta*y.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix A.
+ M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, n ).
+ Before entry, the leading m by n part of the array A must
+ contain the matrix of coefficients.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+ X - COMPLEX*16 array of DIMENSION at least
+ ( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'
+ and at least
+ ( 1 + ( m - 1 )*abs( INCX ) ) otherwise.
+ Before entry, the incremented array X must contain the
+ vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ BETA - COMPLEX*16 .
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then Y need not be set on input.
+ Unchanged on exit.
+
+ Y - COMPLEX*16 array of DIMENSION at least
+ ( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'
+ and at least
+ ( 1 + ( n - 1 )*abs( INCY ) ) otherwise.
+ Before entry with BETA non-zero, the incremented array Y
+ must contain the vector y. On exit, Y is overwritten by the
+ updated vector y.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(trans, "N") && ! lsame_(trans, "T") && ! lsame_(trans, "C")
+ ) {
+ info = 1;
+ } else if (*m < 0) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*lda < max(1,*m)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ } else if (*incy == 0) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("ZGEMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (alpha->r == 0. && alpha->i == 0. && (
+ beta->r == 1. && beta->i == 0.))) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+
+/*
+ Set LENX and LENY, the lengths of the vectors x and y, and set
+ up the start points in X and Y.
+*/
+
+ if (lsame_(trans, "N")) {
+ lenx = *n;
+ leny = *m;
+ } else {
+ lenx = *m;
+ leny = *n;
+ }
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (lenx - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (leny - 1) * *incy;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+
+ First form y := beta*y.
+*/
+
+ if ((beta->r != 1.) || (beta->i != 0.)) {
+ if (*incy == 1) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0., y[i__2].i = 0.;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = leny;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0. && alpha->i == 0.) {
+ return 0;
+ }
+ if (lsame_(trans, "N")) {
+
+/* Form y := alpha*A*x + y. */
+
+ jx = kx;
+ if (*incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if ((x[i__2].r != 0.) || (x[i__2].i != 0.)) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i +
+ z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+/* L50: */
+ }
+ }
+ jx += *incx;
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if ((x[i__2].r != 0.) || (x[i__2].i != 0.)) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ iy = ky;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i +
+ z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ iy += *incy;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y := alpha*A'*x + y or y := alpha*conjg( A' )*x + y. */
+
+ jy = ky;
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = 0., temp.i = 0.;
+ if (noconj) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+ .i, z__2.i = a[i__3].r * x[i__4].i + a[i__3]
+ .i * x[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ } else {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ }
+ i__2 = jy;
+ i__3 = jy;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i =
+ alpha->r * temp.i + alpha->i * temp.r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ jy += *incy;
+/* L110: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = 0., temp.i = 0.;
+ ix = kx;
+ if (noconj) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[i__4]
+ .i, z__2.i = a[i__3].r * x[i__4].i + a[i__3]
+ .i * x[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ } else {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[i__3]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ }
+ i__2 = jy;
+ i__3 = jy;
+ z__2.r = alpha->r * temp.r - alpha->i * temp.i, z__2.i =
+ alpha->r * temp.i + alpha->i * temp.r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ jy += *incy;
+/* L140: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZGEMV . */
+
+} /* zgemv_ */
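+
+/*
+   Illustrative sketch, not part of the reference BLAS source: a minimal
+   call of zgemv_ above for y := alpha*A*x + beta*y, using this file's
+   f2c typedefs. The values are hypothetical.
+
+       integer m = 2, n = 2, lda = 2, inc = 1;
+       doublecomplex alpha = {1., 0.}, beta = {0., 0.};
+       doublecomplex a[4] = {{1.,0.}, {0.,0.}, {0.,0.}, {1.,0.}};
+       doublecomplex x[2] = {{3.,1.}, {4.,-2.}};
+       doublecomplex y[2];
+
+       zgemv_("N", &m, &n, &alpha, a, &lda, x, &inc, &beta, y, &inc);
+
+   A is the 2 by 2 identity in column-major order, so y comes back equal
+   to x; since beta is zero, y need not be initialised on input. Passing
+   "T" or "C" for TRANS drives the transposed branches instead, with x
+   and y swapping their required lengths as documented above.
+*/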
+
+/* Subroutine */ int zgerc_(integer *m, integer *n, doublecomplex *alpha,
+ doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
+ doublecomplex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, ix, jy, kx, info;
+ static doublecomplex temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ ZGERC performs the rank 1 operation
+
+ A := alpha*x*conjg( y' ) + A,
+
+ where alpha is a scalar, x is an m element vector, y is an n element
+ vector and A is an m by n matrix.
+
+ Parameters
+ ==========
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix A.
+ M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ X - COMPLEX*16 array of dimension at least
+ ( 1 + ( m - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the m
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ Y - COMPLEX*16 array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y.
+ Unchanged on exit.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, n ).
+ Before entry, the leading m by n part of the array A must
+ contain the matrix of coefficients. On exit, A is
+ overwritten by the updated matrix.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (*m < 0) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("ZGERC ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (alpha->r == 0. && alpha->i == 0.)) {
+ return 0;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (*incy > 0) {
+ jy = 1;
+ } else {
+ jy = 1 - (*n - 1) * *incy;
+ }
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if ((y[i__2].r != 0.) || (y[i__2].i != 0.)) {
+ d_cnjg(&z__2, &y[jy]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+ }
+ }
+ jy += *incy;
+/* L20: */
+ }
+ } else {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*m - 1) * *incx;
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if ((y[i__2].r != 0.) || (y[i__2].i != 0.)) {
+ d_cnjg(&z__2, &y[jy]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix = kx;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ }
+ jy += *incy;
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of ZGERC . */
+
+} /* zgerc_ */
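+
+/*
+   Illustrative sketch, not part of the reference BLAS source: a minimal
+   rank-1 update with zgerc_ above, A := alpha*x*conjg( y' ) + A, using
+   this file's f2c typedefs. The values are hypothetical.
+
+       integer m = 2, n = 2, lda = 2, inc = 1;
+       doublecomplex alpha = {1., 0.};
+       doublecomplex x[2] = {{1.,0.}, {0.,1.}};
+       doublecomplex y[2] = {{1.,0.}, {0.,1.}};
+       doublecomplex a[4] = {{0.,0.}, {0.,0.}, {0.,0.}, {0.,0.}};
+
+       zgerc_(&m, &n, &alpha, x, &inc, y, &inc, a, &lda);
+
+   Starting from A = 0, a now holds x*conjg( y' ) in column-major order:
+   column j is x scaled by conjg( y[j] ), so here A = [ 1  -i ; i  1 ].
+*/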
+
+/* Subroutine */ int zgeru_(integer *m, integer *n, doublecomplex *alpha,
+ doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
+ doublecomplex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2;
+
+ /* Local variables */
+ static integer i__, j, ix, jy, kx, info;
+ static doublecomplex temp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ ZGERU performs the rank 1 operation
+
+ A := alpha*x*y' + A,
+
+ where alpha is a scalar, x is an m element vector, y is an n element
+ vector and A is an m by n matrix.
+
+ Parameters
+ ==========
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of the matrix A.
+ M must be at least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ X - COMPLEX*16 array of dimension at least
+ ( 1 + ( m - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the m
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ Y - COMPLEX*16 array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y.
+ Unchanged on exit.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, n ).
+ Before entry, the leading m by n part of the array A must
+ contain the matrix of coefficients. On exit, A is
+ overwritten by the updated matrix.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (*m < 0) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("ZGERU ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (((*m == 0) || (*n == 0)) || (alpha->r == 0. && alpha->i == 0.)) {
+ return 0;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (*incy > 0) {
+ jy = 1;
+ } else {
+ jy = 1 - (*n - 1) * *incy;
+ }
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if ((y[i__2].r != 0.) || (y[i__2].i != 0.)) {
+ i__2 = jy;
+ z__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, z__1.i =
+ alpha->r * y[i__2].i + alpha->i * y[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+ }
+ }
+ jy += *incy;
+/* L20: */
+ }
+ } else {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*m - 1) * *incx;
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jy;
+ if ((y[i__2].r != 0.) || (y[i__2].i != 0.)) {
+ i__2 = jy;
+ z__1.r = alpha->r * y[i__2].r - alpha->i * y[i__2].i, z__1.i =
+ alpha->r * y[i__2].i + alpha->i * y[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix = kx;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ z__2.r = x[i__5].r * temp.r - x[i__5].i * temp.i, z__2.i =
+ x[i__5].r * temp.i + x[i__5].i * temp.r;
+ z__1.r = a[i__4].r + z__2.r, z__1.i = a[i__4].i + z__2.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ }
+ jy += *incy;
+/* L40: */
+ }
+ }
+
+ return 0;
+
+/* End of ZGERU . */
+
+} /* zgeru_ */
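+
+/*
+   Illustrative sketch, not part of the reference BLAS source: the only
+   difference between zgeru_ and zgerc_ is whether y is conjugated, and
+   a one-element example (hypothetical values, f2c typedefs as above)
+   makes the sign flip visible.
+
+       integer one = 1;
+       doublecomplex alpha = {1., 0.};
+       doublecomplex x[1] = {{0., 1.}};
+       doublecomplex y[1] = {{0., 1.}};
+       doublecomplex au[1] = {{0., 0.}}, ac[1] = {{0., 0.}};
+
+       zgeru_(&one, &one, &alpha, x, &one, y, &one, au, &one);
+       zgerc_(&one, &one, &alpha, x, &one, y, &one, ac, &one);
+
+   With x = y = i, zgeru_ adds x*y' = i*i = -1 (au = {-1., 0.}), while
+   zgerc_ adds x*conjg( y' ) = i*(-i) = +1 (ac = {1., 0.}).
+*/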
+
+/* Subroutine */ int zhemv_(char *uplo, integer *n, doublecomplex *alpha,
+ doublecomplex *a, integer *lda, doublecomplex *x, integer *incx,
+ doublecomplex *beta, doublecomplex *y, integer *incy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ ZHEMV performs the matrix-vector operation
+
+ y := alpha*A*x + beta*y,
+
+ where alpha and beta are scalars, x and y are n element vectors and
+ A is an n by n hermitian matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array A is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of A
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of A
+ is to be referenced.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular part of the hermitian matrix and the strictly
+ lower triangular part of A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular part of the hermitian matrix and the strictly
+ upper triangular part of A is not referenced.
+ Note that the imaginary parts of the diagonal elements need
+ not be set and are assumed to be zero.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - COMPLEX*16 array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ BETA - COMPLEX*16 .
+ On entry, BETA specifies the scalar beta. When BETA is
+ supplied as zero then Y need not be set on input.
+ Unchanged on exit.
+
+ Y - COMPLEX*16 array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y. On exit, Y is overwritten by the updated
+ vector y.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --y;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*lda < max(1,*n)) {
+ info = 5;
+ } else if (*incx == 0) {
+ info = 7;
+ } else if (*incy == 0) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("ZHEMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (alpha->r == 0. && alpha->i == 0. && (beta->r == 1. &&
+ beta->i == 0.))) {
+ return 0;
+ }
+
+/* Set up the start points in X and Y. */
+
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through the triangular part
+ of A.
+
+ First form y := beta*y.
+*/
+
+ if ((beta->r != 1.) || (beta->i != 0.)) {
+ if (*incy == 1) {
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ y[i__2].r = 0., y[i__2].i = 0.;
+/* L10: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L20: */
+ }
+ }
+ } else {
+ iy = ky;
+ if (beta->r == 0. && beta->i == 0.) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ y[i__2].r = 0., y[i__2].i = 0.;
+ iy += *incy;
+/* L30: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = iy;
+ i__3 = iy;
+ z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
+ z__1.i = beta->r * y[i__3].i + beta->i * y[i__3]
+ .r;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ iy += *incy;
+/* L40: */
+ }
+ }
+ }
+ }
+ if (alpha->r == 0. && alpha->i == 0.) {
+ return 0;
+ }
+ if (lsame_(uplo, "U")) {
+
+/* Form y when A is stored in upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L50: */
+ }
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ d__1 = a[i__4].r;
+ z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
+ z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+ z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ ix = kx;
+ iy = ky;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L70: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = j + j * a_dim1;
+ d__1 = a[i__4].r;
+ z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
+ z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i;
+ z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form y when A is stored in lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ d__1 = a[i__4].r;
+ z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L90: */
+ }
+ i__2 = j;
+ i__3 = j;
+ z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+/* L100: */
+ }
+ } else {
+ jx = kx;
+ jy = ky;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i =
+ alpha->r * x[i__2].i + alpha->i * x[i__2].r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ temp2.r = 0., temp2.i = 0.;
+ i__2 = jy;
+ i__3 = jy;
+ i__4 = j + j * a_dim1;
+ d__1 = a[i__4].r;
+ z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = iy;
+ i__4 = iy;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
+ z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5]
+ .r;
+ z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
+ y[i__3].r = z__1.r, y[i__3].i = z__1.i;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i =
+ z__3.r * x[i__3].i + z__3.i * x[i__3].r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L110: */
+ }
+ i__2 = jy;
+ i__3 = jy;
+ z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i =
+ alpha->r * temp2.i + alpha->i * temp2.r;
+ z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
+ y[i__2].r = z__1.r, y[i__2].i = z__1.i;
+ jx += *incx;
+ jy += *incy;
+/* L120: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHEMV . */
+
+} /* zhemv_ */
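+
+/*
+   Illustrative sketch, not part of the reference BLAS source: a minimal
+   zhemv_ call with UPLO = "U", using this file's f2c typedefs and
+   hypothetical values. Only the upper triangle of A is stored; the
+   strictly lower triangle (a[1] below, column-major) is never read.
+
+       integer n = 2, lda = 2, inc = 1;
+       doublecomplex alpha = {1., 0.}, beta = {0., 0.};
+       doublecomplex a[4] = {{2.,0.}, {0.,0.}, {0.,1.}, {3.,0.}};
+       doublecomplex x[2] = {{1.,0.}, {1.,0.}};
+       doublecomplex y[2];
+
+       zhemv_("U", &n, &alpha, a, &lda, x, &inc, &beta, y, &inc);
+
+   The stored triangle describes the hermitian matrix A = [ 2 i ; -i 3 ],
+   so y comes back as A*x = {2+i, 3-i}. The imaginary parts of the
+   diagonal entries of A are assumed zero, as documented above.
+*/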
+
+/* Subroutine */ int zher2_(char *uplo, integer *n, doublecomplex *alpha,
+ doublecomplex *x, integer *incx, doublecomplex *y, integer *incy,
+ doublecomplex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, ix, iy, jx, jy, kx, ky, info;
+ static doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ ZHER2 performs the hermitian rank 2 operation
+
+ A := alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) + A,
+
+ where alpha is a scalar, x and y are n element vectors and A is an n
+ by n hermitian matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array A is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of A
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of A
+ is to be referenced.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ X - COMPLEX*16 array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x.
+ Unchanged on exit.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+ Y - COMPLEX*16 array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCY ) ).
+ Before entry, the incremented array Y must contain the n
+ element vector y.
+ Unchanged on exit.
+
+ INCY - INTEGER.
+ On entry, INCY specifies the increment for the elements of
+ Y. INCY must not be zero.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular part of the hermitian matrix and the strictly
+ lower triangular part of A is not referenced. On exit, the
+ upper triangular part of the array A is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular part of the hermitian matrix and the strictly
+ upper triangular part of A is not referenced. On exit, the
+ lower triangular part of the array A is overwritten by the
+ lower triangular part of the updated matrix.
+ Note that the imaginary parts of the diagonal elements need
+ not be set, they are assumed to be zero, and on exit they
+ are set to zero.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --x;
+ --y;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (*n < 0) {
+ info = 2;
+ } else if (*incx == 0) {
+ info = 5;
+ } else if (*incy == 0) {
+ info = 7;
+ } else if (*lda < max(1,*n)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("ZHER2 ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (alpha->r == 0. && alpha->i == 0.)) {
+ return 0;
+ }
+
+/*
+ Set up the start points in X and Y if the increments are not both
+ unity.
+*/
+
+ if ((*incx != 1) || (*incy != 1)) {
+ if (*incx > 0) {
+ kx = 1;
+ } else {
+ kx = 1 - (*n - 1) * *incx;
+ }
+ if (*incy > 0) {
+ ky = 1;
+ } else {
+ ky = 1 - (*n - 1) * *incy;
+ }
+ jx = kx;
+ jy = ky;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through the triangular part
+ of A.
+*/
+
+ if (lsame_(uplo, "U")) {
+
+/* Form A when A is stored in the upper triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (((x[i__2].r != 0.) || (x[i__2].i != 0.)) || (((y[i__3].r
+ != 0.) || (y[i__3].i != 0.)))) {
+ d_cnjg(&z__2, &y[j]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = j;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+ z__3.i;
+ i__6 = i__;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L10: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (((x[i__2].r != 0.) || (x[i__2].i != 0.)) || (((y[i__3].r
+ != 0.) || (y[i__3].i != 0.)))) {
+ d_cnjg(&z__2, &y[jy]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = jx;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ ix = kx;
+ iy = ky;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+ z__3.i;
+ i__6 = iy;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+ ix += *incx;
+ iy += *incy;
+/* L30: */
+ }
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+ jx += *incx;
+ jy += *incy;
+/* L40: */
+ }
+ }
+ } else {
+
+/* Form A when A is stored in the lower triangle. */
+
+ if (*incx == 1 && *incy == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ i__3 = j;
+ if (((x[i__2].r != 0.) || (x[i__2].i != 0.)) || (((y[i__3].r
+ != 0.) || (y[i__3].i != 0.)))) {
+ d_cnjg(&z__2, &y[j]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = j;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = j;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = j;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = i__;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+ z__3.i;
+ i__6 = i__;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L50: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ i__3 = jy;
+ if (((x[i__2].r != 0.) || (x[i__2].i != 0.)) || (((y[i__3].r
+ != 0.) || (y[i__3].i != 0.)))) {
+ d_cnjg(&z__2, &y[jy]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i, z__1.i =
+ alpha->r * z__2.i + alpha->i * z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__2 = jx;
+ z__2.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
+ z__2.i = alpha->r * x[i__2].i + alpha->i * x[i__2]
+ .r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ i__4 = jx;
+ z__2.r = x[i__4].r * temp1.r - x[i__4].i * temp1.i,
+ z__2.i = x[i__4].r * temp1.i + x[i__4].i *
+ temp1.r;
+ i__5 = jy;
+ z__3.r = y[i__5].r * temp2.r - y[i__5].i * temp2.i,
+ z__3.i = y[i__5].r * temp2.i + y[i__5].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = a[i__3].r + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ ix = jx;
+ iy = jy;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ iy += *incy;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ i__5 = ix;
+ z__3.r = x[i__5].r * temp1.r - x[i__5].i * temp1.i,
+ z__3.i = x[i__5].r * temp1.i + x[i__5].i *
+ temp1.r;
+ z__2.r = a[i__4].r + z__3.r, z__2.i = a[i__4].i +
+ z__3.i;
+ i__6 = iy;
+ z__4.r = y[i__6].r * temp2.r - y[i__6].i * temp2.i,
+ z__4.i = y[i__6].r * temp2.i + y[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
+ a[i__3].r = z__1.r, a[i__3].i = z__1.i;
+/* L70: */
+ }
+ } else {
+ i__2 = j + j * a_dim1;
+ i__3 = j + j * a_dim1;
+ d__1 = a[i__3].r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ }
+ jx += *incx;
+ jy += *incy;
+/* L80: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHER2 . */
+
+} /* zher2_ */
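+
+/*
+   Illustrative sketch, not part of the reference BLAS source: a
+   one-element zher2_ update (hypothetical values, f2c typedefs as
+   above) showing that the result on the diagonal is real.
+
+       integer n = 1, one = 1;
+       doublecomplex alpha = {0., 1.};
+       doublecomplex x[1] = {{1., 0.}};
+       doublecomplex y[1] = {{0., 1.}};
+       doublecomplex a[1] = {{5., 0.}};
+
+       zher2_("U", &n, &alpha, x, &one, y, &one, a, &one);
+
+   The update alpha*x*conjg( y' ) + conjg( alpha )*y*conjg( x' ) is
+   i*1*(-i) + (-i)*i*1 = 2, so a becomes {7., 0.}; the routine writes
+   an exact zero into the imaginary part of every diagonal entry, as
+   documented above.
+*/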
+
+/* Subroutine */ int zher2k_(char *uplo, char *trans, integer *n, integer *k,
+ doublecomplex *alpha, doublecomplex *a, integer *lda, doublecomplex *
+ b, integer *ldb, doublereal *beta, doublecomplex *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5, i__6, i__7;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3, z__4, z__5, z__6;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static doublecomplex temp1, temp2;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ ZHER2K performs one of the hermitian rank 2k operations
+
+ C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) + beta*C,
+
+ or
+
+ C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A + beta*C,
+
+ where alpha and beta are scalars with beta real, C is an n by n
+ hermitian matrix and A and B are n by k matrices in the first case
+ and k by n matrices in the second case.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array C is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of C
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of C
+ is to be referenced.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' C := alpha*A*conjg( B' ) +
+ conjg( alpha )*B*conjg( A' ) +
+ beta*C.
+
+ TRANS = 'C' or 'c' C := alpha*conjg( A' )*B +
+ conjg( alpha )*conjg( B' )*A +
+ beta*C.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry with TRANS = 'N' or 'n', K specifies the number
+ of columns of the matrices A and B, and on entry with
+ TRANS = 'C' or 'c', K specifies the number of rows of the
+ matrices A and B. K must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by n part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDA must be at least max( 1, n ), otherwise LDA must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ B - COMPLEX*16 array of DIMENSION ( LDB, kb ), where kb is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array B must contain the matrix B, otherwise
+ the leading k by n part of the array B must contain the
+ matrix B.
+ Unchanged on exit.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDB must be at least max( 1, n ), otherwise LDB must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ BETA - DOUBLE PRECISION .
+ On entry, BETA specifies the scalar beta.
+ Unchanged on exit.
+
+ C - COMPLEX*16 array of DIMENSION ( LDC, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array C must contain the upper
+ triangular part of the hermitian matrix and the strictly
+ lower triangular part of C is not referenced. On exit, the
+ upper triangular part of the array C is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array C must contain the lower
+ triangular part of the hermitian matrix and the strictly
+ upper triangular part of C is not referenced. On exit, the
+ lower triangular part of the array C is overwritten by the
+ lower triangular part of the updated matrix.
+ Note that the imaginary parts of the diagonal elements need
+ not be set, they are assumed to be zero, and on exit they
+ are set to zero.
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+ -- Modified 8-Nov-93 to set C(J,J) to DBLE( C(J,J) ) when BETA = 1.
+ Ed Anderson, Cray Research Inc.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldb < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldc < max(1,*n)) {
+ info = 12;
+ }
+ if (info != 0) {
+ xerbla_("ZHER2K", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (((alpha->r == 0. && alpha->i == 0.) || (*k == 0)) && *
+ beta == 1.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ if (upper) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/*
+ Form C := alpha*A*conjg( B' ) + conjg( alpha )*B*conjg( A' ) +
+ C.
+*/
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L90: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L100: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (((a[i__3].r != 0.) || (a[i__3].i != 0.)) || (((b[i__4]
+ .r != 0.) || (b[i__4].i != 0.)))) {
+ d_cnjg(&z__2, &b[j + l * b_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
+ z__1.i = alpha->r * z__2.i + alpha->i *
+ z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__3 = j + l * a_dim1;
+ z__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__2.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, z__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5]
+ .i + z__3.i;
+ i__7 = i__ + l * b_dim1;
+ z__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, z__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i +
+ z__4.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L110: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ z__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
+ z__2.i = a[i__5].r * temp1.i + a[i__5].i *
+ temp1.r;
+ i__6 = j + l * b_dim1;
+ z__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
+ z__3.i = b[i__6].r * temp2.i + b[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L140: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L150: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ i__4 = j + l * b_dim1;
+ if (((a[i__3].r != 0.) || (a[i__3].i != 0.)) || (((b[i__4]
+ .r != 0.) || (b[i__4].i != 0.)))) {
+ d_cnjg(&z__2, &b[j + l * b_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i * z__2.i,
+ z__1.i = alpha->r * z__2.i + alpha->i *
+ z__2.r;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ i__3 = j + l * a_dim1;
+ z__2.r = alpha->r * a[i__3].r - alpha->i * a[i__3].i,
+ z__2.i = alpha->r * a[i__3].i + alpha->i * a[
+ i__3].r;
+ d_cnjg(&z__1, &z__2);
+ temp2.r = z__1.r, temp2.i = z__1.i;
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__3.r = a[i__6].r * temp1.r - a[i__6].i *
+ temp1.i, z__3.i = a[i__6].r * temp1.i + a[
+ i__6].i * temp1.r;
+ z__2.r = c__[i__5].r + z__3.r, z__2.i = c__[i__5]
+ .i + z__3.i;
+ i__7 = i__ + l * b_dim1;
+ z__4.r = b[i__7].r * temp2.r - b[i__7].i *
+ temp2.i, z__4.i = b[i__7].r * temp2.i + b[
+ i__7].i * temp2.r;
+ z__1.r = z__2.r + z__4.r, z__1.i = z__2.i +
+ z__4.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L160: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ z__2.r = a[i__5].r * temp1.r - a[i__5].i * temp1.i,
+ z__2.i = a[i__5].r * temp1.i + a[i__5].i *
+ temp1.r;
+ i__6 = j + l * b_dim1;
+ z__3.r = b[i__6].r * temp2.r - b[i__6].i * temp2.i,
+ z__3.i = b[i__6].r * temp2.i + b[i__6].i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ d__1 = c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/*
+ Form C := alpha*conjg( A' )*B + conjg( alpha )*conjg( B' )*A +
+ C.
+*/
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp1.r = 0., temp1.i = 0.;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ d_cnjg(&z__3, &b[l + i__ * b_dim1]);
+ i__4 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+ z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+ .r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L190: */
+ }
+ if (i__ == j) {
+ if (*beta == 0.) {
+ i__3 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ } else {
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = *beta * c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+ } else {
+ if (*beta == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = *beta * c__[i__4].r, z__3.i = *beta *
+ c__[i__4].i;
+ z__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i +
+ z__4.i;
+ d_cnjg(&z__6, alpha);
+ z__5.r = z__6.r * temp2.r - z__6.i * temp2.i,
+ z__5.i = z__6.r * temp2.i + z__6.i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i +
+ z__5.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+ }
+/* L200: */
+ }
+/* L210: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ temp1.r = 0., temp1.i = 0.;
+ temp2.r = 0., temp2.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4].i,
+ z__2.i = z__3.r * b[i__4].i + z__3.i * b[i__4]
+ .r;
+ z__1.r = temp1.r + z__2.r, z__1.i = temp1.i + z__2.i;
+ temp1.r = z__1.r, temp1.i = z__1.i;
+ d_cnjg(&z__3, &b[l + i__ * b_dim1]);
+ i__4 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+ z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+ .r;
+ z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
+ temp2.r = z__1.r, temp2.i = z__1.i;
+/* L220: */
+ }
+ if (i__ == j) {
+ if (*beta == 0.) {
+ i__3 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ } else {
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ d__1 = *beta * c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+ } else {
+ if (*beta == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__2.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ d_cnjg(&z__4, alpha);
+ z__3.r = z__4.r * temp2.r - z__4.i * temp2.i,
+ z__3.i = z__4.r * temp2.i + z__4.i *
+ temp2.r;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
+ z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = *beta * c__[i__4].r, z__3.i = *beta *
+ c__[i__4].i;
+ z__4.r = alpha->r * temp1.r - alpha->i * temp1.i,
+ z__4.i = alpha->r * temp1.i + alpha->i *
+ temp1.r;
+ z__2.r = z__3.r + z__4.r, z__2.i = z__3.i +
+ z__4.i;
+ d_cnjg(&z__6, alpha);
+ z__5.r = z__6.r * temp2.r - z__6.i * temp2.i,
+ z__5.i = z__6.r * temp2.i + z__6.i *
+ temp2.r;
+ z__1.r = z__2.r + z__5.r, z__1.i = z__2.i +
+ z__5.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHER2K. */
+
+} /* zher2k_ */
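+
+/*
+   Illustrative sketch, not part of the reference BLAS source: a
+   one-by-one zher2k_ call (hypothetical values, f2c typedefs as above).
+   Note that BETA is a doublereal here, unlike the doublecomplex beta of
+   the Level 2 routines.
+
+       integer n = 1, k = 1, one = 1;
+       doublecomplex alpha = {1., 0.};
+       doublereal beta = 1.;
+       doublecomplex a[1] = {{1., 1.}};
+       doublecomplex b[1] = {{1., 0.}};
+       doublecomplex c[1] = {{1., 0.}};
+
+       zher2k_("U", "N", &n, &k, &alpha, a, &one, b, &one, &beta,
+               c, &one);
+
+   The rank-2k update a*conjg( b ) + b*conjg( a ) = (1+i) + (1-i) = 2 is
+   real, and with beta = 1 the stored diagonal becomes c = {3., 0.}.
+*/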
+
+/* Subroutine */ int zherk_(char *uplo, char *trans, integer *n, integer *k,
+ doublereal *alpha, doublecomplex *a, integer *lda, doublereal *beta,
+ doublecomplex *c__, integer *ldc)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ doublereal d__1;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, l, info;
+ static doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static doublereal rtemp;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ Purpose
+ =======
+
+ ZHERK performs one of the hermitian rank k operations
+
+ C := alpha*A*conjg( A' ) + beta*C,
+
+ or
+
+ C := alpha*conjg( A' )*A + beta*C,
+
+ where alpha and beta are real scalars, C is an n by n hermitian
+ matrix and A is an n by k matrix in the first case and a k by n
+ matrix in the second case.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the upper or lower
+ triangular part of the array C is to be referenced as
+ follows:
+
+ UPLO = 'U' or 'u' Only the upper triangular part of C
+ is to be referenced.
+
+ UPLO = 'L' or 'l' Only the lower triangular part of C
+ is to be referenced.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' C := alpha*A*conjg( A' ) + beta*C.
+
+ TRANS = 'C' or 'c' C := alpha*conjg( A' )*A + beta*C.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix C. N must be
+ at least zero.
+ Unchanged on exit.
+
+ K - INTEGER.
+ On entry with TRANS = 'N' or 'n', K specifies the number
+ of columns of the matrix A, and on entry with
+ TRANS = 'C' or 'c', K specifies the number of rows of the
+ matrix A. K must be at least zero.
+ Unchanged on exit.
+
+ ALPHA - DOUBLE PRECISION .
+ On entry, ALPHA specifies the scalar alpha.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, ka ), where ka is
+ k when TRANS = 'N' or 'n', and is n otherwise.
+ Before entry with TRANS = 'N' or 'n', the leading n by k
+ part of the array A must contain the matrix A, otherwise
+ the leading k by n part of the array A must contain the
+ matrix A.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When TRANS = 'N' or 'n'
+ then LDA must be at least max( 1, n ), otherwise LDA must
+ be at least max( 1, k ).
+ Unchanged on exit.
+
+ BETA - DOUBLE PRECISION.
+ On entry, BETA specifies the scalar beta.
+ Unchanged on exit.
+
+ C - COMPLEX*16 array of DIMENSION ( LDC, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array C must contain the upper
+ triangular part of the hermitian matrix and the strictly
+ lower triangular part of C is not referenced. On exit, the
+ upper triangular part of the array C is overwritten by the
+ upper triangular part of the updated matrix.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array C must contain the lower
+ triangular part of the hermitian matrix and the strictly
+ upper triangular part of C is not referenced. On exit, the
+ lower triangular part of the array C is overwritten by the
+ lower triangular part of the updated matrix.
+ Note that the imaginary parts of the diagonal elements need
+ not be set; they are assumed to be zero, and on exit they
+ are set to zero.
+
+ LDC - INTEGER.
+ On entry, LDC specifies the first dimension of C as declared
+ in the calling (sub) program. LDC must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+ -- Modified 8-Nov-93 to set C(J,J) to DBLE( C(J,J) ) when BETA = 1.
+ Ed Anderson, Cray Research Inc.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+
+ /* Function Body */
+ if (lsame_(trans, "N")) {
+ nrowa = *n;
+ } else {
+ nrowa = *k;
+ }
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! upper && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
+ info = 2;
+ } else if (*n < 0) {
+ info = 3;
+ } else if (*k < 0) {
+ info = 4;
+ } else if (*lda < max(1,nrowa)) {
+ info = 7;
+ } else if (*ldc < max(1,*n)) {
+ info = 10;
+ }
+ if (info != 0) {
+ xerbla_("ZHERK ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if ((*n == 0) || (((*alpha == 0.) || (*k == 0)) && *beta == 1.)) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (*alpha == 0.) {
+ if (upper) {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L30: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+/* L40: */
+ }
+ }
+ } else {
+ if (*beta == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lsame_(trans, "N")) {
+
+/* Form C := alpha*A*conjg( A' ) + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L90: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L100: */
+ }
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ if ((a[i__3].r != 0.) || (a[i__3].i != 0.)) {
+ d_cnjg(&z__2, &a[j + l * a_dim1]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L110: */
+ }
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = i__ + l * a_dim1;
+ z__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ d__1 = c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ }
+/* L120: */
+ }
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*beta == 0.) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ c__[i__3].r = 0., c__[i__3].i = 0.;
+/* L140: */
+ }
+ } else if (*beta != 1.) {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ z__1.r = *beta * c__[i__4].r, z__1.i = *beta * c__[
+ i__4].i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+/* L150: */
+ }
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = j + l * a_dim1;
+ if ((a[i__3].r != 0.) || (a[i__3].i != 0.)) {
+ d_cnjg(&z__2, &a[j + l * a_dim1]);
+ z__1.r = *alpha * z__2.r, z__1.i = *alpha * z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = j + j * c_dim1;
+ i__4 = j + j * c_dim1;
+ i__5 = j + l * a_dim1;
+ z__1.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__1.i = temp.r * a[i__5].i + temp.i * a[i__5]
+ .r;
+ d__1 = c__[i__4].r + z__1.r;
+ c__[i__3].r = d__1, c__[i__3].i = 0.;
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * c_dim1;
+ i__6 = i__ + l * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6].i,
+ z__2.i = temp.r * a[i__6].i + temp.i * a[
+ i__6].r;
+ z__1.r = c__[i__5].r + z__2.r, z__1.i = c__[i__5]
+ .i + z__2.i;
+ c__[i__4].r = z__1.r, c__[i__4].i = z__1.i;
+/* L160: */
+ }
+ }
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ } else {
+
+/* Form C := alpha*conjg( A' )*A + beta*C. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+ z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L190: */
+ }
+ if (*beta == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = *alpha * temp.r, z__1.i = *alpha * temp.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = *alpha * temp.r, z__2.i = *alpha * temp.i;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = *beta * c__[i__4].r, z__3.i = *beta * c__[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L200: */
+ }
+ rtemp = 0.;
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ d_cnjg(&z__3, &a[l + j * a_dim1]);
+ i__3 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__3].r - z__3.i * a[i__3].i, z__2.i =
+ z__3.r * a[i__3].i + z__3.i * a[i__3].r;
+ z__1.r = rtemp + z__2.r, z__1.i = z__2.i;
+ rtemp = z__1.r;
+/* L210: */
+ }
+ if (*beta == 0.) {
+ i__2 = j + j * c_dim1;
+ d__1 = *alpha * rtemp;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *alpha * rtemp + *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+/* L220: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ rtemp = 0.;
+ i__2 = *k;
+ for (l = 1; l <= i__2; ++l) {
+ d_cnjg(&z__3, &a[l + j * a_dim1]);
+ i__3 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__3].r - z__3.i * a[i__3].i, z__2.i =
+ z__3.r * a[i__3].i + z__3.i * a[i__3].r;
+ z__1.r = rtemp + z__2.r, z__1.i = z__2.i;
+ rtemp = z__1.r;
+/* L230: */
+ }
+ if (*beta == 0.) {
+ i__2 = j + j * c_dim1;
+ d__1 = *alpha * rtemp;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ } else {
+ i__2 = j + j * c_dim1;
+ i__3 = j + j * c_dim1;
+ d__1 = *alpha * rtemp + *beta * c__[i__3].r;
+ c__[i__2].r = d__1, c__[i__2].i = 0.;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ temp.r = 0., temp.i = 0.;
+ i__3 = *k;
+ for (l = 1; l <= i__3; ++l) {
+ d_cnjg(&z__3, &a[l + i__ * a_dim1]);
+ i__4 = l + j * a_dim1;
+ z__2.r = z__3.r * a[i__4].r - z__3.i * a[i__4].i,
+ z__2.i = z__3.r * a[i__4].i + z__3.i * a[i__4]
+ .r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i + z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L240: */
+ }
+ if (*beta == 0.) {
+ i__3 = i__ + j * c_dim1;
+ z__1.r = *alpha * temp.r, z__1.i = *alpha * temp.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ } else {
+ i__3 = i__ + j * c_dim1;
+ z__2.r = *alpha * temp.r, z__2.i = *alpha * temp.i;
+ i__4 = i__ + j * c_dim1;
+ z__3.r = *beta * c__[i__4].r, z__3.i = *beta * c__[
+ i__4].i;
+ z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
+ c__[i__3].r = z__1.r, c__[i__3].i = z__1.i;
+ }
+/* L250: */
+ }
+/* L260: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZHERK . */
+
+} /* zherk_ */
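+
+/*
+   Illustrative usage sketch (a hypothetical helper, not part of the f2c
+   translation): a rank-k update C := A*conjg( A' ) through zherk_, with
+   alpha = 1 and beta = 0 so that C need not be initialized on entry.
+*/
+static void example_zherk_usage(void)
+{
+    integer n = 2, k = 2, lda = 2, ldc = 2;
+    doublereal alpha = 1., beta = 0.;
+    /* Column-major A = [[1, 2-i], [i, 0]] */
+    doublecomplex a[4] = {{1., 0.}, {0., 1.}, {2., -1.}, {0., 0.}};
+    doublecomplex c[4];
+
+    zherk_("U", "N", &n, &k, &alpha, a, &lda, &beta, c, &ldc);
+    /* The upper triangle of c now holds C(1,1) = 6, C(1,2) = -i,
+       C(2,2) = 1, with the diagonal imaginary parts set to zero. */
+}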
+
+/* Subroutine */ int zscal_(integer *n, doublecomplex *za, doublecomplex *zx,
+ integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublecomplex z__1;
+
+ /* Local variables */
+ static integer i__, ix;
+
+
+/*
+ scales a vector by a constant.
+ jack dongarra, 3/11/78.
+ modified 3/93 to return if incx .le. 0.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --zx;
+
+ /* Function Body */
+ if ((*n <= 0) || (*incx <= 0)) {
+ return 0;
+ }
+ if (*incx == 1) {
+ goto L20;
+ }
+
+/* code for increment not equal to 1 */
+
+ ix = 1;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ i__3 = ix;
+ z__1.r = za->r * zx[i__3].r - za->i * zx[i__3].i, z__1.i = za->r * zx[
+ i__3].i + za->i * zx[i__3].r;
+ zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
+ ix += *incx;
+/* L10: */
+ }
+ return 0;
+
+/* code for increment equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__;
+ z__1.r = za->r * zx[i__3].r - za->i * zx[i__3].i, z__1.i = za->r * zx[
+ i__3].i + za->i * zx[i__3].r;
+ zx[i__2].r = z__1.r, zx[i__2].i = z__1.i;
+/* L30: */
+ }
+ return 0;
+} /* zscal_ */
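+
+/*
+   Illustrative usage sketch (a hypothetical helper, not part of the f2c
+   translation): scaling a unit-stride vector by 2+i with zscal_.
+*/
+static void example_zscal_usage(void)
+{
+    integer n = 3, incx = 1;
+    doublecomplex za = {2., 1.};
+    doublecomplex zx[3] = {{1., 0.}, {0., 1.}, {1., 1.}};
+
+    zscal_(&n, &za, zx, &incx);
+    /* zx is now (2+i, -1+2i, 1+3i). */
+}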
+
+/* Subroutine */ int zswap_(integer *n, doublecomplex *zx, integer *incx,
+ doublecomplex *zy, integer *incy)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, ix, iy;
+ static doublecomplex ztemp;
+
+
+/*
+ interchanges two vectors.
+ jack dongarra, 3/11/78.
+ modified 12/3/93, array(1) declarations changed to array(*)
+*/
+
+
+ /* Parameter adjustments */
+ --zy;
+ --zx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments not equal
+ to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ ztemp.r = zx[i__2].r, ztemp.i = zx[i__2].i;
+ i__2 = ix;
+ i__3 = iy;
+ zx[i__2].r = zy[i__3].r, zx[i__2].i = zy[i__3].i;
+ i__2 = iy;
+ zy[i__2].r = ztemp.r, zy[i__2].i = ztemp.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ ztemp.r = zx[i__2].r, ztemp.i = zx[i__2].i;
+ i__2 = i__;
+ i__3 = i__;
+ zx[i__2].r = zy[i__3].r, zx[i__2].i = zy[i__3].i;
+ i__2 = i__;
+ zy[i__2].r = ztemp.r, zy[i__2].i = ztemp.i;
+/* L30: */
+ }
+ return 0;
+} /* zswap_ */
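+
+/*
+   Illustrative usage sketch (a hypothetical helper, not part of the f2c
+   translation): exchanging two vectors with zswap_.  A negative incy
+   makes zy be traversed backwards, so zx(1) is exchanged with zy(2) and
+   zx(2) with zy(1).
+*/
+static void example_zswap_usage(void)
+{
+    integer n = 2, incx = 1, incy = -1;
+    doublecomplex zx[2] = {{1., 0.}, {2., 0.}};
+    doublecomplex zy[2] = {{3., 0.}, {4., 0.}};
+
+    zswap_(&n, zx, &incx, zy, &incy);
+    /* zx is now (4, 3) and zy is (2, 1). */
+}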
+
+/* Subroutine */ int ztrmm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, doublecomplex *alpha, doublecomplex *a,
+ integer *lda, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, k, info;
+ static doublecomplex temp;
+ static logical lside;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj, nounit;
+
+
+/*
+ Purpose
+ =======
+
+ ZTRMM performs one of the matrix-matrix operations
+
+ B := alpha*op( A )*B, or B := alpha*B*op( A )
+
+ where alpha is a scalar, B is an m by n matrix, A is a unit, or
+ non-unit, upper or lower triangular matrix and op( A ) is one of
+
+ op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ).
+
+ Parameters
+ ==========
+
+ SIDE - CHARACTER*1.
+ On entry, SIDE specifies whether op( A ) multiplies B from
+ the left or right as follows:
+
+ SIDE = 'L' or 'l' B := alpha*op( A )*B.
+
+ SIDE = 'R' or 'r' B := alpha*B*op( A ).
+
+ Unchanged on exit.
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix A is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n' op( A ) = A.
+
+ TRANSA = 'T' or 't' op( A ) = A'.
+
+ TRANSA = 'C' or 'c' op( A ) = conjg( A' ).
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit triangular
+ as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of B. M must be at
+ least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of B. N must be
+ at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha. When alpha is
+ zero then A is not referenced and B need not be set before
+ entry.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, k ), where k is m
+ when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
+ Before entry with UPLO = 'U' or 'u', the leading k by k
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading k by k
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When SIDE = 'L' or 'l' then
+ LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
+ then LDA must be at least max( 1, n ).
+ Unchanged on exit.
+
+ B - COMPLEX*16 array of DIMENSION ( LDB, n ).
+ Before entry, the leading m by n part of the array B must
+ contain the matrix B, and on exit is overwritten by the
+ transformed matrix.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. LDB must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ noconj = lsame_(transa, "T");
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("ZTRMM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*A*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * b_dim1;
+ if ((b[i__3].r != 0.) || (b[i__3].i != 0.)) {
+ i__3 = k + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, z__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = k - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * a_dim1;
+ z__2.r = temp.r * a[i__6].r - temp.i * a[i__6]
+ .i, z__2.i = temp.r * a[i__6].i +
+ temp.i * a[i__6].r;
+ z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
+ .i + z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L30: */
+ }
+ if (nounit) {
+ i__3 = k + k * a_dim1;
+ z__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
+ .i, z__1.i = temp.r * a[i__3].i +
+ temp.i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = k + j * b_dim1;
+ b[i__3].r = temp.r, b[i__3].i = temp.i;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (k = *m; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ if ((b[i__2].r != 0.) || (b[i__2].i != 0.)) {
+ i__2 = k + j * b_dim1;
+ z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2]
+ .i, z__1.i = alpha->r * b[i__2].i +
+ alpha->i * b[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = k + j * b_dim1;
+ b[i__2].r = temp.r, b[i__2].i = temp.i;
+ if (nounit) {
+ i__2 = k + j * b_dim1;
+ i__3 = k + j * b_dim1;
+ i__4 = k + k * a_dim1;
+ z__1.r = b[i__3].r * a[i__4].r - b[i__3].i *
+ a[i__4].i, z__1.i = b[i__3].r * a[
+ i__4].i + b[i__3].i * a[i__4].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = k + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5]
+ .i, z__2.i = temp.r * a[i__5].i +
+ temp.i * a[i__5].r;
+ z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
+ .i + z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L60: */
+ }
+ }
+/* L70: */
+ }
+/* L80: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*A'*B or B := alpha*conjg( A' )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ temp.r = b[i__2].r, temp.i = b[i__2].i;
+ if (noconj) {
+ if (nounit) {
+ i__2 = i__ + i__ * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2]
+ .i, z__1.i = temp.r * a[i__2].i +
+ temp.i * a[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + i__ * a_dim1;
+ i__4 = k + j * b_dim1;
+ z__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
+ b[i__4].i, z__2.i = a[i__3].r * b[
+ i__4].i + a[i__3].i * b[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = i__ - 1;
+ for (k = 1; k <= i__2; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__3 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__3].r - z__3.i * b[i__3]
+ .i, z__2.i = z__3.r * b[i__3].i +
+ z__3.i * b[i__3].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ }
+ i__2 = i__ + j * b_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ temp.r = b[i__3].r, temp.i = b[i__3].i;
+ if (noconj) {
+ if (nounit) {
+ i__3 = i__ + i__ * a_dim1;
+ z__1.r = temp.r * a[i__3].r - temp.i * a[i__3]
+ .i, z__1.i = temp.r * a[i__3].i +
+ temp.i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ i__4 = k + i__ * a_dim1;
+ i__5 = k + j * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
+ b[i__5].i, z__2.i = a[i__4].r * b[
+ i__5].i + a[i__4].i * b[i__5].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L130: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (k = i__ + 1; k <= i__3; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__4 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4]
+ .i, z__2.i = z__3.r * b[i__4].i +
+ z__3.i * b[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L140: */
+ }
+ }
+ i__3 = i__ + j * b_dim1;
+ z__1.r = alpha->r * temp.r - alpha->i * temp.i,
+ z__1.i = alpha->r * temp.i + alpha->i *
+ temp.r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L150: */
+ }
+/* L160: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*A. */
+
+ if (upper) {
+ for (j = *n; j >= 1; --j) {
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ z__1.i = temp.r * a[i__1].i + temp.i * a[i__1]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[i__3]
+ .r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L170: */
+ }
+ i__1 = j - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + j * a_dim1;
+ if ((a[i__2].r != 0.) || (a[i__2].i != 0.)) {
+ i__2 = k + j * a_dim1;
+ z__1.r = alpha->r * a[i__2].r - alpha->i * a[i__2]
+ .i, z__1.i = alpha->r * a[i__2].i +
+ alpha->i * a[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, z__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
+ .i + z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L180: */
+ }
+ }
+/* L190: */
+ }
+/* L200: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__1.i = temp.r * a[i__2].i + temp.i * a[i__2]
+ .r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[i__4]
+ .r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L210: */
+ }
+ i__2 = *n;
+ for (k = j + 1; k <= i__2; ++k) {
+ i__3 = k + j * a_dim1;
+ if ((a[i__3].r != 0.) || (a[i__3].i != 0.)) {
+ i__3 = k + j * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[i__3]
+ .i, z__1.i = alpha->r * a[i__3].i +
+ alpha->i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, z__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
+ .i + z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L220: */
+ }
+ }
+/* L230: */
+ }
+/* L240: */
+ }
+ }
+ } else {
+
+/* Form B := alpha*B*A' or B := alpha*B*conjg( A' ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = j + k * a_dim1;
+ if ((a[i__3].r != 0.) || (a[i__3].i != 0.)) {
+ if (noconj) {
+ i__3 = j + k * a_dim1;
+ z__1.r = alpha->r * a[i__3].r - alpha->i * a[
+ i__3].i, z__1.i = alpha->r * a[i__3]
+ .i + alpha->i * a[i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[j + k * a_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i *
+ z__2.i, z__1.i = alpha->r * z__2.i +
+ alpha->i * z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, z__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ z__1.r = b[i__5].r + z__2.r, z__1.i = b[i__5]
+ .i + z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ if (noconj) {
+ i__2 = k + k * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ if ((temp.r != 1.) || (temp.i != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ } else {
+ for (k = *n; k >= 1; --k) {
+ i__1 = *n;
+ for (j = k + 1; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ if ((a[i__2].r != 0.) || (a[i__2].i != 0.)) {
+ if (noconj) {
+ i__2 = j + k * a_dim1;
+ z__1.r = alpha->r * a[i__2].r - alpha->i * a[
+ i__2].i, z__1.i = alpha->r * a[i__2]
+ .i + alpha->i * a[i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[j + k * a_dim1]);
+ z__1.r = alpha->r * z__2.r - alpha->i *
+ z__2.i, z__1.i = alpha->r * z__2.i +
+ alpha->i * z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, z__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ z__1.r = b[i__4].r + z__2.r, z__1.i = b[i__4]
+ .i + z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L290: */
+ }
+ }
+/* L300: */
+ }
+ temp.r = alpha->r, temp.i = alpha->i;
+ if (nounit) {
+ if (noconj) {
+ i__1 = k + k * a_dim1;
+ z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ z__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ if ((temp.r != 1.) || (temp.i != 0.)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L310: */
+ }
+ }
+/* L320: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRMM . */
+
+} /* ztrmm_ */
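+
+/*
+   Illustrative usage sketch (a hypothetical helper, not part of the f2c
+   translation): B := A*B through ztrmm_, applying a 2 x 2 non-unit
+   upper triangular A from the left to a single column.
+*/
+static void example_ztrmm_usage(void)
+{
+    integer m = 2, n = 1, lda = 2, ldb = 2;
+    doublecomplex alpha = {1., 0.};
+    /* Column-major A = [[1, 2], [0, 3]]; A(2,1) is not referenced. */
+    doublecomplex a[4] = {{1., 0.}, {0., 0.}, {2., 0.}, {3., 0.}};
+    doublecomplex b[2] = {{1., 0.}, {1., 0.}};
+
+    ztrmm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);
+    /* b is overwritten with A*b = (3, 3). */
+}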
+
+/* Subroutine */ int ztrmv_(char *uplo, char *trans, char *diag, integer *n,
+ doublecomplex *a, integer *lda, doublecomplex *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void d_cnjg(doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, ix, jx, kx, info;
+ static doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj, nounit;
+
+
+/*
+ Purpose
+ =======
+
+ ZTRMV performs one of the matrix-vector operations
+
+ x := A*x, or x := A'*x, or x := conjg( A' )*x,
+
+ where x is an n element vector and A is an n by n unit, or non-unit,
+ upper or lower triangular matrix.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the operation to be performed as
+ follows:
+
+ TRANS = 'N' or 'n' x := A*x.
+
+ TRANS = 'T' or 't' x := A'*x.
+
+ TRANS = 'C' or 'c' x := conjg( A' )*x.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit
+ triangular as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - COMPLEX*16 array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element vector x. On exit, X is overwritten with the
+ transformed vector x.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("ZTRMV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/*
+ Set up the start point in X if the increment is not unity. This
+ will be ( N - 1 )*INCX too small for descending loops.
+*/
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := A*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if ((x[i__2].r != 0.) || (x[i__2].i != 0.)) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i +
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L10: */
+ }
+ if (nounit) {
+ i__2 = j;
+ i__3 = j;
+ i__4 = j + j * a_dim1;
+ z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
+ i__4].i, z__1.i = x[i__3].r * a[i__4].i +
+ x[i__3].i * a[i__4].r;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if ((x[i__2].r != 0.) || (x[i__2].i != 0.)) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = kx;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r + z__2.r, z__1.i = x[i__4].i +
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+ ix += *incx;
+/* L30: */
+ }
+ if (nounit) {
+ i__2 = jx;
+ i__3 = jx;
+ i__4 = j + j * a_dim1;
+ z__1.r = x[i__3].r * a[i__4].r - x[i__3].i * a[
+ i__4].i, z__1.i = x[i__3].r * a[i__4].i +
+ x[i__3].i * a[i__4].r;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ }
+ jx += *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if ((x[i__1].r != 0.) || (x[i__1].i != 0.)) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__;
+ i__3 = i__;
+ i__4 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ z__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
+ z__2.i;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+/* L50: */
+ }
+ if (nounit) {
+ i__1 = j;
+ i__2 = j;
+ i__3 = j + j * a_dim1;
+ z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+ i__3].i, z__1.i = x[i__2].r * a[i__3].i +
+ x[i__2].i * a[i__3].r;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ }
+/* L60: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if ((x[i__1].r != 0.) || (x[i__1].i != 0.)) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = kx;
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = ix;
+ i__3 = ix;
+ i__4 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__4].r - temp.i * a[i__4].i,
+ z__2.i = temp.r * a[i__4].i + temp.i * a[
+ i__4].r;
+ z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i +
+ z__2.i;
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ ix -= *incx;
+/* L70: */
+ }
+ if (nounit) {
+ i__1 = jx;
+ i__2 = jx;
+ i__3 = j + j * a_dim1;
+ z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[
+ i__3].i, z__1.i = x[i__2].r * a[i__3].i +
+ x[i__2].i * a[i__3].r;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ }
+ jx -= *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := A'*x or x := conjg( A' )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ z__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = i__ + j * a_dim1;
+ i__2 = i__;
+ z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, z__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__1 = i__;
+ z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
+ z__2.i = z__3.r * x[i__1].i + z__3.i * x[
+ i__1].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+/* L110: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__1].r - temp.i * a[i__1].i,
+ z__1.i = temp.r * a[i__1].i + temp.i * a[
+ i__1].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ i__1 = i__ + j * a_dim1;
+ i__2 = ix;
+ z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[
+ i__2].i, z__2.i = a[i__1].r * x[i__2].i +
+ a[i__1].i * x[i__2].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L120: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__1 = ix;
+ z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i,
+ z__2.i = z__3.r * x[i__1].i + z__3.i * x[
+ i__1].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L130: */
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L150: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L160: */
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+/* L170: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ if (noconj) {
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ z__1.r = temp.r * a[i__2].r - temp.i * a[i__2].i,
+ z__1.i = temp.r * a[i__2].i + temp.i * a[
+ i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L180: */
+ }
+ } else {
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z__1.r = temp.r * z__2.r - temp.i * z__2.i,
+ z__1.i = temp.r * z__2.i + temp.i *
+ z__2.r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r + z__2.r, z__1.i = temp.i +
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L190: */
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRMV . */
+
+} /* ztrmv_ */
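+
+/*
+   Illustrative usage sketch (a hypothetical helper, not part of the f2c
+   translation): x := A*x through ztrmv_ for a 2 x 2 non-unit upper
+   triangular A = [[1, i], [0, 1]] and unit stride.
+*/
+static void example_ztrmv_usage(void)
+{
+    integer n = 2, lda = 2, incx = 1;
+    doublecomplex a[4] = {{1., 0.}, {0., 0.}, {0., 1.}, {1., 0.}};
+    doublecomplex x[2] = {{1., 0.}, {1., 0.}};
+
+    ztrmv_("U", "N", "N", &n, a, &lda, x, &incx);
+    /* x is overwritten with (1+i, 1). */
+}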
+
+/* Subroutine */ int ztrsm_(char *side, char *uplo, char *transa, char *diag,
+ integer *m, integer *n, doublecomplex *alpha, doublecomplex *a,
+ integer *lda, doublecomplex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5,
+ i__6, i__7;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, k, info;
+ static doublecomplex temp;
+ static logical lside;
+ extern logical lsame_(char *, char *);
+ static integer nrowa;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj, nounit;
+
+
+/*
+ Purpose
+ =======
+
+ ZTRSM solves one of the matrix equations
+
+ op( A )*X = alpha*B, or X*op( A ) = alpha*B,
+
+ where alpha is a scalar, X and B are m by n matrices, A is a unit, or
+ non-unit, upper or lower triangular matrix and op( A ) is one of
+
+ op( A ) = A or op( A ) = A' or op( A ) = conjg( A' ).
+
+ The matrix X is overwritten on B.
+
+ Parameters
+ ==========
+
+ SIDE - CHARACTER*1.
+ On entry, SIDE specifies whether op( A ) appears on the left
+ or right of X as follows:
+
+ SIDE = 'L' or 'l' op( A )*X = alpha*B.
+
+ SIDE = 'R' or 'r' X*op( A ) = alpha*B.
+
+ Unchanged on exit.
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix A is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANSA - CHARACTER*1.
+ On entry, TRANSA specifies the form of op( A ) to be used in
+ the matrix multiplication as follows:
+
+ TRANSA = 'N' or 'n' op( A ) = A.
+
+ TRANSA = 'T' or 't' op( A ) = A'.
+
+ TRANSA = 'C' or 'c' op( A ) = conjg( A' ).
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit triangular
+ as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ M - INTEGER.
+ On entry, M specifies the number of rows of B. M must be at
+ least zero.
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the number of columns of B. N must be
+ at least zero.
+ Unchanged on exit.
+
+ ALPHA - COMPLEX*16 .
+ On entry, ALPHA specifies the scalar alpha. When alpha is
+ zero then A is not referenced and B need not be set before
+ entry.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, k ), where k is m
+ when SIDE = 'L' or 'l' and is n when SIDE = 'R' or 'r'.
+ Before entry with UPLO = 'U' or 'u', the leading k by k
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading k by k
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. When SIDE = 'L' or 'l' then
+ LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
+ then LDA must be at least max( 1, n ).
+ Unchanged on exit.
+
+ B - COMPLEX*16 array of DIMENSION ( LDB, n ).
+ Before entry, the leading m by n part of the array B must
+ contain the right-hand side matrix B, and on exit is
+ overwritten by the solution matrix X.
+
+ LDB - INTEGER.
+ On entry, LDB specifies the first dimension of B as declared
+ in the calling (sub) program. LDB must be at least
+ max( 1, m ).
+ Unchanged on exit.
+
+
+ Level 3 Blas routine.
+
+ -- Written on 8-February-1989.
+ Jack Dongarra, Argonne National Laboratory.
+ Iain Duff, AERE Harwell.
+ Jeremy Du Croz, Numerical Algorithms Group Ltd.
+ Sven Hammarling, Numerical Algorithms Group Ltd.
+
+
+ Test the input parameters.
+*/
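+
+/*
+   Illustrative call sketch (hypothetical values, kept as a comment since
+   it sits inside the routine body): solving the upper triangular system
+   A*X = alpha*B in place with column-major storage,
+
+       integer m = 2, n = 1, lda = 2, ldb = 2;
+       doublecomplex alpha = {1., 0.};
+       doublecomplex a[4] = {{2., 0.}, {0., 0.}, {1., 0.}, {1., 0.}};
+       doublecomplex b[2] = {{3., 0.}, {1., 0.}};
+       ztrsm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);
+
+   after which b holds X = inv( A )*B = (1, 1) for A = [[2, 1], [0, 1]].
+*/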
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ lside = lsame_(side, "L");
+ if (lside) {
+ nrowa = *m;
+ } else {
+ nrowa = *n;
+ }
+ noconj = lsame_(transa, "T");
+ nounit = lsame_(diag, "N");
+ upper = lsame_(uplo, "U");
+
+ info = 0;
+ if (! lside && ! lsame_(side, "R")) {
+ info = 1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ info = 2;
+ } else if (! lsame_(transa, "N") && ! lsame_(transa,
+ "T") && ! lsame_(transa, "C")) {
+ info = 3;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 4;
+ } else if (*m < 0) {
+ info = 5;
+ } else if (*n < 0) {
+ info = 6;
+ } else if (*lda < max(1,nrowa)) {
+ info = 9;
+ } else if (*ldb < max(1,*m)) {
+ info = 11;
+ }
+ if (info != 0) {
+ xerbla_("ZTRSM ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* And when alpha.eq.zero. */
+
+ if (alpha->r == 0. && alpha->i == 0.) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = 0., b[i__3].i = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/* Start the operations. */
+
+ if (lside) {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*inv( A )*B. */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((alpha->r != 1.) || (alpha->i != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L30: */
+ }
+ }
+ for (k = *m; k >= 1; --k) {
+ i__2 = k + j * b_dim1;
+ if ((b[i__2].r != 0.) || (b[i__2].i != 0.)) {
+ if (nounit) {
+ i__2 = k + j * b_dim1;
+ z_div(&z__1, &b[k + j * b_dim1], &a[k + k *
+ a_dim1]);
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+ }
+ i__2 = k - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = k + j * b_dim1;
+ i__6 = i__ + k * a_dim1;
+ z__2.r = b[i__5].r * a[i__6].r - b[i__5].i *
+ a[i__6].i, z__2.i = b[i__5].r * a[
+ i__6].i + b[i__5].i * a[i__6].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
+ .i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L40: */
+ }
+ }
+/* L50: */
+ }
+/* L60: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((alpha->r != 1.) || (alpha->i != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L70: */
+ }
+ }
+ i__2 = *m;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * b_dim1;
+ if ((b[i__3].r != 0.) || (b[i__3].i != 0.)) {
+ if (nounit) {
+ i__3 = k + j * b_dim1;
+ z_div(&z__1, &b[k + j * b_dim1], &a[k + k *
+ a_dim1]);
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = k + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = k + j * b_dim1;
+ i__7 = i__ + k * a_dim1;
+ z__2.r = b[i__6].r * a[i__7].r - b[i__6].i *
+ a[i__7].i, z__2.i = b[i__6].r * a[
+ i__7].i + b[i__6].i * a[i__7].r;
+ z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
+ .i - z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L80: */
+ }
+ }
+/* L90: */
+ }
+/* L100: */
+ }
+ }
+ } else {
+
+/*
+ Form B := alpha*inv( A' )*B
+ or B := alpha*inv( conjg( A' ) )*B.
+*/
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3].i,
+ z__1.i = alpha->r * b[i__3].i + alpha->i * b[
+ i__3].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ if (noconj) {
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ i__4 = k + i__ * a_dim1;
+ i__5 = k + j * b_dim1;
+ z__2.r = a[i__4].r * b[i__5].r - a[i__4].i *
+ b[i__5].i, z__2.i = a[i__4].r * b[
+ i__5].i + a[i__4].i * b[i__5].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L110: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[i__ + i__ * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__3 = i__ - 1;
+ for (k = 1; k <= i__3; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__4 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__4].r - z__3.i * b[i__4]
+ .i, z__2.i = z__3.r * b[i__4].i +
+ z__3.i * b[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L120: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__3 = i__ + j * b_dim1;
+ b[i__3].r = temp.r, b[i__3].i = temp.i;
+/* L130: */
+ }
+/* L140: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ for (i__ = *m; i__ >= 1; --i__) {
+ i__2 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__2].r - alpha->i * b[i__2].i,
+ z__1.i = alpha->r * b[i__2].i + alpha->i * b[
+ i__2].r;
+ temp.r = z__1.r, temp.i = z__1.i;
+ if (noconj) {
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ i__3 = k + i__ * a_dim1;
+ i__4 = k + j * b_dim1;
+ z__2.r = a[i__3].r * b[i__4].r - a[i__3].i *
+ b[i__4].i, z__2.i = a[i__3].r * b[
+ i__4].i + a[i__3].i * b[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L150: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[i__ + i__ * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__2 = *m;
+ for (k = i__ + 1; k <= i__2; ++k) {
+ d_cnjg(&z__3, &a[k + i__ * a_dim1]);
+ i__3 = k + j * b_dim1;
+ z__2.r = z__3.r * b[i__3].r - z__3.i * b[i__3]
+ .i, z__2.i = z__3.r * b[i__3].i +
+ z__3.i * b[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L160: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[i__ + i__ * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = i__ + j * b_dim1;
+ b[i__2].r = temp.r, b[i__2].i = temp.i;
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+ }
+ } else {
+ if (lsame_(transa, "N")) {
+
+/* Form B := alpha*B*inv( A ). */
+
+ if (upper) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if ((alpha->r != 1.) || (alpha->i != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L190: */
+ }
+ }
+ i__2 = j - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + j * a_dim1;
+ if ((a[i__3].r != 0.) || (a[i__3].i != 0.)) {
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = k + j * a_dim1;
+ i__7 = i__ + k * b_dim1;
+ z__2.r = a[i__6].r * b[i__7].r - a[i__6].i *
+ b[i__7].i, z__2.i = a[i__6].r * b[
+ i__7].i + a[i__6].i * b[i__7].r;
+ z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
+ .i - z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L200: */
+ }
+ }
+/* L210: */
+ }
+ if (nounit) {
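+                    /* c_b1077 is presumably the file-scope complex
+                       constant one, defined earlier in this file (an
+                       assumption based on its use as the numerator
+                       here), so temp := 1/a(j,j). */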
+ z_div(&z__1, &c_b1077, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L220: */
+ }
+ }
+/* L230: */
+ }
+ } else {
+ for (j = *n; j >= 1; --j) {
+ if ((alpha->r != 1.) || (alpha->i != 0.)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, z__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L240: */
+ }
+ }
+ i__1 = *n;
+ for (k = j + 1; k <= i__1; ++k) {
+ i__2 = k + j * a_dim1;
+ if ((a[i__2].r != 0.) || (a[i__2].i != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = k + j * a_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = a[i__5].r * b[i__6].r - a[i__5].i *
+ b[i__6].i, z__2.i = a[i__5].r * b[
+ i__6].i + a[i__5].i * b[i__6].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
+ .i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L250: */
+ }
+ }
+/* L260: */
+ }
+ if (nounit) {
+ z_div(&z__1, &c_b1077, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * b_dim1;
+ i__3 = i__ + j * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L270: */
+ }
+ }
+/* L280: */
+ }
+ }
+ } else {
+
+/*
+ Form B := alpha*B*inv( A' )
+ or B := alpha*B*inv( conjg( A' ) ).
+*/
+
+ if (upper) {
+ for (k = *n; k >= 1; --k) {
+ if (nounit) {
+ if (noconj) {
+ z_div(&z__1, &c_b1077, &a[k + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z_div(&z__1, &c_b1077, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__3].r - temp.i * b[i__3].i,
+ z__1.i = temp.r * b[i__3].i + temp.i * b[
+ i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L290: */
+ }
+ }
+ i__1 = k - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + k * a_dim1;
+ if ((a[i__2].r != 0.) || (a[i__2].i != 0.)) {
+ if (noconj) {
+ i__2 = j + k * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ } else {
+ d_cnjg(&z__1, &a[j + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__5].r - temp.i * b[i__5]
+ .i, z__2.i = temp.r * b[i__5].i +
+ temp.i * b[i__5].r;
+ z__1.r = b[i__4].r - z__2.r, z__1.i = b[i__4]
+ .i - z__2.i;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L300: */
+ }
+ }
+/* L310: */
+ }
+ if ((alpha->r != 1.) || (alpha->i != 0.)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + k * b_dim1;
+ i__3 = i__ + k * b_dim1;
+ z__1.r = alpha->r * b[i__3].r - alpha->i * b[i__3]
+ .i, z__1.i = alpha->r * b[i__3].i +
+ alpha->i * b[i__3].r;
+ b[i__2].r = z__1.r, b[i__2].i = z__1.i;
+/* L320: */
+ }
+ }
+/* L330: */
+ }
+ } else {
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+ if (nounit) {
+ if (noconj) {
+ z_div(&z__1, &c_b1077, &a[k + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ } else {
+ d_cnjg(&z__2, &a[k + k * a_dim1]);
+ z_div(&z__1, &c_b1077, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ z__1.r = temp.r * b[i__4].r - temp.i * b[i__4].i,
+ z__1.i = temp.r * b[i__4].i + temp.i * b[
+ i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L340: */
+ }
+ }
+ i__2 = *n;
+ for (j = k + 1; j <= i__2; ++j) {
+ i__3 = j + k * a_dim1;
+ if ((a[i__3].r != 0.) || (a[i__3].i != 0.)) {
+ if (noconj) {
+ i__3 = j + k * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ } else {
+ d_cnjg(&z__1, &a[j + k * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ i__3 = *m;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * b_dim1;
+ i__5 = i__ + j * b_dim1;
+ i__6 = i__ + k * b_dim1;
+ z__2.r = temp.r * b[i__6].r - temp.i * b[i__6]
+ .i, z__2.i = temp.r * b[i__6].i +
+ temp.i * b[i__6].r;
+ z__1.r = b[i__5].r - z__2.r, z__1.i = b[i__5]
+ .i - z__2.i;
+ b[i__4].r = z__1.r, b[i__4].i = z__1.i;
+/* L350: */
+ }
+ }
+/* L360: */
+ }
+ if ((alpha->r != 1.) || (alpha->i != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + k * b_dim1;
+ i__4 = i__ + k * b_dim1;
+ z__1.r = alpha->r * b[i__4].r - alpha->i * b[i__4]
+ .i, z__1.i = alpha->r * b[i__4].i +
+ alpha->i * b[i__4].r;
+ b[i__3].r = z__1.r, b[i__3].i = z__1.i;
+/* L370: */
+ }
+ }
+/* L380: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRSM . */
+
+} /* ztrsm_ */
+
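+/*
+  Editorial usage sketch, not f2c-generated output: a hypothetical
+  driver for ztrsm_ above.  It solves A*X = alpha*B for a single
+  right-hand side with A upper triangular and non-unit diagonal; the
+  f2c types (integer, doublecomplex) come from "f2c.h", which this
+  file already includes.
+*/
+#if 0   /* illustration only -- not compiled into lapack_lite */
+static void example_ztrsm(void)
+{
+    /* Column-major 2x2 upper-triangular A = [ 2 1 ; 0 4 ] */
+    doublecomplex a[4] = {{2., 0.}, {0., 0.}, {1., 0.}, {4., 0.}};
+    doublecomplex b[2] = {{3., 0.}, {8., 0.}};  /* one m-by-1 right-hand side */
+    doublecomplex alpha = {1., 0.};
+    integer m = 2, n = 1, lda = 2, ldb = 2;
+
+    /* Left, Upper, No transpose, Non-unit: B := alpha*inv( A )*B */
+    ztrsm_("L", "U", "N", "N", &m, &n, &alpha, a, &lda, b, &ldb);
+    /* b now holds X = { 0.5, 2.0 } */
+}
+#endif
+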
+/* Subroutine */ int ztrsv_(char *uplo, char *trans, char *diag, integer *n,
+ doublecomplex *a, integer *lda, doublecomplex *x, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ doublecomplex z__1, z__2, z__3;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg(
+ doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer i__, j, ix, jx, kx, info;
+ static doublecomplex temp;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconj, nounit;
+
+
+/*
+ Purpose
+ =======
+
+ ZTRSV solves one of the systems of equations
+
+ A*x = b, or A'*x = b, or conjg( A' )*x = b,
+
+ where b and x are n element vectors and A is an n by n unit, or
+ non-unit, upper or lower triangular matrix.
+
+ No test for singularity or near-singularity is included in this
+ routine. Such tests must be performed before calling this routine.
+
+ Parameters
+ ==========
+
+ UPLO - CHARACTER*1.
+ On entry, UPLO specifies whether the matrix is an upper or
+ lower triangular matrix as follows:
+
+ UPLO = 'U' or 'u' A is an upper triangular matrix.
+
+ UPLO = 'L' or 'l' A is a lower triangular matrix.
+
+ Unchanged on exit.
+
+ TRANS - CHARACTER*1.
+ On entry, TRANS specifies the equations to be solved as
+ follows:
+
+ TRANS = 'N' or 'n' A*x = b.
+
+ TRANS = 'T' or 't' A'*x = b.
+
+ TRANS = 'C' or 'c' conjg( A' )*x = b.
+
+ Unchanged on exit.
+
+ DIAG - CHARACTER*1.
+ On entry, DIAG specifies whether or not A is unit
+ triangular as follows:
+
+ DIAG = 'U' or 'u' A is assumed to be unit triangular.
+
+ DIAG = 'N' or 'n' A is not assumed to be unit
+ triangular.
+
+ Unchanged on exit.
+
+ N - INTEGER.
+ On entry, N specifies the order of the matrix A.
+ N must be at least zero.
+ Unchanged on exit.
+
+ A - COMPLEX*16 array of DIMENSION ( LDA, n ).
+ Before entry with UPLO = 'U' or 'u', the leading n by n
+ upper triangular part of the array A must contain the upper
+ triangular matrix and the strictly lower triangular part of
+ A is not referenced.
+ Before entry with UPLO = 'L' or 'l', the leading n by n
+ lower triangular part of the array A must contain the lower
+ triangular matrix and the strictly upper triangular part of
+ A is not referenced.
+ Note that when DIAG = 'U' or 'u', the diagonal elements of
+ A are not referenced either, but are assumed to be unity.
+ Unchanged on exit.
+
+ LDA - INTEGER.
+ On entry, LDA specifies the first dimension of A as declared
+ in the calling (sub) program. LDA must be at least
+ max( 1, n ).
+ Unchanged on exit.
+
+ X - COMPLEX*16 array of dimension at least
+ ( 1 + ( n - 1 )*abs( INCX ) ).
+ Before entry, the incremented array X must contain the n
+ element right-hand side vector b. On exit, X is overwritten
+ with the solution vector x.
+
+ INCX - INTEGER.
+ On entry, INCX specifies the increment for the elements of
+ X. INCX must not be zero.
+ Unchanged on exit.
+
+
+ Level 2 Blas routine.
+
+ -- Written on 22-October-1986.
+ Jack Dongarra, Argonne National Lab.
+ Jeremy Du Croz, Nag Central Office.
+ Sven Hammarling, Nag Central Office.
+ Richard Hanson, Sandia National Labs.
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+
+ /* Function Body */
+ info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ info = 1;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T") && ! lsame_(trans, "C")) {
+ info = 2;
+ } else if (! lsame_(diag, "U") && ! lsame_(diag,
+ "N")) {
+ info = 3;
+ } else if (*n < 0) {
+ info = 4;
+ } else if (*lda < max(1,*n)) {
+ info = 6;
+ } else if (*incx == 0) {
+ info = 8;
+ }
+ if (info != 0) {
+ xerbla_("ZTRSV ", &info);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ noconj = lsame_(trans, "T");
+ nounit = lsame_(diag, "N");
+
+/*
+ Set up the start point in X if the increment is not unity. This
+ will be ( N - 1 )*INCX too small for descending loops.
+*/
+
+ if (*incx <= 0) {
+ kx = 1 - (*n - 1) * *incx;
+ } else if (*incx != 1) {
+ kx = 1;
+ }
+
+/*
+ Start the operations. In this version the elements of A are
+ accessed sequentially with one pass through A.
+*/
+
+ if (lsame_(trans, "N")) {
+
+/* Form x := inv( A )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ if ((x[i__1].r != 0.) || (x[i__1].i != 0.)) {
+ if (nounit) {
+ i__1 = j;
+ z_div(&z__1, &x[j], &a[j + j * a_dim1]);
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ i__1 = i__;
+ i__2 = i__;
+ i__3 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ z__2.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ z__1.r = x[i__2].r - z__2.r, z__1.i = x[i__2].i -
+ z__2.i;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else {
+ jx = kx + (*n - 1) * *incx;
+ for (j = *n; j >= 1; --j) {
+ i__1 = jx;
+ if ((x[i__1].r != 0.) || (x[i__1].i != 0.)) {
+ if (nounit) {
+ i__1 = jx;
+ z_div(&z__1, &x[jx], &a[j + j * a_dim1]);
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+ }
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ ix = jx;
+ for (i__ = j - 1; i__ >= 1; --i__) {
+ ix -= *incx;
+ i__1 = ix;
+ i__2 = ix;
+ i__3 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__3].r - temp.i * a[i__3].i,
+ z__2.i = temp.r * a[i__3].i + temp.i * a[
+ i__3].r;
+ z__1.r = x[i__2].r - z__2.r, z__1.i = x[i__2].i -
+ z__2.i;
+ x[i__1].r = z__1.r, x[i__1].i = z__1.i;
+/* L30: */
+ }
+ }
+ jx -= *incx;
+/* L40: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ if ((x[i__2].r != 0.) || (x[i__2].i != 0.)) {
+ if (nounit) {
+ i__2 = j;
+ z_div(&z__1, &x[j], &a[j + j * a_dim1]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__;
+ i__4 = i__;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = jx;
+ if ((x[i__2].r != 0.) || (x[i__2].i != 0.)) {
+ if (nounit) {
+ i__2 = jx;
+ z_div(&z__1, &x[jx], &a[j + j * a_dim1]);
+ x[i__2].r = z__1.r, x[i__2].i = z__1.i;
+ }
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ ix = jx;
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ ix += *incx;
+ i__3 = ix;
+ i__4 = ix;
+ i__5 = i__ + j * a_dim1;
+ z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i,
+ z__2.i = temp.r * a[i__5].i + temp.i * a[
+ i__5].r;
+ z__1.r = x[i__4].r - z__2.r, z__1.i = x[i__4].i -
+ z__2.i;
+ x[i__3].r = z__1.r, x[i__3].i = z__1.i;
+/* L70: */
+ }
+ }
+ jx += *incx;
+/* L80: */
+ }
+ }
+ }
+ } else {
+
+/* Form x := inv( A' )*x or x := inv( conjg( A' ) )*x. */
+
+ if (lsame_(uplo, "U")) {
+ if (*incx == 1) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L90: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = i__;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L100: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = j;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+/* L110: */
+ }
+ } else {
+ jx = kx;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ ix = kx;
+ i__2 = jx;
+ temp.r = x[i__2].r, temp.i = x[i__2].i;
+ if (noconj) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = ix;
+ z__2.r = a[i__3].r * x[i__4].r - a[i__3].i * x[
+ i__4].i, z__2.i = a[i__3].r * x[i__4].i +
+ a[i__3].i * x[i__4].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L120: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__3 = ix;
+ z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i,
+ z__2.i = z__3.r * x[i__3].i + z__3.i * x[
+ i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix += *incx;
+/* L130: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__2 = jx;
+ x[i__2].r = temp.r, x[i__2].i = temp.i;
+ jx += *incx;
+/* L140: */
+ }
+ }
+ } else {
+ if (*incx == 1) {
+ for (j = *n; j >= 1; --j) {
+ i__1 = j;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__;
+ z__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+ i__3].i, z__2.i = a[i__2].r * x[i__3].i +
+ a[i__2].i * x[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L150: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__2 = i__;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[
+ i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+/* L160: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__1 = j;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+/* L170: */
+ }
+ } else {
+ kx += (*n - 1) * *incx;
+ jx = kx;
+ for (j = *n; j >= 1; --j) {
+ ix = kx;
+ i__1 = jx;
+ temp.r = x[i__1].r, temp.i = x[i__1].i;
+ if (noconj) {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = ix;
+ z__2.r = a[i__2].r * x[i__3].r - a[i__2].i * x[
+ i__3].i, z__2.i = a[i__2].r * x[i__3].i +
+ a[i__2].i * x[i__3].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L180: */
+ }
+ if (nounit) {
+ z_div(&z__1, &temp, &a[j + j * a_dim1]);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ } else {
+ i__1 = j + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ d_cnjg(&z__3, &a[i__ + j * a_dim1]);
+ i__2 = ix;
+ z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
+ z__2.i = z__3.r * x[i__2].i + z__3.i * x[
+ i__2].r;
+ z__1.r = temp.r - z__2.r, z__1.i = temp.i -
+ z__2.i;
+ temp.r = z__1.r, temp.i = z__1.i;
+ ix -= *incx;
+/* L190: */
+ }
+ if (nounit) {
+ d_cnjg(&z__2, &a[j + j * a_dim1]);
+ z_div(&z__1, &temp, &z__2);
+ temp.r = z__1.r, temp.i = z__1.i;
+ }
+ }
+ i__1 = jx;
+ x[i__1].r = temp.r, x[i__1].i = temp.i;
+ jx -= *incx;
+/* L200: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRSV . */
+
+} /* ztrsv_ */
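+
+/*
+  Editorial usage sketch, not f2c-generated output: a hypothetical
+  caller for ztrsv_ above, solving the same kind of upper-triangular
+  system with the vector form of the routine.  On exit the right-hand
+  side in x is overwritten by the solution, as documented above.
+*/
+#if 0   /* illustration only -- not compiled into lapack_lite */
+static void example_ztrsv(void)
+{
+    /* Column-major 2x2 upper-triangular A = [ 2 1 ; 0 4 ] */
+    doublecomplex a[4] = {{2., 0.}, {0., 0.}, {1., 0.}, {4., 0.}};
+    doublecomplex x[2] = {{3., 0.}, {8., 0.}};  /* right-hand side b */
+    integer n = 2, lda = 2, incx = 1;
+
+    /* Upper, No transpose, Non-unit: x := inv( A )*x */
+    ztrsv_("U", "N", "N", &n, a, &lda, x, &incx);
+    /* x now holds { 0.5, 2.0 } */
+}
+#endif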
+
diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py
index 1b26c23e4..4a517d531 100644
--- a/numpy/linalg/lapack_lite/clapack_scrub.py
+++ b/numpy/linalg/lapack_lite/clapack_scrub.py
@@ -1,7 +1,8 @@
-#!/usr/bin/env python2.4
+#!/usr/bin/env python
+from __future__ import division, absolute_import, print_function
import sys, os
-from cStringIO import StringIO
+from io import StringIO
import re
from Plex import *
@@ -31,7 +32,7 @@ def runScanner(data, scanner_class, lexicon=None):
scanner = scanner_class(lexicon, info)
else:
scanner = scanner_class(info)
- while 1:
+ while True:
value, text = scanner.read()
if value is None:
break
@@ -251,7 +252,7 @@ def scrubSource(source, nsteps=None, verbose=False):
for msg, step in steps:
if verbose:
- print msg
+ print(msg)
source = step(source)
return source
diff --git a/numpy/linalg/dlamch.c b/numpy/linalg/lapack_lite/dlamch.c
index bf1dfdb05..bf1dfdb05 100644
--- a/numpy/linalg/dlamch.c
+++ b/numpy/linalg/lapack_lite/dlamch.c
diff --git a/numpy/linalg/lapack_lite/dlapack_lite.c b/numpy/linalg/lapack_lite/dlapack_lite.c
new file mode 100644
index 000000000..6a36fe6a8
--- /dev/null
+++ b/numpy/linalg/lapack_lite/dlapack_lite.c
@@ -0,0 +1,100833 @@
+/*
+NOTE: This is generated code. Look in Misc/lapack_lite for information on
+ remaking this file.
+*/
+#include "f2c.h"
+
+#ifdef HAVE_CONFIG
+#include "config.h"
+#else
+extern doublereal dlamch_(char *);
+#define EPSILON dlamch_("Epsilon")
+#define SAFEMINIMUM dlamch_("Safe minimum")
+#define PRECISION dlamch_("Precision")
+#define BASE dlamch_("Base")
+#endif
+
+extern doublereal dlapy2_(doublereal *x, doublereal *y);
+
+
+
+/* Table of constant values */
+
+static integer c__1 = 1;
+static complex c_b55 = {0.f,0.f};
+static complex c_b56 = {1.f,0.f};
+static integer c_n1 = -1;
+static integer c__3 = 3;
+static integer c__2 = 2;
+static integer c__0 = 0;
+static integer c__8 = 8;
+static integer c__4 = 4;
+static integer c__65 = 65;
+static integer c__6 = 6;
+static integer c__9 = 9;
+static real c_b320 = 0.f;
+static real c_b1011 = 1.f;
+static integer c__15 = 15;
+static logical c_false = FALSE_;
+static real c_b1290 = -1.f;
+static real c_b2206 = .5f;
+static doublereal c_b2865 = 1.;
+static doublereal c_b2879 = 0.;
+static doublereal c_b2944 = -.125;
+static doublereal c_b3001 = -1.;
+static integer c__10 = 10;
+static integer c__11 = 11;
+static doublereal c_b5654 = 2.;
+static logical c_true = TRUE_;
+static real c_b9647 = 2.f;
+
+/* Subroutine */ int cgebak_(char *job, char *side, integer *n, integer *ilo,
+ integer *ihi, real *scale, integer *m, complex *v, integer *ldv,
+ integer *info)
+{
+ /* System generated locals */
+ integer v_dim1, v_offset, i__1;
+
+ /* Local variables */
+ static integer i__, k;
+ static real s;
+ static integer ii;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ static logical leftv;
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ static logical rightv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CGEBAK forms the right or left eigenvectors of a complex general
+ matrix by backward transformation on the computed eigenvectors of the
+ balanced matrix output by CGEBAL.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ Specifies the type of backward transformation required:
+ = 'N', do nothing, return immediately;
+ = 'P', do backward transformation for permutation only;
+ = 'S', do backward transformation for scaling only;
+ = 'B', do backward transformations for both permutation and
+ scaling.
+ JOB must be the same as the argument JOB supplied to CGEBAL.
+
+ SIDE (input) CHARACTER*1
+ = 'R': V contains right eigenvectors;
+ = 'L': V contains left eigenvectors.
+
+ N (input) INTEGER
+ The number of rows of the matrix V. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ The integers ILO and IHI determined by CGEBAL.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ SCALE (input) REAL array, dimension (N)
+ Details of the permutation and scaling factors, as returned
+ by CGEBAL.
+
+ M (input) INTEGER
+ The number of columns of the matrix V. M >= 0.
+
+ V (input/output) COMPLEX array, dimension (LDV,M)
+ On entry, the matrix of right or left eigenvectors to be
+ transformed, as returned by CHSEIN or CTREVC.
+ On exit, V is overwritten by the transformed eigenvectors.
+
+ LDV (input) INTEGER
+ The leading dimension of the array V. LDV >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ =====================================================================
+
+
+ Decode and Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --scale;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+
+ /* Function Body */
+ rightv = lsame_(side, "R");
+ leftv = lsame_(side, "L");
+
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (! rightv && ! leftv) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -4;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -5;
+ } else if (*m < 0) {
+ *info = -7;
+ } else if (*ldv < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEBAK", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*m == 0) {
+ return 0;
+ }
+ if (lsame_(job, "N")) {
+ return 0;
+ }
+
+ if (*ilo == *ihi) {
+ goto L30;
+ }
+
+/* Backward balance */
+
+ if ((lsame_(job, "S")) || (lsame_(job, "B"))) {
+
+ if (rightv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = scale[i__];
+ csscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L10: */
+ }
+ }
+
+ if (leftv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = 1.f / scale[i__];
+ csscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L20: */
+ }
+ }
+
+ }
+
+/*
+ Backward permutation
+
+ For I = ILO-1 step -1 until 1,
+ IHI+1 step 1 until N do --
+*/
+
+L30:
+ if ((lsame_(job, "P")) || (lsame_(job, "B"))) {
+ if (rightv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L40;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = scale[i__];
+ if (k == i__) {
+ goto L40;
+ }
+ cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+ ;
+ }
+ }
+
+ if (leftv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L50;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = scale[i__];
+ if (k == i__) {
+ goto L50;
+ }
+ cswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L50:
+ ;
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CGEBAK */
+
+} /* cgebak_ */
+
+/* Subroutine */ int cgebal_(char *job, integer *n, complex *a, integer *lda,
+ integer *ilo, integer *ihi, real *scale, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double r_imag(complex *), c_abs(complex *);
+
+ /* Local variables */
+ static real c__, f, g;
+ static integer i__, j, k, l, m;
+ static real r__, s, ca, ra;
+ static integer ica, ira, iexc;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ static real sfmin1, sfmin2, sfmax1, sfmax2;
+ extern integer icamax_(integer *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ static logical noconv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CGEBAL balances a general complex matrix A. This involves, first,
+ permuting A by a similarity transformation to isolate eigenvalues
+ in the first 1 to ILO-1 and last IHI+1 to N elements on the
+ diagonal; and second, applying a diagonal similarity transformation
+ to rows and columns ILO to IHI to make the rows and columns as
+ close in norm as possible. Both steps are optional.
+
+ Balancing may reduce the 1-norm of the matrix, and improve the
+ accuracy of the computed eigenvalues and/or eigenvectors.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ Specifies the operations to be performed on A:
+ = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0
+ for i = 1,...,N;
+ = 'P': permute only;
+ = 'S': scale only;
+ = 'B': both permute and scale.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the input matrix A.
+ On exit, A is overwritten by the balanced matrix.
+ If JOB = 'N', A is not referenced.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ ILO (output) INTEGER
+ IHI (output) INTEGER
+ ILO and IHI are set to integers such that on exit
+ A(i,j) = 0 if i > j and j = 1,...,ILO-1 or I = IHI+1,...,N.
+ If JOB = 'N' or 'S', ILO = 1 and IHI = N.
+
+ SCALE (output) REAL array, dimension (N)
+ Details of the permutations and scaling factors applied to
+ A. If P(j) is the index of the row and column interchanged
+ with row and column j and D(j) is the scaling factor
+ applied to row and column j, then
+ SCALE(j) = P(j) for j = 1,...,ILO-1
+ = D(j) for j = ILO,...,IHI
+ = P(j) for j = IHI+1,...,N.
+ The order in which the interchanges are made is N to IHI+1,
+ then 1 to ILO-1.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The permutations consist of row and column interchanges which put
+ the matrix in the form
+
+ ( T1 X Y )
+ P A P = ( 0 B Z )
+ ( 0 0 T2 )
+
+ where T1 and T2 are upper triangular matrices whose eigenvalues lie
+ along the diagonal. The column indices ILO and IHI mark the starting
+ and ending columns of the submatrix B. Balancing consists of applying
+ a diagonal similarity transformation inv(D) * B * D to make the
+ 1-norms of each row of B and its corresponding column nearly equal.
+ The output matrix is
+
+ ( T1 X*D Y )
+ ( 0 inv(D)*B*D inv(D)*Z ).
+ ( 0 0 T2 )
+
+ Information about the permutations P and the diagonal matrix D is
+ returned in the vector SCALE.
+
+ This subroutine is based on the EISPACK routine CBAL.
+
+ Modified by Tzu-Yi Chen, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --scale;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEBAL", &i__1);
+ return 0;
+ }
+
+ k = 1;
+ l = *n;
+
+ if (*n == 0) {
+ goto L210;
+ }
+
+ if (lsame_(job, "N")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ scale[i__] = 1.f;
+/* L10: */
+ }
+ goto L210;
+ }
+
+ if (lsame_(job, "S")) {
+ goto L120;
+ }
+
+/* Permutation to isolate eigenvalues if possible */
+
+ goto L50;
+
+/* Row and column exchange. */
+
+L20:
+ scale[m] = (real) j;
+ if (j == m) {
+ goto L30;
+ }
+
+ cswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+ i__1 = *n - k + 1;
+ cswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+
+L30:
+ switch (iexc) {
+ case 1: goto L40;
+ case 2: goto L80;
+ }
+
+/* Search for rows isolating an eigenvalue and push them down. */
+
+L40:
+ if (l == 1) {
+ goto L210;
+ }
+ --l;
+
+L50:
+ for (j = l; j >= 1; --j) {
+
+ i__1 = l;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ == j) {
+ goto L60;
+ }
+ i__2 = j + i__ * a_dim1;
+ if ((a[i__2].r != 0.f) || (r_imag(&a[j + i__ * a_dim1]) != 0.f)) {
+ goto L70;
+ }
+L60:
+ ;
+ }
+
+ m = l;
+ iexc = 1;
+ goto L20;
+L70:
+ ;
+ }
+
+ goto L90;
+
+/* Search for columns isolating an eigenvalue and push them left. */
+
+L80:
+ ++k;
+
+L90:
+ i__1 = l;
+ for (j = k; j <= i__1; ++j) {
+
+ i__2 = l;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ if (i__ == j) {
+ goto L100;
+ }
+ i__3 = i__ + j * a_dim1;
+ if ((a[i__3].r != 0.f) || (r_imag(&a[i__ + j * a_dim1]) != 0.f)) {
+ goto L110;
+ }
+L100:
+ ;
+ }
+
+ m = k;
+ iexc = 2;
+ goto L20;
+L110:
+ ;
+ }
+
+L120:
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ scale[i__] = 1.f;
+/* L130: */
+ }
+
+ if (lsame_(job, "P")) {
+ goto L210;
+ }
+
+/*
+ Balance the submatrix in rows K to L.
+
+ Iterative loop for norm reduction
+*/
+
+ sfmin1 = slamch_("S") / slamch_("P");
+ sfmax1 = 1.f / sfmin1;
+ sfmin2 = sfmin1 * 8.f;
+ sfmax2 = 1.f / sfmin2;
+L140:
+ noconv = FALSE_;
+
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ c__ = 0.f;
+ r__ = 0.f;
+
+ i__2 = l;
+ for (j = k; j <= i__2; ++j) {
+ if (j == i__) {
+ goto L150;
+ }
+ i__3 = j + i__ * a_dim1;
+ c__ += (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[j + i__
+ * a_dim1]), dabs(r__2));
+ i__3 = i__ + j * a_dim1;
+ r__ += (r__1 = a[i__3].r, dabs(r__1)) + (r__2 = r_imag(&a[i__ + j
+ * a_dim1]), dabs(r__2));
+L150:
+ ;
+ }
+ ica = icamax_(&l, &a[i__ * a_dim1 + 1], &c__1);
+ ca = c_abs(&a[ica + i__ * a_dim1]);
+ i__2 = *n - k + 1;
+ ira = icamax_(&i__2, &a[i__ + k * a_dim1], lda);
+ ra = c_abs(&a[i__ + (ira + k - 1) * a_dim1]);
+
+/* Guard against zero C or R due to underflow. */
+
+ if ((c__ == 0.f) || (r__ == 0.f)) {
+ goto L200;
+ }
+ g = r__ / 8.f;
+ f = 1.f;
+ s = c__ + r__;
+L160:
+/* Computing MAX */
+ r__1 = max(f,c__);
+/* Computing MIN */
+ r__2 = min(r__,g);
+ if (((c__ >= g) || (dmax(r__1,ca) >= sfmax2)) || (dmin(r__2,ra) <=
+ sfmin2)) {
+ goto L170;
+ }
+ f *= 8.f;
+ c__ *= 8.f;
+ ca *= 8.f;
+ r__ /= 8.f;
+ g /= 8.f;
+ ra /= 8.f;
+ goto L160;
+
+L170:
+ g = c__ / 8.f;
+L180:
+/* Computing MIN */
+ r__1 = min(f,c__), r__1 = min(r__1,g);
+ if (((g < r__) || (dmax(r__,ra) >= sfmax2)) || (dmin(r__1,ca) <=
+ sfmin2)) {
+ goto L190;
+ }
+ f /= 8.f;
+ c__ /= 8.f;
+ g /= 8.f;
+ ca /= 8.f;
+ r__ *= 8.f;
+ ra *= 8.f;
+ goto L180;
+
+/* Now balance. */
+
+L190:
+ if (c__ + r__ >= s * .95f) {
+ goto L200;
+ }
+ if (f < 1.f && scale[i__] < 1.f) {
+ if (f * scale[i__] <= sfmin1) {
+ goto L200;
+ }
+ }
+ if (f > 1.f && scale[i__] > 1.f) {
+ if (scale[i__] >= sfmax1 / f) {
+ goto L200;
+ }
+ }
+ g = 1.f / f;
+ scale[i__] *= f;
+ noconv = TRUE_;
+
+ i__2 = *n - k + 1;
+ csscal_(&i__2, &g, &a[i__ + k * a_dim1], lda);
+ csscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1);
+
+L200:
+ ;
+ }
+
+ if (noconv) {
+ goto L140;
+ }
+
+L210:
+ *ilo = k;
+ *ihi = l;
+
+ return 0;
+
+/* End of CGEBAL */
+
+} /* cgebal_ */
+
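+/*
+  Editorial usage sketch, not f2c-generated output: the usual pairing
+  of cgebal_ above with cgebak_ earlier in this file.  A is balanced
+  before an eigensolve; eigenvectors of the balanced matrix are then
+  mapped back to eigenvectors of the original A by cgebak_ with the
+  same JOB and the SCALE array saved from cgebal_.  The identity
+  placeholder in v stands in for vectors an eigensolver would fill.
+*/
+#if 0   /* illustration only -- not compiled into lapack_lite */
+static void example_cgebal_cgebak(void)
+{
+    /* Column-major 2x2 complex A */
+    complex a[4] = {{4.f, 0.f}, {0.f, 1.f}, {2.f, 0.f}, {3.f, 0.f}};
+    complex v[4] = {{1.f, 0.f}, {0.f, 0.f}, {0.f, 0.f}, {1.f, 0.f}};
+    real scale[2];
+    integer n = 2, lda = 2, ldv = 2, m = 2, ilo, ihi, info;
+
+    cgebal_("B", &n, a, &lda, &ilo, &ihi, scale, &info);
+
+    /* ... compute right eigenvectors of the balanced A into v ... */
+
+    cgebak_("B", "R", &n, &ilo, &ihi, scale, &m, v, &ldv, &info);
+}
+#endif
+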
+/* Subroutine */ int cgebd2_(integer *m, integer *n, complex *a, integer *lda,
+ real *d__, real *e, complex *tauq, complex *taup, complex *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__;
+ static complex alpha;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+ , integer *, complex *, complex *, integer *, complex *),
+ clarfg_(integer *, complex *, complex *, integer *, complex *),
+ clacgv_(integer *, complex *, integer *), xerbla_(char *, integer
+ *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CGEBD2 reduces a complex general m by n matrix A to upper or lower
+ real bidiagonal form B by a unitary transformation: Q' * A * P = B.
+
+ If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns in the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the m by n general matrix to be reduced.
+ On exit,
+ if m >= n, the diagonal and the first superdiagonal are
+ overwritten with the upper bidiagonal matrix B; the
+ elements below the diagonal, with the array TAUQ, represent
+ the unitary matrix Q as a product of elementary
+ reflectors, and the elements above the first superdiagonal,
+ with the array TAUP, represent the unitary matrix P as
+ a product of elementary reflectors;
+ if m < n, the diagonal and the first subdiagonal are
+ overwritten with the lower bidiagonal matrix B; the
+ elements below the first subdiagonal, with the array TAUQ,
+ represent the unitary matrix Q as a product of
+ elementary reflectors, and the elements above the diagonal,
+ with the array TAUP, represent the unitary matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) REAL array, dimension (min(M,N))
+ The diagonal elements of the bidiagonal matrix B:
+ D(i) = A(i,i).
+
+ E (output) REAL array, dimension (min(M,N)-1)
+ The off-diagonal elements of the bidiagonal matrix B:
+ if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
+ if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
+
+ TAUQ (output) COMPLEX array dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the unitary matrix Q. See Further Details.
+
+ TAUP (output) COMPLEX array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the unitary matrix P. See Further Details.
+
+ WORK (workspace) COMPLEX array, dimension (max(M,N))
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ If m >= n,
+
+ Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are complex scalars, and v and u are complex
+ vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in
+ A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in
+ A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n,
+
+ Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are complex scalars, v and u are complex vectors;
+ v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i);
+ u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The contents of A on exit are illustrated by the following examples:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
+ ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
+ ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
+ ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
+ ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
+ ( v1 v2 v3 v4 v5 )
+
+ where d and e denote diagonal and off-diagonal elements of B, vi
+ denotes an element of the vector defining H(i), and ui an element of
+ the vector defining G(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("CGEBD2", &i__1);
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ clarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1, &
+ tauq[i__]);
+ i__2 = i__;
+ d__[i__2] = alpha.r;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Apply H(i)' to A(i:m,i+1:n) from the left */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ r_cnjg(&q__1, &tauq[i__]);
+ clarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &q__1,
+ &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__;
+ a[i__2].r = d__[i__3], a[i__2].i = 0.f;
+
+ if (i__ < *n) {
+
+/*
+ Generate elementary reflector G(i) to annihilate
+ A(i,i+2:n)
+*/
+
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ clarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &
+ taup[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Apply G(i) to A(i+1:m,i+1:n) from the right */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ clarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &work[1]);
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ i__3 = i__;
+ a[i__2].r = e[i__3], a[i__2].i = 0.f;
+ } else {
+ i__2 = i__;
+ taup[i__2].r = 0.f, taup[i__2].i = 0.f;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector G(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+ clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ clarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &
+ taup[i__]);
+ i__2 = i__;
+ d__[i__2] = alpha.r;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Apply G(i) to A(i+1:m,i:n) from the right */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+/* Computing MIN */
+ i__4 = i__ + 1;
+ clarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &taup[
+ i__], &a[min(i__4,*m) + i__ * a_dim1], lda, &work[1]);
+ i__2 = *n - i__ + 1;
+ clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__;
+ a[i__2].r = d__[i__3], a[i__2].i = 0.f;
+
+ if (i__ < *m) {
+
+/*
+ Generate elementary reflector H(i) to annihilate
+ A(i+2:m,i)
+*/
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ clarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1,
+ &tauq[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + 1 + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Apply H(i)' to A(i+1:m,i+1:n) from the left */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ r_cnjg(&q__1, &tauq[i__]);
+ clarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &
+ c__1, &q__1, &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &
+ work[1]);
+ i__2 = i__ + 1 + i__ * a_dim1;
+ i__3 = i__;
+ a[i__2].r = e[i__3], a[i__2].i = 0.f;
+ } else {
+ i__2 = i__;
+ tauq[i__2].r = 0.f, tauq[i__2].i = 0.f;
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of CGEBD2 */
+
+} /* cgebd2_ */
+
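+/*
+  Editorial usage sketch, not f2c-generated output: a direct call to
+  the unblocked cgebd2_ above.  For an m-by-n input, D and E receive
+  the diagonal and off-diagonal of the bidiagonal B, and WORK needs
+  max(m,n) entries; the 3x3 identity input is just a placeholder.
+*/
+#if 0   /* illustration only -- not compiled into lapack_lite */
+static void example_cgebd2(void)
+{
+    /* Column-major 3x3 complex identity as a trivial input */
+    complex a[9] = {{1.f, 0.f}, {0.f, 0.f}, {0.f, 0.f},
+                    {0.f, 0.f}, {1.f, 0.f}, {0.f, 0.f},
+                    {0.f, 0.f}, {0.f, 0.f}, {1.f, 0.f}};
+    real d[3], e[2];
+    complex tauq[3], taup[3], work[3];
+    integer m = 3, n = 3, lda = 3, info;
+
+    cgebd2_(&m, &n, a, &lda, d, e, tauq, taup, work, &info);
+    /* Here d = {1,1,1} and e = {0,0}: the input was already bidiagonal. */
+}
+#endif
+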
+/* Subroutine */ int cgebrd_(integer *m, integer *n, complex *a, integer *lda,
+ real *d__, real *e, complex *tauq, complex *taup, complex *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__, j, nb, nx;
+ static real ws;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ static integer nbmin, iinfo, minmn;
+ extern /* Subroutine */ int cgebd2_(integer *, integer *, complex *,
+ integer *, real *, real *, complex *, complex *, complex *,
+ integer *), clabrd_(integer *, integer *, integer *, complex *,
+ integer *, real *, real *, complex *, complex *, complex *,
+ integer *, complex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwrkx, ldwrky, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CGEBRD reduces a general complex M-by-N matrix A to upper or lower
+ bidiagonal form B by a unitary transformation: Q**H * A * P = B.
+
+ If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns in the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the M-by-N general matrix to be reduced.
+ On exit,
+ if m >= n, the diagonal and the first superdiagonal are
+ overwritten with the upper bidiagonal matrix B; the
+ elements below the diagonal, with the array TAUQ, represent
+ the unitary matrix Q as a product of elementary
+ reflectors, and the elements above the first superdiagonal,
+ with the array TAUP, represent the unitary matrix P as
+ a product of elementary reflectors;
+ if m < n, the diagonal and the first subdiagonal are
+ overwritten with the lower bidiagonal matrix B; the
+ elements below the first subdiagonal, with the array TAUQ,
+ represent the unitary matrix Q as a product of
+ elementary reflectors, and the elements above the diagonal,
+ with the array TAUP, represent the unitary matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) REAL array, dimension (min(M,N))
+ The diagonal elements of the bidiagonal matrix B:
+ D(i) = A(i,i).
+
+ E (output) REAL array, dimension (min(M,N)-1)
+ The off-diagonal elements of the bidiagonal matrix B:
+ if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
+ if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
+
+ TAUQ (output) COMPLEX array dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the unitary matrix Q. See Further Details.
+
+ TAUP (output) COMPLEX array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the unitary matrix P. See Further Details.
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The length of the array WORK. LWORK >= max(1,M,N).
+ For optimum performance LWORK >= (M+N)*NB, where NB
+ is the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ If m >= n,
+
+ Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are complex scalars, and v and u are complex
+ vectors; v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in
+ A(i+1:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in
+ A(i,i+2:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n,
+
+ Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are complex scalars, and v and u are complex
+ vectors; v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in
+ A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in
+ A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The contents of A on exit are illustrated by the following examples:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
+ ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
+ ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
+ ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
+ ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
+ ( v1 v2 v3 v4 v5 )
+
+ where d and e denote diagonal and off-diagonal elements of B, vi
+ denotes an element of the vector defining H(i), and ui an element of
+ the vector defining G(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* Computing MAX */
+ i__1 = 1, i__2 = ilaenv_(&c__1, "CGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = max(i__1,i__2);
+ lwkopt = (*m + *n) * nb;
+ r__1 = (real) lwkopt;
+ work[1].r = r__1, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*lwork < max(i__1,*n) && ! lquery) {
+ *info = -10;
+ }
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("CGEBRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ minmn = min(*m,*n);
+ if (minmn == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ ws = (real) max(*m,*n);
+ ldwrkx = *m;
+ ldwrky = *n;
+
+ if (nb > 1 && nb < minmn) {
+
+/*
+ Set the crossover point NX.
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "CGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+
+/* Determine when to switch from blocked to unblocked code. */
+
+ if (nx < minmn) {
+ ws = (real) ((*m + *n) * nb);
+ if ((real) (*lwork) < ws) {
+
+/*
+ Not enough work space for the optimal NB, consider using
+ a smaller block size.
+*/
+
+ nbmin = ilaenv_(&c__2, "CGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ if (*lwork >= (*m + *n) * nbmin) {
+ nb = *lwork / (*m + *n);
+ } else {
+ nb = 1;
+ nx = minmn;
+ }
+ }
+ }
+ } else {
+ nx = minmn;
+ }
+
+ i__1 = minmn - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+
+/*
+ Reduce rows and columns i:i+ib-1 to bidiagonal form and return
+ the matrices X and Y which are needed to update the unreduced
+ part of the matrix
+*/
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ + 1;
+ clabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[
+ i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx
+ * nb + 1], &ldwrky);
+
+/*
+ Update the trailing submatrix A(i+ib:m,i+ib:n), using
+ an update of the form A := A - V*Y' - X*U'
+*/
+
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Conjugate transpose", &i__3, &i__4, &nb, &
+ q__1, &a[i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb +
+ nb + 1], &ldwrky, &c_b56, &a[i__ + nb + (i__ + nb) * a_dim1],
+ lda);
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &q__1, &
+ work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, &
+ c_b56, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy diagonal and off-diagonal elements of B back into A */
+
+ if (*m >= *n) {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ i__4 = j + j * a_dim1;
+ i__5 = j;
+ a[i__4].r = d__[i__5], a[i__4].i = 0.f;
+ i__4 = j + (j + 1) * a_dim1;
+ i__5 = j;
+ a[i__4].r = e[i__5], a[i__4].i = 0.f;
+/* L10: */
+ }
+ } else {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ i__4 = j + j * a_dim1;
+ i__5 = j;
+ a[i__4].r = d__[i__5], a[i__4].i = 0.f;
+ i__4 = j + 1 + j * a_dim1;
+ i__5 = j;
+ a[i__4].r = e[i__5], a[i__4].i = 0.f;
+/* L20: */
+ }
+ }
+/* L30: */
+ }
+
+/* Use unblocked code to reduce the remainder of the matrix */
+
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ cgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &
+ tauq[i__], &taup[i__], &work[1], &iinfo);
+ work[1].r = ws, work[1].i = 0.f;
+ return 0;
+
+/* End of CGEBRD */
+
+} /* cgebrd_ */
+
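+/*
+  Editorial usage sketch, not f2c-generated output: the LWORK = -1
+  workspace-query protocol documented for cgebrd_ above.  The first
+  call only reports the optimal LWORK in WORK(1); a second call with
+  a buffer of that size would perform the actual reduction.
+*/
+#if 0   /* illustration only -- not compiled into lapack_lite */
+static void example_cgebrd_query(void)
+{
+    integer m = 5, n = 4, lda = 5, lwork = -1, info;
+    complex a[20];             /* m-by-n matrix, filled before the real call */
+    real d[4], e[3];
+    complex tauq[4], taup[4], wsize;
+
+    /* Query only: returns immediately with the optimal size in wsize.r */
+    cgebrd_(&m, &n, a, &lda, d, e, tauq, taup, &wsize, &lwork, &info);
+
+    /* A second call would pass a complex WORK buffer of
+       (integer) wsize.r entries, with lwork set to that length. */
+}
+#endif
+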
+/* Subroutine */ int cgeev_(char *jobvl, char *jobvr, integer *n, complex *a,
+ integer *lda, complex *w, complex *vl, integer *ldvl, complex *vr,
+ integer *ldvr, complex *work, integer *lwork, real *rwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3, i__4;
+ real r__1, r__2;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, k, ihi;
+ static real scl;
+ static integer ilo;
+ static real dum[1], eps;
+ static complex tmp;
+ static integer ibal;
+ static char side[1];
+ static integer maxb;
+ static real anrm;
+ static integer ierr, itau, iwrk, nout;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern doublereal scnrm2_(integer *, complex *, integer *);
+ extern /* Subroutine */ int cgebak_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, complex *, integer *, integer *), cgebal_(char *, integer *, complex *, integer *,
+ integer *, integer *, real *, integer *), slabad_(real *,
+ real *);
+ static logical scalea;
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ static real cscale;
+ extern /* Subroutine */ int cgehrd_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *),
+ clascl_(char *, integer *, integer *, real *, real *, integer *,
+ integer *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), clacpy_(char *, integer *, integer *, complex *, integer *,
+ complex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical select[1];
+ static real bignum;
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int chseqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), ctrevc_(char *,
+ char *, logical *, integer *, complex *, integer *, complex *,
+ integer *, complex *, integer *, integer *, integer *, complex *,
+ real *, integer *), cunghr_(integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ integer *);
+ static integer minwrk, maxwrk;
+ static logical wantvl;
+ static real smlnum;
+ static integer hswork, irwork;
+ static logical lquery, wantvr;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CGEEV computes for an N-by-N complex nonsymmetric matrix A, the
+ eigenvalues and, optionally, the left and/or right eigenvectors.
+
+ The right eigenvector v(j) of A satisfies
+ A * v(j) = lambda(j) * v(j)
+ where lambda(j) is its eigenvalue.
+ The left eigenvector u(j) of A satisfies
+ u(j)**H * A = lambda(j) * u(j)**H
+ where u(j)**H denotes the conjugate transpose of u(j).
+
+ The computed eigenvectors are normalized to have Euclidean norm
+ equal to 1 and largest component real.
+
+ Arguments
+ =========
+
+ JOBVL (input) CHARACTER*1
+ = 'N': left eigenvectors of A are not computed;
+          = 'V': left eigenvectors of A are computed.
+
+ JOBVR (input) CHARACTER*1
+ = 'N': right eigenvectors of A are not computed;
+ = 'V': right eigenvectors of A are computed.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the N-by-N matrix A.
+ On exit, A has been overwritten.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ W (output) COMPLEX array, dimension (N)
+ W contains the computed eigenvalues.
+
+ VL (output) COMPLEX array, dimension (LDVL,N)
+ If JOBVL = 'V', the left eigenvectors u(j) are stored one
+ after another in the columns of VL, in the same order
+ as their eigenvalues.
+ If JOBVL = 'N', VL is not referenced.
+ u(j) = VL(:,j), the j-th column of VL.
+
+ LDVL (input) INTEGER
+ The leading dimension of the array VL. LDVL >= 1; if
+ JOBVL = 'V', LDVL >= N.
+
+ VR (output) COMPLEX array, dimension (LDVR,N)
+ If JOBVR = 'V', the right eigenvectors v(j) are stored one
+ after another in the columns of VR, in the same order
+ as their eigenvalues.
+ If JOBVR = 'N', VR is not referenced.
+ v(j) = VR(:,j), the j-th column of VR.
+
+ LDVR (input) INTEGER
+ The leading dimension of the array VR. LDVR >= 1; if
+ JOBVR = 'V', LDVR >= N.
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,2*N).
+ For good performance, LWORK must generally be larger.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ RWORK (workspace) REAL array, dimension (2*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = i, the QR algorithm failed to compute all the
+ eigenvalues, and no eigenvectors have been computed;
+                elements i+1:N of W contain eigenvalues which have
+ converged.
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ wantvl = lsame_(jobvl, "V");
+ wantvr = lsame_(jobvr, "V");
+ if (! wantvl && ! lsame_(jobvl, "N")) {
+ *info = -1;
+ } else if (! wantvr && ! lsame_(jobvr, "N")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if ((*ldvl < 1) || (wantvl && *ldvl < *n)) {
+ *info = -8;
+ } else if ((*ldvr < 1) || (wantvr && *ldvr < *n)) {
+ *info = -10;
+ }
+
+/*
+ Compute workspace
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ CWorkspace refers to complex workspace, and RWorkspace to real
+ workspace. NB refers to the optimal block size for the
+ immediately following subroutine, as returned by ILAENV.
+ HSWORK refers to the workspace preferred by CHSEQR, as
+ calculated below. HSWORK is computed assuming ILO=1 and IHI=N,
+ the worst case.)
+*/
+
+ minwrk = 1;
+ if (*info == 0 && ((*lwork >= 1) || (lquery))) {
+ maxwrk = *n + *n * ilaenv_(&c__1, "CGEHRD", " ", n, &c__1, n, &c__0, (
+ ftnlen)6, (ftnlen)1);
+ if (! wantvl && ! wantvr) {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n) << (1);
+ minwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = ilaenv_(&c__8, "CHSEQR", "EN", n, &c__1, n, &c_n1, (ftnlen)
+ 6, (ftnlen)2);
+ maxb = max(i__1,2);
+/*
+ Computing MIN
+ Computing MAX
+*/
+ i__3 = 2, i__4 = ilaenv_(&c__4, "CHSEQR", "EN", n, &c__1, n, &
+ c_n1, (ftnlen)6, (ftnlen)2);
+ i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
+ k = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = k * (k + 2), i__2 = (*n) << (1);
+ hswork = max(i__1,i__2);
+ maxwrk = max(maxwrk,hswork);
+ } else {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n) << (1);
+ minwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + (*n - 1) * ilaenv_(&c__1, "CUNGHR",
+ " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = ilaenv_(&c__8, "CHSEQR", "SV", n, &c__1, n, &c_n1, (ftnlen)
+ 6, (ftnlen)2);
+ maxb = max(i__1,2);
+/*
+ Computing MIN
+ Computing MAX
+*/
+ i__3 = 2, i__4 = ilaenv_(&c__4, "CHSEQR", "SV", n, &c__1, n, &
+ c_n1, (ftnlen)6, (ftnlen)2);
+ i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
+ k = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = k * (k + 2), i__2 = (*n) << (1);
+ hswork = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = max(maxwrk,hswork), i__2 = (*n) << (1);
+ maxwrk = max(i__1,i__2);
+ }
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ }
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEEV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ clascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/*
+ Balance the matrix
+ (CWorkspace: none)
+ (RWorkspace: need N)
+*/
+
+ ibal = 1;
+ cgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &rwork[ibal], &ierr);
+
+/*
+ Reduce to upper Hessenberg form
+ (CWorkspace: need 2*N, prefer N+N*NB)
+ (RWorkspace: none)
+*/
+
+ itau = 1;
+ iwrk = itau + *n;
+ i__1 = *lwork - iwrk + 1;
+ cgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+ &ierr);
+
+ if (wantvl) {
+
+/*
+ Want left eigenvectors
+ Copy Householder vectors to VL
+*/
+
+ *(unsigned char *)side = 'L';
+ clacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+ ;
+
+/*
+ Generate unitary matrix in VL
+ (CWorkspace: need 2*N-1, prefer N+(N-1)*NB)
+ (RWorkspace: none)
+*/
+
+ i__1 = *lwork - iwrk + 1;
+ cunghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+
+/*
+ Perform QR iteration, accumulating Schur vectors in VL
+ (CWorkspace: need 1, prefer HSWORK (see comments) )
+ (RWorkspace: none)
+*/
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ chseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vl[
+ vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+ if (wantvr) {
+
+/*
+ Want left and right eigenvectors
+ Copy Schur vectors to VR
+*/
+
+ *(unsigned char *)side = 'B';
+ clacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+ }
+
+ } else if (wantvr) {
+
+/*
+ Want right eigenvectors
+ Copy Householder vectors to VR
+*/
+
+ *(unsigned char *)side = 'R';
+ clacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+ ;
+
+/*
+ Generate unitary matrix in VR
+ (CWorkspace: need 2*N-1, prefer N+(N-1)*NB)
+ (RWorkspace: none)
+*/
+
+ i__1 = *lwork - iwrk + 1;
+ cunghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+
+/*
+ Perform QR iteration, accumulating Schur vectors in VR
+ (CWorkspace: need 1, prefer HSWORK (see comments) )
+ (RWorkspace: none)
+*/
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ chseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[
+ vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+ } else {
+
+/*
+ Compute eigenvalues only
+ (CWorkspace: need 1, prefer HSWORK (see comments) )
+ (RWorkspace: none)
+*/
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ chseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &w[1], &vr[
+ vr_offset], ldvr, &work[iwrk], &i__1, info);
+ }
+
+/* If INFO > 0 from CHSEQR, then quit */
+
+ if (*info > 0) {
+ goto L50;
+ }
+
+ if ((wantvl) || (wantvr)) {
+
+/*
+ Compute left and/or right eigenvectors
+ (CWorkspace: need 2*N)
+ (RWorkspace: need 2*N)
+*/
+
+ irwork = ibal + *n;
+ ctrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+ &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &rwork[irwork],
+ &ierr);
+ }
+
+ if (wantvl) {
+
+/*
+ Undo balancing of left eigenvectors
+ (CWorkspace: none)
+ (RWorkspace: need N)
+*/
+
+ cgebak_("B", "L", n, &ilo, &ihi, &rwork[ibal], n, &vl[vl_offset],
+ ldvl, &ierr);
+
+/* Normalize left eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ scl = 1.f / scnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ csscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + i__ * vl_dim1;
+/* Computing 2nd power */
+ r__1 = vl[i__3].r;
+/* Computing 2nd power */
+ r__2 = r_imag(&vl[k + i__ * vl_dim1]);
+ rwork[irwork + k - 1] = r__1 * r__1 + r__2 * r__2;
+/* L10: */
+ }
+ k = isamax_(n, &rwork[irwork], &c__1);
+ r_cnjg(&q__2, &vl[k + i__ * vl_dim1]);
+ r__1 = sqrt(rwork[irwork + k - 1]);
+ q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
+ tmp.r = q__1.r, tmp.i = q__1.i;
+ cscal_(n, &tmp, &vl[i__ * vl_dim1 + 1], &c__1);
+ i__2 = k + i__ * vl_dim1;
+ i__3 = k + i__ * vl_dim1;
+ r__1 = vl[i__3].r;
+ q__1.r = r__1, q__1.i = 0.f;
+ vl[i__2].r = q__1.r, vl[i__2].i = q__1.i;
+/* L20: */
+ }
+ }
+
+ if (wantvr) {
+
+/*
+ Undo balancing of right eigenvectors
+ (CWorkspace: none)
+ (RWorkspace: need N)
+*/
+
+ cgebak_("B", "R", n, &ilo, &ihi, &rwork[ibal], n, &vr[vr_offset],
+ ldvr, &ierr);
+
+/* Normalize right eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ scl = 1.f / scnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ csscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + i__ * vr_dim1;
+/* Computing 2nd power */
+ r__1 = vr[i__3].r;
+/* Computing 2nd power */
+ r__2 = r_imag(&vr[k + i__ * vr_dim1]);
+ rwork[irwork + k - 1] = r__1 * r__1 + r__2 * r__2;
+/* L30: */
+ }
+ k = isamax_(n, &rwork[irwork], &c__1);
+ r_cnjg(&q__2, &vr[k + i__ * vr_dim1]);
+ r__1 = sqrt(rwork[irwork + k - 1]);
+ q__1.r = q__2.r / r__1, q__1.i = q__2.i / r__1;
+ tmp.r = q__1.r, tmp.i = q__1.i;
+ cscal_(n, &tmp, &vr[i__ * vr_dim1 + 1], &c__1);
+ i__2 = k + i__ * vr_dim1;
+ i__3 = k + i__ * vr_dim1;
+ r__1 = vr[i__3].r;
+ q__1.r = r__1, q__1.i = 0.f;
+ vr[i__2].r = q__1.r, vr[i__2].i = q__1.i;
+/* L40: */
+ }
+ }
+
+/* Undo scaling if necessary */
+
+L50:
+ if (scalea) {
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ clascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[*info + 1]
+ , &i__2, &ierr);
+ if (*info > 0) {
+ i__1 = ilo - 1;
+ clascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &w[1], n,
+ &ierr);
+ }
+ }
+
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ return 0;
+
+/* End of CGEEV */
+
+} /* cgeev_ */
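+
+/*
+    Example (illustrative only, not part of LAPACK): the LWORK = -1
+    workspace query documented for CGEEV above, sketched for a small
+    fixed-size problem (N = 4) so no allocator is needed.  The helper
+    name and buffer sizes are hypothetical; a real caller would size
+    WORK from the value returned by the query.  Guarded out of the
+    build on purpose.
+*/
+#if 0
+static int example_cgeev_query(complex *a /* 4-by-4, column major */)
+{
+    integer n = 4, lda = 4, ldvl = 1, ldvr = 4, lwork, info;
+    complex w[4], vldum[1], vr[16], work[64], wkopt;
+    real rwork[8];                  /* RWORK needs 2*N reals */
+
+    /* Phase 1: LWORK = -1 asks cgeev_ only for the optimal size,
+       returned in WORK(1); nothing else is computed. */
+    lwork = -1;
+    cgeev_("N", "V", &n, a, &lda, w, vldum, &ldvl, vr, &ldvr,
+           &wkopt, &lwork, rwork, &info);
+
+    /* Phase 2: call again with a workspace at least that large
+       (capped here by the fixed buffer; the legal minimum is 2*N). */
+    lwork = (integer) wkopt.r;
+    if (lwork > 64) {
+        lwork = 64;
+    }
+    cgeev_("N", "V", &n, a, &lda, w, vldum, &ldvl, vr, &ldvr,
+           work, &lwork, rwork, &info);
+    return (int) info;              /* 0 on success */
+}
+#endif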
+
+/* Subroutine */ int cgehd2_(integer *n, integer *ilo, integer *ihi, complex *
+ a, integer *lda, complex *tau, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__;
+ static complex alpha;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+ , integer *, complex *, complex *, integer *, complex *),
+ clarfg_(integer *, complex *, complex *, integer *, complex *),
+ xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CGEHD2 reduces a complex general matrix A to upper Hessenberg form H
+ by a unitary similarity transformation: Q' * A * Q = H .
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that A is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to CGEBAL; otherwise they should be
+ set to 1 and N respectively. See Further Details.
+ 1 <= ILO <= IHI <= max(1,N).
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the n by n general matrix to be reduced.
+ On exit, the upper triangle and the first subdiagonal of A
+ are overwritten with the upper Hessenberg matrix H, and the
+ elements below the first subdiagonal, with the array TAU,
+ represent the unitary matrix Q as a product of elementary
+ reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (output) COMPLEX array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace) COMPLEX array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of (ihi-ilo) elementary
+ reflectors
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
+ exit in A(i+2:ihi,i), and tau in TAU(i).
+
+ The contents of A are illustrated by the following example, with
+ n = 7, ilo = 2 and ihi = 6:
+
+    on entry,                        on exit,
+
+    ( a   a   a   a   a   a   a )    (  a   a   h   h   h   h   a )
+    (     a   a   a   a   a   a )    (      a   h   h   h   h   a )
+    (     a   a   a   a   a   a )    (      h   h   h   h   h   h )
+    (     a   a   a   a   a   a )    (      v2  h   h   h   h   h )
+    (     a   a   a   a   a   a )    (      v2  v3  h   h   h   h )
+    (     a   a   a   a   a   a )    (      v2  v3  v4  h   h   h )
+    (                         a )    (                          a )
+
+ where a denotes an element of the original matrix A, h denotes a
+ modified element of the upper Hessenberg matrix H, and vi denotes an
+ element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -2;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEHD2", &i__1);
+ return 0;
+ }
+
+ i__1 = *ihi - 1;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+
+/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *ihi - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ clarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[
+ i__]);
+ i__2 = i__ + 1 + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */
+
+ i__2 = *ihi - i__;
+ clarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+ i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]);
+
+/* Apply H(i)' to A(i+1:ihi,i+1:n) from the left */
+
+ i__2 = *ihi - i__;
+ i__3 = *n - i__;
+ r_cnjg(&q__1, &tau[i__]);
+ clarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &q__1,
+ &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]);
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+/* L10: */
+ }
+
+ return 0;
+
+/* End of CGEHD2 */
+
+} /* cgehd2_ */
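+
+/*
+    Illustrative sketch (not part of LAPACK): the Further Details
+    above represent Q through reflectors H = I - tau*v*v'.  Applied
+    to one vector x this is x := x - tau*(v'*x)*v, with v' the
+    conjugate transpose; clarf_ performs the same rank-1 update on a
+    whole matrix.  The helper name is hypothetical and the code is
+    guarded out of the build.
+*/
+#if 0
+static void example_apply_reflector(integer n, complex tau,
+                                    complex *v, complex *x)
+{
+    integer i;
+    complex s, ts;
+
+    /* s = v' * x, i.e. the sum of conjg(v(i)) * x(i) */
+    s.r = 0.f, s.i = 0.f;
+    for (i = 0; i < n; ++i) {
+        s.r += v[i].r * x[i].r + v[i].i * x[i].i;
+        s.i += v[i].r * x[i].i - v[i].i * x[i].r;
+    }
+    /* ts = tau * s, then x := x - ts * v */
+    ts.r = tau.r * s.r - tau.i * s.i;
+    ts.i = tau.r * s.i + tau.i * s.r;
+    for (i = 0; i < n; ++i) {
+        x[i].r -= ts.r * v[i].r - ts.i * v[i].i;
+        x[i].i -= ts.r * v[i].i + ts.i * v[i].r;
+    }
+}
+#endif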
+
+/* Subroutine */ int cgehrd_(integer *n, integer *ilo, integer *ihi, complex *
+ a, integer *lda, complex *tau, complex *work, integer *lwork, integer
+ *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__;
+ static complex t[4160] /* was [65][64] */;
+ static integer ib;
+ static complex ei;
+ static integer nb, nh, nx, iws;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int cgehd2_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *), clarfb_(
+ char *, char *, char *, char *, integer *, integer *, integer *,
+ complex *, integer *, complex *, integer *, complex *, integer *,
+ complex *, integer *), clahrd_(
+ integer *, integer *, integer *, complex *, integer *, complex *,
+ complex *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CGEHRD reduces a complex general matrix A to upper Hessenberg form H
+ by a unitary similarity transformation: Q' * A * Q = H .
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that A is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to CGEBAL; otherwise they should be
+ set to 1 and N respectively. See Further Details.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the N-by-N general matrix to be reduced.
+ On exit, the upper triangle and the first subdiagonal of A
+ are overwritten with the upper Hessenberg matrix H, and the
+ elements below the first subdiagonal, with the array TAU,
+ represent the unitary matrix Q as a product of elementary
+ reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (output) COMPLEX array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to
+ zero.
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The length of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of (ihi-ilo) elementary
+ reflectors
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
+ exit in A(i+2:ihi,i), and tau in TAU(i).
+
+ The contents of A are illustrated by the following example, with
+ n = 7, ilo = 2 and ihi = 6:
+
+    on entry,                        on exit,
+
+    ( a   a   a   a   a   a   a )    (  a   a   h   h   h   h   a )
+    (     a   a   a   a   a   a )    (      a   h   h   h   h   a )
+    (     a   a   a   a   a   a )    (      h   h   h   h   h   h )
+    (     a   a   a   a   a   a )    (      v2  h   h   h   h   h )
+    (     a   a   a   a   a   a )    (      v2  v3  h   h   h   h )
+    (     a   a   a   a   a   a )    (      v2  v3  v4  h   h   h )
+    (                         a )    (                          a )
+
+ where a denotes an element of the original matrix A, h denotes a
+ modified element of the upper Hessenberg matrix H, and vi denotes an
+ element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* Computing MIN */
+ i__1 = 64, i__2 = ilaenv_(&c__1, "CGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = min(i__1,i__2);
+ lwkopt = *n * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -2;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEHRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */
+
+ i__1 = *ilo - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ tau[i__2].r = 0.f, tau[i__2].i = 0.f;
+/* L10: */
+ }
+ i__1 = *n - 1;
+ for (i__ = max(1,*ihi); i__ <= i__1; ++i__) {
+ i__2 = i__;
+ tau[i__2].r = 0.f, tau[i__2].i = 0.f;
+/* L20: */
+ }
+
+/* Quick return if possible */
+
+ nh = *ihi - *ilo + 1;
+ if (nh <= 1) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ iws = 1;
+ if (nb > 1 && nb < nh) {
+
+/*
+ Determine when to cross over from blocked to unblocked code
+ (last block is always handled by unblocked code).
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "CGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < nh) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ iws = *n * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: determine the
+ minimum value of NB, and reduce NB or force use of
+ unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CGEHRD", " ", n, ilo, ihi, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ if (*lwork >= *n * nbmin) {
+ nb = *lwork / *n;
+ } else {
+ nb = 1;
+ }
+ }
+ }
+ }
+ ldwork = *n;
+
+ if ((nb < nbmin) || (nb >= nh)) {
+
+/* Use unblocked code below */
+
+ i__ = *ilo;
+
+ } else {
+
+/* Use blocked code */
+
+ i__1 = *ihi - 1 - nx;
+ i__2 = nb;
+ for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *ihi - i__;
+ ib = min(i__3,i__4);
+
+/*
+ Reduce columns i:i+ib-1 to Hessenberg form, returning the
+ matrices V and T of the block reflector H = I - V*T*V'
+ which performs the reduction, and also the matrix Y = A*V*T
+*/
+
+ clahrd_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, &
+ c__65, &work[1], &ldwork);
+
+/*
+ Apply the block reflector H to A(1:ihi,i+ib:ihi) from the
+ right, computing A := A - Y * V'. V(i+ib,ib-1) must be set
+ to 1.
+*/
+
+ i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+ ei.r = a[i__3].r, ei.i = a[i__3].i;
+ i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+ a[i__3].r = 1.f, a[i__3].i = 0.f;
+ i__3 = *ihi - i__ - ib + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Conjugate transpose", ihi, &i__3, &ib, &
+ q__1, &work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda,
+ &c_b56, &a[(i__ + ib) * a_dim1 + 1], lda);
+ i__3 = i__ + ib + (i__ + ib - 1) * a_dim1;
+ a[i__3].r = ei.r, a[i__3].i = ei.i;
+
+/*
+ Apply the block reflector H to A(i+1:ihi,i+ib:n) from the
+ left
+*/
+
+ i__3 = *ihi - i__;
+ i__4 = *n - i__ - ib + 1;
+ clarfb_("Left", "Conjugate transpose", "Forward", "Columnwise", &
+ i__3, &i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &
+ c__65, &a[i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &
+ ldwork);
+/* L30: */
+ }
+ }
+
+/* Use unblocked code to reduce the rest of the matrix */
+
+ cgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+ work[1].r = (real) iws, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CGEHRD */
+
+} /* cgehrd_ */
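+
+/*
+    Example (illustrative only): the reduce-then-generate pattern
+    used by cgeev_ above.  cgehrd_ reduces A to Hessenberg form and
+    leaves the reflectors below the subdiagonal; cunghr_ then
+    expands them into the explicit unitary Q = H(ilo)...H(ihi-1).
+    Fixed sizes (N = 4, ILO = 1, IHI = N) and the helper name are
+    hypothetical; guarded out of the build.
+*/
+#if 0
+static int example_hessenberg_q(complex *a /* 4-by-4 */,
+                                complex *q /* 4-by-4 */)
+{
+    extern /* Subroutine */ int cunghr_(integer *, integer *,
+            integer *, complex *, integer *, complex *, complex *,
+            integer *, integer *);
+    integer n = 4, lda = 4, ilo = 1, ihi = 4, lwork = 64, info;
+    integer i, j;
+    complex tau[3], work[64];       /* TAU needs N-1 entries */
+
+    cgehrd_(&n, &ilo, &ihi, a, &lda, tau, work, &lwork, &info);
+    if (info != 0) {
+        return (int) info;
+    }
+    /* Copy A (H plus stored reflectors) into Q, then overwrite the
+       reflector part with the accumulated unitary matrix. */
+    for (j = 0; j < 4; ++j) {
+        for (i = 0; i < 4; ++i) {
+            q[i + j * 4] = a[i + j * 4];
+        }
+    }
+    cunghr_(&n, &ilo, &ihi, q, &lda, tau, work, &lwork, &info);
+    return (int) info;
+}
+#endif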
+
+/* Subroutine */ int cgelq2_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, k;
+ static complex alpha;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+ , integer *, complex *, complex *, integer *, complex *),
+ clarfg_(integer *, complex *, complex *, integer *, complex *),
+ clacgv_(integer *, complex *, integer *), xerbla_(char *, integer
+ *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CGELQ2 computes an LQ factorization of a complex m by n matrix A:
+ A = L * Q.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the m by n matrix A.
+ On exit, the elements on and below the diagonal of the array
+ contain the m by min(m,n) lower trapezoidal matrix L (L is
+ lower triangular if m <= n); the elements above the diagonal,
+ with the array TAU, represent the unitary matrix Q as a
+ product of elementary reflectors (see Further Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) COMPLEX array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace) COMPLEX array, dimension (M)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(k)' . . . H(2)' H(1)', where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i-1) = 0 and v(i) = 1; conjg(v(i+1:n)) is stored on exit in
+ A(i,i+1:n), and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGELQ2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+ clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ clarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &tau[i__]
+ );
+ if (i__ < *m) {
+
+/* Apply H(i) to A(i+1:m,i:n) from the right */
+
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ clarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[
+ i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ }
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+ i__2 = *n - i__ + 1;
+ clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+/* L10: */
+ }
+ return 0;
+
+/* End of CGELQ2 */
+
+} /* cgelq2_ */
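+
+/*
+    Note (illustrative, not part of LAPACK): one way to read the LQ
+    factorization above is as the conjugate transpose of a QR
+    factorization, since
+
+        A = L * Q   <=>   A' = Q' * L',
+
+    and Q' * L' is a QR factorization of A' (L' is upper triangular
+    when L is lower triangular).  This is consistent with CGELQ2
+    storing conjg(v(i+1:n)) in A(i,i+1:n): its reflectors annihilate
+    entries along rows of A, i.e. along columns of A'.
+*/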
+
+/* Subroutine */ int cgelqf_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int cgelq2_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *), clarfb_(char *, char
+ *, char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+ , integer *, integer *, complex *, integer *, complex *, complex *
+ , integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CGELQF computes an LQ factorization of a complex M-by-N matrix A:
+ A = L * Q.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, the elements on and below the diagonal of the array
+ contain the m-by-min(m,n) lower trapezoidal matrix L (L is
+ lower triangular if m <= n); the elements above the diagonal,
+ with the array TAU, represent the unitary matrix Q as a
+ product of elementary reflectors (see Further Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) COMPLEX array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,M).
+ For optimum performance LWORK >= M*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(k)' . . . H(2)' H(1)', where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i-1) = 0 and v(i) = 1; conjg(v(i+1:n)) is stored on exit in
+ A(i,i+1:n), and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "CGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ lwkopt = *m * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*m) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGELQF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if (nb > 1 && nb < k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "CGELQF", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CGELQF", " ", m, n, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+ i__1 = k - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/*
+ Compute the LQ factorization of the current block
+ A(i:i+ib-1,i:n)
+*/
+
+ i__3 = *n - i__ + 1;
+ cgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+ 1], &iinfo);
+ if (i__ + ib <= *m) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__3 = *n - i__ + 1;
+ clarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i+ib:m,i:n) from the right */
+
+ i__3 = *m - i__ - ib + 1;
+ i__4 = *n - i__ + 1;
+ clarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3,
+ &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+ 1], &ldwork);
+ }
+/* L10: */
+ }
+ } else {
+ i__ = 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+ if (i__ <= k) {
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ cgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+ , &iinfo);
+ }
+
+ work[1].r = (real) iws, work[1].i = 0.f;
+ return 0;
+
+/* End of CGELQF */
+
+} /* cgelqf_ */
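+
+/*
+    Example (illustrative only): LQ factorization of a 2-by-4 matrix
+    with cgelqf_.  Per the Arguments above, L is read from the lower
+    trapezoid of A on exit and the reflectors for Q from the strict
+    upper part plus TAU.  Helper name and fixed buffer sizes are
+    hypothetical; guarded out of the build.
+*/
+#if 0
+static int example_cgelqf(complex *a /* 2-by-4, column major */)
+{
+    integer m = 2, n = 4, lda = 2, lwork = 16, info;
+    complex tau[2], work[16];       /* TAU needs min(M,N) entries */
+
+    cgelqf_(&m, &n, a, &lda, tau, work, &lwork, &info);
+    /* On exit with info == 0: L(i,j) = a[(i-1) + (j-1)*lda] for
+       j <= i; the part above the diagonal plus tau encodes Q. */
+    return (int) info;
+}
+#endif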
+
+/* Subroutine */ int cgelsd_(integer *m, integer *n, integer *nrhs, complex *
+ a, integer *lda, complex *b, integer *ldb, real *s, real *rcond,
+ integer *rank, complex *work, integer *lwork, real *rwork, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ static integer ie, il, mm;
+ static real eps, anrm, bnrm;
+ static integer itau, iascl, ibscl;
+ static real sfmin;
+ static integer minmn, maxmn, itaup, itauq, mnthr, nwork;
+ extern /* Subroutine */ int cgebrd_(integer *, integer *, complex *,
+ integer *, real *, real *, complex *, complex *, complex *,
+ integer *, integer *), slabad_(real *, real *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgelqf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *), clalsd_(
+ char *, integer *, integer *, integer *, real *, real *, complex *
+ , integer *, real *, integer *, complex *, real *, integer *,
+ integer *), clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), cgeqrf_(integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), claset_(char *,
+ integer *, integer *, complex *, complex *, complex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), cunmbr_(char *, char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), slaset_(
+ char *, integer *, integer *, real *, real *, real *, integer *), cunmlq_(char *, char *, integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, complex *,
+ integer *, integer *);
+ static integer ldwork;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ static integer minwrk, maxwrk;
+ static real smlnum;
+ static logical lquery;
+ static integer nrwork, smlsiz;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ CGELSD computes the minimum-norm solution to a complex linear least
+ squares problem:
+ minimize 2-norm(| b - A*x |)
+ using the singular value decomposition (SVD) of A. A is an M-by-N
+ matrix which may be rank-deficient.
+
+ Several right hand side vectors b and solution vectors x can be
+ handled in a single call; they are stored as the columns of the
+ M-by-NRHS right hand side matrix B and the N-by-NRHS solution
+ matrix X.
+
+ The problem is solved in three steps:
+ (1) Reduce the coefficient matrix A to bidiagonal form with
+ Householder transformations, reducing the original problem
+ into a "bidiagonal least squares problem" (BLS)
+ (2) Solve the BLS using a divide and conquer approach.
+ (3) Apply back all the Householder transformations to solve
+ the original least squares problem.
+
+ The effective rank of A is determined by treating as zero those
+ singular values which are less than RCOND times the largest singular
+ value.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrices B and X. NRHS >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, A has been destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ B (input/output) COMPLEX array, dimension (LDB,NRHS)
+ On entry, the M-by-NRHS right hand side matrix B.
+ On exit, B is overwritten by the N-by-NRHS solution matrix X.
+ If m >= n and RANK = n, the residual sum-of-squares for
+ the solution in the i-th column is given by the sum of
+ squares of elements n+1:m in that column.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,M,N).
+
+ S (output) REAL array, dimension (min(M,N))
+ The singular values of A in decreasing order.
+ The condition number of A in the 2-norm = S(1)/S(min(m,n)).
+
+ RCOND (input) REAL
+ RCOND is used to determine the effective rank of A.
+ Singular values S(i) <= RCOND*S(1) are treated as zero.
+ If RCOND < 0, machine precision is used instead.
+
+ RANK (output) INTEGER
+ The effective rank of A, i.e., the number of singular values
+ which are greater than RCOND*S(1).
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK must be at least 1.
+ The exact minimum amount of workspace needed depends on M,
+ N and NRHS. As long as LWORK is at least
+ 2 * N + N * NRHS
+ if M is greater than or equal to N or
+ 2 * M + M * NRHS
+ if M is less than N, the code will execute correctly.
+ For good performance, LWORK should generally be larger.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ RWORK (workspace) REAL array, dimension at least
+ 10*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS +
+ (SMLSIZ+1)**2
+ if M is greater than or equal to N or
+ 10*M + 2*M*SMLSIZ + 8*M*NLVL + 3*SMLSIZ*NRHS +
+ (SMLSIZ+1)**2
+ if M is less than N, the code will execute correctly.
+ SMLSIZ is returned by ILAENV and is equal to the maximum
+ size of the subproblems at the bottom of the computation
+ tree (usually about 25), and
+ NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 )
+
+ IWORK (workspace) INTEGER array, dimension (LIWORK)
+ LIWORK >= 3 * MINMN * NLVL + 11 * MINMN,
+ where MINMN = MIN( M,N ).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: the algorithm for computing the SVD failed to converge;
+ if INFO = i, i off-diagonal elements of an intermediate
+ bidiagonal form did not converge to zero.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input arguments.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --s;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ mnthr = ilaenv_(&c__6, "CGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
+ }
+
+ smlsiz = ilaenv_(&c__9, "CGELSD", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+
+/*
+ Compute workspace.
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ NB refers to the optimal block size for the immediately
+ following subroutine, as returned by ILAENV.)
+*/
+
+ minwrk = 1;
+ if (*info == 0) {
+ maxwrk = 0;
+ mm = *m;
+ if (*m >= *n && *m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *nrhs * ilaenv_(&c__1, "CUNMQR", "LC", m,
+ nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
+
+/*
+ Path 1 - overdetermined or exactly determined.
+
+ Computing MAX
+*/
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + (mm + *n) * ilaenv_(&c__1,
+ "CGEBRD", " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1)
+ ;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *nrhs * ilaenv_(&c__1,
+ "CUNMBR", "QLC", &mm, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)
+ 3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + (*n - 1) * ilaenv_(&c__1,
+ "CUNMBR", "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * *nrhs;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = ((*n) << (1)) + mm, i__2 = ((*n) << (1)) + *n * *nrhs;
+ minwrk = max(i__1,i__2);
+ }
+ if (*n > *m) {
+ if (*n >= mnthr) {
+
+/*
+ Path 2a - underdetermined, with many more columns
+ than rows.
+*/
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &c_n1,
+ &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + ((*m) << (1))
+ * ilaenv_(&c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + *nrhs *
+ ilaenv_(&c__1, "CUNMBR", "QLC", m, nrhs, m, &c_n1, (
+ ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + (*m - 1) *
+ ilaenv_(&c__1, "CUNMLQ", "LC", n, nrhs, m, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (1));
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - underdetermined. */
+
+ maxwrk = ((*m) << (1)) + (*n + *m) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *nrhs * ilaenv_(&c__1,
+ "CUNMBR", "QLC", m, nrhs, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNMBR", "PLN", n, nrhs, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = ((*m) << (1)) + *n, i__2 = ((*m) << (1)) + *m * *nrhs;
+ minwrk = max(i__1,i__2);
+ }
+ minwrk = min(minwrk,maxwrk);
+ r__1 = (real) maxwrk;
+ q__1.r = r__1, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGELSD", &i__1);
+ return 0;
+ } else if (lquery) {
+ goto L10;
+ }
+
+/* Quick return if possible. */
+
+ if ((*m == 0) || (*n == 0)) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters. */
+
+ eps = slamch_("P");
+ sfmin = slamch_("S");
+ smlnum = sfmin / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
+
+ anrm = clange_("M", m, n, &a[a_offset], lda, &rwork[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM */
+
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ claset_("F", &i__1, nrhs, &c_b55, &c_b55, &b[b_offset], ldb);
+ slaset_("F", &minmn, &c__1, &c_b320, &c_b320, &s[1], &c__1)
+ ;
+ *rank = 0;
+ goto L10;
+ }
+
+/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
+
+ bnrm = clange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ clascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* If M < N make sure B(M+1:N,:) = 0 */
+
+ if (*m < *n) {
+ i__1 = *n - *m;
+ claset_("F", &i__1, nrhs, &c_b55, &c_b55, &b[*m + 1 + b_dim1], ldb);
+ }
+
+/* Overdetermined case. */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns */
+
+ mm = *n;
+ itau = 1;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R.
+ (RWorkspace: need N)
+ (CWorkspace: need N, prefer N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+
+/*
+ Multiply B by transpose(Q).
+ (RWorkspace: need N)
+ (CWorkspace: need NRHS, prefer NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunmqr_("L", "C", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below R. */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ claset_("L", &i__1, &i__2, &c_b55, &c_b55, &a[a_dim1 + 2],
+ lda);
+ }
+ }
+
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+ ie = 1;
+ nrwork = ie + *n;
+
+/*
+ Bidiagonalize R in A.
+ (RWorkspace: need N)
+ (CWorkspace: need 2*N+MM, prefer 2*N+(MM+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(&mm, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of R.
+ (CWorkspace: need 2*N+NRHS, prefer 2*N+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "C", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ clalsd_("U", &smlsiz, n, nrhs, &s[1], &rwork[ie], &b[b_offset], ldb,
+ rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of R. */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
+ b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *m, i__2 = ((*m) << (1)) - 4, i__1 = max(i__1,i__2), i__1 =
+ max(i__1,*nrhs), i__2 = *n - *m * 3;
+ if (*n >= mnthr && *lwork >= ((*m) << (2)) + *m * *m + max(i__1,i__2))
+ {
+
+/*
+ Path 2a - underdetermined, with many more columns than rows
+ and sufficient workspace for an efficient algorithm.
+*/
+
+ ldwork = *m;
+/*
+ Computing MAX
+ Computing MAX
+*/
+ i__3 = *m, i__4 = ((*m) << (1)) - 4, i__3 = max(i__3,i__4), i__3 =
+ max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = ((*m) << (2)) + *m * *lda + max(i__3,i__4), i__2 = *m * *
+ lda + *m + *m * *nrhs;
+ if (*lwork >= max(i__1,i__2)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ nwork = *m + 1;
+
+/*
+ Compute A=L*Q.
+ (CWorkspace: need 2*M, prefer M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+ il = nwork;
+
+/* Copy L to WORK(IL), zeroing out above its diagonal. */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ claset_("U", &i__1, &i__2, &c_b55, &c_b55, &work[il + ldwork], &
+ ldwork);
+ itauq = il + ldwork * *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+ ie = 1;
+ nrwork = ie + *m;
+
+/*
+ Bidiagonalize L in WORK(IL).
+ (RWorkspace: need M)
+ (CWorkspace: need M*M+4*M, prefer M*M+4*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, m, &work[il], &ldwork, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of L.
+ (CWorkspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "C", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ clalsd_("U", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1],
+ info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of L. */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
+ itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below first M rows of B. */
+
+ i__1 = *n - *m;
+ claset_("F", &i__1, nrhs, &c_b55, &c_b55, &b[*m + 1 + b_dim1],
+ ldb);
+ nwork = itau + *m;
+
+/*
+ Multiply transpose(Q) by B.
+ (CWorkspace: need NRHS, prefer NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunmlq_("L", "C", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+ ie = 1;
+ nrwork = ie + *m;
+
+/*
+ Bidiagonalize A.
+ (RWorkspace: need M)
+ (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors.
+ (CWorkspace: need 2*M+NRHS, prefer 2*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "C", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ clalsd_("L", &smlsiz, m, nrhs, &s[1], &rwork[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &rwork[nrwork], &iwork[1],
+ info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of A. */
+
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ }
+ }
+
+/* Undo scaling. */
+
+ if (iascl == 1) {
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ clascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ clascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L10:
+ r__1 = (real) maxwrk;
+ q__1.r = r__1, q__1.i = 0.f;
+ work[1].r = q__1.r, work[1].i = q__1.i;
+ return 0;
+
+/* End of CGELSD */
+
+} /* cgelsd_ */
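+
+/*
+    Example (illustrative only): minimum-norm least-squares solve of
+    a 4-by-2 system with one right-hand side via cgelsd_, following
+    the Purpose section above.  The fixed workspace bounds are
+    generous guesses for this tiny case (RWORK in particular depends
+    on SMLSIZ); a real caller would use the LWORK = -1 query and the
+    documented RWORK/IWORK formulas.  Guarded out of the build.
+*/
+#if 0
+static int example_cgelsd(complex *a /* 4-by-2 */,
+                          complex *b /* 4-vector rhs, ldb = 4 */)
+{
+    integer m = 4, n = 2, nrhs = 1, lda = 4, ldb = 4;
+    integer rank, lwork = 128, iwork[64], info;
+    real s[2], rwork[1024], rcond = -1.f; /* RCOND < 0: machine eps */
+    complex work[128];
+
+    cgelsd_(&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank,
+            work, &lwork, rwork, iwork, &info);
+    /* On success the solution x overwrites b(1:n); s holds the
+       singular values and rank the effective rank of A. */
+    return (int) info;
+}
+#endif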
+
+/* Subroutine */ int cgeqr2_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, k;
+ static complex alpha;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+ , integer *, complex *, complex *, integer *, complex *),
+ clarfg_(integer *, complex *, complex *, integer *, complex *),
+ xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CGEQR2 computes a QR factorization of a complex m by n matrix A:
+ A = Q * R.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the m by n matrix A.
+ On exit, the elements on and above the diagonal of the array
+ contain the min(m,n) by n upper trapezoidal matrix R (R is
+ upper triangular if m >= n); the elements below the diagonal,
+ with the array TAU, represent the unitary matrix Q as a
+ product of elementary reflectors (see Further Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) COMPLEX array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace) COMPLEX array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEQR2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ clarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1]
+ , &c__1, &tau[i__]);
+ if (i__ < *n) {
+
+/* Apply H(i)' to A(i:m,i+1:n) from the left */
+
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ r_cnjg(&q__1, &tau[i__]);
+ clarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &q__1,
+ &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = alpha.r, a[i__2].i = alpha.i;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of CGEQR2 */
+
+} /* cgeqr2_ */
+
+/* Subroutine */ int cgeqrf_(integer *m, integer *n, complex *a, integer *lda,
+ complex *tau, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int cgeqr2_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *), clarfb_(char *, char
+ *, char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+ , integer *, integer *, complex *, integer *, complex *, complex *
+ , integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CGEQRF computes a QR factorization of a complex M-by-N matrix A:
+ A = Q * R.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, the elements on and above the diagonal of the array
+ contain the min(M,N)-by-N upper trapezoidal matrix R (R is
+ upper triangular if m >= n); the elements below the diagonal,
+ with the array TAU, represent the unitary matrix Q as a
+ product of min(m,n) elementary reflectors (see Further
+ Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) COMPLEX array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "CGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ lwkopt = *n * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGEQRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "CGEQRF", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CGEQRF", " ", m, n, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+ i__1 = k - nx;
+ i__2 = nb;
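+	/* f2c rendering of the Fortran loop "DO I = 1, K-NX, NB": the step
+	   i__2 = nb is positive on this branch (nb >= nbmin >= 2), so the
+	   i__2 < 0 arm of the loop test below is never taken. */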
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/*
+ Compute the QR factorization of the current block
+ A(i:m,i:i+ib-1)
+*/
+
+ i__3 = *m - i__ + 1;
+ cgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+ 1], &iinfo);
+ if (i__ + ib <= *n) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__3 = *m - i__ + 1;
+ clarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i:m,i+ib:n) from the left */
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ - ib + 1;
+ clarfb_("Left", "Conjugate transpose", "Forward", "Columnwise"
+ , &i__3, &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &
+ work[1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda,
+ &work[ib + 1], &ldwork);
+ }
+/* L10: */
+ }
+ } else {
+ i__ = 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+ if (i__ <= k) {
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ cgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+ , &iinfo);
+ }
+
+ work[1].r = (real) iws, work[1].i = 0.f;
+ return 0;
+
+/* End of CGEQRF */
+
+} /* cgeqrf_ */
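+
+/*
+    A minimal, illustrative sketch of the LWORK = -1 workspace-query
+    protocol documented in the CGEQRF header above.  It is not part of
+    the translated LAPACK source and is never compiled (guarded by
+    #if 0); it assumes the f2c typedefs (integer, real, complex) from
+    "f2c.h", that <stdlib.h> is available, and the helper name below is
+    hypothetical.
+*/
+#if 0
+static int example_cgeqrf_usage(integer m, integer n, complex *a, integer lda)
+{
+    integer k = m < n ? m : n;
+    integer lwork = -1, info = 0;
+    complex wkopt;              /* receives the optimal LWORK in wkopt.r */
+    complex *tau = (complex *) malloc(sizeof(complex) * k);
+    complex *work;
+
+    /* First call: LWORK = -1 performs a workspace query only. */
+    cgeqrf_(&m, &n, a, &lda, tau, &wkopt, &lwork, &info);
+
+    /* Second call: allocate the suggested workspace and factor A = Q*R. */
+    lwork = (integer) wkopt.r;
+    work = (complex *) malloc(sizeof(complex) * lwork);
+    cgeqrf_(&m, &n, a, &lda, tau, work, &lwork, &info);
+
+    free(work);
+    free(tau);
+    return (int) info;          /* 0 on success, -i if argument i was bad */
+}
+#endif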
+
+/* Subroutine */ int cgesdd_(char *jobz, integer *m, integer *n, complex *a,
+ integer *lda, real *s, complex *u, integer *ldu, complex *vt, integer
+ *ldvt, complex *work, integer *lwork, real *rwork, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, ie, il, ir, iu, blk;
+ static real dum[1], eps;
+ static integer iru, ivt, iscl;
+ static real anrm;
+ static integer idum[1], ierr, itau, irvt;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ static integer chunk, minmn, wrkbl, itaup, itauq;
+ static logical wntqa;
+ static integer nwork;
+ extern /* Subroutine */ int clacp2_(char *, integer *, integer *, real *,
+ integer *, complex *, integer *);
+ static logical wntqn, wntqo, wntqs;
+ static integer mnthr1, mnthr2;
+ extern /* Subroutine */ int cgebrd_(integer *, integer *, complex *,
+ integer *, real *, real *, complex *, complex *, complex *,
+ integer *, integer *);
+ extern doublereal clange_(char *, integer *, integer *, complex *,
+ integer *, real *);
+ extern /* Subroutine */ int cgelqf_(integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, integer *), clacrm_(
+ integer *, integer *, complex *, integer *, real *, integer *,
+ complex *, integer *, real *), clarcm_(integer *, integer *, real
+ *, integer *, complex *, integer *, complex *, integer *, real *),
+ clascl_(char *, integer *, integer *, real *, real *, integer *,
+ integer *, complex *, integer *, integer *), sbdsdc_(char
+ *, char *, integer *, real *, real *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *, integer *), cgeqrf_(integer *, integer *, complex *, integer
+ *, complex *, complex *, integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), claset_(char *,
+ integer *, integer *, complex *, complex *, complex *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int cungbr_(char *, integer *, integer *, integer
+ *, complex *, integer *, complex *, complex *, integer *, integer
+ *);
+ static real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), cunmbr_(char *, char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), cunglq_(
+ integer *, integer *, integer *, complex *, integer *, complex *,
+ complex *, integer *, integer *);
+ static integer ldwrkl;
+ extern /* Subroutine */ int cungqr_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *);
+ static integer ldwrkr, minwrk, ldwrku, maxwrk, ldwkvt;
+ static real smlnum;
+ static logical wntqas, lquery;
+ static integer nrwork;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ CGESDD computes the singular value decomposition (SVD) of a complex
+ M-by-N matrix A, optionally computing the left and/or right singular
+    vectors, by using a divide-and-conquer method. The SVD is written
+
+ A = U * SIGMA * conjugate-transpose(V)
+
+ where SIGMA is an M-by-N matrix which is zero except for its
+ min(m,n) diagonal elements, U is an M-by-M unitary matrix, and
+ V is an N-by-N unitary matrix. The diagonal elements of SIGMA
+ are the singular values of A; they are real and non-negative, and
+ are returned in descending order. The first min(m,n) columns of
+ U and V are the left and right singular vectors of A.
+
+ Note that the routine returns VT = V**H, not V.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ JOBZ (input) CHARACTER*1
+ Specifies options for computing all or part of the matrix U:
+ = 'A': all M columns of U and all N rows of V**H are
+ returned in the arrays U and VT;
+ = 'S': the first min(M,N) columns of U and the first
+ min(M,N) rows of V**H are returned in the arrays U
+ and VT;
+ = 'O': If M >= N, the first N columns of U are overwritten
+ on the array A and all rows of V**H are returned in
+ the array VT;
+ otherwise, all columns of U are returned in the
+ array U and the first M rows of V**H are overwritten
+ in the array VT;
+ = 'N': no columns of U or rows of V**H are computed.
+
+ M (input) INTEGER
+ The number of rows of the input matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the input matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit,
+ if JOBZ = 'O', A is overwritten with the first N columns
+ of U (the left singular vectors, stored
+ columnwise) if M >= N;
+ A is overwritten with the first M rows
+ of V**H (the right singular vectors, stored
+ rowwise) otherwise.
+ if JOBZ .ne. 'O', the contents of A are destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ S (output) REAL array, dimension (min(M,N))
+ The singular values of A, sorted so that S(i) >= S(i+1).
+
+ U (output) COMPLEX array, dimension (LDU,UCOL)
+ UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N;
+ UCOL = min(M,N) if JOBZ = 'S'.
+ If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M
+ unitary matrix U;
+ if JOBZ = 'S', U contains the first min(M,N) columns of U
+ (the left singular vectors, stored columnwise);
+ if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= 1; if
+ JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M.
+
+ VT (output) COMPLEX array, dimension (LDVT,N)
+ If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the
+ N-by-N unitary matrix V**H;
+ if JOBZ = 'S', VT contains the first min(M,N) rows of
+ V**H (the right singular vectors, stored rowwise);
+ if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= 1; if
+ JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N;
+ if JOBZ = 'S', LDVT >= min(M,N).
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= 1.
+ if JOBZ = 'N', LWORK >= 2*min(M,N)+max(M,N).
+ if JOBZ = 'O',
+ LWORK >= 2*min(M,N)*min(M,N)+2*min(M,N)+max(M,N).
+ if JOBZ = 'S' or 'A',
+ LWORK >= min(M,N)*min(M,N)+2*min(M,N)+max(M,N).
+ For good performance, LWORK should generally be larger.
+            If LWORK = -1 but other input arguments are legal, WORK(1)
+            returns the optimal LWORK.
+
+ RWORK (workspace) REAL array, dimension (LRWORK)
+ If JOBZ = 'N', LRWORK >= 7*min(M,N).
+            Otherwise, LRWORK >= 5*min(M,N)*min(M,N) + 5*min(M,N).
+
+ IWORK (workspace) INTEGER array, dimension (8*min(M,N))
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The updating process of SBDSDC did not converge.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
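+
+/*
+    A minimal, illustrative sketch (never compiled; guarded by #if 0) of
+    driving cgesdd_ with JOBZ = 'S', sized from the minimum
+    LWORK/LRWORK/IWORK formulas quoted in the header above.  It assumes
+    the f2c typedefs from "f2c.h" and <stdlib.h>; every name below is
+    hypothetical.
+*/
+#if 0
+static int example_cgesdd_usage(integer m, integer n, complex *a, integer lda)
+{
+    integer mn = m < n ? m : n, mx = m > n ? m : n;
+    integer ldu = m, ldvt = mn, info = 0;
+    integer lwork = mn * mn + 2 * mn + mx;          /* JOBZ='S' minimum */
+    real *s = (real *) malloc(sizeof(real) * mn);
+    complex *u = (complex *) malloc(sizeof(complex) * ldu * mn);
+    complex *vt = (complex *) malloc(sizeof(complex) * ldvt * n);
+    complex *work = (complex *) malloc(sizeof(complex) * lwork);
+    real *rwork = (real *) malloc(sizeof(real) * (5 * mn * mn + 5 * mn));
+    integer *iwork = (integer *) malloc(sizeof(integer) * 8 * mn);
+
+    cgesdd_("S", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork,
+            rwork, iwork, &info);
+
+    free(iwork); free(rwork); free(work); free(vt); free(u); free(s);
+    return (int) info;    /* 0: ok, <0: bad argument, >0: SBDSDC failed */
+}
+#endif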
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ mnthr1 = (integer) (minmn * 17.f / 9.f);
+ mnthr2 = (integer) (minmn * 5.f / 3.f);
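+    /* Crossover thresholds derived from min(M,N): MNTHR1 ~ 1.89*min(M,N)
+       selects the QR/LQ-first paths (1-4), MNTHR2 ~ 1.67*min(M,N) the
+       CUNGBR-based path 5; anything smaller takes the direct
+       bidiagonalization path 6. */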
+ wntqa = lsame_(jobz, "A");
+ wntqs = lsame_(jobz, "S");
+ wntqas = (wntqa) || (wntqs);
+ wntqo = lsame_(jobz, "O");
+ wntqn = lsame_(jobz, "N");
+ minwrk = 1;
+ maxwrk = 1;
+ lquery = *lwork == -1;
+
+ if (! ((((wntqa) || (wntqs)) || (wntqo)) || (wntqn))) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (((*ldu < 1) || (wntqas && *ldu < *m)) || (wntqo && *m < *n && *
+ ldu < *m)) {
+ *info = -8;
+ } else if ((((*ldvt < 1) || (wntqa && *ldvt < *n)) || (wntqs && *ldvt <
+ minmn)) || (wntqo && *m >= *n && *ldvt < *n)) {
+ *info = -10;
+ }
+
+/*
+ Compute workspace
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ CWorkspace refers to complex workspace, and RWorkspace to
+ real workspace. NB refers to the optimal block size for the
+ immediately following subroutine, as returned by ILAENV.)
+*/
+
+ if (*info == 0 && *m > 0 && *n > 0) {
+ if (*m >= *n) {
+
+/*
+ There is no complex work space needed for bidiagonal SVD
+ The real work space needed for bidiagonal SVD is BDSPAC,
+ BDSPAC = 3*N*N + 4*N
+*/
+
+ if (*m >= mnthr1) {
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) *
+ ilaenv_(&c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl;
+ minwrk = *n * 3;
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "CUNGQR",
+ " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) *
+ ilaenv_(&c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNMBR", "QLN", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *n + *n * *n + wrkbl;
+ minwrk = ((*n) << (1)) * *n + *n * 3;
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "CUNGQR",
+ " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) *
+ ilaenv_(&c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNMBR", "QLN", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = *n * *n + *n * 3;
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "CGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "CUNGQR",
+ " ", m, m, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + ((*n) << (1)) *
+ ilaenv_(&c__1, "CGEBRD", " ", n, n, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNMBR", "QLN", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *n * *n + wrkbl;
+ minwrk = *n * *n + ((*n) << (1)) + *m;
+ }
+ } else if (*m >= mnthr2) {
+
+/* Path 5 (M much larger than N, but not as much as MNTHR1) */
+
+ maxwrk = ((*n) << (1)) + (*m + *n) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ minwrk = ((*n) << (1)) + *m;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, n, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *n * *n;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, n, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+
+/* Path 6 (M at least N, but not much larger) */
+
+ maxwrk = ((*n) << (1)) + (*m + *n) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ minwrk = ((*n) << (1)) + *m;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *n * *n;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNMBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *n * ilaenv_(&c__1,
+ "CUNGBR", "PRC", n, n, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + *m * ilaenv_(&c__1,
+ "CUNGBR", "QLN", m, m, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ } else {
+
+/*
+ There is no complex work space needed for bidiagonal SVD
+ The real work space needed for bidiagonal SVD is BDSPAC,
+ BDSPAC = 3*M*M + 4*M
+*/
+
+ if (*n >= mnthr1) {
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + ((*m) << (1)) *
+ ilaenv_(&c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3;
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "CUNGLQ",
+ " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + ((*m) << (1)) *
+ ilaenv_(&c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNMBR", "PRC", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *n + *m * *m + wrkbl;
+ minwrk = ((*m) << (1)) * *m + *m * 3;
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "CUNGLQ",
+ " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + ((*m) << (1)) *
+ ilaenv_(&c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNMBR", "PRC", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = *m * *m + *m * 3;
+ } else if (wntqa) {
+
+/* Path 4t (N much larger than M, JOBZ='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "CGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "CUNGLQ",
+ " ", n, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + ((*m) << (1)) *
+ ilaenv_(&c__1, "CGEBRD", " ", m, m, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNMBR", "PRC", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, m, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ wrkbl = max(i__1,i__2);
+ maxwrk = *m * *m + wrkbl;
+ minwrk = *m * *m + ((*m) << (1)) + *n;
+ }
+ } else if (*n >= mnthr2) {
+
+/* Path 5t (N much larger than M, but not as much as MNTHR1) */
+
+ maxwrk = ((*m) << (1)) + (*m + *n) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ minwrk = ((*m) << (1)) + *n;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNGBR", "P", m, n, m, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *m * *m;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNGBR", "P", m, n, m, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *n * ilaenv_(&c__1,
+ "CUNGBR", "P", n, n, m, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNGBR", "Q", m, m, n, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ maxwrk = max(i__1,i__2);
+ }
+ } else {
+
+/* Path 6t (N greater than M, but not much larger) */
+
+ maxwrk = ((*m) << (1)) + (*m + *n) * ilaenv_(&c__1, "CGEBRD",
+ " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ minwrk = ((*m) << (1)) + *n;
+ if (wntqo) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNMBR", "PRC", m, n, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNMBR", "QLN", m, m, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ maxwrk += *m * *n;
+ minwrk += *m * *m;
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNGBR", "PRC", m, n, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNGBR", "QLN", m, m, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *n * ilaenv_(&c__1,
+ "CUNGBR", "PRC", n, n, m, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*m) << (1)) + *m * ilaenv_(&c__1,
+ "CUNGBR", "QLN", m, m, n, &c_n1, (ftnlen)6, (
+ ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ }
+ }
+ }
+ maxwrk = max(maxwrk,minwrk);
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+ }
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGESDD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ if (*lwork >= 1) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = sqrt(slamch_("S")) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = clange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+ iscl = 1;
+ clascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ clascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
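+    /* (If scaling was applied, ISCL = 1 records it so the computed
+       singular values can be rescaled by the inverse factor later in
+       this routine, before returning.) */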
+
+ if (*m >= *n) {
+
+/*
+ A has at least as many rows as columns. If A has sufficiently
+ more rows than columns, first reduce using the QR
+ decomposition (if sufficient workspace available)
+*/
+
+ if (*m >= mnthr1) {
+
+ if (wntqn) {
+
+/*
+ Path 1 (M much larger than N, JOBZ='N')
+ No singular vectors to be computed
+*/
+
+ itau = 1;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (CWorkspace: need 2*N, prefer N+N*NB)
+ (RWorkspace: need 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out below R */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ claset_("L", &i__1, &i__2, &c_b55, &c_b55, &a[a_dim1 + 2],
+ lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A
+ (CWorkspace: need 3*N, prefer 2*N+2*N*NB)
+ (RWorkspace: need N)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nrwork = ie + *n;
+
+/*
+ Perform bidiagonal SVD, compute singular values only
+ (CWorkspace: 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ sbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/*
+ Path 2 (M much larger than N, JOBZ='O')
+ N left singular vectors to be overwritten on A and
+ N right singular vectors to be computed in VT
+*/
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ ir = iu + ldwrku * *n;
+ if (*lwork >= *m * *n + *n * *n + *n * 3) {
+
+/* WORK(IR) is M by N */
+
+ ldwrkr = *m;
+ } else {
+ ldwrkr = (*lwork - *n * *n - *n * 3) / *n;
+ }
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (CWorkspace: need N*N+2*N, prefer M*N+N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy R to WORK( IR ), zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ claset_("L", &i__1, &i__2, &c_b55, &c_b55, &work[ir + 1], &
+ ldwrkr);
+
+/*
+ Generate Q in A
+ (CWorkspace: need 2*N, prefer N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in WORK(IR)
+ (CWorkspace: need N*N+3*N, prefer M*N+2*N+2*N*NB)
+ (RWorkspace: need N)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of R in WORK(IRU) and computing right singular vectors
+ of R in WORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix WORK(IU)
+ Overwrite WORK(IU) by the left singular vectors of R
+ (CWorkspace: need 2*N*N+3*N, prefer M*N+N*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__1, &
+ ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by the right singular vectors of R
+ (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/*
+ Multiply Q in A by left singular vectors of R in
+ WORK(IU), storing result in WORK(IR) and copying to A
+ (CWorkspace: need 2*N*N, prefer N*N+M*N)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *m;
+ i__2 = ldwrkr;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ cgemm_("N", "N", &chunk, n, n, &c_b56, &a[i__ + a_dim1],
+ lda, &work[iu], &ldwrku, &c_b55, &work[ir], &
+ ldwrkr);
+ clacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Path 3 (M much larger than N, JOBZ='S')
+ N left singular vectors to be computed in U and
+ N right singular vectors to be computed in VT
+*/
+
+ ir = 1;
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (CWorkspace: need N*N+2*N, prefer N*N+N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ clacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ claset_("L", &i__2, &i__1, &c_b55, &c_b55, &work[ir + 1], &
+ ldwrkr);
+
+/*
+ Generate Q in A
+ (CWorkspace: need 2*N, prefer N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cungqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in WORK(IR)
+ (CWorkspace: need N*N+3*N, prefer N*N+2*N+2*N*NB)
+ (RWorkspace: need N)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of R
+ (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of R
+ (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply Q in A by left singular vectors of R in
+ WORK(IR), storing result in U
+ (CWorkspace: need N*N)
+ (RWorkspace: 0)
+*/
+
+ clacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr);
+ cgemm_("N", "N", m, n, n, &c_b56, &a[a_offset], lda, &work[ir]
+ , &ldwrkr, &c_b55, &u[u_offset], ldu);
+
+ } else if (wntqa) {
+
+/*
+ Path 4 (M much larger than N, JOBZ='A')
+ M left singular vectors to be computed in U and
+ N right singular vectors to be computed in VT
+*/
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ itau = iu + ldwrku * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R, copying result to U
+ (CWorkspace: need 2*N, prefer N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+/*
+ Generate Q in U
+ (CWorkspace: need N+M, prefer N+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cungqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork],
+ &i__2, &ierr);
+
+/* Produce R in A, zeroing out below it */
+
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ claset_("L", &i__2, &i__1, &c_b55, &c_b55, &a[a_dim1 + 2],
+ lda);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A
+ (CWorkspace: need 3*N, prefer 2*N+2*N*NB)
+ (RWorkspace: need N)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(n, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+ iru = ie + *n;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix WORK(IU)
+ Overwrite WORK(IU) by left singular vectors of R
+ (CWorkspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of R
+ (CWorkspace: need 3*N, prefer 2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply Q in U by left singular vectors of R in
+ WORK(IU), storing result in A
+ (CWorkspace: need N*N)
+ (RWorkspace: 0)
+*/
+
+ cgemm_("N", "N", m, n, n, &c_b56, &u[u_offset], ldu, &work[iu]
+ , &ldwrku, &c_b55, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ clacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+ }
+
+ } else if (*m >= mnthr2) {
+
+/*
+ MNTHR2 <= M < MNTHR1
+
+ Path 5 (M much larger than N, but not as much as MNTHR1)
+ Reduce to bidiagonal form without QR decomposition, use
+ CUNGBR and matrix multiplication to compute singular vectors
+*/
+
+ ie = 1;
+ nrwork = ie + *n;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize A
+ (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB)
+ (RWorkspace: need N)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/*
+ Compute singular values only
+ (Cworkspace: 0)
+ (Rworkspace: need BDSPAC)
+*/
+
+ sbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+
+/*
+ Copy A to VT, generate P**H
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: 0)
+*/
+
+ clacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Generate Q in A
+ (CWorkspace: need 2*N, prefer N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *n * 3) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ } else {
+
+/* WORK(IU) is LDWRKU by N */
+
+ ldwrku = (*lwork - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Multiply real matrix RWORK(IRVT) by P**H in VT,
+ storing the result in WORK(IU), copying to VT
+ (Cworkspace: need 0)
+ (Rworkspace: need 3*N*N)
+*/
+
+ clarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &work[iu]
+ , &ldwrku, &rwork[nrwork]);
+ clacpy_("F", n, n, &work[iu], &ldwrku, &vt[vt_offset], ldvt);
+
+/*
+ Multiply Q in A by real matrix RWORK(IRU), storing the
+ result in WORK(IU), copying to A
+ (CWorkspace: need N*N, prefer M*N)
+ (Rworkspace: need 3*N*N, prefer N*N+2*M*N)
+*/
+
+ nrwork = irvt;
+ i__2 = *m;
+ i__1 = ldwrku;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrku);
+ clacrm_(&chunk, n, &a[i__ + a_dim1], lda, &rwork[iru], n,
+ &work[iu], &ldwrku, &rwork[nrwork]);
+ clacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Copy A to VT, generate P**H
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: 0)
+*/
+
+ clacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__1, &ierr);
+
+/*
+ Copy A to U, generate Q
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: 0)
+*/
+
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cungbr_("Q", m, n, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Multiply real matrix RWORK(IRVT) by P**H in VT,
+ storing the result in A, copying to VT
+ (Cworkspace: need 0)
+ (Rworkspace: need 3*N*N)
+*/
+
+ clarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ clacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/*
+ Multiply Q in U by real matrix RWORK(IRU), storing the
+ result in A, copying to U
+ (CWorkspace: need 0)
+ (Rworkspace: need N*N+2*M*N)
+*/
+
+ nrwork = irvt;
+ clacrm_(m, n, &u[u_offset], ldu, &rwork[iru], n, &a[a_offset],
+ lda, &rwork[nrwork]);
+ clacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ } else {
+
+/*
+ Copy A to VT, generate P**H
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: 0)
+*/
+
+ clacpy_("U", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cungbr_("P", n, n, n, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__1, &ierr);
+
+/*
+ Copy A to U, generate Q
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: 0)
+*/
+
+ clacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Multiply real matrix RWORK(IRVT) by P**H in VT,
+ storing the result in A, copying to VT
+ (Cworkspace: need 0)
+ (Rworkspace: need 3*N*N)
+*/
+
+ clarcm_(n, n, &rwork[irvt], n, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ clacpy_("F", n, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/*
+ Multiply Q in U by real matrix RWORK(IRU), storing the
+ result in A, copying to U
+ (CWorkspace: 0)
+ (Rworkspace: need 3*N*N)
+*/
+
+ nrwork = irvt;
+ clacrm_(m, n, &u[u_offset], ldu, &rwork[iru], n, &a[a_offset],
+ lda, &rwork[nrwork]);
+ clacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+ }
+
+ } else {
+
+/*
+ M .LT. MNTHR2
+
+ Path 6 (M at least N, but not much larger)
+ Reduce to bidiagonal form without QR decomposition
+ Use CUNMBR to compute singular vectors
+*/
+
+ ie = 1;
+ nrwork = ie + *n;
+ itauq = 1;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize A
+ (CWorkspace: need 2*N+M, prefer 2*N+(M+N)*NB)
+ (RWorkspace: need N)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, &ierr);
+ if (wntqn) {
+
+/*
+ Compute singular values only
+ (Cworkspace: 0)
+ (Rworkspace: need BDSPAC)
+*/
+
+ sbdsdc_("U", "N", n, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ if (*lwork >= *m * *n + *n * 3) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ } else {
+
+/* WORK( IU ) is LDWRKU by N */
+
+ ldwrku = (*lwork - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of A
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: need 0)
+*/
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+ if (*lwork >= *m * *n + *n * 3) {
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix WORK(IU)
+ Overwrite WORK(IU) by left singular vectors of A, copying
+ to A
+ (Cworkspace: need M*N+2*N, prefer M*N+N+N*NB)
+ (Rworkspace: need 0)
+*/
+
+ claset_("F", m, n, &c_b55, &c_b55, &work[iu], &ldwrku);
+ clacp2_("F", n, n, &rwork[iru], n, &work[iu], &ldwrku);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__1, &
+ ierr);
+ clacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda);
+ } else {
+
+/*
+ Generate Q in A
+ (Cworkspace: need 2*N, prefer N+N*NB)
+ (Rworkspace: need 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cungbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[nwork], &i__1, &ierr);
+
+/*
+ Multiply Q in A by real matrix RWORK(IRU), storing the
+ result in WORK(IU), copying to A
+ (CWorkspace: need N*N, prefer M*N)
+ (Rworkspace: need 3*N*N, prefer N*N+2*M*N)
+*/
+
+ nrwork = irvt;
+ i__1 = *m;
+ i__2 = ldwrku;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrku);
+ clacrm_(&chunk, n, &a[i__ + a_dim1], lda, &rwork[iru],
+ n, &work[iu], &ldwrku, &rwork[nrwork]);
+ clacpy_("F", &chunk, n, &work[iu], &ldwrku, &a[i__ +
+ a_dim1], lda);
+/* L30: */
+ }
+ }
+
+ } else if (wntqs) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of A
+ (CWorkspace: need 3*N, prefer 2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ claset_("F", m, n, &c_b55, &c_b55, &u[u_offset], ldu);
+ clacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of A
+ (CWorkspace: need 3*N, prefer 2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+ } else {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = nrwork;
+ irvt = iru + *n * *n;
+ nrwork = irvt + *n * *n;
+ sbdsdc_("U", "I", n, &s[1], &rwork[ie], &rwork[iru], n, &
+ rwork[irvt], n, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/* Set the right corner of U to identity matrix */
+
+ claset_("F", m, m, &c_b55, &c_b55, &u[u_offset], ldu);
+ i__2 = *m - *n;
+ i__1 = *m - *n;
+ claset_("F", &i__2, &i__1, &c_b55, &c_b56, &u[*n + 1 + (*n +
+ 1) * u_dim1], ldu);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of A
+ (CWorkspace: need 2*N+M, prefer 2*N+M*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", n, n, &rwork[iru], n, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of A
+ (CWorkspace: need 3*N, prefer 2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", n, n, &rwork[irvt], n, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+ }
+
+ }
+
+ } else {
+
+/*
+ A has more columns than rows. If A has sufficiently more
+ columns than rows, first reduce using the LQ decomposition
+ (if sufficient workspace available)
+*/
+
+ if (*n >= mnthr1) {
+
+ if (wntqn) {
+
+/*
+ Path 1t (N much larger than M, JOBZ='N')
+ No singular vectors to be computed
+*/
+
+ itau = 1;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (CWorkspace: need 2*M, prefer M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Zero out above L */
+
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ claset_("U", &i__2, &i__1, &c_b55, &c_b55, &a[((a_dim1) << (1)
+ ) + 1], lda);
+ ie = 1;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in A
+ (CWorkspace: need 3*M, prefer 2*M+2*M*NB)
+ (RWorkspace: need M)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+ nrwork = ie + *m;
+
+/*
+ Perform bidiagonal SVD, compute singular values only
+ (CWorkspace: 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ sbdsdc_("U", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/*
+ Path 2t (N much larger than M, JOBZ='O')
+ M right singular vectors to be overwritten on A and
+ M left singular vectors to be computed in U
+*/
+
+ ivt = 1;
+ ldwkvt = *m;
+
+/* WORK(IVT) is M by M */
+
+ il = ivt + ldwkvt * *m;
+ if (*lwork >= *m * *n + *m * *m + *m * 3) {
+
+/* WORK(IL) is M by N */
+
+ ldwrkl = *m;
+ chunk = *n;
+ } else {
+
+/* WORK(IL) is M by CHUNK */
+
+ ldwrkl = *m;
+ chunk = (*lwork - *m * *m - *m * 3) / *m;
+ }
+ itau = il + ldwrkl * chunk;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (CWorkspace: need 2*M, prefer M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ claset_("U", &i__2, &i__1, &c_b55, &c_b55, &work[il + ldwrkl],
+ &ldwrkl);
+
+/*
+ Generate Q in A
+ (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IL)
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB)
+ (RWorkspace: need M)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(m, m, &work[il], &ldwrkl, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ sbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+    Copy real matrix RWORK(IRU) to complex matrix U
+    Overwrite U by the left singular vectors of L
+ (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT)
+ Overwrite WORK(IVT) by the right singular vectors of L
+ (CWorkspace: need N*N+3*N, prefer M*N+2*N+N*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+    Multiply right singular vectors of L in WORK(IVT) by Q
+ in A, storing result in WORK(IL) and copying to A
+ (CWorkspace: need 2*M*M, prefer M*M+M*N))
+ (RWorkspace: 0)
+*/
+
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ cgemm_("N", "N", m, &blk, m, &c_b56, &work[ivt], m, &a[
+ i__ * a_dim1 + 1], lda, &c_b55, &work[il], &
+ ldwrkl);
+ clacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1
+ + 1], lda);
+/* L40: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Path 3t (N much larger than M, JOBZ='S')
+ M right singular vectors to be computed in VT and
+ M left singular vectors to be computed in U
+*/
+
+ il = 1;
+
+/* WORK(IL) is M by M */
+
+ ldwrkl = *m;
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (CWorkspace: need 2*M, prefer M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ clacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ claset_("U", &i__1, &i__2, &c_b55, &c_b55, &work[il + ldwrkl],
+ &ldwrkl);
+
+/*
+ Generate Q in A
+ (CWorkspace: need M*M+2*M, prefer M*M+M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IL)
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB)
+ (RWorkspace: need M)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, m, &work[il], &ldwrkl, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ sbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of L
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+    Overwrite VT by right singular vectors of L
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/*
+ Copy VT to WORK(IL), multiply right singular vectors of L
+ in WORK(IL) by Q in A, storing result in VT
+ (CWorkspace: need M*M)
+ (RWorkspace: 0)
+*/
+
+ clacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl);
+ cgemm_("N", "N", m, n, m, &c_b56, &work[il], &ldwrkl, &a[
+ a_offset], lda, &c_b55, &vt[vt_offset], ldvt);
+
+ } else if (wntqa) {
+
+/*
+    Path 4t (N much larger than M, JOBZ='A')
+ N right singular vectors to be computed in VT and
+ M left singular vectors to be computed in U
+*/
+
+ ivt = 1;
+
+/* WORK(IVT) is M by M */
+
+ ldwkvt = *m;
+ itau = ivt + ldwkvt * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q, copying result to VT
+ (CWorkspace: need 2*M, prefer M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/*
+ Generate Q in VT
+ (CWorkspace: need M+N, prefer M+N*NB)
+ (RWorkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cunglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[
+ nwork], &i__1, &ierr);
+
+/* Produce L in A, zeroing out above it */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ claset_("U", &i__1, &i__2, &c_b55, &c_b55, &a[((a_dim1) << (1)
+ ) + 1], lda);
+ ie = 1;
+ itauq = itau;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in A
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+2*M*NB)
+ (RWorkspace: need M)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, m, &a[a_offset], lda, &s[1], &rwork[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ iru = ie + *m;
+ irvt = iru + *m * *m;
+ nrwork = irvt + *m * *m;
+ sbdsdc_("U", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of L
+ (CWorkspace: need 3*M, prefer 2*M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT)
+ Overwrite WORK(IVT) by right singular vectors of L
+ (CWorkspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+ (RWorkspace: 0)
+*/
+
+ clacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", m, m, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__1, &
+ ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IVT) by
+ Q in VT, storing result in A
+ (CWorkspace: need M*M)
+ (RWorkspace: 0)
+*/
+
+ cgemm_("N", "N", m, n, m, &c_b56, &work[ivt], &ldwkvt, &vt[
+ vt_offset], ldvt, &c_b55, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ clacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+ }
+
+ } else if (*n >= mnthr2) {
+
+/*
+ MNTHR2 <= N < MNTHR1
+
+ Path 5t (N much larger than M, but not as much as MNTHR1)
+ Reduce to bidiagonal form without QR decomposition, use
+ CUNGBR and matrix multiplication to compute singular vectors
+*/
+
+ ie = 1;
+ nrwork = ie + *m;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize A
+ (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB)
+ (RWorkspace: M)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, &ierr);
+
+ if (wntqn) {
+
+/*
+ Compute singular values only
+ (Cworkspace: 0)
+ (Rworkspace: need BDSPAC)
+*/
+
+ sbdsdc_("L", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ ivt = nwork;
+
+/*
+ Copy A to U, generate Q
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ clacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__1, &ierr);
+
+/*
+ Generate P**H in A
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ cungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &work[
+ nwork], &i__1, &ierr);
+
+ ldwkvt = *m;
+ if (*lwork >= *m * *n + *m * 3) {
+
+/* WORK( IVT ) is M by N */
+
+ nwork = ivt + ldwkvt * *n;
+ chunk = *n;
+ } else {
+
+/* WORK( IVT ) is M by CHUNK */
+
+ chunk = (*lwork - *m * 3) / *m;
+ nwork = ivt + ldwkvt * chunk;
+ }
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+   Multiply Q in U by real matrix RWORK(IRU), storing the
+   result in WORK(IVT), copying to U
+ (Cworkspace: need 0)
+ (Rworkspace: need 2*M*M)
+*/
+
+ clacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &work[ivt], &
+ ldwkvt, &rwork[nrwork]);
+ clacpy_("F", m, m, &work[ivt], &ldwkvt, &u[u_offset], ldu);
+
+/*
+ Multiply RWORK(IRVT) by P**H in A, storing the
+ result in WORK(IVT), copying to A
+ (CWorkspace: need M*M, prefer M*N)
+ (Rworkspace: need 2*M*M, prefer 2*M*N)
+*/
+
+ nrwork = iru;
+ i__1 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ clarcm_(m, &blk, &rwork[irvt], m, &a[i__ * a_dim1 + 1],
+ lda, &work[ivt], &ldwkvt, &rwork[nrwork]);
+ clacpy_("F", m, &blk, &work[ivt], &ldwkvt, &a[i__ *
+ a_dim1 + 1], lda);
+/* L50: */
+ }
+ } else if (wntqs) {
+
+/*
+ Copy A to U, generate Q
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ clacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+/*
+ Copy A to VT, generate P**H
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cungbr_("P", m, n, m, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Multiply Q in U by real matrix RWORK(IRU), storing the
+ result in A, copying to U
+ (CWorkspace: need 0)
+ (Rworkspace: need 3*M*M)
+*/
+
+ clacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &a[a_offset],
+ lda, &rwork[nrwork]);
+ clacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+
+/*
+ Multiply real matrix RWORK(IRVT) by P**H in VT,
+ storing the result in A, copying to VT
+ (Cworkspace: need 0)
+ (Rworkspace: need M*M+2*M*N)
+*/
+
+ nrwork = iru;
+ clarcm_(m, n, &rwork[irvt], m, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ clacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ } else {
+
+/*
+ Copy A to U, generate Q
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ clacpy_("L", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cungbr_("Q", m, m, n, &u[u_offset], ldu, &work[itauq], &work[
+ nwork], &i__2, &ierr);
+
+/*
+ Copy A to VT, generate P**H
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: 0)
+*/
+
+ clacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ i__2 = *lwork - nwork + 1;
+ cungbr_("P", n, n, m, &vt[vt_offset], ldvt, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Multiply Q in U by real matrix RWORK(IRU), storing the
+ result in A, copying to U
+ (CWorkspace: need 0)
+ (Rworkspace: need 3*M*M)
+*/
+
+ clacrm_(m, m, &u[u_offset], ldu, &rwork[iru], m, &a[a_offset],
+ lda, &rwork[nrwork]);
+ clacpy_("F", m, m, &a[a_offset], lda, &u[u_offset], ldu);
+
+/*
+ Multiply real matrix RWORK(IRVT) by P**H in VT,
+ storing the result in A, copying to VT
+ (Cworkspace: need 0)
+ (Rworkspace: need M*M+2*M*N)
+*/
+
+ clarcm_(m, n, &rwork[irvt], m, &vt[vt_offset], ldvt, &a[
+ a_offset], lda, &rwork[nrwork]);
+ clacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+ }
+
+ } else {
+
+/*
+ N .LT. MNTHR2
+
+ Path 6t (N greater than M, but not much larger)
+ Reduce to bidiagonal form without LQ decomposition
+ Use CUNMBR to compute singular vectors
+*/
+
+ ie = 1;
+ nrwork = ie + *m;
+ itauq = 1;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize A
+ (CWorkspace: need 2*M+N, prefer 2*M+(M+N)*NB)
+ (RWorkspace: M)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cgebrd_(m, n, &a[a_offset], lda, &s[1], &rwork[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/*
+ Compute singular values only
+ (Cworkspace: 0)
+ (Rworkspace: need BDSPAC)
+*/
+
+ sbdsdc_("L", "N", m, &s[1], &rwork[ie], dum, &c__1, dum, &
+ c__1, dum, idum, &rwork[nrwork], &iwork[1], info);
+ } else if (wntqo) {
+ ldwkvt = *m;
+ ivt = nwork;
+ if (*lwork >= *m * *n + *m * 3) {
+
+/* WORK( IVT ) is M by N */
+
+ claset_("F", m, n, &c_b55, &c_b55, &work[ivt], &ldwkvt);
+ nwork = ivt + ldwkvt * *n;
+ } else {
+
+/* WORK( IVT ) is M by CHUNK */
+
+ chunk = (*lwork - *m * 3) / *m;
+ nwork = ivt + ldwkvt * chunk;
+ }
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of A
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: need 0)
+*/
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *m * 3) {
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix WORK(IVT)
+ Overwrite WORK(IVT) by right singular vectors of A,
+ copying to A
+ (Cworkspace: need M*N+2*M, prefer M*N+M+M*NB)
+ (Rworkspace: need 0)
+*/
+
+ clacp2_("F", m, m, &rwork[irvt], m, &work[ivt], &ldwkvt);
+ i__2 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", m, n, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2,
+ &ierr);
+ clacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda);
+ } else {
+
+/*
+ Generate P**H in A
+ (Cworkspace: need 2*M, prefer M+M*NB)
+ (Rworkspace: need 0)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ cungbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/*
+   Multiply real matrix RWORK(IRVT) by P**H in A, storing the
+   result in WORK(IVT), copying to A
+ (CWorkspace: need M*M, prefer M*N)
+ (Rworkspace: need 3*M*M, prefer M*M+2*M*N)
+*/
+
+ nrwork = iru;
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ clarcm_(m, &blk, &rwork[irvt], m, &a[i__ * a_dim1 + 1]
+ , lda, &work[ivt], &ldwkvt, &rwork[nrwork]);
+ clacpy_("F", m, &blk, &work[ivt], &ldwkvt, &a[i__ *
+ a_dim1 + 1], lda);
+/* L60: */
+ }
+ }
+ } else if (wntqs) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of A
+ (CWorkspace: need 3*M, prefer 2*M+M*NB)
+ (RWorkspace: M*M)
+*/
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of A
+ (CWorkspace: need 3*M, prefer 2*M+M*NB)
+ (RWorkspace: M*M)
+*/
+
+ claset_("F", m, n, &c_b55, &c_b55, &vt[vt_offset], ldvt);
+ clacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", m, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in RWORK(IRU) and computing right
+ singular vectors of bidiagonal matrix in RWORK(IRVT)
+ (CWorkspace: need 0)
+ (RWorkspace: need BDSPAC)
+*/
+
+ irvt = nrwork;
+ iru = irvt + *m * *m;
+ nrwork = iru + *m * *m;
+
+ sbdsdc_("L", "I", m, &s[1], &rwork[ie], &rwork[iru], m, &
+ rwork[irvt], m, dum, idum, &rwork[nrwork], &iwork[1],
+ info);
+
+/*
+ Copy real matrix RWORK(IRU) to complex matrix U
+ Overwrite U by left singular vectors of A
+ (CWorkspace: need 3*M, prefer 2*M+M*NB)
+ (RWorkspace: M*M)
+*/
+
+ clacp2_("F", m, m, &rwork[iru], m, &u[u_offset], ldu);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+
+/* Set the right corner of VT to identity matrix */
+
+ i__1 = *n - *m;
+ i__2 = *n - *m;
+ claset_("F", &i__1, &i__2, &c_b55, &c_b56, &vt[*m + 1 + (*m +
+ 1) * vt_dim1], ldvt);
+
+/*
+ Copy real matrix RWORK(IRVT) to complex matrix VT
+ Overwrite VT by right singular vectors of A
+ (CWorkspace: need 2*M+N, prefer 2*M+N*NB)
+ (RWorkspace: M*M)
+*/
+
+ claset_("F", n, n, &c_b55, &c_b55, &vt[vt_offset], ldvt);
+ clacp2_("F", m, m, &rwork[irvt], m, &vt[vt_offset], ldvt);
+ i__1 = *lwork - nwork + 1;
+ cunmbr_("P", "R", "C", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1].r = (real) maxwrk, work[1].i = 0.f;
+
+ return 0;
+
+/* End of CGESDD */
+
+} /* cgesdd_ */
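+
+/*
+   Illustrative sketch (not part of the translated LAPACK source): the
+   L50/L60 loops above walk A in column panels of width CHUNK so that
+   the M-by-CHUNK scratch buffer WORK(IVT) fits in the workspace that
+   is actually available.  The same blocking pattern in plain C, with a
+   hypothetical process_panel callback standing in for the
+   CLARCM-then-CLACPY pair; excluded from compilation on purpose.
+*/
+#if 0
+static void blocked_columns(int m, int n, int chunk,
+                            void (*process_panel)(int m, int blk, int j0))
+{
+    int j;
+    for (j = 0; j < n; j += chunk) {
+        /* BLK = MIN(N-J+1, CHUNK) in the 1-based Fortran indexing */
+        int blk = (n - j < chunk) ? (n - j) : chunk;
+        process_panel(m, blk, j);
+    }
+}
+#endif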
+
+/* Subroutine */ int cgesv_(integer *n, integer *nrhs, complex *a, integer *
+ lda, integer *ipiv, complex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int cgetrf_(integer *, integer *, complex *,
+ integer *, integer *, integer *), xerbla_(char *, integer *), cgetrs_(char *, integer *, integer *, complex *, integer
+ *, integer *, complex *, integer *, integer *);
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ CGESV computes the solution to a complex system of linear equations
+ A * X = B,
+ where A is an N-by-N matrix and X and B are N-by-NRHS matrices.
+
+ The LU decomposition with partial pivoting and row interchanges is
+ used to factor A as
+ A = P * L * U,
+ where P is a permutation matrix, L is unit lower triangular, and U is
+ upper triangular. The factored form of A is then used to solve the
+ system of equations A * X = B.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of linear equations, i.e., the order of the
+ matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrix B. NRHS >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the N-by-N coefficient matrix A.
+ On exit, the factors L and U from the factorization
+ A = P*L*U; the unit diagonal elements of L are not stored.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ IPIV (output) INTEGER array, dimension (N)
+ The pivot indices that define the permutation matrix P;
+ row i of the matrix was interchanged with row IPIV(i).
+
+ B (input/output) COMPLEX array, dimension (LDB,NRHS)
+          On entry, the N-by-NRHS right hand side matrix B.
+ On exit, if INFO = 0, the N-by-NRHS solution matrix X.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, U(i,i) is exactly zero. The factorization
+ has been completed, but the factor U is exactly
+ singular, so the solution could not be computed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGESV ", &i__1);
+ return 0;
+ }
+
+/* Compute the LU factorization of A. */
+
+ cgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ cgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[
+ b_offset], ldb, info);
+ }
+ return 0;
+
+/* End of CGESV */
+
+} /* cgesv_ */
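+
+/*
+   Illustrative sketch (not part of the translated LAPACK source):
+   minimal use of cgesv_ above, assuming the f2c typedefs (integer, and
+   complex with .r/.i float members) from the f2c header included by
+   this file.  Arrays are column major, as in Fortran.  Excluded from
+   compilation.
+*/
+#if 0
+static int example_cgesv(void)
+{
+    /* Solve A*x = b for [1 2; 3 4] * x = (5, 11); expect x = (1, 2). */
+    complex a[4] = { {1.f, 0.f}, {3.f, 0.f},   /* column 1 */
+                     {2.f, 0.f}, {4.f, 0.f} }; /* column 2 */
+    complex b[2] = { {5.f, 0.f}, {11.f, 0.f} };
+    integer n = 2, nrhs = 1, lda = 2, ldb = 2, ipiv[2], info;
+
+    cgesv_(&n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
+    /* On success info == 0 and b holds the solution x. */
+    return (int) info;
+}
+#endif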
+
+/* Subroutine */ int cgetf2_(integer *m, integer *n, complex *a, integer *lda,
+ integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ static integer j, jp;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), cgeru_(integer *, integer *, complex *, complex *,
+ integer *, complex *, integer *, complex *, integer *), cswap_(
+ integer *, complex *, integer *, complex *, integer *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CGETF2 computes an LU factorization of a general m-by-n matrix A
+ using partial pivoting with row interchanges.
+
+ The factorization has the form
+ A = P * L * U
+ where P is a permutation matrix, L is lower triangular with unit
+ diagonal elements (lower trapezoidal if m > n), and U is upper
+ triangular (upper trapezoidal if m < n).
+
+ This is the right-looking Level 2 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the m by n matrix to be factored.
+ On exit, the factors L and U from the factorization
+ A = P*L*U; the unit diagonal elements of L are not stored.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ IPIV (output) INTEGER array, dimension (min(M,N))
+ The pivot indices; for 1 <= i <= min(M,N), row i of the
+ matrix was interchanged with row IPIV(i).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+ > 0: if INFO = k, U(k,k) is exactly zero. The factorization
+ has been completed, but the factor U is exactly
+ singular, and division by zero will occur if it is used
+ to solve a system of equations.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGETF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot and test for singularity. */
+
+ i__2 = *m - j + 1;
+ jp = j - 1 + icamax_(&i__2, &a[j + j * a_dim1], &c__1);
+ ipiv[j] = jp;
+ i__2 = jp + j * a_dim1;
+ if ((a[i__2].r != 0.f) || (a[i__2].i != 0.f)) {
+
+/* Apply the interchange to columns 1:N. */
+
+ if (jp != j) {
+ cswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
+ }
+
+/* Compute elements J+1:M of J-th column. */
+
+ if (j < *m) {
+ i__2 = *m - j;
+ c_div(&q__1, &c_b56, &a[j + j * a_dim1]);
+ cscal_(&i__2, &q__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+
+ } else if (*info == 0) {
+
+ *info = j;
+ }
+
+ if (j < min(*m,*n)) {
+
+/* Update trailing submatrix. */
+
+ i__2 = *m - j;
+ i__3 = *n - j;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgeru_(&i__2, &i__3, &q__1, &a[j + 1 + j * a_dim1], &c__1, &a[j +
+ (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1], lda)
+ ;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of CGETF2 */
+
+} /* cgetf2_ */
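+
+/*
+   Illustrative sketch (not part of the translated LAPACK source): the
+   cgetf2_ loop above, restated for a real, 0-based, column-major
+   m-by-n matrix so the pivot/swap/scale/update structure is easier to
+   follow.  Assumes fabs from <math.h>; pivot indices are stored
+   0-based here, unlike the 1-based IPIV above.  Excluded from
+   compilation.
+*/
+#if 0
+static int lu_unblocked(int m, int n, double *a, int lda, int *piv)
+{
+    int info = 0, i, j, k;
+    int mn = (m < n) ? m : n;
+    for (j = 0; j < mn; ++j) {
+        /* find the largest-magnitude pivot in column j, rows j..m-1 */
+        int jp = j;
+        for (i = j + 1; i < m; ++i)
+            if (fabs(a[i + j*lda]) > fabs(a[jp + j*lda])) jp = i;
+        piv[j] = jp;
+        if (a[jp + j*lda] != 0.0) {
+            if (jp != j)                      /* swap rows j and jp */
+                for (k = 0; k < n; ++k) {
+                    double t = a[j + k*lda];
+                    a[j + k*lda] = a[jp + k*lda];
+                    a[jp + k*lda] = t;
+                }
+            for (i = j + 1; i < m; ++i)       /* form the multipliers */
+                a[i + j*lda] /= a[j + j*lda];
+        } else if (info == 0) {
+            info = j + 1;                     /* U(j,j) exactly zero */
+        }
+        /* rank-1 update of the trailing submatrix (the CGERU call) */
+        for (k = j + 1; k < n; ++k)
+            for (i = j + 1; i < m; ++i)
+                a[i + k*lda] -= a[i + j*lda] * a[j + k*lda];
+    }
+    return info;
+}
+#endif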
+
+/* Subroutine */ int cgetrf_(integer *m, integer *n, complex *a, integer *lda,
+ integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__, j, jb, nb;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ static integer iinfo;
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), cgetf2_(integer *,
+ integer *, complex *, integer *, integer *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int claswp_(integer *, complex *, integer *,
+ integer *, integer *, integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CGETRF computes an LU factorization of a general M-by-N matrix A
+ using partial pivoting with row interchanges.
+
+ The factorization has the form
+ A = P * L * U
+ where P is a permutation matrix, L is lower triangular with unit
+ diagonal elements (lower trapezoidal if m > n), and U is upper
+ triangular (upper trapezoidal if m < n).
+
+ This is the right-looking Level 3 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the M-by-N matrix to be factored.
+ On exit, the factors L and U from the factorization
+ A = P*L*U; the unit diagonal elements of L are not stored.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ IPIV (output) INTEGER array, dimension (min(M,N))
+ The pivot indices; for 1 <= i <= min(M,N), row i of the
+ matrix was interchanged with row IPIV(i).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, U(i,i) is exactly zero. The factorization
+ has been completed, but the factor U is exactly
+ singular, and division by zero will occur if it is used
+ to solve a system of equations.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGETRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "CGETRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ if ((nb <= 1) || (nb >= min(*m,*n))) {
+
+/* Use unblocked code. */
+
+ cgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info);
+ } else {
+
+/* Use blocked code. */
+
+ i__1 = min(*m,*n);
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = min(*m,*n) - j + 1;
+ jb = min(i__3,nb);
+
+/*
+ Factor diagonal and subdiagonal blocks and test for exact
+ singularity.
+*/
+
+ i__3 = *m - j + 1;
+ cgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo);
+
+/* Adjust INFO and the pivot indices. */
+
+ if (*info == 0 && iinfo > 0) {
+ *info = iinfo + j - 1;
+ }
+/* Computing MIN */
+ i__4 = *m, i__5 = j + jb - 1;
+ i__3 = min(i__4,i__5);
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = j - 1 + ipiv[i__];
+/* L10: */
+ }
+
+/* Apply interchanges to columns 1:J-1. */
+
+ i__3 = j - 1;
+ i__4 = j + jb - 1;
+ claswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1);
+
+ if (j + jb <= *n) {
+
+/* Apply interchanges to columns J+JB:N. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j + jb - 1;
+ claswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, &
+ ipiv[1], &c__1);
+
+/* Compute block row of U. */
+
+ i__3 = *n - j - jb + 1;
+ ctrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, &
+ c_b56, &a[j + j * a_dim1], lda, &a[j + (j + jb) *
+ a_dim1], lda);
+ if (j + jb <= *m) {
+
+/* Update trailing submatrix. */
+
+ i__3 = *m - j - jb + 1;
+ i__4 = *n - j - jb + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "No transpose", &i__3, &i__4, &jb,
+ &q__1, &a[j + jb + j * a_dim1], lda, &a[j + (j +
+ jb) * a_dim1], lda, &c_b56, &a[j + jb + (j + jb) *
+ a_dim1], lda);
+ }
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of CGETRF */
+
+} /* cgetrf_ */
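+
+/*
+   Illustrative sketch (not part of the translated LAPACK source): IPIV
+   above is a swap sequence, not a permutation vector -- row i was
+   interchanged with row IPIV(i), applied in order i = 1..min(M,N).
+   Applying that sequence to one real right-hand-side vector (what
+   CLASWP does column-wise); 0-based C indices, hence the -1.  Excluded
+   from compilation.
+*/
+#if 0
+static void apply_pivots(int k, const int *ipiv, double *b)
+{
+    int i;
+    for (i = 0; i < k; ++i) {
+        int jp = ipiv[i] - 1;      /* IPIV is 1-based */
+        if (jp != i) {
+            double t = b[i];
+            b[i] = b[jp];
+            b[jp] = t;
+        }
+    }
+}
+#endif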
+
+/* Subroutine */ int cgetrs_(char *trans, integer *n, integer *nrhs, complex *
+ a, integer *lda, integer *ipiv, complex *b, integer *ldb, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), xerbla_(char *,
+ integer *), claswp_(integer *, complex *, integer *,
+ integer *, integer *, integer *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CGETRS solves a system of linear equations
+ A * X = B, A**T * X = B, or A**H * X = B
+ with a general N-by-N matrix A using the LU factorization computed
+ by CGETRF.
+
+ Arguments
+ =========
+
+ TRANS (input) CHARACTER*1
+ Specifies the form of the system of equations:
+ = 'N': A * X = B (No transpose)
+ = 'T': A**T * X = B (Transpose)
+ = 'C': A**H * X = B (Conjugate transpose)
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrix B. NRHS >= 0.
+
+ A (input) COMPLEX array, dimension (LDA,N)
+ The factors L and U from the factorization A = P*L*U
+ as computed by CGETRF.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ IPIV (input) INTEGER array, dimension (N)
+ The pivot indices from CGETRF; for 1<=i<=N, row i of the
+ matrix was interchanged with row IPIV(i).
+
+ B (input/output) COMPLEX array, dimension (LDB,NRHS)
+ On entry, the right hand side matrix B.
+ On exit, the solution matrix X.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CGETRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*n == 0) || (*nrhs == 0)) {
+ return 0;
+ }
+
+ if (notran) {
+
+/*
+ Solve A * X = B.
+
+ Apply row interchanges to the right hand sides.
+*/
+
+ claswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1);
+
+/* Solve L*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b56, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b56, &
+ a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/*
+ Solve A**T * X = B or A**H * X = B.
+
+ Solve U'*X = B, overwriting B with X.
+*/
+
+ ctrsm_("Left", "Upper", trans, "Non-unit", n, nrhs, &c_b56, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Lower", trans, "Unit", n, nrhs, &c_b56, &a[a_offset],
+ lda, &b[b_offset], ldb);
+
+/* Apply row interchanges to the solution vectors. */
+
+ claswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1);
+ }
+
+ return 0;
+
+/* End of CGETRS */
+
+} /* cgetrs_ */
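+
+/*
+   Illustrative sketch (not part of the translated LAPACK source):
+   cgesv_ above is exactly cgetrf_ followed by cgetrs_.  Splitting the
+   calls lets one factorization be reused for right-hand sides that
+   arrive later; f2c types as declared in this file.  Excluded from
+   compilation.
+*/
+#if 0
+static int factor_then_solve(integer n, complex *a, integer lda,
+                             integer *ipiv, complex *b, integer ldb,
+                             integer nrhs)
+{
+    integer info;
+    cgetrf_(&n, &n, a, &lda, ipiv, &info);
+    if (info != 0)
+        return (int) info;         /* illegal argument or singular U */
+    cgetrs_("No transpose", &n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
+    return (int) info;
+}
+#endif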
+
+/* Subroutine */ int cheevd_(char *jobz, char *uplo, integer *n, complex *a,
+ integer *lda, real *w, complex *work, integer *lwork, real *rwork,
+ integer *lrwork, integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real eps;
+ static integer inde;
+ static real anrm;
+ static integer imax;
+ static real rmin, rmax;
+ static integer lopt;
+ static real sigma;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ static integer lwmin, liopt;
+ static logical lower;
+ static integer llrwk, lropt;
+ static logical wantz;
+ static integer indwk2, llwrk2;
+ extern doublereal clanhe_(char *, char *, integer *, complex *, integer *,
+ real *);
+ static integer iscale;
+ extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), cstedc_(char *, integer *, real *, real *, complex *,
+ integer *, complex *, integer *, real *, integer *, integer *,
+ integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int chetrd_(char *, integer *, complex *, integer
+ *, real *, real *, complex *, complex *, integer *, integer *), clacpy_(char *, integer *, integer *, complex *, integer
+ *, complex *, integer *);
+ static real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static real bignum;
+ static integer indtau, indrwk, indwrk, liwmin;
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+ static integer lrwmin;
+ extern /* Subroutine */ int cunmtr_(char *, char *, char *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ static integer llwork;
+ static real smlnum;
+ static logical lquery;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CHEEVD computes all eigenvalues and, optionally, eigenvectors of a
+ complex Hermitian matrix A. If eigenvectors are desired, it uses a
+ divide and conquer algorithm.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ JOBZ (input) CHARACTER*1
+ = 'N': Compute eigenvalues only;
+ = 'V': Compute eigenvalues and eigenvectors.
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA, N)
+ On entry, the Hermitian matrix A. If UPLO = 'U', the
+ leading N-by-N upper triangular part of A contains the
+ upper triangular part of the matrix A. If UPLO = 'L',
+ the leading N-by-N lower triangular part of A contains
+ the lower triangular part of the matrix A.
+ On exit, if JOBZ = 'V', then if INFO = 0, A contains the
+ orthonormal eigenvectors of the matrix A.
+ If JOBZ = 'N', then on exit the lower triangle (if UPLO='L')
+ or the upper triangle (if UPLO='U') of A, including the
+ diagonal, is destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ W (output) REAL array, dimension (N)
+ If INFO = 0, the eigenvalues in ascending order.
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The length of the array WORK.
+ If N <= 1, LWORK must be at least 1.
+ If JOBZ = 'N' and N > 1, LWORK must be at least N + 1.
+ If JOBZ = 'V' and N > 1, LWORK must be at least 2*N + N**2.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ RWORK (workspace/output) REAL array,
+ dimension (LRWORK)
+ On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK.
+
+ LRWORK (input) INTEGER
+ The dimension of the array RWORK.
+ If N <= 1, LRWORK must be at least 1.
+ If JOBZ = 'N' and N > 1, LRWORK must be at least N.
+ If JOBZ = 'V' and N > 1, LRWORK must be at least
+ 1 + 5*N + 2*N**2.
+
+ If LRWORK = -1, then a workspace query is assumed; the
+ routine only calculates the optimal size of the RWORK array,
+ returns this value as the first entry of the RWORK array, and
+ no error message related to LRWORK is issued by XERBLA.
+
+ IWORK (workspace/output) INTEGER array, dimension (LIWORK)
+ On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
+
+ LIWORK (input) INTEGER
+ The dimension of the array IWORK.
+ If N <= 1, LIWORK must be at least 1.
+ If JOBZ = 'N' and N > 1, LIWORK must be at least 1.
+ If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N.
+
+ If LIWORK = -1, then a workspace query is assumed; the
+ routine only calculates the optimal size of the IWORK array,
+ returns this value as the first entry of the IWORK array, and
+ no error message related to LIWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, the algorithm failed to converge; i
+ off-diagonal elements of an intermediate tridiagonal
+ form did not converge to zero.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lower = lsame_(uplo, "L");
+ lquery = ((*lwork == -1) || (*lrwork == -1)) || (*liwork == -1);
+
+ *info = 0;
+ if (*n <= 1) {
+ lwmin = 1;
+ lrwmin = 1;
+ liwmin = 1;
+ lopt = lwmin;
+ lropt = lrwmin;
+ liopt = liwmin;
+ } else {
+ if (wantz) {
+ lwmin = ((*n) << (1)) + *n * *n;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 5 + 1 + ((i__1 * i__1) << (1));
+ liwmin = *n * 5 + 3;
+ } else {
+ lwmin = *n + 1;
+ lrwmin = *n;
+ liwmin = 1;
+ }
+ lopt = lwmin;
+ lropt = lrwmin;
+ liopt = liwmin;
+ }
+ if (! ((wantz) || (lsame_(jobz, "N")))) {
+ *info = -1;
+ } else if (! ((lower) || (lsame_(uplo, "U")))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -8;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -10;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ work[1].r = (real) lopt, work[1].i = 0.f;
+ rwork[1] = (real) lropt;
+ iwork[1] = liopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHEEVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ i__1 = a_dim1 + 1;
+ w[1] = a[i__1].r;
+ if (wantz) {
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ anrm = clanhe_("M", uplo, n, &a[a_offset], lda, &rwork[1]);
+ iscale = 0;
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ clascl_(uplo, &c__0, &c__0, &c_b1011, &sigma, n, n, &a[a_offset], lda,
+ info);
+ }
+
+/* Call CHETRD to reduce Hermitian matrix to tridiagonal form. */
+
+ inde = 1;
+ indtau = 1;
+ indwrk = indtau + *n;
+ indrwk = inde + *n;
+ indwk2 = indwrk + *n * *n;
+ llwork = *lwork - indwrk + 1;
+ llwrk2 = *lwork - indwk2 + 1;
+ llrwk = *lrwork - indrwk + 1;
+ chetrd_(uplo, n, &a[a_offset], lda, &w[1], &rwork[inde], &work[indtau], &
+ work[indwrk], &llwork, &iinfo);
+/* Computing MAX */
+ i__1 = indwrk;
+ r__1 = (real) lopt, r__2 = (real) (*n) + work[i__1].r;
+ lopt = dmax(r__1,r__2);
+
+/*
+ For eigenvalues only, call SSTERF. For eigenvectors, first call
+ CSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the
+ tridiagonal matrix, then call CUNMTR to multiply it to the
+ Householder transformations represented as Householder vectors in
+ A.
+*/
+
+ if (! wantz) {
+ ssterf_(n, &w[1], &rwork[inde], info);
+ } else {
+ cstedc_("I", n, &w[1], &rwork[inde], &work[indwrk], n, &work[indwk2],
+ &llwrk2, &rwork[indrwk], &llrwk, &iwork[1], liwork, info);
+ cunmtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[
+ indwrk], n, &work[indwk2], &llwrk2, &iinfo);
+ clacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda);
+/*
+ Computing MAX
+ Computing 2nd power
+*/
+ i__3 = *n;
+ i__4 = indwk2;
+ i__1 = lopt, i__2 = *n + i__3 * i__3 + (integer) work[i__4].r;
+ lopt = max(i__1,i__2);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ if (*info == 0) {
+ imax = *n;
+ } else {
+ imax = *info - 1;
+ }
+ r__1 = 1.f / sigma;
+ sscal_(&imax, &r__1, &w[1], &c__1);
+ }
+
+ work[1].r = (real) lopt, work[1].i = 0.f;
+ rwork[1] = (real) lropt;
+ iwork[1] = liopt;
+
+ return 0;
+
+/* End of CHEEVD */
+
+} /* cheevd_ */
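+
+/*
+   Illustrative sketch (not part of the translated LAPACK source): the
+   LWORK/LRWORK/LIWORK = -1 workspace query documented above -- call
+   once for the optimal sizes, allocate, then call again to compute.
+   Assumes malloc/free from <stdlib.h> and the f2c types from this
+   file; allocation-failure checks elided.  Excluded from compilation.
+*/
+#if 0
+static int example_cheevd(integer n, complex *a, integer lda, real *w)
+{
+    complex wkq;  real rwkq;  integer iwkq, info;
+    integer lwork = -1, lrwork = -1, liwork = -1;
+
+    cheevd_("V", "L", &n, a, &lda, w, &wkq, &lwork,
+            &rwkq, &lrwork, &iwkq, &liwork, &info);
+    if (info != 0)
+        return (int) info;
+
+    lwork  = (integer) wkq.r;      /* optimal sizes are returned in */
+    lrwork = (integer) rwkq;       /* the first workspace entries   */
+    liwork = iwkq;
+    {
+        complex *work  = malloc(sizeof(*work)  * lwork);
+        real    *rwork = malloc(sizeof(*rwork) * lrwork);
+        integer *iwork = malloc(sizeof(*iwork) * liwork);
+        cheevd_("V", "L", &n, a, &lda, w, work, &lwork,
+                rwork, &lrwork, iwork, &liwork, &info);
+        free(work); free(rwork); free(iwork);
+    }
+    return (int) info;
+}
+#endif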
+
+/* Subroutine */ int chetd2_(char *uplo, integer *n, complex *a, integer *lda,
+ real *d__, real *e, complex *tau, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Local variables */
+ static integer i__;
+ static complex taui;
+ extern /* Subroutine */ int cher2_(char *, integer *, complex *, complex *
+ , integer *, complex *, integer *, complex *, integer *);
+ static complex alpha;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int chemv_(char *, integer *, complex *, complex *
+ , integer *, complex *, integer *, complex *, complex *, integer *
+ ), caxpy_(integer *, complex *, complex *, integer *,
+ complex *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int clarfg_(integer *, complex *, complex *,
+ integer *, complex *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ CHETD2 reduces a complex Hermitian matrix A to real symmetric
+ tridiagonal form T by a unitary similarity transformation:
+ Q' * A * Q = T.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the upper or lower triangular part of the
+ Hermitian matrix A is stored:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the Hermitian matrix A. If UPLO = 'U', the leading
+ n-by-n upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n-by-n lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+ On exit, if UPLO = 'U', the diagonal and first superdiagonal
+ of A are overwritten by the corresponding elements of the
+ tridiagonal matrix T, and the elements above the first
+ superdiagonal, with the array TAU, represent the unitary
+ matrix Q as a product of elementary reflectors; if UPLO
+ = 'L', the diagonal and first subdiagonal of A are over-
+ written by the corresponding elements of the tridiagonal
+ matrix T, and the elements below the first subdiagonal, with
+ the array TAU, represent the unitary matrix Q as a product
+ of elementary reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ D (output) REAL array, dimension (N)
+ The diagonal elements of the tridiagonal matrix T:
+ D(i) = A(i,i).
+
+ E (output) REAL array, dimension (N-1)
+ The off-diagonal elements of the tridiagonal matrix T:
+ E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
+
+ TAU (output) COMPLEX array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ If UPLO = 'U', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(n-1) . . . H(2) H(1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
+ A(1:i-1,i+1), and tau in TAU(i).
+
+ If UPLO = 'L', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(1) H(2) . . . H(n-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
+ and tau in TAU(i).
+
+ The contents of A on exit are illustrated by the following examples
+ with n = 5:
+
+ if UPLO = 'U': if UPLO = 'L':
+
+ ( d e v2 v3 v4 ) ( d )
+ ( d e v3 v4 ) ( e d )
+ ( d e v4 ) ( v1 e d )
+ ( d e ) ( v1 v2 e d )
+ ( d ) ( v1 v2 v3 e d )
+
+ where d and e denote diagonal and off-diagonal elements of T, and vi
+ denotes an element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHETD2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Reduce the upper triangle of A */
+
+ i__1 = *n + *n * a_dim1;
+ i__2 = *n + *n * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/*
+ Generate elementary reflector H(i) = I - tau * v * v'
+ to annihilate A(1:i-1,i+1)
+*/
+
+ i__1 = i__ + (i__ + 1) * a_dim1;
+ alpha.r = a[i__1].r, alpha.i = a[i__1].i;
+ clarfg_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &taui);
+ i__1 = i__;
+ e[i__1] = alpha.r;
+
+ if ((taui.r != 0.f) || (taui.i != 0.f)) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+ i__1 = i__ + (i__ + 1) * a_dim1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+
+/* Compute x := tau * A * v storing x in TAU(1:i) */
+
+ chemv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) *
+ a_dim1 + 1], &c__1, &c_b55, &tau[1], &c__1)
+ ;
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+ q__3.r = -.5f, q__3.i = -0.f;
+ q__2.r = q__3.r * taui.r - q__3.i * taui.i, q__2.i = q__3.r *
+ taui.i + q__3.i * taui.r;
+ cdotc_(&q__4, &i__, &tau[1], &c__1, &a[(i__ + 1) * a_dim1 + 1]
+ , &c__1);
+ q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r *
+ q__4.i + q__2.i * q__4.r;
+ alpha.r = q__1.r, alpha.i = q__1.i;
+ caxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[
+ 1], &c__1);
+
+/*
+ Apply the transformation as a rank-2 update:
+ A := A - v * w' - w * v'
+*/
+
+ q__1.r = -1.f, q__1.i = -0.f;
+ cher2_(uplo, &i__, &q__1, &a[(i__ + 1) * a_dim1 + 1], &c__1, &
+ tau[1], &c__1, &a[a_offset], lda);
+
+ } else {
+ i__1 = i__ + i__ * a_dim1;
+ i__2 = i__ + i__ * a_dim1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ }
+ i__1 = i__ + (i__ + 1) * a_dim1;
+ i__2 = i__;
+ a[i__1].r = e[i__2], a[i__1].i = 0.f;
+ i__1 = i__ + 1;
+ i__2 = i__ + 1 + (i__ + 1) * a_dim1;
+ d__[i__1] = a[i__2].r;
+ i__1 = i__;
+ tau[i__1].r = taui.r, tau[i__1].i = taui.i;
+/* L10: */
+ }
+ i__1 = a_dim1 + 1;
+ d__[1] = a[i__1].r;
+ } else {
+
+/* Reduce the lower triangle of A */
+
+ i__1 = a_dim1 + 1;
+ i__2 = a_dim1 + 1;
+ r__1 = a[i__2].r;
+ a[i__1].r = r__1, a[i__1].i = 0.f;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+ Generate elementary reflector H(i) = I - tau * v * v'
+ to annihilate A(i+2:n,i)
+*/
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ clarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1, &
+ taui);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+
+ if ((taui.r != 0.f) || (taui.i != 0.f)) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute x := tau * A * v storing x in TAU(i:n-1) */
+
+ i__2 = *n - i__;
+ chemv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b55, &tau[
+ i__], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+ q__3.r = -.5f, q__3.i = -0.f;
+ q__2.r = q__3.r * taui.r - q__3.i * taui.i, q__2.i = q__3.r *
+ taui.i + q__3.i * taui.r;
+ i__2 = *n - i__;
+ cdotc_(&q__4, &i__2, &tau[i__], &c__1, &a[i__ + 1 + i__ *
+ a_dim1], &c__1);
+ q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r *
+ q__4.i + q__2.i * q__4.r;
+ alpha.r = q__1.r, alpha.i = q__1.i;
+ i__2 = *n - i__;
+ caxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+ i__], &c__1);
+
+/*
+ Apply the transformation as a rank-2 update:
+ A := A - v * w' - w * v'
+*/
+
+ i__2 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cher2_(uplo, &i__2, &q__1, &a[i__ + 1 + i__ * a_dim1], &c__1,
+ &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda);
+
+ } else {
+ i__2 = i__ + 1 + (i__ + 1) * a_dim1;
+ i__3 = i__ + 1 + (i__ + 1) * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+ i__2 = i__ + 1 + i__ * a_dim1;
+ i__3 = i__;
+ a[i__2].r = e[i__3], a[i__2].i = 0.f;
+ i__2 = i__;
+ i__3 = i__ + i__ * a_dim1;
+ d__[i__2] = a[i__3].r;
+ i__2 = i__;
+ tau[i__2].r = taui.r, tau[i__2].i = taui.i;
+/* L20: */
+ }
+ i__1 = *n;
+ i__2 = *n + *n * a_dim1;
+ d__[i__1] = a[i__2].r;
+ }
+
+ return 0;
+
+/* End of CHETD2 */
+
+} /* chetd2_ */
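+
+/*
+   Illustrative sketch (not part of the translated LAPACK source): the
+   elementary reflector H = I - tau*v*v' used throughout chetd2_.
+   Applying H to a vector x is x := x - tau*(v'x)*v; shown here for
+   real data (the complex case conjugates v inside the dot product, as
+   CDOTC does).  Excluded from compilation.
+*/
+#if 0
+static void apply_reflector(int n, double tau, const double *v, double *x)
+{
+    double vtx = 0.0;
+    int i;
+    for (i = 0; i < n; ++i)
+        vtx += v[i] * x[i];        /* v'x */
+    for (i = 0; i < n; ++i)
+        x[i] -= tau * vtx * v[i];  /* x - tau*(v'x)*v */
+}
+#endif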
+
+/* Subroutine */ int chetrd_(char *uplo, integer *n, complex *a, integer *lda,
+ real *d__, real *e, complex *tau, complex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__, j, nb, kk, nx, iws;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ static logical upper;
+ extern /* Subroutine */ int chetd2_(char *, integer *, complex *, integer
+ *, real *, real *, complex *, integer *), cher2k_(char *,
+ char *, integer *, integer *, complex *, complex *, integer *,
+ complex *, integer *, real *, complex *, integer *), clatrd_(char *, integer *, integer *, complex *, integer
+ *, real *, complex *, complex *, integer *), xerbla_(char
+ *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CHETRD reduces a complex Hermitian matrix A to real symmetric
+ tridiagonal form T by a unitary similarity transformation:
+ Q**H * A * Q = T.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the Hermitian matrix A. If UPLO = 'U', the leading
+ N-by-N upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading N-by-N lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+ On exit, if UPLO = 'U', the diagonal and first superdiagonal
+ of A are overwritten by the corresponding elements of the
+ tridiagonal matrix T, and the elements above the first
+ superdiagonal, with the array TAU, represent the unitary
+ matrix Q as a product of elementary reflectors; if UPLO
+ = 'L', the diagonal and first subdiagonal of A are over-
+ written by the corresponding elements of the tridiagonal
+ matrix T, and the elements below the first subdiagonal, with
+ the array TAU, represent the unitary matrix Q as a product
+ of elementary reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ D (output) REAL array, dimension (N)
+ The diagonal elements of the tridiagonal matrix T:
+ D(i) = A(i,i).
+
+ E (output) REAL array, dimension (N-1)
+ The off-diagonal elements of the tridiagonal matrix T:
+ E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
+
+ TAU (output) COMPLEX array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= 1.
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ If UPLO = 'U', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(n-1) . . . H(2) H(1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
+ A(1:i-1,i+1), and tau in TAU(i).
+
+ If UPLO = 'L', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(1) H(2) . . . H(n-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
+ and tau in TAU(i).
+
+ The contents of A on exit are illustrated by the following examples
+ with n = 5:
+
+ if UPLO = 'U': if UPLO = 'L':
+
+ ( d e v2 v3 v4 ) ( d )
+ ( d e v3 v4 ) ( e d )
+ ( d e v4 ) ( v1 e d )
+ ( d e ) ( v1 v2 e d )
+ ( d ) ( v1 v2 v3 e d )
+
+ where d and e denote diagonal and off-diagonal elements of T, and vi
+ denotes an element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "CHETRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6,
+ (ftnlen)1);
+ lwkopt = *n * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHETRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nx = *n;
+ iws = 1;
+ if (nb > 1 && nb < *n) {
+
+/*
+ Determine when to cross over from blocked to unblocked code
+ (last block is always handled by unblocked code).
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "CHETRD", uplo, n, &c_n1, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *n) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: determine the
+ minimum value of NB, and reduce NB or force use of
+ unblocked code by setting NX = N.
+
+ Computing MAX
+*/
+ i__1 = *lwork / ldwork;
+ nb = max(i__1,1);
+ nbmin = ilaenv_(&c__2, "CHETRD", uplo, n, &c_n1, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ if (nb < nbmin) {
+ nx = *n;
+ }
+ }
+ } else {
+ nx = *n;
+ }
+ } else {
+ nb = 1;
+ }
+
+ if (upper) {
+
+/*
+ Reduce the upper triangle of A.
+ Columns 1:kk are handled by the unblocked method.
+*/
+
+ kk = *n - (*n - nx + nb - 1) / nb * nb;
+ i__1 = kk + 1;
+ i__2 = -nb;
+ for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+
+/*
+ Reduce columns i:i+nb-1 to tridiagonal form and form the
+ matrix W which is needed to update the unreduced part of
+ the matrix
+*/
+
+ i__3 = i__ + nb - 1;
+ clatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], &
+ work[1], &ldwork);
+
+/*
+ Update the unreduced submatrix A(1:i-1,1:i-1), using an
+ update of the form: A := A - V*W' - W*V'
+*/
+
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cher2k_(uplo, "No transpose", &i__3, &nb, &q__1, &a[i__ * a_dim1
+ + 1], lda, &work[1], &ldwork, &c_b1011, &a[a_offset], lda);
+
+/*
+ Copy superdiagonal elements back into A, and diagonal
+ elements into D
+*/
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ i__4 = j - 1 + j * a_dim1;
+ i__5 = j - 1;
+ a[i__4].r = e[i__5], a[i__4].i = 0.f;
+ i__4 = j;
+ i__5 = j + j * a_dim1;
+ d__[i__4] = a[i__5].r;
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ chetd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo);
+ } else {
+
+/* Reduce the lower triangle of A */
+
+ i__2 = *n - nx;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+
+/*
+ Reduce columns i:i+nb-1 to tridiagonal form and form the
+ matrix W which is needed to update the unreduced part of
+ the matrix
+*/
+
+ i__3 = *n - i__ + 1;
+ clatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], &
+ tau[i__], &work[1], &ldwork);
+
+/*
+ Update the unreduced submatrix A(i+nb:n,i+nb:n), using
+ an update of the form: A := A - V*W' - W*V'
+*/
+
+ i__3 = *n - i__ - nb + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cher2k_(uplo, "No transpose", &i__3, &nb, &q__1, &a[i__ + nb +
+ i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b1011, &a[
+ i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/*
+ Copy subdiagonal elements back into A, and diagonal
+ elements into D
+*/
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ i__4 = j + 1 + j * a_dim1;
+ i__5 = j;
+ a[i__4].r = e[i__5], a[i__4].i = 0.f;
+ i__4 = j;
+ i__5 = j + j * a_dim1;
+ d__[i__4] = a[i__5].r;
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ i__1 = *n - i__ + 1;
+ chetd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__],
+ &tau[i__], &iinfo);
+ }
+
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CHETRD */
+
+} /* chetrd_ */
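+
+/*
+   Illustrative sketch (not part of the translated LAPACK source): the
+   crossover bookkeeping in chetrd_ above (upper case).  KK columns are
+   left for the unblocked chetd2_ so that the remaining N-KK columns
+   form whole blocks of width NB.  Worked check in the comments below;
+   excluded from compilation.
+*/
+#if 0
+static int unblocked_columns(int n, int nx, int nb)
+{
+    /* KK = N - ((N-NX+NB-1)/NB)*NB, integer division */
+    int kk = n - (n - nx + nb - 1) / nb * nb;
+    /* e.g. n=100, nx=32, nb=16: ceil(68/16) = 5 blocks, kk = 20,
+       and the blocked loop above runs i = 85, 69, 53, 37, 21. */
+    return kk;
+}
+#endif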
+
+/* Subroutine */ int chseqr_(char *job, char *compz, integer *n, integer *ilo,
+ integer *ihi, complex *h__, integer *ldh, complex *w, complex *z__,
+ integer *ldz, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4[2],
+ i__5, i__6;
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+ char ch__1[2];
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__, j, k, l;
+ static complex s[225] /* was [15][15] */, v[16];
+ static integer i1, i2, ii, nh, nr, ns, nv;
+ static complex vv[16];
+ static integer itn;
+ static complex tau;
+ static integer its;
+ static real ulp, tst1;
+ static integer maxb, ierr;
+ static real unfl;
+ static complex temp;
+ static real ovfl;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+ , complex *, integer *, complex *, integer *, complex *, complex *
+ , integer *), ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ static integer itemp;
+ static real rtemp;
+ static logical initz, wantt, wantz;
+ static real rwork[1];
+ extern doublereal slapy2_(real *, real *);
+ extern /* Subroutine */ int slabad_(real *, real *), clarfg_(integer *,
+ complex *, complex *, integer *, complex *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern doublereal slamch_(char *), clanhs_(char *, integer *,
+ complex *, integer *, real *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), clahqr_(logical *, logical *, integer *, integer *, integer *,
+ complex *, integer *, complex *, integer *, integer *, complex *,
+ integer *, integer *), clacpy_(char *, integer *, integer *,
+ complex *, integer *, complex *, integer *), claset_(char
+ *, integer *, integer *, complex *, complex *, complex *, integer
+ *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int clarfx_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *, complex *);
+ static real smlnum;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CHSEQR computes the eigenvalues of a complex upper Hessenberg
+ matrix H, and, optionally, the matrices T and Z from the Schur
+ decomposition H = Z T Z**H, where T is an upper triangular matrix
+ (the Schur form), and Z is the unitary matrix of Schur vectors.
+
+ Optionally Z may be postmultiplied into an input unitary matrix Q,
+ so that this routine can give the Schur factorization of a matrix A
+ which has been reduced to the Hessenberg form H by the unitary
+ matrix Q: A = Q*H*Q**H = (QZ)*T*(QZ)**H.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ = 'E': compute eigenvalues only;
+ = 'S': compute eigenvalues and the Schur form T.
+
+ COMPZ (input) CHARACTER*1
+ = 'N': no Schur vectors are computed;
+ = 'I': Z is initialized to the unit matrix and the matrix Z
+ of Schur vectors of H is returned;
+            = 'V': Z must contain a unitary matrix Q on entry, and
+ the product Q*Z is returned.
+
+ N (input) INTEGER
+ The order of the matrix H. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that H is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to CGEBAL, and then passed to CGEHRD
+ when the matrix output by CGEBAL is reduced to Hessenberg
+ form. Otherwise ILO and IHI should be set to 1 and N
+ respectively.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ H (input/output) COMPLEX array, dimension (LDH,N)
+ On entry, the upper Hessenberg matrix H.
+ On exit, if JOB = 'S', H contains the upper triangular matrix
+ T from the Schur decomposition (the Schur form). If
+ JOB = 'E', the contents of H are unspecified on exit.
+
+ LDH (input) INTEGER
+ The leading dimension of the array H. LDH >= max(1,N).
+
+ W (output) COMPLEX array, dimension (N)
+ The computed eigenvalues. If JOB = 'S', the eigenvalues are
+ stored in the same order as on the diagonal of the Schur form
+ returned in H, with W(i) = H(i,i).
+
+ Z (input/output) COMPLEX array, dimension (LDZ,N)
+ If COMPZ = 'N': Z is not referenced.
+ If COMPZ = 'I': on entry, Z need not be set, and on exit, Z
+ contains the unitary matrix Z of the Schur vectors of H.
+ If COMPZ = 'V': on entry Z must contain an N-by-N matrix Q,
+ which is assumed to be equal to the unit matrix except for
+ the submatrix Z(ILO:IHI,ILO:IHI); on exit Z contains Q*Z.
+ Normally Q is the unitary matrix generated by CUNGHR after
+ the call to CGEHRD which formed the Hessenberg matrix H.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z.
+ LDZ >= max(1,N) if COMPZ = 'I' or 'V'; LDZ >= 1 otherwise.
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, CHSEQR failed to compute all the
+ eigenvalues in a total of 30*(IHI-ILO+1) iterations;
+ elements 1:ilo-1 and i+1:n of W contain those
+ eigenvalues which have been successfully computed.
+
+ =====================================================================
+
+
+ Decode and test the input parameters
+*/
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ wantt = lsame_(job, "S");
+ initz = lsame_(compz, "I");
+ wantz = (initz) || (lsame_(compz, "V"));
+
+ *info = 0;
+ i__1 = max(1,*n);
+ work[1].r = (real) i__1, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (! lsame_(job, "E") && ! wantt) {
+ *info = -1;
+ } else if (! lsame_(compz, "N") && ! wantz) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -4;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -5;
+ } else if (*ldh < max(1,*n)) {
+ *info = -7;
+ } else if ((*ldz < 1) || (wantz && *ldz < max(1,*n))) {
+ *info = -10;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CHSEQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Initialize Z, if necessary */
+
+ if (initz) {
+ claset_("Full", n, n, &c_b55, &c_b56, &z__[z_offset], ldz);
+ }
+
+/* Store the eigenvalues isolated by CGEBAL. */
+
+ i__1 = *ilo - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__ + i__ * h_dim1;
+ w[i__2].r = h__[i__3].r, w[i__2].i = h__[i__3].i;
+/* L10: */
+ }
+ i__1 = *n;
+ for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ i__3 = i__ + i__ * h_dim1;
+ w[i__2].r = h__[i__3].r, w[i__2].i = h__[i__3].i;
+/* L20: */
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ i__1 = *ilo;
+ i__2 = *ilo + *ilo * h_dim1;
+ w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+ return 0;
+ }
+
+/*
+ Set rows and columns ILO to IHI to zero below the first
+ subdiagonal.
+*/
+
+ i__1 = *ihi - 2;
+ for (j = *ilo; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j + 2; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * h_dim1;
+ h__[i__3].r = 0.f, h__[i__3].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ nh = *ihi - *ilo + 1;
+
+/*
+ I1 and I2 are the indices of the first row and last column of H
+ to which transformations must be applied. If eigenvalues only are
+ being computed, I1 and I2 are re-set inside the main loop.
+*/
+
+ if (wantt) {
+ i1 = 1;
+ i2 = *n;
+ } else {
+ i1 = *ilo;
+ i2 = *ihi;
+ }
+
+/* Ensure that the subdiagonal elements are real. */
+
+ i__1 = *ihi;
+ for (i__ = *ilo + 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + (i__ - 1) * h_dim1;
+ temp.r = h__[i__2].r, temp.i = h__[i__2].i;
+ if (r_imag(&temp) != 0.f) {
+ r__1 = temp.r;
+ r__2 = r_imag(&temp);
+ rtemp = slapy2_(&r__1, &r__2);
+ i__2 = i__ + (i__ - 1) * h_dim1;
+ h__[i__2].r = rtemp, h__[i__2].i = 0.f;
+ q__1.r = temp.r / rtemp, q__1.i = temp.i / rtemp;
+ temp.r = q__1.r, temp.i = q__1.i;
+ if (i2 > i__) {
+ i__2 = i2 - i__;
+ r_cnjg(&q__1, &temp);
+ cscal_(&i__2, &q__1, &h__[i__ + (i__ + 1) * h_dim1], ldh);
+ }
+ i__2 = i__ - i1;
+ cscal_(&i__2, &temp, &h__[i1 + i__ * h_dim1], &c__1);
+ if (i__ < *ihi) {
+ i__2 = i__ + 1 + i__ * h_dim1;
+ i__3 = i__ + 1 + i__ * h_dim1;
+ q__1.r = temp.r * h__[i__3].r - temp.i * h__[i__3].i, q__1.i =
+ temp.r * h__[i__3].i + temp.i * h__[i__3].r;
+ h__[i__2].r = q__1.r, h__[i__2].i = q__1.i;
+ }
+ if (wantz) {
+ cscal_(&nh, &temp, &z__[*ilo + i__ * z_dim1], &c__1);
+ }
+ }
+/* L50: */
+ }
+
+/*
+ Determine the order of the multi-shift QR algorithm to be used.
+
+ Writing concatenation
+*/
+ i__4[0] = 1, a__1[0] = job;
+ i__4[1] = 1, a__1[1] = compz;
+ s_cat(ch__1, a__1, i__4, &c__2, (ftnlen)2);
+ ns = ilaenv_(&c__4, "CHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+/* Writing concatenation */
+ i__4[0] = 1, a__1[0] = job;
+ i__4[1] = 1, a__1[1] = compz;
+ s_cat(ch__1, a__1, i__4, &c__2, (ftnlen)2);
+ maxb = ilaenv_(&c__8, "CHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+ if (((ns <= 1) || (ns > nh)) || (maxb >= nh)) {
+
+/* Use the standard double-shift algorithm */
+
+ clahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &w[1], ilo,
+ ihi, &z__[z_offset], ldz, info);
+ return 0;
+ }
+ maxb = max(2,maxb);
+/* Computing MIN */
+ i__1 = min(ns,maxb);
+ ns = min(i__1,15);
+
+/*
+ Now 1 < NS <= MAXB < NH.
+
+ Set machine-dependent constants for the stopping criterion.
+ If norm(H) <= sqrt(OVFL), overflow should not occur.
+*/
+
+ unfl = slamch_("Safe minimum");
+ ovfl = 1.f / unfl;
+ slabad_(&unfl, &ovfl);
+ ulp = slamch_("Precision");
+ smlnum = unfl * (nh / ulp);
+
+/* ITN is the total number of multiple-shift QR iterations allowed. */
+
+ itn = nh * 30;
+
+/*
+ The main loop begins here. I is the loop index and decreases from
+ IHI to ILO in steps of at most MAXB. Each iteration of the loop
+ works with the active submatrix in rows and columns L to I.
+ Eigenvalues I+1 to IHI have already converged. Either L = ILO, or
+ H(L,L-1) is negligible so that the matrix splits.
+*/
+
+ i__ = *ihi;
+L60:
+ if (i__ < *ilo) {
+ goto L180;
+ }
+
+/*
+ Perform multiple-shift QR iterations on rows and columns ILO to I
+ until a submatrix of order at most MAXB splits off at the bottom
+ because a subdiagonal element has become negligible.
+*/
+
+ l = *ilo;
+ i__1 = itn;
+ for (its = 0; its <= i__1; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__2 = l + 1;
+ for (k = i__; k >= i__2; --k) {
+ i__3 = k - 1 + (k - 1) * h_dim1;
+ i__5 = k + k * h_dim1;
+ tst1 = (r__1 = h__[i__3].r, dabs(r__1)) + (r__2 = r_imag(&h__[k -
+ 1 + (k - 1) * h_dim1]), dabs(r__2)) + ((r__3 = h__[i__5]
+ .r, dabs(r__3)) + (r__4 = r_imag(&h__[k + k * h_dim1]),
+ dabs(r__4)));
+ if (tst1 == 0.f) {
+ i__3 = i__ - l + 1;
+ tst1 = clanhs_("1", &i__3, &h__[l + l * h_dim1], ldh, rwork);
+ }
+ i__3 = k + (k - 1) * h_dim1;
+/* Computing MAX */
+ r__2 = ulp * tst1;
+ if ((r__1 = h__[i__3].r, dabs(r__1)) <= dmax(r__2,smlnum)) {
+ goto L80;
+ }
+/* L70: */
+ }
+L80:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible. */
+
+ i__2 = l + (l - 1) * h_dim1;
+ h__[i__2].r = 0.f, h__[i__2].i = 0.f;
+ }
+
+/* Exit from loop if a submatrix of order <= MAXB has split off. */
+
+ if (l >= i__ - maxb + 1) {
+ goto L170;
+ }
+
+/*
+ Now the active submatrix is in rows and columns L to I. If
+ eigenvalues only are being computed, only the active submatrix
+ need be transformed.
+*/
+
+ if (! wantt) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if ((its == 20) || (its == 30)) {
+
+/* Exceptional shifts. */
+
+ i__2 = i__;
+ for (ii = i__ - ns + 1; ii <= i__2; ++ii) {
+ i__3 = ii;
+ i__5 = ii + (ii - 1) * h_dim1;
+ i__6 = ii + ii * h_dim1;
+ r__3 = ((r__1 = h__[i__5].r, dabs(r__1)) + (r__2 = h__[i__6]
+ .r, dabs(r__2))) * 1.5f;
+ w[i__3].r = r__3, w[i__3].i = 0.f;
+/* L90: */
+ }
+ } else {
+
+/* Use eigenvalues of trailing submatrix of order NS as shifts. */
+
+ clacpy_("Full", &ns, &ns, &h__[i__ - ns + 1 + (i__ - ns + 1) *
+ h_dim1], ldh, s, &c__15);
+ clahqr_(&c_false, &c_false, &ns, &c__1, &ns, s, &c__15, &w[i__ -
+ ns + 1], &c__1, &ns, &z__[z_offset], ldz, &ierr);
+ if (ierr > 0) {
+
+/*
+ If CLAHQR failed to compute all NS eigenvalues, use the
+ unconverged diagonal elements as the remaining shifts.
+*/
+
+ i__2 = ierr;
+ for (ii = 1; ii <= i__2; ++ii) {
+ i__3 = i__ - ns + ii;
+ i__5 = ii + ii * 15 - 16;
+ w[i__3].r = s[i__5].r, w[i__3].i = s[i__5].i;
+/* L100: */
+ }
+ }
+ }
+
+/*
+ Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns))
+ where G is the Hessenberg submatrix H(L:I,L:I) and w is
+ the vector of shifts (stored in W). The result is
+ stored in the local array V.
+*/
+
+ v[0].r = 1.f, v[0].i = 0.f;
+ i__2 = ns + 1;
+ for (ii = 2; ii <= i__2; ++ii) {
+ i__3 = ii - 1;
+ v[i__3].r = 0.f, v[i__3].i = 0.f;
+/* L110: */
+ }
+ nv = 1;
+ i__2 = i__;
+ for (j = i__ - ns + 1; j <= i__2; ++j) {
+ i__3 = nv + 1;
+ ccopy_(&i__3, v, &c__1, vv, &c__1);
+ i__3 = nv + 1;
+ i__5 = j;
+ q__1.r = -w[i__5].r, q__1.i = -w[i__5].i;
+ cgemv_("No transpose", &i__3, &nv, &c_b56, &h__[l + l * h_dim1],
+ ldh, vv, &c__1, &q__1, v, &c__1);
+ ++nv;
+
+/*
+ Scale V(1:NV) so that max(abs(V(i))) = 1. If V is zero,
+ reset it to the unit vector.
+*/
+
+ itemp = icamax_(&nv, v, &c__1);
+ i__3 = itemp - 1;
+ rtemp = (r__1 = v[i__3].r, dabs(r__1)) + (r__2 = r_imag(&v[itemp
+ - 1]), dabs(r__2));
+ if (rtemp == 0.f) {
+ v[0].r = 1.f, v[0].i = 0.f;
+ i__3 = nv;
+ for (ii = 2; ii <= i__3; ++ii) {
+ i__5 = ii - 1;
+ v[i__5].r = 0.f, v[i__5].i = 0.f;
+/* L120: */
+ }
+ } else {
+ rtemp = dmax(rtemp,smlnum);
+ r__1 = 1.f / rtemp;
+ csscal_(&nv, &r__1, v, &c__1);
+ }
+/* L130: */
+ }
+
+/* Multiple-shift QR step */
+
+ i__2 = i__ - 1;
+ for (k = l; k <= i__2; ++k) {
+
+/*
+ The first iteration of this loop determines a reflection G
+ from the vector V and applies it from left and right to H,
+ thus creating a nonzero bulge below the subdiagonal.
+
+ Each subsequent iteration determines a reflection G to
+ restore the Hessenberg form in the (K-1)th column, and thus
+ chases the bulge one step toward the bottom of the active
+ submatrix. NR is the order of G.
+
+ Computing MIN
+*/
+ i__3 = ns + 1, i__5 = i__ - k + 1;
+ nr = min(i__3,i__5);
+ if (k > l) {
+ ccopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ clarfg_(&nr, v, &v[1], &c__1, &tau);
+ if (k > l) {
+ i__3 = k + (k - 1) * h_dim1;
+ h__[i__3].r = v[0].r, h__[i__3].i = v[0].i;
+ i__3 = i__;
+ for (ii = k + 1; ii <= i__3; ++ii) {
+ i__5 = ii + (k - 1) * h_dim1;
+ h__[i__5].r = 0.f, h__[i__5].i = 0.f;
+/* L140: */
+ }
+ }
+ v[0].r = 1.f, v[0].i = 0.f;
+
+/*
+ Apply G' from the left to transform the rows of the matrix
+ in columns K to I2.
+*/
+
+ i__3 = i2 - k + 1;
+ r_cnjg(&q__1, &tau);
+ clarfx_("Left", &nr, &i__3, v, &q__1, &h__[k + k * h_dim1], ldh, &
+ work[1]);
+
+/*
+ Apply G from the right to transform the columns of the
+ matrix in rows I1 to min(K+NR,I).
+
+ Computing MIN
+*/
+ i__5 = k + nr;
+ i__3 = min(i__5,i__) - i1 + 1;
+ clarfx_("Right", &i__3, &nr, v, &tau, &h__[i1 + k * h_dim1], ldh,
+ &work[1]);
+
+ if (wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ clarfx_("Right", &nh, &nr, v, &tau, &z__[*ilo + k * z_dim1],
+ ldz, &work[1]);
+ }
+/* L150: */
+ }
+
+/* Ensure that H(I,I-1) is real. */
+
+ i__2 = i__ + (i__ - 1) * h_dim1;
+ temp.r = h__[i__2].r, temp.i = h__[i__2].i;
+ if (r_imag(&temp) != 0.f) {
+ r__1 = temp.r;
+ r__2 = r_imag(&temp);
+ rtemp = slapy2_(&r__1, &r__2);
+ i__2 = i__ + (i__ - 1) * h_dim1;
+ h__[i__2].r = rtemp, h__[i__2].i = 0.f;
+ q__1.r = temp.r / rtemp, q__1.i = temp.i / rtemp;
+ temp.r = q__1.r, temp.i = q__1.i;
+ if (i2 > i__) {
+ i__2 = i2 - i__;
+ r_cnjg(&q__1, &temp);
+ cscal_(&i__2, &q__1, &h__[i__ + (i__ + 1) * h_dim1], ldh);
+ }
+ i__2 = i__ - i1;
+ cscal_(&i__2, &temp, &h__[i1 + i__ * h_dim1], &c__1);
+ if (wantz) {
+ cscal_(&nh, &temp, &z__[*ilo + i__ * z_dim1], &c__1);
+ }
+ }
+
+/* L160: */
+ }
+
+/* Failure to converge in remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L170:
+
+/*
+ A submatrix of order <= MAXB in rows and columns L to I has split
+ off. Use the double-shift QR algorithm to handle it.
+*/
+
+ clahqr_(&wantt, &wantz, n, &l, &i__, &h__[h_offset], ldh, &w[1], ilo, ihi,
+ &z__[z_offset], ldz, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+/*
+ Decrement number of remaining iterations, and return to start of
+ the main loop with a new value of I.
+*/
+
+ itn -= its;
+ i__ = l - 1;
+ goto L60;
+
+L180:
+ i__1 = max(1,*n);
+ work[1].r = (real) i__1, work[1].i = 0.f;
+ return 0;
+
+/* End of CHSEQR */
+
+} /* chseqr_ */
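+
+/*
+   Editor's note -- illustrative sketch, not part of LAPACK: the LWORK = -1
+   workspace query documented above is used in two passes, as below.  The
+   helper is hypothetical; it assumes the f2c typedefs from the top of this
+   file and that <stdlib.h> (malloc/free) is available.
+*/
+static int sketch_chseqr_eigenvalues(integer n, complex *h, integer ldh,
+        complex *w, integer *info)
+{
+    complex query, zdum, *work;
+    integer lwork = -1, c1 = 1;
+
+    /* First pass: ask CHSEQR for the optimal workspace size only. */
+    chseqr_("E", "N", &n, &c1, &n, h, &ldh, w, &zdum, &c1,
+            &query, &lwork, info);
+    if (*info != 0) {
+        return -1;
+    }
+    /* Second pass: allocate WORK(1) complex elements and compute the
+       eigenvalues only (JOB = "E", COMPZ = "N"). */
+    lwork = (integer) query.r;
+    work = (complex *) malloc(lwork * sizeof(complex));
+    if (work == 0) {
+        return -1;
+    }
+    chseqr_("E", "N", &n, &c1, &n, h, &ldh, w, &zdum, &c1,
+            work, &lwork, info);
+    free(work);
+    return *info == 0 ? 0 : -1;
+}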
+
+/* Subroutine */ int clabrd_(integer *m, integer *n, integer *nb, complex *a,
+ integer *lda, real *d__, real *e, complex *tauq, complex *taup,
+ complex *x, integer *ldx, complex *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__;
+ static complex alpha;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), cgemv_(char *, integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, complex *,
+ integer *), clarfg_(integer *, complex *, complex *,
+ integer *, complex *), clacgv_(integer *, complex *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLABRD reduces the first NB rows and columns of a complex general
+ m by n matrix A to upper or lower real bidiagonal form by a unitary
+ transformation Q' * A * P, and returns the matrices X and Y which
+ are needed to apply the transformation to the unreduced part of A.
+
+ If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower
+ bidiagonal form.
+
+    This is an auxiliary routine called by CGEBRD.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A.
+
+ N (input) INTEGER
+ The number of columns in the matrix A.
+
+ NB (input) INTEGER
+ The number of leading rows and columns of A to be reduced.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the m by n general matrix to be reduced.
+ On exit, the first NB rows and columns of the matrix are
+ overwritten; the rest of the array is unchanged.
+ If m >= n, elements on and below the diagonal in the first NB
+ columns, with the array TAUQ, represent the unitary
+ matrix Q as a product of elementary reflectors; and
+ elements above the diagonal in the first NB rows, with the
+ array TAUP, represent the unitary matrix P as a product
+ of elementary reflectors.
+ If m < n, elements below the diagonal in the first NB
+ columns, with the array TAUQ, represent the unitary
+ matrix Q as a product of elementary reflectors, and
+ elements on and above the diagonal in the first NB rows,
+ with the array TAUP, represent the unitary matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) REAL array, dimension (NB)
+ The diagonal elements of the first NB rows and columns of
+ the reduced matrix. D(i) = A(i,i).
+
+ E (output) REAL array, dimension (NB)
+ The off-diagonal elements of the first NB rows and columns of
+ the reduced matrix.
+
+ TAUQ (output) COMPLEX array dimension (NB)
+ The scalar factors of the elementary reflectors which
+ represent the unitary matrix Q. See Further Details.
+
+ TAUP (output) COMPLEX array, dimension (NB)
+ The scalar factors of the elementary reflectors which
+ represent the unitary matrix P. See Further Details.
+
+ X (output) COMPLEX array, dimension (LDX,NB)
+ The m-by-nb matrix X required to update the unreduced part
+ of A.
+
+ LDX (input) INTEGER
+ The leading dimension of the array X. LDX >= max(1,M).
+
+ Y (output) COMPLEX array, dimension (LDY,NB)
+ The n-by-nb matrix Y required to update the unreduced part
+ of A.
+
+    LDY     (input) INTEGER
+ The leading dimension of the array Y. LDY >= max(1,N).
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . G(nb)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are complex scalars, and v and u are complex
+ vectors.
+
+ If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in
+ A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in
+ A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in
+ A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in
+ A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The elements of the vectors v and u together form the m-by-nb matrix
+ V and the nb-by-n matrix U' which are needed, with X and Y, to apply
+ the transformation to the unreduced part of the matrix, using a block
+ update of the form: A := A - V*Y' - X*U'.
+
+ The contents of A on exit are illustrated by the following examples
+ with nb = 2:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 )
+ ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 )
+ ( v1 v2 a a a ) ( v1 1 a a a a )
+ ( v1 v2 a a a ) ( v1 v2 a a a a )
+ ( v1 v2 a a a ) ( v1 v2 a a a a )
+ ( v1 v2 a a a )
+
+ where a denotes an element of the original matrix which is unchanged,
+ vi denotes an element of the vector defining H(i), and ui an element
+ of the vector defining G(i).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if ((*m <= 0) || (*n <= 0)) {
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:m,i) */
+
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &y[i__ + y_dim1], ldy);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + a_dim1], lda,
+ &y[i__ + y_dim1], ldy, &c_b56, &a[i__ + i__ * a_dim1], &
+ c__1);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &y[i__ + y_dim1], ldy);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &x[i__ + x_dim1], ldx,
+ &a[i__ * a_dim1 + 1], &c__1, &c_b56, &a[i__ + i__ *
+ a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+1:m,i) */
+
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ clarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1, &
+ tauq[i__]);
+ i__2 = i__;
+ d__[i__2] = alpha.r;
+ if (i__ < *n) {
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &a[i__ + (
+ i__ + 1) * a_dim1], lda, &a[i__ + i__ * a_dim1], &
+ c__1, &c_b55, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &a[i__ +
+ a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &c_b55, &
+ y[i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b56, &y[
+ i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &x[i__ +
+ x_dim1], ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b55, &
+ y[i__ * y_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &q__1, &a[(i__ +
+ 1) * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+ c_b56, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ cscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+
+/* Update A(i,i+1:n) */
+
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ clacgv_(&i__, &a[i__ + a_dim1], lda);
+ i__2 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__, &q__1, &y[i__ + 1 +
+ y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b56, &a[i__ +
+ (i__ + 1) * a_dim1], lda);
+ clacgv_(&i__, &a[i__ + a_dim1], lda);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &x[i__ + x_dim1], ldx);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &q__1, &a[(i__ +
+ 1) * a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b56,
+ &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &x[i__ + x_dim1], ldx);
+
+/* Generate reflection P(i) to annihilate A(i,i+2:n) */
+
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ clarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &
+ taup[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + (i__ + 1) * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ cgemv_("No transpose", &i__2, &i__3, &c_b56, &a[i__ + 1 + (
+ i__ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &c_b55, &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__;
+ cgemv_("Conjugate transpose", &i__2, &i__, &c_b56, &y[i__ + 1
+ + y_dim1], ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b55, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__, &q__1, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b56, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ cgemv_("No transpose", &i__2, &i__3, &c_b56, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b55, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b56, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ cscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i,i:n) */
+
+ i__2 = *n - i__ + 1;
+ clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[i__ + a_dim1], lda);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &y[i__ + y_dim1], ldy,
+ &a[i__ + a_dim1], lda, &c_b56, &a[i__ + i__ * a_dim1],
+ lda);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[i__ + a_dim1], lda);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &x[i__ + x_dim1], ldx);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &q__1, &a[i__ *
+ a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b56, &a[i__ +
+ i__ * a_dim1], lda);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &x[i__ + x_dim1], ldx);
+
+/* Generate reflection P(i) to annihilate A(i,i+1:n) */
+
+ i__2 = i__ + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ clarfg_(&i__2, &alpha, &a[i__ + min(i__3,*n) * a_dim1], lda, &
+ taup[i__]);
+ i__2 = i__;
+ d__[i__2] = alpha.r;
+ if (i__ < *m) {
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ cgemv_("No transpose", &i__2, &i__3, &c_b56, &a[i__ + 1 + i__
+ * a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &c_b55, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &y[i__ +
+ y_dim1], ldy, &a[i__ + i__ * a_dim1], lda, &c_b55, &x[
+ i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b56, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ cgemv_("No transpose", &i__2, &i__3, &c_b56, &a[i__ * a_dim1
+ + 1], lda, &a[i__ + i__ * a_dim1], lda, &c_b55, &x[
+ i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b56, &x[
+ i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ cscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__ + 1;
+ clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+
+/* Update A(i+1:m,i) */
+
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &y[i__ + y_dim1], ldy);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + 1 +
+ a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b56, &a[i__ +
+ 1 + i__ * a_dim1], &c__1);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &y[i__ + y_dim1], ldy);
+ i__2 = *m - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__, &q__1, &x[i__ + 1 +
+ x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b56, &a[
+ i__ + 1 + i__ * a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+2:m,i) */
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ clarfg_(&i__2, &alpha, &a[min(i__3,*m) + i__ * a_dim1], &c__1,
+ &tauq[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + 1 + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &a[i__ +
+ 1 + (i__ + 1) * a_dim1], lda, &a[i__ + 1 + i__ *
+ a_dim1], &c__1, &c_b55, &y[i__ + 1 + i__ * y_dim1], &
+ c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &a[i__ +
+ 1 + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b55, &y[i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b56, &y[
+ i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ cgemv_("Conjugate transpose", &i__2, &i__, &c_b56, &x[i__ + 1
+ + x_dim1], ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b55, &y[i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Conjugate transpose", &i__, &i__2, &q__1, &a[(i__ + 1)
+ * a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+ c_b56, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ cscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+ } else {
+ i__2 = *n - i__ + 1;
+ clacgv_(&i__2, &a[i__ + i__ * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of CLABRD */
+
+} /* clabrd_ */
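+
+/*
+   Editor's note -- illustrative sketch, not part of LAPACK: per the
+   Further Details above, a caller such as CGEBRD uses the returned X and Y
+   to update the unreduced part of A as A := A - V*Y' - X*U', i.e. two
+   rank-NB updates.  The hypothetical helper below shows one such update,
+   A := A - B*C', with naive loops (f2c typedefs assumed).
+*/
+static void sketch_rank_nb_update(integer m, integer n, integer nb,
+        complex *a, integer lda, complex *b, integer ldb,
+        complex *c, integer ldc)
+{
+    integer i, j, p;
+    for (j = 0; j < n; ++j) {
+        for (i = 0; i < m; ++i) {
+            complex s = a[i + j * lda];
+            for (p = 0; p < nb; ++p) {
+                complex bp = b[i + p * ldb], cp = c[j + p * ldc];
+                /* s -= b(i,p) * conj(c(j,p)) */
+                s.r -= bp.r * cp.r + bp.i * cp.i;
+                s.i -= bp.i * cp.r - bp.r * cp.i;
+            }
+            a[i + j * lda] = s;
+        }
+    }
+}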
+
+/* Subroutine */ int clacgv_(integer *n, complex *x, integer *incx)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, ioff;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ CLACGV conjugates a complex vector of length N.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The length of the vector X. N >= 0.
+
+ X (input/output) COMPLEX array, dimension
+ (1+(N-1)*abs(INCX))
+ On entry, the vector of length N to be conjugated.
+ On exit, X is overwritten with conjg(X).
+
+ INCX (input) INTEGER
+ The spacing between successive elements of X.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*incx == 1) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ r_cnjg(&q__1, &x[i__]);
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+/* L10: */
+ }
+ } else {
+ ioff = 1;
+ if (*incx < 0) {
+ ioff = 1 - (*n - 1) * *incx;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ioff;
+ r_cnjg(&q__1, &x[ioff]);
+ x[i__2].r = q__1.r, x[i__2].i = q__1.i;
+ ioff += *incx;
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of CLACGV */
+
+} /* clacgv_ */
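+
+/*
+   Editor's note -- illustrative sketch, not part of LAPACK: CLACGV is the
+   vector conjugation used throughout the reductions in this file (see the
+   clacgv_ calls in clabrd_ above, which conjugate one row of a
+   column-major matrix by passing the leading dimension as the stride).
+   A minimal hypothetical caller, assuming the f2c typedefs:
+*/
+static void sketch_conjugate_row(complex *row, integer lda, integer ncols)
+{
+    /* Elements of a row are LDA apart in column-major storage. */
+    clacgv_(&ncols, row, &lda);
+}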
+
+/* Subroutine */ int clacp2_(char *uplo, integer *m, integer *n, real *a,
+ integer *lda, complex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CLACP2 copies all or part of a real two-dimensional matrix A to a
+ complex matrix B.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies the part of the matrix A to be copied to B.
+ = 'U': Upper triangular part
+ = 'L': Lower triangular part
+ Otherwise: All of the matrix A
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input) REAL array, dimension (LDA,N)
+ The m by n matrix A. If UPLO = 'U', only the upper trapezium
+ is accessed; if UPLO = 'L', only the lower trapezium is
+ accessed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ B (output) COMPLEX array, dimension (LDB,N)
+ On exit, B = A in the locations specified by UPLO.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,M).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4], b[i__3].i = 0.f;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ return 0;
+
+/* End of CLACP2 */
+
+} /* clacp2_ */
+
+/* Subroutine */ int clacpy_(char *uplo, integer *m, integer *n, complex *a,
+ integer *lda, complex *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ CLACPY copies all or part of a two-dimensional matrix A to another
+ matrix B.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies the part of the matrix A to be copied to B.
+ = 'U': Upper triangular part
+ = 'L': Lower triangular part
+ Otherwise: All of the matrix A
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input) COMPLEX array, dimension (LDA,N)
+ The m by n matrix A. If UPLO = 'U', only the upper trapezium
+ is accessed; if UPLO = 'L', only the lower trapezium is
+ accessed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ B (output) COMPLEX array, dimension (LDB,N)
+ On exit, B = A in the locations specified by UPLO.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,M).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ i__4 = i__ + j * a_dim1;
+ b[i__3].r = a[i__4].r, b[i__3].i = a[i__4].i;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ return 0;
+
+/* End of CLACPY */
+
+} /* clacpy_ */
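+
+/*
+   Editor's note -- illustrative sketch, not part of LAPACK: CLACP2 and
+   CLACPY above share the same UPLO convention ('U' upper triangle,
+   'L' lower triangle, anything else the whole matrix).  A hypothetical
+   demonstration (f2c typedefs assumed):
+*/
+static void sketch_copy_by_triangles(complex *a, complex *b, integer n)
+{
+    /* Copy the upper triangle of A into B ... */
+    clacpy_("U", &n, &n, a, &n, b, &n);
+    /* ... then the lower triangle; together these reproduce
+       clacpy_("A", ...), with the diagonal copied twice. */
+    clacpy_("L", &n, &n, a, &n, b, &n);
+}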
+
+/* Subroutine */ int clacrm_(integer *m, integer *n, complex *a, integer *lda,
+ real *b, integer *ldb, complex *c__, integer *ldc, real *rwork)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, a_dim1, a_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5;
+ real r__1;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLACRM performs a very simple matrix-matrix multiplication:
+ C := A * B,
+ where A is M by N and complex; B is N by N and real;
+ C is M by N and complex.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A and of the matrix C.
+ M >= 0.
+
+ N (input) INTEGER
+ The number of columns and rows of the matrix B and
+ the number of columns of the matrix C.
+ N >= 0.
+
+ A (input) COMPLEX array, dimension (LDA, N)
+ A contains the M by N matrix A.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >=max(1,M).
+
+ B (input) REAL array, dimension (LDB, N)
+ B contains the N by N matrix B.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >=max(1,N).
+
+    C       (output) COMPLEX array, dimension (LDC, N)
+            On exit, C contains the M by N matrix A * B.
+
+    LDC     (input) INTEGER
+            The leading dimension of the array C. LDC >=max(1,M).
+
+ RWORK (workspace) REAL array, dimension (2*M*N)
+
+ =====================================================================
+
+
+ Quick return if possible.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --rwork;
+
+ /* Function Body */
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ rwork[(j - 1) * *m + i__] = a[i__3].r;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ l = *m * *n + 1;
+ sgemm_("N", "N", m, n, n, &c_b1011, &rwork[1], m, &b[b_offset], ldb, &
+ c_b320, &rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = l + (j - 1) * *m + i__ - 1;
+ c__[i__3].r = rwork[i__4], c__[i__3].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ rwork[(j - 1) * *m + i__] = r_imag(&a[i__ + j * a_dim1]);
+/* L50: */
+ }
+/* L60: */
+ }
+ sgemm_("N", "N", m, n, n, &c_b1011, &rwork[1], m, &b[b_offset], ldb, &
+ c_b320, &rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ r__1 = c__[i__4].r;
+ i__5 = l + (j - 1) * *m + i__ - 1;
+ q__1.r = r__1, q__1.i = rwork[i__5];
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+ return 0;
+
+/* End of CLACRM */
+
+} /* clacrm_ */
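+
+/*
+   Editor's note -- illustrative sketch, not part of LAPACK: because B is
+   real, CLACRM above can split C := A * B into two real SGEMMs,
+   Re(C) = Re(A)*B followed by Im(C) = Im(A)*B.  The hypothetical naive
+   reference below computes the same product directly (f2c typedefs
+   assumed).
+*/
+static void sketch_complex_times_real(integer m, integer n, complex *a,
+        integer lda, real *b, integer ldb, complex *c, integer ldc)
+{
+    integer i, j, p;
+    for (j = 0; j < n; ++j) {
+        for (i = 0; i < m; ++i) {
+            complex s;
+            s.r = 0.f, s.i = 0.f;
+            for (p = 0; p < n; ++p) {      /* B is N by N */
+                s.r += a[i + p * lda].r * b[p + j * ldb];
+                s.i += a[i + p * lda].i * b[p + j * ldb];
+            }
+            c[i + j * ldc] = s;
+        }
+    }
+}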
+
+/* Complex */ VOID cladiv_(complex * ret_val, complex *x, complex *y)
+{
+ /* System generated locals */
+ real r__1, r__2, r__3, r__4;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ static real zi, zr;
+ extern /* Subroutine */ int sladiv_(real *, real *, real *, real *, real *
+ , real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ CLADIV := X / Y, where X and Y are complex. The computation of X / Y
+    will not overflow on an intermediary step unless the result
+ overflows.
+
+ Arguments
+ =========
+
+ X (input) COMPLEX
+ Y (input) COMPLEX
+ The complex scalars X and Y.
+
+ =====================================================================
+*/
+
+
+ r__1 = x->r;
+ r__2 = r_imag(x);
+ r__3 = y->r;
+ r__4 = r_imag(y);
+ sladiv_(&r__1, &r__2, &r__3, &r__4, &zr, &zi);
+ q__1.r = zr, q__1.i = zi;
+ ret_val->r = q__1.r, ret_val->i = q__1.i;
+
+ return ;
+
+/* End of CLADIV */
+
+} /* cladiv_ */
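+
+/*
+   Editor's note -- illustrative sketch, not part of LAPACK: SLADIV, which
+   CLADIV defers to above, performs the division in real arithmetic using
+   Smith's algorithm, scaling by whichever component of Y is larger so that
+   no intermediate quantity overflows unless the result does.  A sketch of
+   that idea (f2c typedefs assumed; dabs is the f2c absolute-value macro):
+*/
+static void sketch_smith_division(real xr, real xi, real yr, real yi,
+        real *zr, real *zi)
+{
+    real t, d;
+    if (dabs(yr) >= dabs(yi)) {
+        t = yi / yr;               /* |t| <= 1, so no overflow here */
+        d = yr + yi * t;
+        *zr = (xr + xi * t) / d;
+        *zi = (xi - xr * t) / d;
+    } else {
+        t = yr / yi;
+        d = yi + yr * t;
+        *zr = (xr * t + xi) / d;
+        *zi = (xi * t - xr) / d;
+    }
+}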
+
+/* Subroutine */ int claed0_(integer *qsiz, integer *n, real *d__, real *e,
+ complex *q, integer *ldq, complex *qstore, integer *ldqs, real *rwork,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, j, k, ll, iq, lgn, msd2, smm1, spm1, spm2;
+ static real temp;
+ static integer curr, iperm;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ static integer indxq, iwrem;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ static integer iqptr;
+ extern /* Subroutine */ int claed7_(integer *, integer *, integer *,
+ integer *, integer *, integer *, real *, complex *, integer *,
+ real *, integer *, real *, integer *, integer *, integer *,
+ integer *, integer *, real *, complex *, real *, integer *,
+ integer *);
+ static integer tlvls;
+ extern /* Subroutine */ int clacrm_(integer *, integer *, complex *,
+ integer *, real *, integer *, complex *, integer *, real *);
+ static integer igivcl;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer igivnm, submat, curprb, subpbs, igivpt, curlvl, matsiz,
+ iprmpt, smlsiz;
+ extern /* Subroutine */ int ssteqr_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ Using the divide and conquer method, CLAED0 computes all eigenvalues
+ of a symmetric tridiagonal matrix which is one diagonal block of
+    the tridiagonal matrix obtained by reducing a dense or band
+    Hermitian matrix, together with the corresponding eigenvectors of
+    the dense or band matrix.
+
+ Arguments
+ =========
+
+ QSIZ (input) INTEGER
+ The dimension of the unitary matrix used to reduce
+ the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the diagonal elements of the tridiagonal matrix.
+ On exit, the eigenvalues in ascending order.
+
+ E (input/output) REAL array, dimension (N-1)
+ On entry, the off-diagonal elements of the tridiagonal matrix.
+ On exit, E has been destroyed.
+
+ Q (input/output) COMPLEX array, dimension (LDQ,N)
+           On entry, Q must contain a QSIZ x N matrix whose columns
+           are orthonormal. It is a part of the unitary matrix
+ that reduces the full dense Hermitian matrix to a
+ (reducible) symmetric tridiagonal matrix.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ IWORK (workspace) INTEGER array,
+ the dimension of IWORK must be at least
+ 6 + 6*N + 5*N*lg N
+ ( lg( N ) = smallest integer k
+ such that 2^k >= N )
+
+ RWORK (workspace) REAL array,
+ dimension (1 + 3*N + 2*N*lg N + 3*N**2)
+ ( lg( N ) = smallest integer k
+ such that 2^k >= N )
+
+ QSTORE (workspace) COMPLEX array, dimension (LDQS, N)
+           Used to store parts of the eigenvector matrix while the
+           updating matrix multiplications take place.
+
+ LDQS (input) INTEGER
+ The leading dimension of the array QSTORE.
+ LDQS >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute an eigenvalue while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through mod(INFO,N+1).
+
+ =====================================================================
+
+ Warning: N could be as big as QSIZ!
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ qstore_dim1 = *ldqs;
+ qstore_offset = 1 + qstore_dim1;
+ qstore -= qstore_offset;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+/*
+ IF( ICOMPQ .LT. 0 .OR. ICOMPQ .GT. 2 ) THEN
+ INFO = -1
+ ELSE IF( ( ICOMPQ .EQ. 1 ) .AND. ( QSIZ .LT. MAX( 0, N ) ) )
+ $ THEN
+*/
+ if (*qsiz < max(0,*n)) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ } else if (*ldqs < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLAED0", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ smlsiz = ilaenv_(&c__9, "CLAED0", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+
+/*
+ Determine the size and placement of the submatrices, and save in
+ the leading elements of IWORK.
+*/
+
+ iwork[1] = *n;
+ subpbs = 1;
+ tlvls = 0;
+L10:
+ if (iwork[subpbs] > smlsiz) {
+ for (j = subpbs; j >= 1; --j) {
+ iwork[j * 2] = (iwork[j] + 1) / 2;
+ iwork[((j) << (1)) - 1] = iwork[j] / 2;
+/* L20: */
+ }
+ ++tlvls;
+ subpbs <<= 1;
+ goto L10;
+ }
+ i__1 = subpbs;
+ for (j = 2; j <= i__1; ++j) {
+ iwork[j] += iwork[j - 1];
+/* L30: */
+ }
+
+/*
+ Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1
+ using rank-1 modifications (cuts).
+*/
+
+ spm1 = subpbs - 1;
+ i__1 = spm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ submat = iwork[i__] + 1;
+ smm1 = submat - 1;
+ d__[smm1] -= (r__1 = e[smm1], dabs(r__1));
+ d__[submat] -= (r__1 = e[smm1], dabs(r__1));
+/* L40: */
+ }
+
+ indxq = ((*n) << (2)) + 3;
+
+/*
+ Set up workspaces for eigenvalues only/accumulate new vectors
+ routine
+*/
+
+ temp = log((real) (*n)) / log(2.f);
+ lgn = (integer) temp;
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ iprmpt = indxq + *n + 1;
+ iperm = iprmpt + *n * lgn;
+ iqptr = iperm + *n * lgn;
+ igivpt = iqptr + *n + 2;
+ igivcl = igivpt + *n * lgn;
+
+ igivnm = 1;
+ iq = igivnm + ((*n) << (1)) * lgn;
+/* Computing 2nd power */
+ i__1 = *n;
+ iwrem = iq + i__1 * i__1 + 1;
+/* Initialize pointers */
+ i__1 = subpbs;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ iwork[iprmpt + i__] = 1;
+ iwork[igivpt + i__] = 1;
+/* L50: */
+ }
+ iwork[iqptr] = 1;
+
+/*
+ Solve each submatrix eigenproblem at the bottom of the divide and
+ conquer tree.
+*/
+
+ curr = 0;
+ i__1 = spm1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[1];
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 1] - iwork[i__];
+ }
+ ll = iq - 1 + iwork[iqptr + curr];
+ ssteqr_("I", &matsiz, &d__[submat], &e[submat], &rwork[ll], &matsiz, &
+ rwork[1], info);
+ clacrm_(qsiz, &matsiz, &q[submat * q_dim1 + 1], ldq, &rwork[ll], &
+ matsiz, &qstore[submat * qstore_dim1 + 1], ldqs, &rwork[iwrem]
+ );
+/* Computing 2nd power */
+ i__2 = matsiz;
+ iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2;
+ ++curr;
+ if (*info > 0) {
+ *info = submat * (*n + 1) + submat + matsiz - 1;
+ return 0;
+ }
+ k = 1;
+ i__2 = iwork[i__ + 1];
+ for (j = submat; j <= i__2; ++j) {
+ iwork[indxq + j] = k;
+ ++k;
+/* L60: */
+ }
+/* L70: */
+ }
+
+/*
+ Successively merge eigensystems of adjacent submatrices
+ into eigensystem for the corresponding larger matrix.
+
+ while ( SUBPBS > 1 )
+*/
+
+ curlvl = 1;
+L80:
+ if (subpbs > 1) {
+ spm2 = subpbs - 2;
+ i__1 = spm2;
+ for (i__ = 0; i__ <= i__1; i__ += 2) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[2];
+ msd2 = iwork[1];
+ curprb = 0;
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 2] - iwork[i__];
+ msd2 = matsiz / 2;
+ ++curprb;
+ }
+
+/*
+ Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2)
+ into an eigensystem of size MATSIZ. CLAED7 handles the case
+ when the eigenvectors of a full or band Hermitian matrix (which
+ was reduced to tridiagonal form) are desired.
+
+ I am free to use Q as a valuable working space until Loop 150.
+*/
+
+ claed7_(&matsiz, &msd2, qsiz, &tlvls, &curlvl, &curprb, &d__[
+ submat], &qstore[submat * qstore_dim1 + 1], ldqs, &e[
+ submat + msd2 - 1], &iwork[indxq + submat], &rwork[iq], &
+ iwork[iqptr], &iwork[iprmpt], &iwork[iperm], &iwork[
+ igivpt], &iwork[igivcl], &rwork[igivnm], &q[submat *
+ q_dim1 + 1], &rwork[iwrem], &iwork[subpbs + 1], info);
+ if (*info > 0) {
+ *info = submat * (*n + 1) + submat + matsiz - 1;
+ return 0;
+ }
+ iwork[i__ / 2 + 1] = iwork[i__ + 2];
+/* L90: */
+ }
+ subpbs /= 2;
+ ++curlvl;
+ goto L80;
+ }
+
+/*
+ end while
+
+ Re-merge the eigenvalues/vectors which were deflated at the final
+ merge step.
+*/
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ rwork[i__] = d__[j];
+ ccopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1 + 1]
+ , &c__1);
+/* L100: */
+ }
+ scopy_(n, &rwork[1], &c__1, &d__[1], &c__1);
+
+ return 0;
+
+/* End of CLAED0 */
+
+} /* claed0_ */
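+
+/*
+   Editor's note -- illustrative sketch, not part of LAPACK: the IWORK and
+   RWORK dimensions documented above involve lg(N), the smallest integer k
+   with 2^k >= N; the body computes it via log()/log(2.f) plus two
+   defensive increments.  A hypothetical sizing helper (f2c typedefs
+   assumed):
+*/
+static void sketch_claed0_workspace(integer n, integer *liwork,
+        integer *lrwork)
+{
+    integer lgn = 0;
+    while ((1 << lgn) < n) {               /* smallest k with 2^k >= n */
+        ++lgn;
+    }
+    *liwork = 6 + 6 * n + 5 * n * lgn;               /* IWORK elements */
+    *lrwork = 1 + 3 * n + 2 * n * lgn + 3 * n * n;   /* RWORK elements */
+}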
+
+/* Subroutine */ int claed7_(integer *n, integer *cutpnt, integer *qsiz,
+ integer *tlvls, integer *curlvl, integer *curpbm, real *d__, complex *
+ q, integer *ldq, real *rho, integer *indxq, real *qstore, integer *
+ qptr, integer *prmptr, integer *perm, integer *givptr, integer *
+ givcol, real *givnum, complex *work, real *rwork, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, k, n1, n2, iq, iw, iz, ptr, ind1, ind2, indx, curr,
+ indxc, indxp;
+ extern /* Subroutine */ int claed8_(integer *, integer *, integer *,
+ complex *, integer *, real *, real *, integer *, real *, real *,
+ complex *, integer *, real *, integer *, integer *, integer *,
+ integer *, integer *, integer *, real *, integer *), slaed9_(
+ integer *, integer *, integer *, integer *, real *, real *,
+ integer *, real *, real *, real *, real *, integer *, integer *),
+ slaeda_(integer *, integer *, integer *, integer *, integer *,
+ integer *, integer *, integer *, real *, real *, integer *, real *
+ , real *, integer *);
+ static integer idlmda;
+ extern /* Subroutine */ int clacrm_(integer *, integer *, complex *,
+ integer *, real *, integer *, complex *, integer *, real *),
+ xerbla_(char *, integer *), slamrg_(integer *, integer *,
+ real *, integer *, integer *, integer *);
+ static integer coltyp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLAED7 computes the updated eigensystem of a diagonal
+ matrix after modification by a rank-one symmetric matrix. This
+ routine is used only for the eigenproblem which requires all
+ eigenvalues and optionally eigenvectors of a dense or banded
+ Hermitian matrix that has been reduced to tridiagonal form.
+
+ T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out)
+
+ where Z = Q'u, u is a vector of length N with ones in the
+    CUTPNT-th and (CUTPNT + 1)-th elements and zeros elsewhere.
+
+ The eigenvectors of the original matrix are stored in Q, and the
+ eigenvalues are in D. The algorithm consists of three stages:
+
+ The first stage consists of deflating the size of the problem
+ when there are multiple eigenvalues or if there is a zero in
+    the Z vector. For each such occurrence the dimension of the
+ secular equation problem is reduced by one. This stage is
+ performed by the routine SLAED2.
+
+ The second stage consists of calculating the updated
+ eigenvalues. This is done by finding the roots of the secular
+ equation via the routine SLAED4 (as called by SLAED3).
+ This routine also calculates the eigenvectors of the current
+ problem.
+
+ The final stage consists of computing the updated eigenvectors
+ directly using the updated eigenvalues. The eigenvectors for
+ the current problem are multiplied with the eigenvectors from
+ the overall problem.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ CUTPNT (input) INTEGER
+ Contains the location of the last eigenvalue in the leading
+ sub-matrix. min(1,N) <= CUTPNT <= N.
+
+ QSIZ (input) INTEGER
+ The dimension of the unitary matrix used to reduce
+ the full matrix to tridiagonal form. QSIZ >= N.
+
+ TLVLS (input) INTEGER
+ The total number of merging levels in the overall divide and
+ conquer tree.
+
+ CURLVL (input) INTEGER
+ The current level in the overall merge routine,
+ 0 <= curlvl <= tlvls.
+
+ CURPBM (input) INTEGER
+ The current problem in the current level in the overall
+ merge routine (counting from upper left to lower right).
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the eigenvalues of the rank-1-perturbed matrix.
+ On exit, the eigenvalues of the repaired matrix.
+
+ Q (input/output) COMPLEX array, dimension (LDQ,N)
+ On entry, the eigenvectors of the rank-1-perturbed matrix.
+ On exit, the eigenvectors of the repaired tridiagonal matrix.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ RHO (input) REAL
+ Contains the subdiagonal element used to create the rank-1
+ modification.
+
+ INDXQ (output) INTEGER array, dimension (N)
+ This contains the permutation which will reintegrate the
+ subproblem just solved back into sorted order,
+           i.e. D( INDXQ( I = 1, N ) ) will be in ascending order.
+
+ IWORK (workspace) INTEGER array, dimension (4*N)
+
+ RWORK (workspace) REAL array,
+ dimension (3*N+2*QSIZ*N)
+
+ WORK (workspace) COMPLEX array, dimension (QSIZ*N)
+
+ QSTORE (input/output) REAL array, dimension (N**2+1)
+ Stores eigenvectors of submatrices encountered during
+ divide and conquer, packed together. QPTR points to
+ beginning of the submatrices.
+
+ QPTR (input/output) INTEGER array, dimension (N+2)
+ List of indices pointing to beginning of submatrices stored
+ in QSTORE. The submatrices are numbered starting at the
+ bottom left of the divide and conquer tree, from left to
+ right and bottom to top.
+
+ PRMPTR (input) INTEGER array, dimension (N lg N)
+ Contains a list of pointers which indicate where in PERM a
+ level's permutation is stored. PRMPTR(i+1) - PRMPTR(i)
+ indicates the size of the permutation and also the size of
+ the full, non-deflated problem.
+
+ PERM (input) INTEGER array, dimension (N lg N)
+ Contains the permutations (from deflation and sorting) to be
+ applied to each eigenblock.
+
+ GIVPTR (input) INTEGER array, dimension (N lg N)
+ Contains a list of pointers which indicate where in GIVCOL a
+ level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i)
+ indicates the number of Givens rotations.
+
+ GIVCOL (input) INTEGER array, dimension (2, N lg N)
+            Each pair of numbers indicates a pair of columns involved
+            in a Givens rotation.
+
+ GIVNUM (input) REAL array, dimension (2, N lg N)
+ Each number indicates the S value to be used in the
+ corresponding Givens rotation.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+           > 0:  if INFO = 1, an eigenvalue did not converge.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --qstore;
+ --qptr;
+ --prmptr;
+ --perm;
+ --givptr;
+ givcol -= 3;
+ givnum -= 3;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+/*
+ IF( ICOMPQ.LT.0 .OR. ICOMPQ.GT.1 ) THEN
+ INFO = -1
+ ELSE IF( N.LT.0 ) THEN
+*/
+ if (*n < 0) {
+ *info = -1;
+ } else if ((min(1,*n) > *cutpnt) || (*n < *cutpnt)) {
+ *info = -2;
+ } else if (*qsiz < *n) {
+ *info = -3;
+ } else if (*ldq < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLAED7", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/*
+ The following values are for bookkeeping purposes only. They are
+ integer pointers which indicate the portion of the workspace
+ used by a particular array in SLAED2 and SLAED3.
+*/
+
+ iz = 1;
+ idlmda = iz + *n;
+ iw = idlmda + *n;
+ iq = iw + *n;
+
+ indx = 1;
+ indxc = indx + *n;
+ coltyp = indxc + *n;
+ indxp = coltyp + *n;
+
+/*
+ Form the z-vector which consists of the last row of Q_1 and the
+ first row of Q_2.
+*/
+
+ ptr = pow_ii(&c__2, tlvls) + 1;
+ i__1 = *curlvl - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *tlvls - i__;
+ ptr += pow_ii(&c__2, &i__2);
+/* L10: */
+ }
+ curr = ptr + *curpbm;
+ slaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], &
+ givcol[3], &givnum[3], &qstore[1], &qptr[1], &rwork[iz], &rwork[
+ iz + *n], info);
+
+/*
+ When solving the final problem, we no longer need the stored data,
+ so we will overwrite the data from this level onto the previously
+ used storage space.
+*/
+
+ if (*curlvl == *tlvls) {
+ qptr[curr] = 1;
+ prmptr[curr] = 1;
+ givptr[curr] = 1;
+ }
+
+/* Sort and Deflate eigenvalues. */
+
+ claed8_(&k, n, qsiz, &q[q_offset], ldq, &d__[1], rho, cutpnt, &rwork[iz],
+ &rwork[idlmda], &work[1], qsiz, &rwork[iw], &iwork[indxp], &iwork[
+ indx], &indxq[1], &perm[prmptr[curr]], &givptr[curr + 1], &givcol[
+ ((givptr[curr]) << (1)) + 1], &givnum[((givptr[curr]) << (1)) + 1]
+ , info);
+ prmptr[curr + 1] = prmptr[curr] + *n;
+ givptr[curr + 1] += givptr[curr];
+
+/* Solve Secular Equation. */
+
+ if (k != 0) {
+ slaed9_(&k, &c__1, &k, n, &d__[1], &rwork[iq], &k, rho, &rwork[idlmda]
+ , &rwork[iw], &qstore[qptr[curr]], &k, info);
+ clacrm_(qsiz, &k, &work[1], qsiz, &qstore[qptr[curr]], &k, &q[
+ q_offset], ldq, &rwork[iq]);
+/* Computing 2nd power */
+ i__1 = k;
+ qptr[curr + 1] = qptr[curr] + i__1 * i__1;
+ if (*info != 0) {
+ return 0;
+ }
+
+/* Prepare the INDXQ sorting permutation. */
+
+ n1 = k;
+ n2 = *n - k;
+ ind1 = 1;
+ ind2 = *n;
+ slamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+ } else {
+ qptr[curr + 1] = qptr[curr];
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indxq[i__] = i__;
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of CLAED7 */
+
+} /* claed7_ */
+
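+/*
+   A minimal standalone sketch (not part of LAPACK) of the workspace
+   bookkeeping idiom used in CLAED7 above: one flat RWORK buffer is
+   carved into the Z, DLAMDA, W and Q sections by 1-based integer
+   offsets, exactly as the iz/idlmda/iw/iq pointers are computed before
+   the call to SLAEDA.  The helper name is hypothetical; the f2c
+   "integer" typedef is assumed to be in scope.
+*/
+static void claed7_workspace_layout(integer n, integer *iz,
+        integer *idlmda, integer *iw, integer *iq)
+{
+    /* 1-based offsets into an RWORK buffer of size 3*N + 2*QSIZ*N */
+    *iz = 1;                 /* z-vector, length N                   */
+    *idlmda = *iz + n;       /* deflated eigenvalues, length N       */
+    *iw = *idlmda + n;       /* deflation-altered z-vector, length N */
+    *iq = *iw + n;           /* eigenvector workspace, 2*QSIZ*N      */
+}
+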
+/* Subroutine */ int claed8_(integer *k, integer *n, integer *qsiz, complex *
+ q, integer *ldq, real *d__, real *rho, integer *cutpnt, real *z__,
+ real *dlamda, complex *q2, integer *ldq2, real *w, integer *indxp,
+ integer *indx, integer *indxq, integer *perm, integer *givptr,
+ integer *givcol, real *givnum, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real c__;
+ static integer i__, j;
+ static real s, t;
+ static integer k2, n1, n2, jp, n1p1;
+ static real eps, tau, tol;
+ static integer jlam, imax, jmax;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ ccopy_(integer *, complex *, integer *, complex *, integer *),
+ csrot_(integer *, complex *, integer *, complex *, integer *,
+ real *, real *), scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slamrg_(integer *, integer *, real *, integer
+ *, integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLAED8 merges the two sets of eigenvalues together into a single
+ sorted set. Then it tries to deflate the size of the problem.
+ There are two ways in which deflation can occur: when two or more
+ eigenvalues are close together or if there is a tiny element in the
+ Z vector. For each such occurrence the order of the related secular
+ equation problem is reduced by one.
+
+ Arguments
+ =========
+
+ K (output) INTEGER
+ Contains the number of non-deflated eigenvalues.
+ This is the order of the related secular equation.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ QSIZ (input) INTEGER
+ The dimension of the unitary matrix used to reduce
+ the dense or band matrix to tridiagonal form.
+           QSIZ >= N.
+
+ Q (input/output) COMPLEX array, dimension (LDQ,N)
+ On entry, Q contains the eigenvectors of the partially solved
+ system which has been previously updated in matrix
+ multiplies with other partially solved eigensystems.
+ On exit, Q contains the trailing (N-K) updated eigenvectors
+ (those which were deflated) in its last N-K columns.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max( 1, N ).
+
+ D (input/output) REAL array, dimension (N)
+ On entry, D contains the eigenvalues of the two submatrices to
+ be combined. On exit, D contains the trailing (N-K) updated
+ eigenvalues (those which were deflated) sorted into increasing
+ order.
+
+ RHO (input/output) REAL
+ Contains the off diagonal element associated with the rank-1
+ cut which originally split the two submatrices which are now
+ being recombined. RHO is modified during the computation to
+ the value required by SLAED3.
+
+ CUTPNT (input) INTEGER
+ Contains the location of the last eigenvalue in the leading
+ sub-matrix. MIN(1,N) <= CUTPNT <= N.
+
+ Z (input) REAL array, dimension (N)
+ On input this vector contains the updating vector (the last
+ row of the first sub-eigenvector matrix and the first row of
+ the second sub-eigenvector matrix). The contents of Z are
+ destroyed during the updating process.
+
+ DLAMDA (output) REAL array, dimension (N)
+ Contains a copy of the first K eigenvalues which will be used
+ by SLAED3 to form the secular equation.
+
+ Q2 (output) COMPLEX array, dimension (LDQ2,N)
+           Contains a copy of the first K eigenvectors which will be used
+ by SLAED7 in a matrix multiply (SGEMM) to update the new
+ eigenvectors.
+
+ LDQ2 (input) INTEGER
+ The leading dimension of the array Q2. LDQ2 >= max( 1, N ).
+
+ W (output) REAL array, dimension (N)
+ This will hold the first k values of the final
+ deflation-altered z-vector and will be passed to SLAED3.
+
+ INDXP (workspace) INTEGER array, dimension (N)
+ This will contain the permutation used to place deflated
+ values of D at the end of the array. On output INDXP(1:K)
+ points to the nondeflated D-values and INDXP(K+1:N)
+ points to the deflated eigenvalues.
+
+ INDX (workspace) INTEGER array, dimension (N)
+ This will contain the permutation used to sort the contents of
+ D into ascending order.
+
+ INDXQ (input) INTEGER array, dimension (N)
+ This contains the permutation which separately sorts the two
+ sub-problems in D into ascending order. Note that elements in
+ the second half of this permutation must first have CUTPNT
+ added to their values in order to be accurate.
+
+ PERM (output) INTEGER array, dimension (N)
+ Contains the permutations (from deflation and sorting) to be
+ applied to each eigenblock.
+
+ GIVPTR (output) INTEGER
+ Contains the number of Givens rotations which took place in
+ this subproblem.
+
+ GIVCOL (output) INTEGER array, dimension (2, N)
+           Each pair of numbers indicates a pair of columns involved in
+           a Givens rotation.
+
+ GIVNUM (output) REAL array, dimension (2, N)
+ Each number indicates the S value to be used in the
+ corresponding Givens rotation.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --d__;
+ --z__;
+ --dlamda;
+ q2_dim1 = *ldq2;
+ q2_offset = 1 + q2_dim1;
+ q2 -= q2_offset;
+ --w;
+ --indxp;
+ --indx;
+ --indxq;
+ --perm;
+ givcol -= 3;
+ givnum -= 3;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -2;
+ } else if (*qsiz < *n) {
+ *info = -3;
+ } else if (*ldq < max(1,*n)) {
+ *info = -5;
+ } else if ((*cutpnt < min(1,*n)) || (*cutpnt > *n)) {
+ *info = -8;
+ } else if (*ldq2 < max(1,*n)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLAED8", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ n1 = *cutpnt;
+ n2 = *n - n1;
+ n1p1 = n1 + 1;
+
+ if (*rho < 0.f) {
+ sscal_(&n2, &c_b1290, &z__[n1p1], &c__1);
+ }
+
+/*
+   Normalize z so that norm(z) = 1.  Since z is the concatenation of
+   two normalized vectors, norm2(z) = sqrt(2).
+*/
+
+ t = 1.f / sqrt(2.f);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ indx[j] = j;
+/* L10: */
+ }
+ sscal_(n, &t, &z__[1], &c__1);
+ *rho = (r__1 = *rho * 2.f, dabs(r__1));
+
+/* Sort the eigenvalues into increasing order */
+
+ i__1 = *n;
+ for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) {
+ indxq[i__] += *cutpnt;
+/* L20: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = d__[indxq[i__]];
+ w[i__] = z__[indxq[i__]];
+/* L30: */
+ }
+ i__ = 1;
+ j = *cutpnt + 1;
+ slamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = dlamda[indx[i__]];
+ z__[i__] = w[indx[i__]];
+/* L40: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ imax = isamax_(n, &z__[1], &c__1);
+ jmax = isamax_(n, &d__[1], &c__1);
+ eps = slamch_("Epsilon");
+ tol = eps * 8.f * (r__1 = d__[jmax], dabs(r__1));
+
+/*
+ If the rank-1 modifier is small enough, no more needs to be done
+ -- except to reorganize Q so that its columns correspond with the
+ elements in D.
+*/
+
+ if (*rho * (r__1 = z__[imax], dabs(r__1)) <= tol) {
+ *k = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ perm[j] = indxq[indx[j]];
+ ccopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1]
+ , &c__1);
+/* L50: */
+ }
+ clacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq);
+ return 0;
+ }
+
+/*
+   If there are multiple eigenvalues then the problem deflates. Here
+   the number of equal eigenvalues is found. As each equal
+ eigenvalue is found, an elementary reflector is computed to rotate
+ the corresponding eigensubspace so that the corresponding
+ components of Z are zero in this new basis.
+*/
+
+ *k = 0;
+ *givptr = 0;
+ k2 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ if (j == *n) {
+ goto L100;
+ }
+ } else {
+ jlam = j;
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ ++j;
+ if (j > *n) {
+ goto L90;
+ }
+ if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+ s = z__[jlam];
+ c__ = z__[j];
+
+/*
+ Find sqrt(a**2+b**2) without overflow or
+ destructive underflow.
+*/
+
+ tau = slapy2_(&c__, &s);
+ t = d__[j] - d__[jlam];
+ c__ /= tau;
+ s = -s / tau;
+ if ((r__1 = t * c__ * s, dabs(r__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ z__[j] = tau;
+ z__[jlam] = 0.f;
+
+/* Record the appropriate Givens rotation */
+
+ ++(*givptr);
+ givcol[((*givptr) << (1)) + 1] = indxq[indx[jlam]];
+ givcol[((*givptr) << (1)) + 2] = indxq[indx[j]];
+ givnum[((*givptr) << (1)) + 1] = c__;
+ givnum[((*givptr) << (1)) + 2] = s;
+ csrot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[indxq[
+ indx[j]] * q_dim1 + 1], &c__1, &c__, &s);
+ t = d__[jlam] * c__ * c__ + d__[j] * s * s;
+ d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__;
+ d__[jlam] = t;
+ --k2;
+ i__ = 1;
+L80:
+ if (k2 + i__ <= *n) {
+ if (d__[jlam] < d__[indxp[k2 + i__]]) {
+ indxp[k2 + i__ - 1] = indxp[k2 + i__];
+ indxp[k2 + i__] = jlam;
+ ++i__;
+ goto L80;
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ jlam = j;
+ } else {
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+ jlam = j;
+ }
+ }
+ goto L70;
+L90:
+
+/* Record the last eigenvalue. */
+
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+
+L100:
+
+/*
+ Sort the eigenvalues and corresponding eigenvectors into DLAMDA
+ and Q2 respectively. The eigenvalues/vectors which were not
+ deflated go into the first K slots of DLAMDA and Q2 respectively,
+ while those which were deflated go into the last N - K slots.
+*/
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jp = indxp[j];
+ dlamda[j] = d__[jp];
+ perm[j] = indxq[indx[jp]];
+ ccopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1], &
+ c__1);
+/* L110: */
+ }
+
+/*
+ The deflated eigenvalues and their corresponding vectors go back
+ into the last N - K slots of D and Q respectively.
+*/
+
+ if (*k < *n) {
+ i__1 = *n - *k;
+ scopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ i__1 = *n - *k;
+ clacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*k +
+ 1) * q_dim1 + 1], ldq);
+ }
+
+ return 0;
+
+/* End of CLAED8 */
+
+} /* claed8_ */
+
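+/*
+   A self-contained sketch (not part of LAPACK) of the pairwise
+   deflation test used in CLAED8 above: two eigenvalues d1, d2 with
+   z-components z1, z2 deflate when |(d2 - d1) * c * s| <= tol, where
+   (c, s) is the Givens rotation that zeroes z1.  On success the pair
+   is rotated in place and 1 is returned.  The helper name is
+   hypothetical; slapy2_, dabs and the f2c typedefs are those already
+   used above.
+*/
+static int try_deflate_pair(real *d1, real *d2, real *z1, real *z2,
+        real tol)
+{
+    extern doublereal slapy2_(real *, real *);
+    real s = *z1, c = *z2;
+    real tau = (real) slapy2_(&c, &s);  /* sqrt(c^2+s^2), no overflow */
+    real t = *d2 - *d1;
+    real prod;
+
+    c /= tau;
+    s = -s / tau;
+    prod = t * c * s;
+    if (dabs(prod) > tol) {
+        return 0;               /* not close enough: no deflation    */
+    }
+    *z2 = tau;                  /* rotated z-components are (0, tau) */
+    *z1 = 0.f;
+    t = *d1 * c * c + *d2 * s * s;
+    *d2 = *d1 * s * s + *d2 * c * c;
+    *d1 = t;                    /* 2x2 similarity on the diagonal    */
+    return 1;
+}
+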
+/* Subroutine */ int clahqr_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, complex *h__, integer *ldh, complex *w,
+ integer *iloz, integer *ihiz, complex *z__, integer *ldz, integer *
+ info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void c_sqrt(complex *, complex *), r_cnjg(complex *, complex *);
+ double c_abs(complex *);
+
+ /* Local variables */
+ static integer i__, j, k, l, m;
+ static real s;
+ static complex t, u, v[2], x, y;
+ static integer i1, i2;
+ static complex t1;
+ static real t2;
+ static complex v2;
+ static real h10;
+ static complex h11;
+ static real h21;
+ static complex h22;
+ static integer nh, nz;
+ static complex h11s;
+ static integer itn, its;
+ static real ulp;
+ static complex sum;
+ static real tst1;
+ static complex temp;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), ccopy_(integer *, complex *, integer *, complex *,
+ integer *);
+ static real rtemp, rwork[1];
+ extern /* Subroutine */ int clarfg_(integer *, complex *, complex *,
+ integer *, complex *);
+ extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+ extern doublereal slamch_(char *), clanhs_(char *, integer *,
+ complex *, integer *, real *);
+ static real smlnum;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CLAHQR is an auxiliary routine called by CHSEQR to update the
+ eigenvalues and Schur decomposition already computed by CHSEQR, by
+ dealing with the Hessenberg submatrix in rows and columns ILO to IHI.
+
+ Arguments
+ =========
+
+ WANTT (input) LOGICAL
+ = .TRUE. : the full Schur form T is required;
+ = .FALSE.: only eigenvalues are required.
+
+ WANTZ (input) LOGICAL
+ = .TRUE. : the matrix of Schur vectors Z is required;
+ = .FALSE.: Schur vectors are not required.
+
+ N (input) INTEGER
+ The order of the matrix H. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that H is already upper triangular in rows and
+ columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless ILO = 1).
+ CLAHQR works primarily with the Hessenberg submatrix in rows
+ and columns ILO to IHI, but applies transformations to all of
+ H if WANTT is .TRUE..
+ 1 <= ILO <= max(1,IHI); IHI <= N.
+
+ H (input/output) COMPLEX array, dimension (LDH,N)
+ On entry, the upper Hessenberg matrix H.
+ On exit, if WANTT is .TRUE., H is upper triangular in rows
+ and columns ILO:IHI, with any 2-by-2 diagonal blocks in
+ standard form. If WANTT is .FALSE., the contents of H are
+ unspecified on exit.
+
+ LDH (input) INTEGER
+ The leading dimension of the array H. LDH >= max(1,N).
+
+ W (output) COMPLEX array, dimension (N)
+ The computed eigenvalues ILO to IHI are stored in the
+ corresponding elements of W. If WANTT is .TRUE., the
+ eigenvalues are stored in the same order as on the diagonal
+ of the Schur form returned in H, with W(i) = H(i,i).
+
+ ILOZ (input) INTEGER
+ IHIZ (input) INTEGER
+ Specify the rows of Z to which transformations must be
+ applied if WANTZ is .TRUE..
+ 1 <= ILOZ <= ILO; IHI <= IHIZ <= N.
+
+ Z (input/output) COMPLEX array, dimension (LDZ,N)
+ If WANTZ is .TRUE., on entry Z must contain the current
+ matrix Z of transformations accumulated by CHSEQR, and on
+ exit Z has been updated; transformations are applied only to
+ the submatrix Z(ILOZ:IHIZ,ILO:IHI).
+ If WANTZ is .FALSE., Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ > 0: if INFO = i, CLAHQR failed to compute all the
+ eigenvalues ILO to IHI in a total of 30*(IHI-ILO+1)
+ iterations; elements i+1:ihi of W contain those
+ eigenvalues which have been successfully computed.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --w;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ i__1 = *ilo;
+ i__2 = *ilo + *ilo * h_dim1;
+ w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+ return 0;
+ }
+
+ nh = *ihi - *ilo + 1;
+ nz = *ihiz - *iloz + 1;
+
+/*
+ Set machine-dependent constants for the stopping criterion.
+ If norm(H) <= sqrt(OVFL), overflow should not occur.
+*/
+
+ ulp = slamch_("Precision");
+ smlnum = slamch_("Safe minimum") / ulp;
+
+/*
+ I1 and I2 are the indices of the first row and last column of H
+ to which transformations must be applied. If eigenvalues only are
+ being computed, I1 and I2 are set inside the main loop.
+*/
+
+ if (*wantt) {
+ i1 = 1;
+ i2 = *n;
+ }
+
+/* ITN is the total number of QR iterations allowed. */
+
+ itn = nh * 30;
+
+/*
+ The main loop begins here. I is the loop index and decreases from
+ IHI to ILO in steps of 1. Each iteration of the loop works
+ with the active submatrix in rows and columns L to I.
+ Eigenvalues I+1 to IHI have already converged. Either L = ILO, or
+ H(L,L-1) is negligible so that the matrix splits.
+*/
+
+ i__ = *ihi;
+L10:
+ if (i__ < *ilo) {
+ goto L130;
+ }
+
+/*
+ Perform QR iterations on rows and columns ILO to I until a
+ submatrix of order 1 splits off at the bottom because a
+ subdiagonal element has become negligible.
+*/
+
+ l = *ilo;
+ i__1 = itn;
+ for (its = 0; its <= i__1; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__2 = l + 1;
+ for (k = i__; k >= i__2; --k) {
+ i__3 = k - 1 + (k - 1) * h_dim1;
+ i__4 = k + k * h_dim1;
+ tst1 = (r__1 = h__[i__3].r, dabs(r__1)) + (r__2 = r_imag(&h__[k -
+ 1 + (k - 1) * h_dim1]), dabs(r__2)) + ((r__3 = h__[i__4]
+ .r, dabs(r__3)) + (r__4 = r_imag(&h__[k + k * h_dim1]),
+ dabs(r__4)));
+ if (tst1 == 0.f) {
+ i__3 = i__ - l + 1;
+ tst1 = clanhs_("1", &i__3, &h__[l + l * h_dim1], ldh, rwork);
+ }
+ i__3 = k + (k - 1) * h_dim1;
+/* Computing MAX */
+ r__2 = ulp * tst1;
+ if ((r__1 = h__[i__3].r, dabs(r__1)) <= dmax(r__2,smlnum)) {
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible */
+
+ i__2 = l + (l - 1) * h_dim1;
+ h__[i__2].r = 0.f, h__[i__2].i = 0.f;
+ }
+
+/* Exit from loop if a submatrix of order 1 has split off. */
+
+ if (l >= i__) {
+ goto L120;
+ }
+
+/*
+ Now the active submatrix is in rows and columns L to I. If
+ eigenvalues only are being computed, only the active submatrix
+ need be transformed.
+*/
+
+ if (! (*wantt)) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if ((its == 10) || (its == 20)) {
+
+/* Exceptional shift. */
+
+ i__2 = i__ + (i__ - 1) * h_dim1;
+ s = (r__1 = h__[i__2].r, dabs(r__1)) * .75f;
+ i__2 = i__ + i__ * h_dim1;
+ q__1.r = s + h__[i__2].r, q__1.i = h__[i__2].i;
+ t.r = q__1.r, t.i = q__1.i;
+ } else {
+
+/* Wilkinson's shift. */
+
+ i__2 = i__ + i__ * h_dim1;
+ t.r = h__[i__2].r, t.i = h__[i__2].i;
+ i__2 = i__ - 1 + i__ * h_dim1;
+ i__3 = i__ + (i__ - 1) * h_dim1;
+ r__1 = h__[i__3].r;
+ q__1.r = r__1 * h__[i__2].r, q__1.i = r__1 * h__[i__2].i;
+ u.r = q__1.r, u.i = q__1.i;
+ if ((u.r != 0.f) || (u.i != 0.f)) {
+ i__2 = i__ - 1 + (i__ - 1) * h_dim1;
+ q__2.r = h__[i__2].r - t.r, q__2.i = h__[i__2].i - t.i;
+ q__1.r = q__2.r * .5f, q__1.i = q__2.i * .5f;
+ x.r = q__1.r, x.i = q__1.i;
+ q__3.r = x.r * x.r - x.i * x.i, q__3.i = x.r * x.i + x.i *
+ x.r;
+ q__2.r = q__3.r + u.r, q__2.i = q__3.i + u.i;
+ c_sqrt(&q__1, &q__2);
+ y.r = q__1.r, y.i = q__1.i;
+ if (x.r * y.r + r_imag(&x) * r_imag(&y) < 0.f) {
+ q__1.r = -y.r, q__1.i = -y.i;
+ y.r = q__1.r, y.i = q__1.i;
+ }
+ q__3.r = x.r + y.r, q__3.i = x.i + y.i;
+ cladiv_(&q__2, &u, &q__3);
+ q__1.r = t.r - q__2.r, q__1.i = t.i - q__2.i;
+ t.r = q__1.r, t.i = q__1.i;
+ }
+ }
+
+/* Look for two consecutive small subdiagonal elements. */
+
+ i__2 = l + 1;
+ for (m = i__ - 1; m >= i__2; --m) {
+
+/*
+ Determine the effect of starting the single-shift QR
+ iteration at row M, and see if this would make H(M,M-1)
+ negligible.
+*/
+
+ i__3 = m + m * h_dim1;
+ h11.r = h__[i__3].r, h11.i = h__[i__3].i;
+ i__3 = m + 1 + (m + 1) * h_dim1;
+ h22.r = h__[i__3].r, h22.i = h__[i__3].i;
+ q__1.r = h11.r - t.r, q__1.i = h11.i - t.i;
+ h11s.r = q__1.r, h11s.i = q__1.i;
+ i__3 = m + 1 + m * h_dim1;
+ h21 = h__[i__3].r;
+ s = (r__1 = h11s.r, dabs(r__1)) + (r__2 = r_imag(&h11s), dabs(
+ r__2)) + dabs(h21);
+ q__1.r = h11s.r / s, q__1.i = h11s.i / s;
+ h11s.r = q__1.r, h11s.i = q__1.i;
+ h21 /= s;
+ v[0].r = h11s.r, v[0].i = h11s.i;
+ v[1].r = h21, v[1].i = 0.f;
+ i__3 = m + (m - 1) * h_dim1;
+ h10 = h__[i__3].r;
+ tst1 = ((r__1 = h11s.r, dabs(r__1)) + (r__2 = r_imag(&h11s), dabs(
+ r__2))) * ((r__3 = h11.r, dabs(r__3)) + (r__4 = r_imag(&
+ h11), dabs(r__4)) + ((r__5 = h22.r, dabs(r__5)) + (r__6 =
+ r_imag(&h22), dabs(r__6))));
+ if ((r__1 = h10 * h21, dabs(r__1)) <= ulp * tst1) {
+ goto L50;
+ }
+/* L40: */
+ }
+ i__2 = l + l * h_dim1;
+ h11.r = h__[i__2].r, h11.i = h__[i__2].i;
+ i__2 = l + 1 + (l + 1) * h_dim1;
+ h22.r = h__[i__2].r, h22.i = h__[i__2].i;
+ q__1.r = h11.r - t.r, q__1.i = h11.i - t.i;
+ h11s.r = q__1.r, h11s.i = q__1.i;
+ i__2 = l + 1 + l * h_dim1;
+ h21 = h__[i__2].r;
+ s = (r__1 = h11s.r, dabs(r__1)) + (r__2 = r_imag(&h11s), dabs(r__2))
+ + dabs(h21);
+ q__1.r = h11s.r / s, q__1.i = h11s.i / s;
+ h11s.r = q__1.r, h11s.i = q__1.i;
+ h21 /= s;
+ v[0].r = h11s.r, v[0].i = h11s.i;
+ v[1].r = h21, v[1].i = 0.f;
+L50:
+
+/* Single-shift QR step */
+
+ i__2 = i__ - 1;
+ for (k = m; k <= i__2; ++k) {
+
+/*
+ The first iteration of this loop determines a reflection G
+ from the vector V and applies it from left and right to H,
+ thus creating a nonzero bulge below the subdiagonal.
+
+ Each subsequent iteration determines a reflection G to
+ restore the Hessenberg form in the (K-1)th column, and thus
+ chases the bulge one step toward the bottom of the active
+ submatrix.
+
+ V(2) is always real before the call to CLARFG, and hence
+ after the call T2 ( = T1*V(2) ) is also real.
+*/
+
+ if (k > m) {
+ ccopy_(&c__2, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ clarfg_(&c__2, v, &v[1], &c__1, &t1);
+ if (k > m) {
+ i__3 = k + (k - 1) * h_dim1;
+ h__[i__3].r = v[0].r, h__[i__3].i = v[0].i;
+ i__3 = k + 1 + (k - 1) * h_dim1;
+ h__[i__3].r = 0.f, h__[i__3].i = 0.f;
+ }
+ v2.r = v[1].r, v2.i = v[1].i;
+ q__1.r = t1.r * v2.r - t1.i * v2.i, q__1.i = t1.r * v2.i + t1.i *
+ v2.r;
+ t2 = q__1.r;
+
+/*
+ Apply G from the left to transform the rows of the matrix
+ in columns K to I2.
+*/
+
+ i__3 = i2;
+ for (j = k; j <= i__3; ++j) {
+ r_cnjg(&q__3, &t1);
+ i__4 = k + j * h_dim1;
+ q__2.r = q__3.r * h__[i__4].r - q__3.i * h__[i__4].i, q__2.i =
+ q__3.r * h__[i__4].i + q__3.i * h__[i__4].r;
+ i__5 = k + 1 + j * h_dim1;
+ q__4.r = t2 * h__[i__5].r, q__4.i = t2 * h__[i__5].i;
+ q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__4 = k + j * h_dim1;
+ i__5 = k + j * h_dim1;
+ q__1.r = h__[i__5].r - sum.r, q__1.i = h__[i__5].i - sum.i;
+ h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
+ i__4 = k + 1 + j * h_dim1;
+ i__5 = k + 1 + j * h_dim1;
+ q__2.r = sum.r * v2.r - sum.i * v2.i, q__2.i = sum.r * v2.i +
+ sum.i * v2.r;
+ q__1.r = h__[i__5].r - q__2.r, q__1.i = h__[i__5].i - q__2.i;
+ h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
+/* L60: */
+ }
+
+/*
+ Apply G from the right to transform the columns of the
+ matrix in rows I1 to min(K+2,I).
+
+ Computing MIN
+*/
+ i__4 = k + 2;
+ i__3 = min(i__4,i__);
+ for (j = i1; j <= i__3; ++j) {
+ i__4 = j + k * h_dim1;
+ q__2.r = t1.r * h__[i__4].r - t1.i * h__[i__4].i, q__2.i =
+ t1.r * h__[i__4].i + t1.i * h__[i__4].r;
+ i__5 = j + (k + 1) * h_dim1;
+ q__3.r = t2 * h__[i__5].r, q__3.i = t2 * h__[i__5].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__4 = j + k * h_dim1;
+ i__5 = j + k * h_dim1;
+ q__1.r = h__[i__5].r - sum.r, q__1.i = h__[i__5].i - sum.i;
+ h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
+ i__4 = j + (k + 1) * h_dim1;
+ i__5 = j + (k + 1) * h_dim1;
+ r_cnjg(&q__3, &v2);
+ q__2.r = sum.r * q__3.r - sum.i * q__3.i, q__2.i = sum.r *
+ q__3.i + sum.i * q__3.r;
+ q__1.r = h__[i__5].r - q__2.r, q__1.i = h__[i__5].i - q__2.i;
+ h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
+/* L70: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__3 = *ihiz;
+ for (j = *iloz; j <= i__3; ++j) {
+ i__4 = j + k * z_dim1;
+ q__2.r = t1.r * z__[i__4].r - t1.i * z__[i__4].i, q__2.i =
+ t1.r * z__[i__4].i + t1.i * z__[i__4].r;
+ i__5 = j + (k + 1) * z_dim1;
+ q__3.r = t2 * z__[i__5].r, q__3.i = t2 * z__[i__5].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__4 = j + k * z_dim1;
+ i__5 = j + k * z_dim1;
+ q__1.r = z__[i__5].r - sum.r, q__1.i = z__[i__5].i -
+ sum.i;
+ z__[i__4].r = q__1.r, z__[i__4].i = q__1.i;
+ i__4 = j + (k + 1) * z_dim1;
+ i__5 = j + (k + 1) * z_dim1;
+ r_cnjg(&q__3, &v2);
+ q__2.r = sum.r * q__3.r - sum.i * q__3.i, q__2.i = sum.r *
+ q__3.i + sum.i * q__3.r;
+ q__1.r = z__[i__5].r - q__2.r, q__1.i = z__[i__5].i -
+ q__2.i;
+ z__[i__4].r = q__1.r, z__[i__4].i = q__1.i;
+/* L80: */
+ }
+ }
+
+ if (k == m && m > l) {
+
+/*
+ If the QR step was started at row M > L because two
+ consecutive small subdiagonals were found, then extra
+ scaling must be performed to ensure that H(M,M-1) remains
+ real.
+*/
+
+ q__1.r = 1.f - t1.r, q__1.i = 0.f - t1.i;
+ temp.r = q__1.r, temp.i = q__1.i;
+ r__1 = c_abs(&temp);
+ q__1.r = temp.r / r__1, q__1.i = temp.i / r__1;
+ temp.r = q__1.r, temp.i = q__1.i;
+ i__3 = m + 1 + m * h_dim1;
+ i__4 = m + 1 + m * h_dim1;
+ r_cnjg(&q__2, &temp);
+ q__1.r = h__[i__4].r * q__2.r - h__[i__4].i * q__2.i, q__1.i =
+ h__[i__4].r * q__2.i + h__[i__4].i * q__2.r;
+ h__[i__3].r = q__1.r, h__[i__3].i = q__1.i;
+ if (m + 2 <= i__) {
+ i__3 = m + 2 + (m + 1) * h_dim1;
+ i__4 = m + 2 + (m + 1) * h_dim1;
+ q__1.r = h__[i__4].r * temp.r - h__[i__4].i * temp.i,
+ q__1.i = h__[i__4].r * temp.i + h__[i__4].i *
+ temp.r;
+ h__[i__3].r = q__1.r, h__[i__3].i = q__1.i;
+ }
+ i__3 = i__;
+ for (j = m; j <= i__3; ++j) {
+ if (j != m + 1) {
+ if (i2 > j) {
+ i__4 = i2 - j;
+ cscal_(&i__4, &temp, &h__[j + (j + 1) * h_dim1],
+ ldh);
+ }
+ i__4 = j - i1;
+ r_cnjg(&q__1, &temp);
+ cscal_(&i__4, &q__1, &h__[i1 + j * h_dim1], &c__1);
+ if (*wantz) {
+ r_cnjg(&q__1, &temp);
+ cscal_(&nz, &q__1, &z__[*iloz + j * z_dim1], &
+ c__1);
+ }
+ }
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+
+/* Ensure that H(I,I-1) is real. */
+
+ i__2 = i__ + (i__ - 1) * h_dim1;
+ temp.r = h__[i__2].r, temp.i = h__[i__2].i;
+ if (r_imag(&temp) != 0.f) {
+ rtemp = c_abs(&temp);
+ i__2 = i__ + (i__ - 1) * h_dim1;
+ h__[i__2].r = rtemp, h__[i__2].i = 0.f;
+ q__1.r = temp.r / rtemp, q__1.i = temp.i / rtemp;
+ temp.r = q__1.r, temp.i = q__1.i;
+ if (i2 > i__) {
+ i__2 = i2 - i__;
+ r_cnjg(&q__1, &temp);
+ cscal_(&i__2, &q__1, &h__[i__ + (i__ + 1) * h_dim1], ldh);
+ }
+ i__2 = i__ - i1;
+ cscal_(&i__2, &temp, &h__[i1 + i__ * h_dim1], &c__1);
+ if (*wantz) {
+ cscal_(&nz, &temp, &z__[*iloz + i__ * z_dim1], &c__1);
+ }
+ }
+
+/* L110: */
+ }
+
+/* Failure to converge in remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L120:
+
+/* H(I,I-1) is negligible: one eigenvalue has converged. */
+
+ i__1 = i__;
+ i__2 = i__ + i__ * h_dim1;
+ w[i__1].r = h__[i__2].r, w[i__1].i = h__[i__2].i;
+
+/*
+ Decrement number of remaining iterations, and return to start of
+ the main loop with new value of I.
+*/
+
+ itn -= its;
+ i__ = l - 1;
+ goto L10;
+
+L130:
+ return 0;
+
+/* End of CLAHQR */
+
+} /* clahqr_ */
+
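+/*
+   A simplified real-arithmetic sketch (not part of LAPACK) of the
+   Wilkinson shift computed above.  For a symmetric trailing 2x2 block
+   [ a  b ; b  d ] it returns the eigenvalue closest to d via
+       t = d - u / (x + y),  u = b*b,  x = (a - d)/2,  y = sqrt(x*x + u),
+   with the sign of y chosen so that x and y do not cancel, which is
+   the same stabilization CLAHQR applies in complex arithmetic.  The
+   helper name is hypothetical; b = c keeps the square root real.
+*/
+static real wilkinson_shift_sym(real a, real b, real d)
+{
+    extern double sqrt(doublereal);
+    real u = b * b;
+    real t = d;
+
+    if (u != 0.f) {
+        real x = (a - d) * .5f;
+        real y = (real) sqrt((doublereal) (x * x + u));
+        if (x < 0.f) {
+            y = -y;             /* avoid cancellation in x + y */
+        }
+        t = d - u / (x + y);
+    }
+    return t;
+}
+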
+/* Subroutine */ int clahrd_(integer *n, integer *k, integer *nb, complex *a,
+ integer *lda, complex *tau, complex *t, integer *ldt, complex *y,
+ integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__;
+ static complex ei;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), cgemv_(char *, integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, complex *,
+ integer *), ccopy_(integer *, complex *, integer *,
+ complex *, integer *), caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *), ctrmv_(char *, char *, char *,
+ integer *, complex *, integer *, complex *, integer *), clarfg_(integer *, complex *, complex *, integer
+ *, complex *), clacgv_(integer *, complex *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CLAHRD reduces the first NB columns of a complex general n-by-(n-k+1)
+ matrix A so that elements below the k-th subdiagonal are zero. The
+ reduction is performed by a unitary similarity transformation
+ Q' * A * Q. The routine returns the matrices V and T which determine
+ Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T.
+
+ This is an auxiliary routine called by CGEHRD.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix A.
+
+ K (input) INTEGER
+ The offset for the reduction. Elements below the k-th
+ subdiagonal in the first NB columns are reduced to zero.
+
+ NB (input) INTEGER
+ The number of columns to be reduced.
+
+ A (input/output) COMPLEX array, dimension (LDA,N-K+1)
+ On entry, the n-by-(n-k+1) general matrix A.
+ On exit, the elements on and above the k-th subdiagonal in
+ the first NB columns are overwritten with the corresponding
+ elements of the reduced matrix; the elements below the k-th
+ subdiagonal, with the array TAU, represent the matrix Q as a
+ product of elementary reflectors. The other columns of A are
+ unchanged. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (output) COMPLEX array, dimension (NB)
+ The scalar factors of the elementary reflectors. See Further
+ Details.
+
+ T (output) COMPLEX array, dimension (LDT,NB)
+ The upper triangular matrix T.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= NB.
+
+ Y (output) COMPLEX array, dimension (LDY,NB)
+ The n-by-nb matrix Y.
+
+ LDY (input) INTEGER
+ The leading dimension of the array Y. LDY >= max(1,N).
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of nb elementary reflectors
+
+ Q = H(1) H(2) . . . H(nb).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in
+ A(i+k+1:n,i), and tau in TAU(i).
+
+ The elements of the vectors v together form the (n-k+1)-by-nb matrix
+ V which is needed, with T and Y, to apply the transformation to the
+ unreduced part of the matrix, using an update of the form:
+ A := (I - V*T*V') * (A - Y*V').
+
+ The contents of A on exit are illustrated by the following example
+ with n = 7, k = 3 and nb = 2:
+
+ ( a h a a a )
+ ( a h a a a )
+ ( a h a a a )
+ ( h h a a a )
+ ( v1 h a a a )
+ ( v1 v2 a a a )
+ ( v1 v2 a a a )
+
+ where a denotes an element of the original matrix A, h denotes a
+ modified element of the upper Hessenberg matrix H, and vi denotes an
+ element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ --tau;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ > 1) {
+
+/*
+ Update A(1:n,i)
+
+ Compute i-th column of A - Y * V'
+*/
+
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+ i__2 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", n, &i__2, &q__1, &y[y_offset], ldy, &a[*k
+ + i__ - 1 + a_dim1], lda, &c_b56, &a[i__ * a_dim1 + 1], &
+ c__1);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[*k + i__ - 1 + a_dim1], lda);
+
+/*
+ Apply I - V * T' * V' to this column (call it b) from the
+ left, using the last column of T as workspace
+
+ Let V = ( V1 ) and b = ( b1 ) (first I-1 rows)
+ ( V2 ) ( b2 )
+
+ where V1 is unit lower triangular
+
+ w := V1' * b1
+*/
+
+ i__2 = i__ - 1;
+ ccopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ ctrmv_("Lower", "Conjugate transpose", "Unit", &i__2, &a[*k + 1 +
+ a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &a[*k + i__ +
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b56,
+ &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+ i__2 = i__ - 1;
+ ctrmv_("Upper", "Conjugate transpose", "Non-unit", &i__2, &t[
+ t_offset], ldt, &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[*k + i__ + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1, &c_b56, &a[*k + i__ +
+ i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+ i__2 = i__ - 1;
+ ctrmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1]
+ , lda, &t[*nb * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ caxpy_(&i__2, &q__1, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 + i__
+ * a_dim1], &c__1);
+
+ i__2 = *k + i__ - 1 + (i__ - 1) * a_dim1;
+ a[i__2].r = ei.r, a[i__2].i = ei.i;
+ }
+
+/*
+ Generate the elementary reflector H(i) to annihilate
+ A(k+i+1:n,i)
+*/
+
+ i__2 = *k + i__ + i__ * a_dim1;
+ ei.r = a[i__2].r, ei.i = a[i__2].i;
+ i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+ i__3 = *k + i__ + 1;
+ clarfg_(&i__2, &ei, &a[min(i__3,*n) + i__ * a_dim1], &c__1, &tau[i__])
+ ;
+ i__2 = *k + i__ + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute Y(1:n,i) */
+
+ i__2 = *n - *k - i__ + 1;
+ cgemv_("No transpose", n, &i__2, &c_b56, &a[(i__ + 1) * a_dim1 + 1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b55, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &a[*k + i__ +
+ a_dim1], lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b55, &t[
+ i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", n, &i__2, &q__1, &y[y_offset], ldy, &t[i__ *
+ t_dim1 + 1], &c__1, &c_b56, &y[i__ * y_dim1 + 1], &c__1);
+ cscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1);
+
+/* Compute T(1:i,i) */
+
+ i__2 = i__ - 1;
+ i__3 = i__;
+ q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
+ cscal_(&i__2, &q__1, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ ctrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+ &t[i__ * t_dim1 + 1], &c__1)
+ ;
+ i__2 = i__ + i__ * t_dim1;
+ i__3 = i__;
+ t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;
+
+/* L10: */
+ }
+ i__1 = *k + *nb + *nb * a_dim1;
+ a[i__1].r = ei.r, a[i__1].i = ei.i;
+
+ return 0;
+
+/* End of CLAHRD */
+
+} /* clahrd_ */
+
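+/*
+   A small sketch (not part of LAPACK) of what one elementary reflector
+   from the Further Details section above does: H(i) = I - tau*v*v'
+   applied to a vector x of length n is x := x - tau * (v'*x) * v.
+   Real arithmetic for brevity; the helper name is illustrative.
+*/
+static void apply_reflector(integer n, real tau, real *v, real *x)
+{
+    integer i;
+    real dot = 0.f;
+
+    for (i = 0; i < n; ++i) {
+        dot += v[i] * x[i];           /* v' * x              */
+    }
+    for (i = 0; i < n; ++i) {
+        x[i] -= tau * dot * v[i];     /* x - tau * (v'x) * v */
+    }
+}
+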
+/* Subroutine */ int clals0_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *nrhs, complex *b, integer *ldb, complex *bx,
+ integer *ldbx, integer *perm, integer *givptr, integer *givcol,
+ integer *ldgcol, real *givnum, integer *ldgnum, real *poles, real *
+ difl, real *difr, real *z__, integer *k, real *c__, real *s, real *
+ rwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, difr_dim1, difr_offset, givnum_dim1,
+ givnum_offset, poles_dim1, poles_offset, b_dim1, b_offset,
+ bx_dim1, bx_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ static integer i__, j, m, n;
+ static real dj;
+ static integer nlp1, jcol;
+ static real temp;
+ static integer jrow;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ static real diflj, difrj, dsigj;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), sgemv_(char *, integer *, integer *, real *
+ , real *, integer *, real *, integer *, real *, real *, integer *), csrot_(integer *, complex *, integer *, complex *,
+ integer *, real *, real *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, complex *, integer *, integer *), csscal_(integer *, real *, complex *, integer *),
+ clacpy_(char *, integer *, integer *, complex *, integer *,
+ complex *, integer *), xerbla_(char *, integer *);
+ static real dsigjp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ December 1, 1999
+
+
+ Purpose
+ =======
+
+ CLALS0 applies back the multiplying factors of either the left or the
+ right singular vector matrix of a diagonal matrix appended by a row
+ to the right hand side matrix B in solving the least squares problem
+ using the divide-and-conquer SVD approach.
+
+ For the left singular vector matrix, three types of orthogonal
+ matrices are involved:
+
+ (1L) Givens rotations: the number of such rotations is GIVPTR; the
+ pairs of columns/rows they were applied to are stored in GIVCOL;
+ and the C- and S-values of these rotations are stored in GIVNUM.
+
+ (2L) Permutation. The (NL+1)-st row of B is to be moved to the first
+ row, and for J=2:N, PERM(J)-th row of B is to be moved to the
+ J-th row.
+
+ (3L) The left singular vector matrix of the remaining matrix.
+
+ For the right singular vector matrix, four types of orthogonal
+ matrices are involved:
+
+ (1R) The right singular vector matrix of the remaining matrix.
+
+ (2R) If SQRE = 1, one extra Givens rotation to generate the right
+ null space.
+
+ (3R) The inverse transformation of (2L).
+
+ (4R) The inverse transformation of (1L).
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed in
+ factored form:
+ = 0: Left singular vector matrix.
+ = 1: Right singular vector matrix.
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has row dimension N = NL + NR + 1,
+ and column dimension M = N + SQRE.
+
+ NRHS (input) INTEGER
+ The number of columns of B and BX. NRHS must be at least 1.
+
+ B (input/output) COMPLEX array, dimension ( LDB, NRHS )
+ On input, B contains the right hand sides of the least
+ squares problem in rows 1 through M. On output, B contains
+ the solution X in rows 1 through N.
+
+ LDB (input) INTEGER
+ The leading dimension of B. LDB must be at least
+ max(1,MAX( M, N ) ).
+
+ BX (workspace) COMPLEX array, dimension ( LDBX, NRHS )
+
+ LDBX (input) INTEGER
+ The leading dimension of BX.
+
+ PERM (input) INTEGER array, dimension ( N )
+ The permutations (from deflation and sorting) applied
+ to the two blocks.
+
+ GIVPTR (input) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem.
+
+ GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 )
+ Each pair of numbers indicates a pair of rows/columns
+ involved in a Givens rotation.
+
+ LDGCOL (input) INTEGER
+ The leading dimension of GIVCOL, must be at least N.
+
+ GIVNUM (input) REAL array, dimension ( LDGNUM, 2 )
+ Each number indicates the C or S value used in the
+ corresponding Givens rotation.
+
+ LDGNUM (input) INTEGER
+ The leading dimension of arrays DIFR, POLES and
+ GIVNUM, must be at least K.
+
+ POLES (input) REAL array, dimension ( LDGNUM, 2 )
+ On entry, POLES(1:K, 1) contains the new singular
+ values obtained from solving the secular equation, and
+ POLES(1:K, 2) is an array containing the poles in the secular
+ equation.
+
+ DIFL (input) REAL array, dimension ( K ).
+           On entry, DIFL(I) is the distance between the I-th updated
+           (undeflated) singular value and the I-th (undeflated) old
+           singular value.
+
+ DIFR (input) REAL array, dimension ( LDGNUM, 2 ).
+           On entry, DIFR(I, 1) contains the distance between the I-th
+           updated (undeflated) singular value and the (I+1)-th
+           (undeflated) old singular value, and DIFR(I, 2) is the
+           normalizing factor for the I-th right singular vector.
+
+ Z (input) REAL array, dimension ( K )
+           Contains the components of the deflation-adjusted updating row
+ vector.
+
+ K (input) INTEGER
+           Contains the dimension of the non-deflated matrix.
+           This is the order of the related secular equation. 1 <= K <= N.
+
+ C (input) REAL
+           C contains garbage if SQRE = 0 and the C-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ S (input) REAL
+           S contains garbage if SQRE = 0 and the S-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ RWORK (workspace) REAL array, dimension
+ ( K*(1+NRHS) + 2*NRHS )
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ difr_dim1 = *ldgnum;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ --difl;
+ --z__;
+ --rwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -4;
+ }
+
+ n = *nl + *nr + 1;
+
+ if (*nrhs < 1) {
+ *info = -5;
+ } else if (*ldb < n) {
+ *info = -7;
+ } else if (*ldbx < n) {
+ *info = -9;
+ } else if (*givptr < 0) {
+ *info = -11;
+ } else if (*ldgcol < n) {
+ *info = -13;
+ } else if (*ldgnum < n) {
+ *info = -15;
+ } else if (*k < 1) {
+ *info = -20;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLALS0", &i__1);
+ return 0;
+ }
+
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+
+ if (*icompq == 0) {
+
+/*
+ Apply back orthogonal transformations from the left.
+
+ Step (1L): apply back the Givens rotations performed.
+*/
+
+ i__1 = *givptr;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ csrot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
+ ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
+ i__ + ((givnum_dim1) << (1))], &givnum[i__ + givnum_dim1])
+ ;
+/* L10: */
+ }
+
+/* Step (2L): permute rows of B. */
+
+ ccopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ ccopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1],
+ ldbx);
+/* L20: */
+ }
+
+/*
+ Step (3L): apply the inverse of the left singular vector
+ matrix to BX.
+*/
+
+ if (*k == 1) {
+ ccopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
+ if (z__[1] < 0.f) {
+ csscal_(nrhs, &c_b1290, &b[b_offset], ldb);
+ }
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = poles[j + poles_dim1];
+ dsigj = -poles[j + ((poles_dim1) << (1))];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -poles[j + 1 + ((poles_dim1) << (1))];
+ }
+ if ((z__[j] == 0.f) || (poles[j + ((poles_dim1) << (1))] ==
+ 0.f)) {
+ rwork[j] = 0.f;
+ } else {
+ rwork[j] = -poles[j + ((poles_dim1) << (1))] * z__[j] /
+ diflj / (poles[j + ((poles_dim1) << (1))] + dj);
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if ((z__[i__] == 0.f) || (poles[i__ + ((poles_dim1) << (1)
+ )] == 0.f)) {
+ rwork[i__] = 0.f;
+ } else {
+ rwork[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
+ i__] / (slamc3_(&poles[i__ + ((poles_dim1) <<
+ (1))], &dsigj) - diflj) / (poles[i__ + ((
+ poles_dim1) << (1))] + dj);
+ }
+/* L30: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if ((z__[i__] == 0.f) || (poles[i__ + ((poles_dim1) << (1)
+ )] == 0.f)) {
+ rwork[i__] = 0.f;
+ } else {
+ rwork[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
+ i__] / (slamc3_(&poles[i__ + ((poles_dim1) <<
+ (1))], &dsigjp) + difrj) / (poles[i__ + ((
+ poles_dim1) << (1))] + dj);
+ }
+/* L40: */
+ }
+ rwork[1] = -1.f;
+ temp = snrm2_(k, &rwork[1], &c__1);
+
+/*
+ Since B and BX are complex, the following call to SGEMV
+ is performed in two steps (real and imaginary parts).
+
+ CALL SGEMV( 'T', K, NRHS, ONE, BX, LDBX, WORK, 1, ZERO,
+ $ B( J, 1 ), LDB )
+*/
+
+ i__ = *k + ((*nrhs) << (1));
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ i__4 = jrow + jcol * bx_dim1;
+ rwork[i__] = bx[i__4].r;
+/* L50: */
+ }
+/* L60: */
+ }
+ sgemv_("T", k, nrhs, &c_b1011, &rwork[*k + 1 + ((*nrhs) << (1)
+ )], k, &rwork[1], &c__1, &c_b320, &rwork[*k + 1], &
+ c__1);
+ i__ = *k + ((*nrhs) << (1));
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ rwork[i__] = r_imag(&bx[jrow + jcol * bx_dim1]);
+/* L70: */
+ }
+/* L80: */
+ }
+ sgemv_("T", k, nrhs, &c_b1011, &rwork[*k + 1 + ((*nrhs) << (1)
+ )], k, &rwork[1], &c__1, &c_b320, &rwork[*k + 1 + *
+ nrhs], &c__1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = j + jcol * b_dim1;
+ i__4 = jcol + *k;
+ i__5 = jcol + *k + *nrhs;
+ q__1.r = rwork[i__4], q__1.i = rwork[i__5];
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L90: */
+ }
+ clascl_("G", &c__0, &c__0, &temp, &c_b1011, &c__1, nrhs, &b[j
+ + b_dim1], ldb, info);
+/* L100: */
+ }
+ }
+
+/* Move the deflated rows of BX to B also. */
+
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ clacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1
+ + b_dim1], ldb);
+ }
+ } else {
+
+/*
+ Apply back the right orthogonal transformations.
+
+ Step (1R): apply back the new right singular vector matrix
+ to B.
+*/
+
+ if (*k == 1) {
+ ccopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dsigj = poles[j + ((poles_dim1) << (1))];
+ if (z__[j] == 0.f) {
+ rwork[j] = 0.f;
+ } else {
+ rwork[j] = -z__[j] / difl[j] / (dsigj + poles[j +
+ poles_dim1]) / difr[j + ((difr_dim1) << (1))];
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.f) {
+ rwork[i__] = 0.f;
+ } else {
+ r__1 = -poles[i__ + 1 + ((poles_dim1) << (1))];
+ rwork[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difr[
+ i__ + difr_dim1]) / (dsigj + poles[i__ +
+ poles_dim1]) / difr[i__ + ((difr_dim1) << (1))
+ ];
+ }
+/* L110: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.f) {
+ rwork[i__] = 0.f;
+ } else {
+ r__1 = -poles[i__ + ((poles_dim1) << (1))];
+ rwork[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difl[
+ i__]) / (dsigj + poles[i__ + poles_dim1]) /
+ difr[i__ + ((difr_dim1) << (1))];
+ }
+/* L120: */
+ }
+
+/*
+ Since B and BX are complex, the following call to SGEMV
+ is performed in two steps (real and imaginary parts).
+
+ CALL SGEMV( 'T', K, NRHS, ONE, B, LDB, WORK, 1, ZERO,
+ $ BX( J, 1 ), LDBX )
+*/
+
+ i__ = *k + ((*nrhs) << (1));
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[i__] = b[i__4].r;
+/* L130: */
+ }
+/* L140: */
+ }
+ sgemv_("T", k, nrhs, &c_b1011, &rwork[*k + 1 + ((*nrhs) << (1)
+ )], k, &rwork[1], &c__1, &c_b320, &rwork[*k + 1], &
+ c__1);
+ i__ = *k + ((*nrhs) << (1));
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = *k;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++i__;
+ rwork[i__] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L150: */
+ }
+/* L160: */
+ }
+ sgemv_("T", k, nrhs, &c_b1011, &rwork[*k + 1 + ((*nrhs) << (1)
+ )], k, &rwork[1], &c__1, &c_b320, &rwork[*k + 1 + *
+ nrhs], &c__1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = j + jcol * bx_dim1;
+ i__4 = jcol + *k;
+ i__5 = jcol + *k + *nrhs;
+ q__1.r = rwork[i__4], q__1.i = rwork[i__5];
+ bx[i__3].r = q__1.r, bx[i__3].i = q__1.i;
+/* L170: */
+ }
+/* L180: */
+ }
+ }
+
+/*
+ Step (2R): if SQRE = 1, apply back the rotation that is
+ related to the right null space of the subproblem.
+*/
+
+ if (*sqre == 1) {
+ ccopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
+ csrot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__,
+ s);
+ }
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ clacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 +
+ bx_dim1], ldbx);
+ }
+
+/* Step (3R): permute rows of B. */
+
+ ccopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
+ if (*sqre == 1) {
+ ccopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
+ }
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ ccopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1],
+ ldb);
+/* L190: */
+ }
+
+/* Step (4R): apply back the Givens rotations performed. */
+
+ for (i__ = *givptr; i__ >= 1; --i__) {
+ r__1 = -givnum[i__ + givnum_dim1];
+ csrot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
+ ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
+ i__ + ((givnum_dim1) << (1))], &r__1);
+/* L200: */
+ }
+ }
+
+ return 0;
+
+/* End of CLALS0 */
+
+} /* clals0_ */
+
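+/*
+   A minimal sketch (not part of LAPACK) of the "complex GEMV in two
+   real steps" device used twice in CLALS0 above: with A complex and x
+   real, y = A**T * x is formed by running the same real products once
+   over the real parts of A and once over the imaginary parts, which is
+   what the paired SGEMV calls do after copying those parts into RWORK.
+   Plain loops stand in for SGEMV; all names here are illustrative.
+*/
+static void ctgemv_two_real_passes(integer m, integer n, complex *a,
+        integer lda, real *x, complex *y)
+{
+    integer i, j;
+
+    for (j = 0; j < n; ++j) {
+        real re = 0.f, im = 0.f;
+        for (i = 0; i < m; ++i) {
+            re += a[i + j * lda].r * x[i];   /* real pass (1st SGEMV) */
+            im += a[i + j * lda].i * x[i];   /* imag pass (2nd SGEMV) */
+        }
+        y[j].r = re;
+        y[j].i = im;
+    }
+}
+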
+/* Subroutine */ int clalsa_(integer *icompq, integer *smlsiz, integer *n,
+ integer *nrhs, complex *b, integer *ldb, complex *bx, integer *ldbx,
+ real *u, integer *ldu, real *vt, integer *k, real *difl, real *difr,
+ real *z__, real *poles, integer *givptr, integer *givcol, integer *
+ ldgcol, integer *perm, real *givnum, real *c__, real *s, real *rwork,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1,
+ difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset,
+ poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset,
+ z_dim1, z_offset, b_dim1, b_offset, bx_dim1, bx_offset, i__1,
+ i__2, i__3, i__4, i__5, i__6;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl,
+ ndb1, nlp1, lvl2, nrp1, jcol, nlvl, sqre, jrow, jimag, jreal,
+ inode, ndiml;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer ndimr;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), clals0_(integer *, integer *, integer *,
+ integer *, integer *, complex *, integer *, complex *, integer *,
+ integer *, integer *, integer *, integer *, real *, integer *,
+ real *, real *, real *, real *, integer *, real *, real *, real *,
+ integer *), xerbla_(char *, integer *), slasdt_(integer *
+ , integer *, integer *, integer *, integer *, integer *, integer *
+ );
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+   CLALSA is an intermediate step in solving the least squares problem
+   by computing the SVD of the coefficient matrix in compact form (the
+   singular vectors are computed as products of simple orthogonal
+   matrices).
+
+ If ICOMPQ = 0, CLALSA applies the inverse of the left singular vector
+ matrix of an upper bidiagonal matrix to the right hand side; and if
+ ICOMPQ = 1, CLALSA applies the right singular vector matrix to the
+ right hand side. The singular vector matrices were generated in
+ compact form by CLALSA.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether the left or the right singular vector
+ matrix is involved.
+ = 0: Left singular vector matrix
+ = 1: Right singular vector matrix
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The row and column dimensions of the upper bidiagonal matrix.
+
+ NRHS (input) INTEGER
+ The number of columns of B and BX. NRHS must be at least 1.
+
+   B      (input/output) COMPLEX array, dimension ( LDB, NRHS )
+ On input, B contains the right hand sides of the least
+ squares problem in rows 1 through M. On output, B contains
+ the solution X in rows 1 through N.
+
+ LDB (input) INTEGER
+ The leading dimension of B in the calling subprogram.
+ LDB must be at least max(1,MAX( M, N ) ).
+
+ BX (output) COMPLEX array, dimension ( LDBX, NRHS )
+ On exit, the result of applying the left or right singular
+ vector matrix to B.
+
+ LDBX (input) INTEGER
+ The leading dimension of BX.
+
+ U (input) REAL array, dimension ( LDU, SMLSIZ ).
+ On entry, U contains the left singular vector matrices of all
+ subproblems at the bottom level.
+
+   LDU    (input) INTEGER, LDU >= N.
+ The leading dimension of arrays U, VT, DIFL, DIFR,
+ POLES, GIVNUM, and Z.
+
+ VT (input) REAL array, dimension ( LDU, SMLSIZ+1 ).
+ On entry, VT' contains the right singular vector matrices of
+ all subproblems at the bottom level.
+
+ K (input) INTEGER array, dimension ( N ).
+
+ DIFL (input) REAL array, dimension ( LDU, NLVL ).
+ where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1.
+
+ DIFR (input) REAL array, dimension ( LDU, 2 * NLVL ).
+           On entry, DIFL(*, I) and DIFR(*, 2 * I - 1) record
+           distances between singular values on the I-th level and
+           singular values on the (I-1)-th level, and DIFR(*, 2 * I)
+           records the normalizing factors of the right singular vector
+           matrices of subproblems on the I-th level.
+
+ Z (input) REAL array, dimension ( LDU, NLVL ).
+ On entry, Z(1, I) contains the components of the deflation-
+ adjusted updating row vector for subproblems on the I-th
+ level.
+
+ POLES (input) REAL array, dimension ( LDU, 2 * NLVL ).
+ On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old
+ singular values involved in the secular equations on the I-th
+ level.
+
+ GIVPTR (input) INTEGER array, dimension ( N ).
+ On entry, GIVPTR( I ) records the number of Givens
+ rotations performed on the I-th problem on the computation
+ tree.
+
+ GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ).
+ On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the
+ locations of Givens rotations performed on the I-th level on
+ the computation tree.
+
+ LDGCOL (input) INTEGER, LDGCOL >= N.
+ The leading dimension of arrays GIVCOL and PERM.
+
+ PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ).
+ On entry, PERM(*, I) records permutations done on the I-th
+ level of the computation tree.
+
+ GIVNUM (input) REAL array, dimension ( LDU, 2 * NLVL ).
+ On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S-
+ values of Givens rotations performed on the I-th level on the
+ computation tree.
+
+ C (input) REAL array, dimension ( N ).
+ On entry, if the I-th subproblem is not square,
+ C( I ) contains the C-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ S (input) REAL array, dimension ( N ).
+ On entry, if the I-th subproblem is not square,
+ S( I ) contains the S-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ RWORK (workspace) REAL array, dimension at least
+ max ( N, (SMLSIZ+1)*NRHS*3 ).
+
+ IWORK (workspace) INTEGER array.
+ The dimension must be at least 3 * N.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < *smlsiz) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < *n) {
+ *info = -6;
+ } else if (*ldbx < *n) {
+ *info = -8;
+ } else if (*ldu < *n) {
+ *info = -10;
+ } else if (*ldgcol < *n) {
+ *info = -19;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLALSA", &i__1);
+ return 0;
+ }
+
+/* Book-keeping and setting up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+
+ slasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/*
+ The following code applies back the left singular vector factors.
+ For applying back the right singular vector factors, go to 170.
+*/
+
+ if (*icompq == 1) {
+ goto L170;
+ }
+
+/*
+ The nodes on the bottom level of the tree were solved
+ by SLASDQ. The corresponding left and right singular vector
+ matrices are in explicit form. First apply back the left
+ singular vector matrices.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+ IC : center row of each node
+ NL : number of rows of left subproblem
+ NR : number of rows of right subproblem
+ NLF: starting row of the left subproblem
+ NRF: starting row of the right subproblem
+*/
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+
+/*
+ Since B and BX are complex, the following call to SGEMM
+ is performed in two steps (real and imaginary parts).
+
+ CALL SGEMM( 'T', 'N', NL, NRHS, NL, ONE, U( NLF, 1 ), LDU,
+ $ B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX )
+*/
+
+ j = (nl * *nrhs) << (1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L10: */
+ }
+/* L20: */
+ }
+ sgemm_("T", "N", &nl, nrhs, &nl, &c_b1011, &u[nlf + u_dim1], ldu, &
+ rwork[((nl * *nrhs) << (1)) + 1], &nl, &c_b320, &rwork[1], &
+ nl);
+ j = (nl * *nrhs) << (1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L30: */
+ }
+/* L40: */
+ }
+ sgemm_("T", "N", &nl, nrhs, &nl, &c_b1011, &u[nlf + u_dim1], ldu, &
+ rwork[((nl * *nrhs) << (1)) + 1], &nl, &c_b320, &rwork[nl * *
+ nrhs + 1], &nl);
+ jreal = 0;
+ jimag = nl * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nl - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+
+/*
+ Since B and BX are complex, the following call to SGEMM
+ is performed in two steps (real and imaginary parts).
+
+ CALL SGEMM( 'T', 'N', NR, NRHS, NR, ONE, U( NRF, 1 ), LDU,
+ $ B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX )
+*/
+
+ j = (nr * *nrhs) << (1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L70: */
+ }
+/* L80: */
+ }
+ sgemm_("T", "N", &nr, nrhs, &nr, &c_b1011, &u[nrf + u_dim1], ldu, &
+ rwork[((nr * *nrhs) << (1)) + 1], &nr, &c_b320, &rwork[1], &
+ nr);
+ j = (nr * *nrhs) << (1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L90: */
+ }
+/* L100: */
+ }
+ sgemm_("T", "N", &nr, nrhs, &nr, &c_b1011, &u[nrf + u_dim1], ldu, &
+ rwork[((nr * *nrhs) << (1)) + 1], &nr, &c_b320, &rwork[nr * *
+ nrhs + 1], &nr);
+ jreal = 0;
+ jimag = nr * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nr - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+
+/* L130: */
+ }
+
+/*
+ Next copy the rows of B that correspond to unchanged rows
+ in the bidiagonal matrix to BX.
+*/
+
+ i__1 = nd;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ic = iwork[inode + i__ - 1];
+ ccopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
+/* L140: */
+ }
+
+/*
+ Finally go through the left singular vector matrices of all
+ the other subproblems bottom-up on the tree.
+*/
+
+ j = pow_ii(&c__2, &nlvl);
+ sqre = 0;
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = ((lvl) << (1)) - 1;
+
+/*
+ find the first node LF and last node LL on
+ the current level LVL
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = ((lf) << (1)) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ --j;
+ clals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
+ b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &rwork[1], info);
+/* L150: */
+ }
+/* L160: */
+ }
+ goto L330;
+
+/* ICOMPQ = 1: applying back the right singular vector factors. */
+
+L170:
+
+/*
+ First now go through the right singular vector matrices of all
+ the tree nodes top-down.
+*/
+
+ j = 0;
+ i__1 = nlvl;
+ for (lvl = 1; lvl <= i__1; ++lvl) {
+ lvl2 = ((lvl) << (1)) - 1;
+
+/*
+ Find the first node LF and last node LL on
+ the current level LVL.
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__2 = lvl - 1;
+ lf = pow_ii(&c__2, &i__2);
+ ll = ((lf) << (1)) - 1;
+ }
+ i__2 = lf;
+ for (i__ = ll; i__ >= i__2; --i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqre = 0;
+ } else {
+ sqre = 1;
+ }
+ ++j;
+ clals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
+ nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &rwork[1], info);
+/* L180: */
+ }
+/* L190: */
+ }
+
+/*
+ The nodes on the bottom level of the tree were solved
+ by SLASDQ. The corresponding right singular vector
+ matrices are in explicit form. Apply them back.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlp1 = nl + 1;
+ if (i__ == nd) {
+ nrp1 = nr;
+ } else {
+ nrp1 = nr + 1;
+ }
+ nlf = ic - nl;
+ nrf = ic + 1;
+
+/*
+ Since B and BX are complex, the following call to SGEMM is
+ performed in two steps (real and imaginary parts).
+
+ CALL SGEMM( 'T', 'N', NLP1, NRHS, NLP1, ONE, VT( NLF, 1 ), LDU,
+ $ B( NLF, 1 ), LDB, ZERO, BX( NLF, 1 ), LDBX )
+*/
+
+ j = (nlp1 * *nrhs) << (1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L200: */
+ }
+/* L210: */
+ }
+ sgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b1011, &vt[nlf + vt_dim1],
+ ldu, &rwork[((nlp1 * *nrhs) << (1)) + 1], &nlp1, &c_b320, &
+ rwork[1], &nlp1);
+ j = (nlp1 * *nrhs) << (1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L220: */
+ }
+/* L230: */
+ }
+ sgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b1011, &vt[nlf + vt_dim1],
+ ldu, &rwork[((nlp1 * *nrhs) << (1)) + 1], &nlp1, &c_b320, &
+ rwork[nlp1 * *nrhs + 1], &nlp1);
+ jreal = 0;
+ jimag = nlp1 * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nlf + nlp1 - 1;
+ for (jrow = nlf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L240: */
+ }
+/* L250: */
+ }
+
+/*
+ Since B and BX are complex, the following call to SGEMM is
+ performed in two steps (real and imaginary parts).
+
+ CALL SGEMM( 'T', 'N', NRP1, NRHS, NRP1, ONE, VT( NRF, 1 ), LDU,
+ $ B( NRF, 1 ), LDB, ZERO, BX( NRF, 1 ), LDBX )
+*/
+
+ j = (nrp1 * *nrhs) << (1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L260: */
+ }
+/* L270: */
+ }
+ sgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b1011, &vt[nrf + vt_dim1],
+ ldu, &rwork[((nrp1 * *nrhs) << (1)) + 1], &nrp1, &c_b320, &
+ rwork[1], &nrp1);
+ j = (nrp1 * *nrhs) << (1);
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L280: */
+ }
+/* L290: */
+ }
+ sgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b1011, &vt[nrf + vt_dim1],
+ ldu, &rwork[((nrp1 * *nrhs) << (1)) + 1], &nrp1, &c_b320, &
+ rwork[nrp1 * *nrhs + 1], &nrp1);
+ jreal = 0;
+ jimag = nrp1 * *nrhs;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = nrf + nrp1 - 1;
+ for (jrow = nrf; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * bx_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ bx[i__4].r = q__1.r, bx[i__4].i = q__1.i;
+/* L300: */
+ }
+/* L310: */
+ }
+
+/* L320: */
+ }
+
+L330:
+
+ return 0;
+
+/* End of CLALSA */
+
+} /* clalsa_ */
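+
+/*
+   Editorial sketch (hypothetical; not part of the f2c translation):
+   the real/imaginary split that CLALSA uses around each SGEMM call
+   above, isolated into one helper. To apply a real orthogonal factor
+   U' to a complex right hand side B, Re(B) and Im(B) are packed into
+   real workspace, each multiplied by U' with SGEMM, and the two real
+   products recombined into the complex result BX. Assumes the f2c
+   types (integer, real, complex) used throughout this file; rwork
+   must hold at least 3*n*nrhs reals.
+*/
+static void split_gemm_sketch_(integer *n, integer *nrhs, real *u,
+        integer *ldu, complex *b, integer *ldb, complex *bx,
+        integer *ldbx, real *rwork)
+{
+    extern /* Subroutine */ int sgemm_(char *, char *, integer *,
+            integer *, integer *, real *, real *, integer *, real *,
+            integer *, real *, real *, integer *);
+    static real one = 1.f, zero = 0.f;
+    integer i__, j;
+    real *re = rwork;                     /* packed Re(B), n-by-nrhs */
+    real *im = rwork + *n * *nrhs;        /* packed Im(B), n-by-nrhs */
+    real *out = rwork + 2 * *n * *nrhs;   /* one real product at a time */
+
+    for (j = 0; j < *nrhs; ++j) {
+        for (i__ = 0; i__ < *n; ++i__) {
+            re[j * *n + i__] = b[i__ + j * *ldb].r;
+            im[j * *n + i__] = b[i__ + j * *ldb].i;
+        }
+    }
+    /* Real part: out = U' * Re(B). */
+    sgemm_("T", "N", n, nrhs, n, &one, u, ldu, re, n, &zero, out, n);
+    for (j = 0; j < *nrhs; ++j) {
+        for (i__ = 0; i__ < *n; ++i__) {
+            bx[i__ + j * *ldbx].r = out[j * *n + i__];
+        }
+    }
+    /* Imaginary part: out = U' * Im(B). */
+    sgemm_("T", "N", n, nrhs, n, &one, u, ldu, im, n, &zero, out, n);
+    for (j = 0; j < *nrhs; ++j) {
+        for (i__ = 0; i__ < *n; ++i__) {
+            bx[i__ + j * *ldbx].i = out[j * *n + i__];
+        }
+    }
+}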
+
+/* Subroutine */ int clalsd_(char *uplo, integer *smlsiz, integer *n, integer
+ *nrhs, real *d__, real *e, complex *b, integer *ldb, real *rcond,
+ integer *rank, complex *work, real *rwork, integer *iwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+ real r__1;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *), log(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ static integer c__, i__, j, k;
+ static real r__;
+ static integer s, u, z__;
+ static real cs;
+ static integer bx;
+ static real sn;
+ static integer st, vt, nm1, st1;
+ static real eps;
+ static integer iwk;
+ static real tol;
+ static integer difl, difr, jcol, irwb, perm, nsub, nlvl, sqre, bxst, jrow,
+ irwu, jimag, jreal;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer irwib;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ static integer poles, sizei, irwrb, nsize;
+ extern /* Subroutine */ int csrot_(integer *, complex *, integer *,
+ complex *, integer *, real *, real *);
+ static integer irwvt, icmpq1, icmpq2;
+ extern /* Subroutine */ int clalsa_(integer *, integer *, integer *,
+ integer *, complex *, integer *, complex *, integer *, real *,
+ integer *, real *, integer *, real *, real *, real *, real *,
+ integer *, integer *, integer *, integer *, real *, real *, real *
+ , real *, integer *, integer *), clascl_(char *, integer *,
+ integer *, real *, real *, integer *, integer *, complex *,
+ integer *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int slasda_(integer *, integer *, integer *,
+ integer *, real *, real *, real *, integer *, real *, integer *,
+ real *, real *, real *, real *, integer *, integer *, integer *,
+ integer *, real *, real *, real *, real *, integer *, integer *),
+ clacpy_(char *, integer *, integer *, complex *, integer *,
+ complex *, integer *), claset_(char *, integer *, integer
+ *, complex *, complex *, complex *, integer *), xerbla_(
+ char *, integer *), slascl_(char *, integer *, integer *,
+ real *, real *, integer *, integer *, real *, integer *, integer *
+ );
+ extern integer isamax_(integer *, real *, integer *);
+ static integer givcol;
+ extern /* Subroutine */ int slasdq_(char *, integer *, integer *, integer
+ *, integer *, integer *, real *, real *, real *, integer *, real *
+ , integer *, real *, integer *, real *, integer *),
+ slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *), slartg_(real *, real *, real *, real *, real *
+ );
+ static real orgnrm;
+ static integer givnum;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+ static integer givptr, nrwork, irwwrk, smlszp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ CLALSD uses the singular value decomposition of A to solve the least
+ squares problem of finding X to minimize the Euclidean norm of each
+ column of A*X-B, where A is N-by-N upper bidiagonal, and X and B
+ are N-by-NRHS. The solution X overwrites B.
+
+ The singular values of A smaller than RCOND times the largest
+ singular value are treated as zero in solving the least squares
+ problem; in this case a minimum norm solution is returned.
+ The actual singular values are returned in D in ascending order.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': D and E define an upper bidiagonal matrix.
+ = 'L': D and E define a lower bidiagonal matrix.
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The dimension of the bidiagonal matrix. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of columns of B. NRHS must be at least 1.
+
+ D (input/output) REAL array, dimension (N)
+ On entry D contains the main diagonal of the bidiagonal
+ matrix. On exit, if INFO = 0, D contains its singular values.
+
+ E (input/output) REAL array, dimension (N-1)
+ Contains the super-diagonal entries of the bidiagonal matrix.
+ On exit, E has been destroyed.
+
+ B (input/output) COMPLEX array, dimension (LDB,NRHS)
+ On input, B contains the right hand sides of the least
+ squares problem. On output, B contains the solution X.
+
+ LDB (input) INTEGER
+ The leading dimension of B in the calling subprogram.
+ LDB must be at least max(1,N).
+
+ RCOND (input) REAL
+ The singular values of A less than or equal to RCOND times
+ the largest singular value are treated as zero in solving
+ the least squares problem. If RCOND is negative,
+ machine precision is used instead.
+ For example, if diag(S)*X=B were the least squares problem,
+ where diag(S) is a diagonal matrix of singular values, the
+ solution would be X(i) = B(i) / S(i) if S(i) is greater than
+ RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to
+ RCOND*max(S).
+
+ RANK (output) INTEGER
+ The number of singular values of A greater than RCOND times
+ the largest singular value.
+
+ WORK (workspace) COMPLEX array, dimension at least
+ (N * NRHS).
+
+ RWORK (workspace) REAL array, dimension at least
+ (9*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS + (SMLSIZ+1)**2),
+ where
+ NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 )
+
+ IWORK (workspace) INTEGER array, dimension at least
+ (3*N*NLVL + 11*N).
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute a singular value while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through MOD(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if ((*ldb < 1) || (*ldb < *n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLALSD", &i__1);
+ return 0;
+ }
+
+ eps = slamch_("Epsilon");
+
+/* Set up the tolerance. */
+
+ if ((*rcond <= 0.f) || (*rcond >= 1.f)) {
+ *rcond = eps;
+ }
+
+ *rank = 0;
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+ if (d__[1] == 0.f) {
+ claset_("A", &c__1, nrhs, &c_b55, &c_b55, &b[b_offset], ldb);
+ } else {
+ *rank = 1;
+ clascl_("G", &c__0, &c__0, &d__[1], &c_b1011, &c__1, nrhs, &b[
+ b_offset], ldb, info);
+ d__[1] = dabs(d__[1]);
+ }
+ return 0;
+ }
+
+/* Rotate the matrix if it is lower bidiagonal. */
+
+ if (*(unsigned char *)uplo == 'L') {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (*nrhs == 1) {
+ csrot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
+ c__1, &cs, &sn);
+ } else {
+ rwork[((i__) << (1)) - 1] = cs;
+ rwork[i__ * 2] = sn;
+ }
+/* L10: */
+ }
+ if (*nrhs > 1) {
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - 1;
+ for (j = 1; j <= i__2; ++j) {
+ cs = rwork[((j) << (1)) - 1];
+ sn = rwork[j * 2];
+ csrot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__
+ * b_dim1], &c__1, &cs, &sn);
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Scale. */
+
+ nm1 = *n - 1;
+ orgnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.f) {
+ claset_("A", n, nrhs, &c_b55, &c_b55, &b[b_offset], ldb);
+ return 0;
+ }
+
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, n, &c__1, &d__[1], n, info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, &nm1, &c__1, &e[1], &nm1,
+ info);
+
+/*
+ If N is smaller than the minimum divide size SMLSIZ, then solve
+ the problem with another solver.
+*/
+
+ if (*n <= *smlsiz) {
+ irwu = 1;
+ irwvt = irwu + *n * *n;
+ irwwrk = irwvt + *n * *n;
+ irwrb = irwwrk;
+ irwib = irwrb + *n * *nrhs;
+ irwb = irwib + *n * *nrhs;
+ slaset_("A", n, n, &c_b320, &c_b1011, &rwork[irwu], n);
+ slaset_("A", n, n, &c_b320, &c_b1011, &rwork[irwvt], n);
+ slasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &rwork[irwvt], n,
+ &rwork[irwu], n, &rwork[irwwrk], &c__1, &rwork[irwwrk], info);
+ if (*info != 0) {
+ return 0;
+ }
+
+/*
+ In the real version, B is passed to SLASDQ and multiplied
+ internally by Q'. Here B is complex and that product is
+ computed below in two steps (real and imaginary parts).
+*/
+
+ j = irwb - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++j;
+ i__3 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__3].r;
+/* L40: */
+ }
+/* L50: */
+ }
+ sgemm_("T", "N", n, nrhs, n, &c_b1011, &rwork[irwu], n, &rwork[irwb],
+ n, &c_b320, &rwork[irwrb], n);
+ j = irwb - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L60: */
+ }
+/* L70: */
+ }
+ sgemm_("T", "N", n, nrhs, n, &c_b1011, &rwork[irwu], n, &rwork[irwb],
+ n, &c_b320, &rwork[irwib], n);
+ jreal = irwrb - 1;
+ jimag = irwib - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__3 = jrow + jcol * b_dim1;
+ i__4 = jreal;
+ i__5 = jimag;
+ q__1.r = rwork[i__4], q__1.i = rwork[i__5];
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L80: */
+ }
+/* L90: */
+ }
+
+ tol = *rcond * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= tol) {
+ claset_("A", &c__1, nrhs, &c_b55, &c_b55, &b[i__ + b_dim1],
+ ldb);
+ } else {
+ clascl_("G", &c__0, &c__0, &d__[i__], &c_b1011, &c__1, nrhs, &
+ b[i__ + b_dim1], ldb, info);
+ ++(*rank);
+ }
+/* L100: */
+ }
+
+/*
+ Since B is complex, the following call to SGEMM is performed
+ in two steps (real and imaginary parts). That is for V * B
+ (in the real version of the code V' is stored in WORK).
+
+ CALL SGEMM( 'T', 'N', N, NRHS, N, ONE, WORK, N, B, LDB, ZERO,
+ $ WORK( NWORK ), N )
+*/
+
+ j = irwb - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++j;
+ i__3 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__3].r;
+/* L110: */
+ }
+/* L120: */
+ }
+ sgemm_("T", "N", n, nrhs, n, &c_b1011, &rwork[irwvt], n, &rwork[irwb],
+ n, &c_b320, &rwork[irwrb], n);
+ j = irwb - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L130: */
+ }
+/* L140: */
+ }
+ sgemm_("T", "N", n, nrhs, n, &c_b1011, &rwork[irwvt], n, &rwork[irwb],
+ n, &c_b320, &rwork[irwib], n);
+ jreal = irwrb - 1;
+ jimag = irwib - 1;
+ i__1 = *nrhs;
+ for (jcol = 1; jcol <= i__1; ++jcol) {
+ i__2 = *n;
+ for (jrow = 1; jrow <= i__2; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__3 = jrow + jcol * b_dim1;
+ i__4 = jreal;
+ i__5 = jimag;
+ q__1.r = rwork[i__4], q__1.i = rwork[i__5];
+ b[i__3].r = q__1.r, b[i__3].i = q__1.i;
+/* L150: */
+ }
+/* L160: */
+ }
+
+/* Unscale. */
+
+ slascl_("G", &c__0, &c__0, &c_b1011, &orgnrm, n, &c__1, &d__[1], n,
+ info);
+ slasrt_("D", n, &d__[1], info);
+ clascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, n, nrhs, &b[b_offset],
+ ldb, info);
+
+ return 0;
+ }
+
+/* Book-keeping and setting up some constants. */
+
+ nlvl = (integer) (log((real) (*n) / (real) (*smlsiz + 1)) / log(2.f)) + 1;
+
+ smlszp = *smlsiz + 1;
+
+ u = 1;
+ vt = *smlsiz * *n + 1;
+ difl = vt + smlszp * *n;
+ difr = difl + nlvl * *n;
+ z__ = difr + ((nlvl * *n) << (1));
+ c__ = z__ + nlvl * *n;
+ s = c__ + *n;
+ poles = s + *n;
+ givnum = poles + ((nlvl) << (1)) * *n;
+ nrwork = givnum + ((nlvl) << (1)) * *n;
+ bx = 1;
+
+ irwrb = nrwork;
+ irwib = irwrb + *smlsiz * *nrhs;
+ irwb = irwib + *smlsiz * *nrhs;
+
+ sizei = *n + 1;
+ k = sizei + *n;
+ givptr = k + *n;
+ perm = givptr + *n;
+ givcol = perm + nlvl * *n;
+ iwk = givcol + ((nlvl * *n) << (1));
+
+ st = 1;
+ sqre = 0;
+ icmpq1 = 1;
+ icmpq2 = 0;
+ nsub = 0;
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) < eps) {
+ d__[i__] = r_sign(&eps, &d__[i__]);
+ }
+/* L170: */
+ }
+
+ i__1 = nm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (((r__1 = e[i__], dabs(r__1)) < eps) || (i__ == nm1)) {
+ ++nsub;
+ iwork[nsub] = st;
+
+/*
+ Subproblem found. First determine its size and then
+ apply divide and conquer on it.
+*/
+
+ if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else if ((r__1 = e[i__], dabs(r__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+ nsize = *n - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else {
+
+/*
+ A subproblem with E(NM1) small. This implies a
+ 1-by-1 subproblem at D(N), which is not solved
+ explicitly.
+*/
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ ++nsub;
+ iwork[nsub] = *n;
+ iwork[sizei + nsub - 1] = 1;
+ ccopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
+ }
+ st1 = st - 1;
+ if (nsize == 1) {
+
+/*
+ This is a 1-by-1 subproblem and is not solved
+ explicitly.
+*/
+
+ ccopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
+ } else if (nsize <= *smlsiz) {
+
+/* This is a small subproblem and is solved by SLASDQ. */
+
+ slaset_("A", &nsize, &nsize, &c_b320, &c_b1011, &rwork[vt +
+ st1], n);
+ slaset_("A", &nsize, &nsize, &c_b320, &c_b1011, &rwork[u +
+ st1], n);
+ slasdq_("U", &c__0, &nsize, &nsize, &nsize, &c__0, &d__[st], &
+ e[st], &rwork[vt + st1], n, &rwork[u + st1], n, &
+ rwork[nrwork], &c__1, &rwork[nrwork], info)
+ ;
+ if (*info != 0) {
+ return 0;
+ }
+
+/*
+ In the real version, B is passed to SLASDQ and multiplied
+ internally by Q'. Here B is complex and that product is
+ computed below in two steps (real and imaginary parts).
+*/
+
+ j = irwb - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = st + nsize - 1;
+ for (jrow = st; jrow <= i__3; ++jrow) {
+ ++j;
+ i__4 = jrow + jcol * b_dim1;
+ rwork[j] = b[i__4].r;
+/* L180: */
+ }
+/* L190: */
+ }
+ sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1011, &rwork[u +
+ st1], n, &rwork[irwb], &nsize, &c_b320, &rwork[irwrb],
+ &nsize);
+ j = irwb - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = st + nsize - 1;
+ for (jrow = st; jrow <= i__3; ++jrow) {
+ ++j;
+ rwork[j] = r_imag(&b[jrow + jcol * b_dim1]);
+/* L200: */
+ }
+/* L210: */
+ }
+ sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1011, &rwork[u +
+ st1], n, &rwork[irwb], &nsize, &c_b320, &rwork[irwib],
+ &nsize);
+ jreal = irwrb - 1;
+ jimag = irwib - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = st + nsize - 1;
+ for (jrow = st; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * b_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L220: */
+ }
+/* L230: */
+ }
+
+ clacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
+ st1], n);
+ } else {
+
+/* A large problem. Solve it using divide and conquer. */
+
+ slasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
+ rwork[u + st1], n, &rwork[vt + st1], &iwork[k + st1],
+ &rwork[difl + st1], &rwork[difr + st1], &rwork[z__ +
+ st1], &rwork[poles + st1], &iwork[givptr + st1], &
+ iwork[givcol + st1], n, &iwork[perm + st1], &rwork[
+ givnum + st1], &rwork[c__ + st1], &rwork[s + st1], &
+ rwork[nrwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ bxst = bx + st1;
+ clalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
+ work[bxst], n, &rwork[u + st1], n, &rwork[vt + st1], &
+ iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1]
+ , &rwork[z__ + st1], &rwork[poles + st1], &iwork[
+ givptr + st1], &iwork[givcol + st1], n, &iwork[perm +
+ st1], &rwork[givnum + st1], &rwork[c__ + st1], &rwork[
+ s + st1], &rwork[nrwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+ st = i__ + 1;
+ }
+/* L240: */
+ }
+
+/* Apply the singular values and treat the tiny ones as zero. */
+
+ tol = *rcond * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+ Some of the elements in D can be negative because 1-by-1
+ subproblems were not solved explicitly.
+*/
+
+ if ((r__1 = d__[i__], dabs(r__1)) <= tol) {
+ claset_("A", &c__1, nrhs, &c_b55, &c_b55, &work[bx + i__ - 1], n);
+ } else {
+ ++(*rank);
+ clascl_("G", &c__0, &c__0, &d__[i__], &c_b1011, &c__1, nrhs, &
+ work[bx + i__ - 1], n, info);
+ }
+ d__[i__] = (r__1 = d__[i__], dabs(r__1));
+/* L250: */
+ }
+
+/* Now apply back the right singular vectors. */
+
+ icmpq2 = 1;
+ i__1 = nsub;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ st = iwork[i__];
+ st1 = st - 1;
+ nsize = iwork[sizei + i__ - 1];
+ bxst = bx + st1;
+ if (nsize == 1) {
+ ccopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
+ } else if (nsize <= *smlsiz) {
+
+/*
+ Since B and BX are complex, the following call to SGEMM
+ is performed in two steps (real and imaginary parts).
+
+ CALL SGEMM( 'T', 'N', NSIZE, NRHS, NSIZE, ONE,
+ $ RWORK( VT+ST1 ), N, RWORK( BXST ), N, ZERO,
+ $ B( ST, 1 ), LDB )
+*/
+
+ j = bxst - *n - 1;
+ jreal = irwb - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ j += *n;
+ i__3 = nsize;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++jreal;
+ i__4 = j + jrow;
+ rwork[jreal] = work[i__4].r;
+/* L260: */
+ }
+/* L270: */
+ }
+ sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1011, &rwork[vt + st1],
+ n, &rwork[irwb], &nsize, &c_b320, &rwork[irwrb], &nsize);
+ j = bxst - *n - 1;
+ jimag = irwb - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ j += *n;
+ i__3 = nsize;
+ for (jrow = 1; jrow <= i__3; ++jrow) {
+ ++jimag;
+ rwork[jimag] = r_imag(&work[j + jrow]);
+/* L280: */
+ }
+/* L290: */
+ }
+ sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1011, &rwork[vt + st1],
+ n, &rwork[irwb], &nsize, &c_b320, &rwork[irwib], &nsize);
+ jreal = irwrb - 1;
+ jimag = irwib - 1;
+ i__2 = *nrhs;
+ for (jcol = 1; jcol <= i__2; ++jcol) {
+ i__3 = st + nsize - 1;
+ for (jrow = st; jrow <= i__3; ++jrow) {
+ ++jreal;
+ ++jimag;
+ i__4 = jrow + jcol * b_dim1;
+ i__5 = jreal;
+ i__6 = jimag;
+ q__1.r = rwork[i__5], q__1.i = rwork[i__6];
+ b[i__4].r = q__1.r, b[i__4].i = q__1.i;
+/* L300: */
+ }
+/* L310: */
+ }
+ } else {
+ clalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
+ b_dim1], ldb, &rwork[u + st1], n, &rwork[vt + st1], &
+ iwork[k + st1], &rwork[difl + st1], &rwork[difr + st1], &
+ rwork[z__ + st1], &rwork[poles + st1], &iwork[givptr +
+ st1], &iwork[givcol + st1], n, &iwork[perm + st1], &rwork[
+ givnum + st1], &rwork[c__ + st1], &rwork[s + st1], &rwork[
+ nrwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+/* L320: */
+ }
+
+/* Unscale and sort the singular values. */
+
+ slascl_("G", &c__0, &c__0, &c_b1011, &orgnrm, n, &c__1, &d__[1], n, info);
+ slasrt_("D", n, &d__[1], info);
+ clascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, n, nrhs, &b[b_offset], ldb,
+ info);
+
+ return 0;
+
+/* End of CLALSD */
+
+} /* clalsd_ */
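+
+/*
+   Editorial sketch (hypothetical; not part of the f2c translation):
+   the RCOND truncation rule CLALSD applies, written out for the
+   diagonal least squares problem diag(s)*x = b described in the
+   comments above. Singular values at or below rcond*max(s) are
+   treated as exactly zero, which yields the minimum norm solution;
+   the return value is the effective rank. Assumes the f2c types
+   used throughout this file and nonnegative entries in s.
+*/
+static integer rcond_truncate_sketch_(integer n, real rcond, real *s,
+        complex *b)
+{
+    integer i__, rank = 0;
+    real smax = 0.f, tol;
+
+    for (i__ = 0; i__ < n; ++i__) {
+        if (s[i__] > smax) {
+            smax = s[i__];
+        }
+    }
+    tol = rcond * smax;
+    for (i__ = 0; i__ < n; ++i__) {
+        if (s[i__] <= tol) {
+            b[i__].r = 0.f, b[i__].i = 0.f;   /* treat as zero    */
+        } else {
+            b[i__].r /= s[i__];               /* x(i) = b(i)/s(i) */
+            b[i__].i /= s[i__];
+            ++rank;
+        }
+    }
+    return rank;
+}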
+
+doublereal clange_(char *norm, integer *m, integer *n, complex *a, integer *
+ lda, real *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real ret_val, r__1, r__2;
+
+ /* Builtin functions */
+ double c_abs(complex *), sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j;
+ static real sum, scale;
+ extern logical lsame_(char *, char *);
+ static real value;
+ extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+ *, real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ CLANGE returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ complex matrix A.
+
+ Description
+ ===========
+
+ CLANGE returns the value
+
+ CLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in CLANGE as described
+ above.
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0. When M = 0,
+ CLANGE is set to zero.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0. When N = 0,
+ CLANGE is set to zero.
+
+ A (input) COMPLEX array, dimension (LDA,N)
+ The m by n matrix A.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(M,1).
+
+ WORK (workspace) REAL array, dimension (LWORK),
+ where LWORK >= M when NORM = 'I'; otherwise, WORK is not
+ referenced.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (min(*m,*n) == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+ value = dmax(r__1,r__2);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if ((lsame_(norm, "O")) || (*(unsigned char *
+ )norm == '1')) {
+
+/* Find norm1(A). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += c_abs(&a[i__ + j * a_dim1]);
+/* L30: */
+ }
+ value = dmax(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += c_abs(&a[i__ + j * a_dim1]);
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.f;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L80: */
+ }
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ classq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of CLANGE */
+
+} /* clange_ */
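+
+/*
+   Editorial usage sketch (hypothetical; not part of the f2c
+   translation): calling clange_ on a 2-by-2 complex matrix stored
+   column-major. WORK is only referenced for NORM = 'I', so the one
+   norm below needs no workspace. Assumes the f2c types used
+   throughout this file.
+*/
+static doublereal clange_usage_sketch_(void)
+{
+    static complex a[4];            /* column-major 2-by-2 */
+    integer m = 2, n = 2, lda = 2;
+
+    a[0].r = 1.f, a[0].i = 0.f;     /* A(1,1) */
+    a[1].r = 0.f, a[1].i = 3.f;     /* A(2,1) */
+    a[2].r = 2.f, a[2].i = 0.f;     /* A(1,2) */
+    a[3].r = 0.f, a[3].i = 4.f;     /* A(2,2) */
+
+    /* One norm: maximum column sum = max(1 + 3, 2 + 4) = 6. */
+    return clange_("O", &m, &n, a, &lda, (real *)0);
+}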
+
+doublereal clanhe_(char *norm, char *uplo, integer *n, complex *a, integer *
+ lda, real *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double c_abs(complex *), sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j;
+ static real sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ static real value;
+ extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+ *, real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ CLANHE returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ complex hermitian matrix A.
+
+ Description
+ ===========
+
+ CLANHE returns the value
+
+ CLANHE = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in CLANHE as described
+ above.
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the upper or lower triangular part of the
+ hermitian matrix A is to be referenced.
+ = 'U': Upper triangular part of A is referenced
+ = 'L': Lower triangular part of A is referenced
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0. When N = 0, CLANHE is
+ set to zero.
+
+ A (input) COMPLEX array, dimension (LDA,N)
+ The hermitian matrix A. If UPLO = 'U', the leading n by n
+ upper triangular part of A contains the upper triangular part
+ of the matrix A, and the strictly lower triangular part of A
+ is not referenced. If UPLO = 'L', the leading n by n lower
+ triangular part of A contains the lower triangular part of
+ the matrix A, and the strictly upper triangular part of A is
+ not referenced. Note that the imaginary parts of the diagonal
+ elements need not be set and are assumed to be zero.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(N,1).
+
+ WORK (workspace) REAL array, dimension (LWORK),
+ where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise,
+ WORK is not referenced.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+ value = dmax(r__1,r__2);
+/* L10: */
+ }
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j + j * a_dim1;
+ r__2 = value, r__3 = (r__1 = a[i__2].r, dabs(r__1));
+ value = dmax(r__2,r__3);
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+ value = dmax(r__1,r__2);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (((lsame_(norm, "I")) || (lsame_(norm,
+ "O"))) || (*(unsigned char *)norm == '1')) {
+
+/* Find normI(A) ( = norm1(A), since A is hermitian). */
+
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ absa = c_abs(&a[i__ + j * a_dim1]);
+ sum += absa;
+ work[i__] += absa;
+/* L50: */
+ }
+ i__2 = j + j * a_dim1;
+ work[j] = sum + (r__1 = a[i__2].r, dabs(r__1));
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + j * a_dim1;
+ sum = work[j] + (r__1 = a[i__2].r, dabs(r__1));
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ absa = c_abs(&a[i__ + j * a_dim1]);
+ sum += absa;
+ work[i__] += absa;
+/* L90: */
+ }
+ value = dmax(value,sum);
+/* L100: */
+ }
+ }
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ classq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ classq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
+/* L120: */
+ }
+ }
+ sum *= 2;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ if (a[i__2].r != 0.f) {
+ i__2 = i__ + i__ * a_dim1;
+ absa = (r__1 = a[i__2].r, dabs(r__1));
+ if (scale < absa) {
+/* Computing 2nd power */
+ r__1 = scale / absa;
+ sum = sum * (r__1 * r__1) + 1.f;
+ scale = absa;
+ } else {
+/* Computing 2nd power */
+ r__1 = absa / scale;
+ sum += r__1 * r__1;
+ }
+ }
+/* L130: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of CLANHE */
+
+} /* clanhe_ */
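+
+/*
+   Editorial sketch (hypothetical; not part of the f2c translation):
+   the scaled sum-of-squares update used at the end of clanhe_ when
+   the diagonal entries are folded into (scale, sumsq). The pair
+   represents the running sum of squares as scale^2 * sumsq, the same
+   overflow/underflow-safe representation CLASSQ maintains, so the
+   final norm is scale * sqrt(sumsq). Assumes the f2c types used
+   throughout this file.
+*/
+static void ssq_update_sketch_(real x, real *scale, real *sumsq)
+{
+    real absx = x >= 0.f ? x : -x;
+    real t;
+
+    if (absx != 0.f) {
+        if (*scale < absx) {
+            t = *scale / absx;                /* rescale old sum */
+            *sumsq = *sumsq * (t * t) + 1.f;
+            *scale = absx;
+        } else {
+            t = absx / *scale;
+            *sumsq += t * t;
+        }
+    }
+}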
+
+doublereal clanhs_(char *norm, integer *n, complex *a, integer *lda, real *
+ work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ real ret_val, r__1, r__2;
+
+ /* Builtin functions */
+ double c_abs(complex *), sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j;
+ static real sum, scale;
+ extern logical lsame_(char *, char *);
+ static real value;
+ extern /* Subroutine */ int classq_(integer *, complex *, integer *, real
+ *, real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ CLANHS returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ Hessenberg matrix A.
+
+ Description
+ ===========
+
+ CLANHS returns the value
+
+ CLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in CLANHS as described
+ above.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0. When N = 0, CLANHS is
+ set to zero.
+
+ A (input) COMPLEX array, dimension (LDA,N)
+ The n by n upper Hessenberg matrix A; the part of A below the
+ first sub-diagonal is not referenced.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(N,1).
+
+ WORK (workspace) REAL array, dimension (LWORK),
+ where LWORK >= N when NORM = 'I'; otherwise, WORK is not
+ referenced.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = c_abs(&a[i__ + j * a_dim1]);
+ value = dmax(r__1,r__2);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if ((lsame_(norm, "O")) || (*(unsigned char *
+ )norm == '1')) {
+
+/* Find norm1(A). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += c_abs(&a[i__ + j * a_dim1]);
+/* L30: */
+ }
+ value = dmax(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += c_abs(&a[i__ + j * a_dim1]);
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L80: */
+ }
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ classq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of CLANHS */
+
+} /* clanhs_ */
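+
+/*
+   Editorial usage sketch (hypothetical; not part of the f2c
+   translation): the infinity norm is the only clanhs_ path that
+   references WORK, which must then hold at least N reals. Assumes
+   the f2c types used throughout this file; the fixed-size buffer
+   and the -1 fallback are conveniences of this sketch only.
+*/
+static doublereal clanhs_usage_sketch_(complex *h, integer *n,
+        integer *ldh)
+{
+    static real work[64];       /* needs at least *n reals for 'I' */
+
+    if (*n > 64) {
+        return -1.;
+    }
+    return clanhs_("I", n, h, ldh, work);
+}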
+
+/* Subroutine */ int clarcm_(integer *m, integer *n, real *a, integer *lda,
+ complex *b, integer *ldb, complex *c__, integer *ldc, real *rwork)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, i__1, i__2,
+ i__3, i__4, i__5;
+ real r__1;
+ complex q__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CLARCM performs a very simple matrix-matrix multiplication:
+ C := A * B,
+ where A is M by M and real; B is M by N and complex;
+ C is M by N and complex.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A and of the matrix C.
+ M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrices B and C.
+ N >= 0.
+
+ A (input) REAL array, dimension (LDA, M)
+ A contains the M by M matrix A.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >=max(1,M).
+
+ B (input) COMPLEX array, dimension (LDB, N)
+ B contains the M by N matrix B.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >=max(1,M).
+
+ C (output) COMPLEX array, dimension (LDC, N)
+ On exit, C contains the M by N product A * B.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >=max(1,M).
+
+ RWORK (workspace) REAL array, dimension (2*M*N)
+
+ =====================================================================
+
+
+ Quick return if possible.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --rwork;
+
+ /* Function Body */
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * b_dim1;
+ rwork[(j - 1) * *m + i__] = b[i__3].r;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ l = *m * *n + 1;
+ sgemm_("N", "N", m, n, m, &c_b1011, &a[a_offset], lda, &rwork[1], m, &
+ c_b320, &rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = l + (j - 1) * *m + i__ - 1;
+ c__[i__3].r = rwork[i__4], c__[i__3].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ rwork[(j - 1) * *m + i__] = r_imag(&b[i__ + j * b_dim1]);
+/* L50: */
+ }
+/* L60: */
+ }
+ sgemm_("N", "N", m, n, m, &c_b1011, &a[a_offset], lda, &rwork[1], m, &
+ c_b320, &rwork[l], m);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ r__1 = c__[i__4].r;
+ i__5 = l + (j - 1) * *m + i__ - 1;
+ q__1.r = r__1, q__1.i = rwork[i__5];
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L70: */
+ }
+/* L80: */
+ }
+
+ return 0;
+
+/* End of CLARCM */
+
+} /* clarcm_ */
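+
+/*
+   Editorial usage sketch (hypothetical; not part of the f2c
+   translation): forming C = A * B with clarcm_ for a real diagonal
+   2-by-2 A and a complex 2-by-1 B. RWORK needs 2*M*N reals: one
+   M-by-N buffer for the packed real (then imaginary) part of B and
+   one for the SGEMM product. Assumes the f2c types used throughout
+   this file.
+*/
+static void clarcm_usage_sketch_(void)
+{
+    static real a[4] = {1.f, 0.f, 0.f, 2.f};  /* diag(1,2), col-major */
+    static complex b[2], c[2];
+    static real rwork[4];                     /* 2*M*N = 2*2*1 */
+    integer m = 2, n = 1, lda = 2, ldb = 2, ldc = 2;
+
+    b[0].r = 5.f, b[0].i = -1.f;
+    b[1].r = 0.f, b[1].i = 3.f;
+    clarcm_(&m, &n, a, &lda, b, &ldb, c, &ldc, rwork);
+    /* Now c[0] = 5 - i and c[1] = 6i. */
+}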
+
+/* Subroutine */ int clarf_(char *side, integer *m, integer *n, complex *v,
+ integer *incv, complex *tau, complex *c__, integer *ldc, complex *
+ work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset;
+ complex q__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int cgerc_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *),
+ cgemv_(char *, integer *, integer *, complex *, complex *,
+ integer *, complex *, integer *, complex *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLARF applies a complex elementary reflector H to a complex M-by-N
+ matrix C, from either the left or the right. H is represented in the
+ form
+
+ H = I - tau * v * v'
+
+ where tau is a complex scalar and v is a complex vector.
+
+ If tau = 0, then H is taken to be the unit matrix.
+
+ To apply H' (the conjugate transpose of H), supply conjg(tau)
+ instead of tau.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': form H * C
+ = 'R': form C * H
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ V (input) COMPLEX array, dimension
+ (1 + (M-1)*abs(INCV)) if SIDE = 'L'
+ or (1 + (N-1)*abs(INCV)) if SIDE = 'R'
+ The vector v in the representation of H. V is not used if
+ TAU = 0.
+
+ INCV (input) INTEGER
+ The increment between elements of v. INCV <> 0.
+
+ TAU (input) COMPLEX
+ The value tau in the representation of H.
+
+ C (input/output) COMPLEX array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by the matrix H * C if SIDE = 'L',
+ or C * H if SIDE = 'R'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) COMPLEX array, dimension
+ (N) if SIDE = 'L'
+ or (M) if SIDE = 'R'
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (lsame_(side, "L")) {
+
+/* Form H * C */
+
+ if ((tau->r != 0.f) || (tau->i != 0.f)) {
+
+/* w := C' * v */
+
+ cgemv_("Conjugate transpose", m, n, &c_b56, &c__[c_offset], ldc, &
+ v[1], incv, &c_b55, &work[1], &c__1);
+
+/* C := C - v * w' */
+
+ q__1.r = -tau->r, q__1.i = -tau->i;
+ cgerc_(m, n, &q__1, &v[1], incv, &work[1], &c__1, &c__[c_offset],
+ ldc);
+ }
+ } else {
+
+/* Form C * H */
+
+ if ((tau->r != 0.f) || (tau->i != 0.f)) {
+
+/* w := C * v */
+
+ cgemv_("No transpose", m, n, &c_b56, &c__[c_offset], ldc, &v[1],
+ incv, &c_b55, &work[1], &c__1);
+
+/* C := C - w * v' */
+
+ q__1.r = -tau->r, q__1.i = -tau->i;
+ cgerc_(m, n, &q__1, &work[1], &c__1, &v[1], incv, &c__[c_offset],
+ ldc);
+ }
+ }
+ return 0;
+
+/* End of CLARF */
+
+} /* clarf_ */
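+
+/*
+   Editorial sketch (hypothetical; not part of the f2c translation):
+   what applying H = I - tau*v*v' amounts to for a single column x,
+   the operation clarf_ performs on every column of C via CGEMV and
+   CGERC. It is a conjugated dot product w = v'*x followed by the
+   rank-1 update x := x - (tau*w)*v. Assumes the f2c types used
+   throughout this file.
+*/
+static void reflector_apply_sketch_(integer n, complex *v, complex tau,
+        complex *x)
+{
+    integer i__;
+    complex w, t;
+
+    w.r = 0.f, w.i = 0.f;
+    for (i__ = 0; i__ < n; ++i__) {       /* w = conj(v)' * x */
+        w.r += v[i__].r * x[i__].r + v[i__].i * x[i__].i;
+        w.i += v[i__].r * x[i__].i - v[i__].i * x[i__].r;
+    }
+    t.r = tau.r * w.r - tau.i * w.i;      /* t = tau * w */
+    t.i = tau.r * w.i + tau.i * w.r;
+    for (i__ = 0; i__ < n; ++i__) {       /* x := x - t * v */
+        x[i__].r -= t.r * v[i__].r - t.i * v[i__].i;
+        x[i__].i -= t.r * v[i__].i + t.i * v[i__].r;
+    }
+}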
+
+/* Subroutine */ int clarfb_(char *side, char *trans, char *direct, char *
+ storev, integer *m, integer *n, integer *k, complex *v, integer *ldv,
+ complex *t, integer *ldt, complex *c__, integer *ldc, complex *work,
+ integer *ldwork)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
+ work_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *), ctrmm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), clacgv_(integer *,
+ complex *, integer *);
+ static char transt[1];
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLARFB applies a complex block reflector H or its transpose H' to a
+ complex M-by-N matrix C, from either the left or the right.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply H or H' from the Left
+ = 'R': apply H or H' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply H (No transpose)
+ = 'C': apply H' (Conjugate transpose)
+
+ DIRECT (input) CHARACTER*1
+ Indicates how H is formed from a product of elementary
+ reflectors
+ = 'F': H = H(1) H(2) . . . H(k) (Forward)
+ = 'B': H = H(k) . . . H(2) H(1) (Backward)
+
+ STOREV (input) CHARACTER*1
+ Indicates how the vectors which define the elementary
+ reflectors are stored:
+ = 'C': Columnwise
+ = 'R': Rowwise
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ K (input) INTEGER
+ The order of the matrix T (= the number of elementary
+ reflectors whose product defines the block reflector).
+
+ V (input) COMPLEX array, dimension
+ (LDV,K) if STOREV = 'C'
+ (LDV,M) if STOREV = 'R' and SIDE = 'L'
+ (LDV,N) if STOREV = 'R' and SIDE = 'R'
+ The matrix V. See further details.
+
+ LDV (input) INTEGER
+ The leading dimension of the array V.
+ If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M);
+ if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N);
+ if STOREV = 'R', LDV >= K.
+
+ T (input) COMPLEX array, dimension (LDT,K)
+ The triangular K-by-K matrix T in the representation of the
+ block reflector.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= K.
+
+ C (input/output) COMPLEX array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by H*C or H'*C or C*H or C*H'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) COMPLEX array, dimension (LDWORK,K)
+
+ LDWORK (input) INTEGER
+ The leading dimension of the array WORK.
+ If SIDE = 'L', LDWORK >= max(1,N);
+ if SIDE = 'R', LDWORK >= max(1,M).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+
+ /* Function Body */
+ if ((*m <= 0) || (*n <= 0)) {
+ return 0;
+ }
+
+ if (lsame_(trans, "N")) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ if (lsame_(storev, "C")) {
+
+ if (lsame_(direct, "F")) {
+
+/*
+ Let V = ( V1 ) (first K rows)
+ ( V2 )
+ where V1 is unit lower triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
+
+ W := C1'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ ccopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
+ &c__1);
+ clacgv_(n, &work[j * work_dim1 + 1], &c__1);
+/* L10: */
+ }
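+
+/*
+   ccopy_ moves row j of C1 into column j of WORK and clacgv_ then
+   conjugates it in place; together they form W = C1' (conjugate
+   transpose), since the BLAS has no conjugating copy routine.
+*/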
+
+/* W := W * V1 */
+
+ ctrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b56,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C2'*V2 */
+
+ i__1 = *m - *k;
+ cgemm_("Conjugate transpose", "No transpose", n, k, &i__1,
+ &c_b56, &c__[*k + 1 + c_dim1], ldc, &v[*k + 1 +
+ v_dim1], ldv, &c_b56, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ctrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b56, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (*m > *k) {
+
+/* C2 := C2 - V2 * W' */
+
+ i__1 = *m - *k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Conjugate transpose", &i__1, n, k,
+ &q__1, &v[*k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork, &c_b56, &c__[*k + 1 +
+ c_dim1], ldc);
+ }
+
+/* W := W * V1' */
+
+ ctrmm_("Right", "Lower", "Conjugate transpose", "Unit", n, k,
+ &c_b56, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * c_dim1;
+ i__4 = j + i__ * c_dim1;
+ r_cnjg(&q__2, &work[i__ + j * work_dim1]);
+ q__1.r = c__[i__4].r - q__2.r, q__1.i = c__[i__4].i -
+ q__2.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V = (C1*V1 + C2*V2) (stored in WORK)
+
+ W := C1
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ ccopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L40: */
+ }
+
+/* W := W * V1 */
+
+ ctrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b56,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C2 * V2 */
+
+ i__1 = *n - *k;
+ cgemm_("No transpose", "No transpose", m, k, &i__1, &
+ c_b56, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k +
+ 1 + v_dim1], ldv, &c_b56, &work[work_offset],
+ ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ctrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b56, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (*n > *k) {
+
+/* C2 := C2 - W * V2' */
+
+ i__1 = *n - *k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Conjugate transpose", m, &i__1, k,
+ &q__1, &work[work_offset], ldwork, &v[*k + 1 +
+ v_dim1], ldv, &c_b56, &c__[(*k + 1) * c_dim1 + 1],
+ ldc);
+ }
+
+/* W := W * V1' */
+
+ ctrmm_("Right", "Lower", "Conjugate transpose", "Unit", m, k,
+ &c_b56, &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ q__1.r = c__[i__4].r - work[i__5].r, q__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ } else {
+
+/*
+ Let V = ( V1 )
+ ( V2 ) (last K rows)
+ where V2 is unit upper triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
+
+ W := C2'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ ccopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
+ work_dim1 + 1], &c__1);
+ clacgv_(n, &work[j * work_dim1 + 1], &c__1);
+/* L70: */
+ }
+
+/* W := W * V2 */
+
+ ctrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b56,
+ &v[*m - *k + 1 + v_dim1], ldv, &work[work_offset],
+ ldwork);
+ if (*m > *k) {
+
+/* W := W + C1'*V1 */
+
+ i__1 = *m - *k;
+ cgemm_("Conjugate transpose", "No transpose", n, k, &i__1,
+ &c_b56, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b56, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ctrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b56, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (*m > *k) {
+
+/* C1 := C1 - V1 * W' */
+
+ i__1 = *m - *k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Conjugate transpose", &i__1, n, k,
+ &q__1, &v[v_offset], ldv, &work[work_offset],
+ ldwork, &c_b56, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ ctrmm_("Right", "Upper", "Conjugate transpose", "Unit", n, k,
+ &c_b56, &v[*m - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = *m - *k + j + i__ * c_dim1;
+ i__4 = *m - *k + j + i__ * c_dim1;
+ r_cnjg(&q__2, &work[i__ + j * work_dim1]);
+ q__1.r = c__[i__4].r - q__2.r, q__1.i = c__[i__4].i -
+ q__2.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V = (C1*V1 + C2*V2) (stored in WORK)
+
+ W := C2
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ ccopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
+ j * work_dim1 + 1], &c__1);
+/* L100: */
+ }
+
+/* W := W * V2 */
+
+ ctrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b56,
+ &v[*n - *k + 1 + v_dim1], ldv, &work[work_offset],
+ ldwork);
+ if (*n > *k) {
+
+/* W := W + C1 * V1 */
+
+ i__1 = *n - *k;
+ cgemm_("No transpose", "No transpose", m, k, &i__1, &
+ c_b56, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b56, &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ctrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b56, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (*n > *k) {
+
+/* C1 := C1 - W * V1' */
+
+ i__1 = *n - *k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Conjugate transpose", m, &i__1, k,
+ &q__1, &work[work_offset], ldwork, &v[v_offset],
+ ldv, &c_b56, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ ctrmm_("Right", "Upper", "Conjugate transpose", "Unit", m, k,
+ &c_b56, &v[*n - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + (*n - *k + j) * c_dim1;
+ i__4 = i__ + (*n - *k + j) * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ q__1.r = c__[i__4].r - work[i__5].r, q__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L110: */
+ }
+/* L120: */
+ }
+ }
+ }
+
+ } else if (lsame_(storev, "R")) {
+
+ if (lsame_(direct, "F")) {
+
+/*
+ Let V = ( V1 V2 ) (V1: first K columns)
+ where V1 is unit upper triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
+
+ W := C1'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ ccopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
+ &c__1);
+ clacgv_(n, &work[j * work_dim1 + 1], &c__1);
+/* L130: */
+ }
+
+/* W := W * V1' */
+
+ ctrmm_("Right", "Upper", "Conjugate transpose", "Unit", n, k,
+ &c_b56, &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C2'*V2' */
+
+ i__1 = *m - *k;
+ cgemm_("Conjugate transpose", "Conjugate transpose", n, k,
+ &i__1, &c_b56, &c__[*k + 1 + c_dim1], ldc, &v[(*
+ k + 1) * v_dim1 + 1], ldv, &c_b56, &work[
+ work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ctrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b56, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (*m > *k) {
+
+/* C2 := C2 - V2' * W' */
+
+ i__1 = *m - *k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("Conjugate transpose", "Conjugate transpose", &
+ i__1, n, k, &q__1, &v[(*k + 1) * v_dim1 + 1], ldv,
+ &work[work_offset], ldwork, &c_b56, &c__[*k + 1
+ + c_dim1], ldc);
+ }
+
+/* W := W * V1 */
+
+ ctrmm_("Right", "Upper", "No transpose", "Unit", n, k, &c_b56,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * c_dim1;
+ i__4 = j + i__ * c_dim1;
+ r_cnjg(&q__2, &work[i__ + j * work_dim1]);
+ q__1.r = c__[i__4].r - q__2.r, q__1.i = c__[i__4].i -
+ q__2.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L140: */
+ }
+/* L150: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
+
+ W := C1
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ ccopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L160: */
+ }
+
+/* W := W * V1' */
+
+ ctrmm_("Right", "Upper", "Conjugate transpose", "Unit", m, k,
+ &c_b56, &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C2 * V2' */
+
+ i__1 = *n - *k;
+ cgemm_("No transpose", "Conjugate transpose", m, k, &i__1,
+ &c_b56, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[(*k
+ + 1) * v_dim1 + 1], ldv, &c_b56, &work[
+ work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ctrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b56, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (*n > *k) {
+
+/* C2 := C2 - W * V2 */
+
+ i__1 = *n - *k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "No transpose", m, &i__1, k, &q__1,
+ &work[work_offset], ldwork, &v[(*k + 1) * v_dim1
+ + 1], ldv, &c_b56, &c__[(*k + 1) * c_dim1 + 1],
+ ldc);
+ }
+
+/* W := W * V1 */
+
+ ctrmm_("Right", "Upper", "No transpose", "Unit", m, k, &c_b56,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * c_dim1;
+ i__4 = i__ + j * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ q__1.r = c__[i__4].r - work[i__5].r, q__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L170: */
+ }
+/* L180: */
+ }
+
+ }
+
+ } else {
+
+/*
+ Let V = ( V1 V2 ) (V2: last K columns)
+ where V2 is unit lower triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
+
+ W := C2'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ ccopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
+ work_dim1 + 1], &c__1);
+ clacgv_(n, &work[j * work_dim1 + 1], &c__1);
+/* L190: */
+ }
+
+/* W := W * V2' */
+
+ ctrmm_("Right", "Lower", "Conjugate transpose", "Unit", n, k,
+ &c_b56, &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C1'*V1' */
+
+ i__1 = *m - *k;
+ cgemm_("Conjugate transpose", "Conjugate transpose", n, k,
+ &i__1, &c_b56, &c__[c_offset], ldc, &v[v_offset],
+ ldv, &c_b56, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ ctrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b56, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (*m > *k) {
+
+/* C1 := C1 - V1' * W' */
+
+ i__1 = *m - *k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("Conjugate transpose", "Conjugate transpose", &
+ i__1, n, k, &q__1, &v[v_offset], ldv, &work[
+ work_offset], ldwork, &c_b56, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ ctrmm_("Right", "Lower", "No transpose", "Unit", n, k, &c_b56,
+ &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = *m - *k + j + i__ * c_dim1;
+ i__4 = *m - *k + j + i__ * c_dim1;
+ r_cnjg(&q__2, &work[i__ + j * work_dim1]);
+ q__1.r = c__[i__4].r - q__2.r, q__1.i = c__[i__4].i -
+ q__2.i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L200: */
+ }
+/* L210: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
+
+ W := C2
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ ccopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
+ j * work_dim1 + 1], &c__1);
+/* L220: */
+ }
+
+/* W := W * V2' */
+
+ ctrmm_("Right", "Lower", "Conjugate transpose", "Unit", m, k,
+ &c_b56, &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C1 * V1' */
+
+ i__1 = *n - *k;
+ cgemm_("No transpose", "Conjugate transpose", m, k, &i__1,
+ &c_b56, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b56, &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ ctrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b56, &t[
+ t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (*n > *k) {
+
+/* C1 := C1 - W * V1 */
+
+ i__1 = *n - *k;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "No transpose", m, &i__1, k, &q__1,
+ &work[work_offset], ldwork, &v[v_offset], ldv, &
+ c_b56, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ ctrmm_("Right", "Lower", "No transpose", "Unit", m, k, &c_b56,
+ &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + (*n - *k + j) * c_dim1;
+ i__4 = i__ + (*n - *k + j) * c_dim1;
+ i__5 = i__ + j * work_dim1;
+ q__1.r = c__[i__4].r - work[i__5].r, q__1.i = c__[
+ i__4].i - work[i__5].i;
+ c__[i__3].r = q__1.r, c__[i__3].i = q__1.i;
+/* L230: */
+ }
+/* L240: */
+ }
+
+ }
+
+ }
+ }
+
+ return 0;
+
+/* End of CLARFB */
+
+} /* clarfb_ */
+
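+/*
+   Minimal usage sketch for the clarft_/clarfb_ pair (illustrative
+   only; m, n, k and the arrays v, tau, t, c__, work are placeholders,
+   and the f2c types come from declarations earlier in this file):
+
+       integer m, n, k, ldv, ldt, ldc, ldwork;
+       clarft_("F", "C", &m, &k, v, &ldv, tau, t, &ldt);
+       clarfb_("L", "N", "F", "C", &m, &n, &k, v, &ldv, t, &ldt,
+               c__, &ldc, work, &ldwork);
+
+   applies H = H(1)...H(k) from the left to the m-by-n matrix c__.
+*/
+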
+/* Subroutine */ int clarfg_(integer *n, complex *alpha, complex *x, integer *
+ incx, complex *tau)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double r_imag(complex *), r_sign(real *, real *);
+
+ /* Local variables */
+ static integer j, knt;
+ static real beta;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ static real alphi, alphr, xnorm;
+ extern doublereal scnrm2_(integer *, complex *, integer *), slapy3_(real *
+ , real *, real *);
+ extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *);
+ static real safmin, rsafmn;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLARFG generates a complex elementary reflector H of order n, such
+ that
+
+ H' * ( alpha ) = ( beta ), H' * H = I.
+ ( x ) ( 0 )
+
+ where alpha and beta are scalars, with beta real, and x is an
+ (n-1)-element complex vector. H is represented in the form
+
+ H = I - tau * ( 1 ) * ( 1 v' ) ,
+ ( v )
+
+ where tau is a complex scalar and v is a complex (n-1)-element
+ vector. Note that H is not hermitian.
+
+ If the elements of x are all zero and alpha is real, then tau = 0
+ and H is taken to be the unit matrix.
+
+ Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1 .
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the elementary reflector.
+
+ ALPHA (input/output) COMPLEX
+ On entry, the value alpha.
+ On exit, it is overwritten with the value beta.
+
+ X (input/output) COMPLEX array, dimension
+ (1+(N-2)*abs(INCX))
+ On entry, the vector x.
+ On exit, it is overwritten with the vector v.
+
+ INCX (input) INTEGER
+ The increment between elements of X. INCX > 0.
+
+ TAU (output) COMPLEX
+ The value tau.
+
+ =====================================================================
+*/
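+
+/*
+   Outline of the computation below (summary sketch): beta is set to
+   -sign(alphr) * slapy3_(alphr, alphi, xnorm), where slapy3_ computes
+   sqrt(x^2 + y^2 + z^2) without overflow and the sign choice avoids
+   cancellation in (beta - alpha).  Then
+
+       tau   = (beta - alpha) / beta
+       v     = x / (alpha - beta)      (via cladiv_ and cscal_)
+       alpha = beta
+
+   If |beta| would underflow, x is first scaled up by 1/safmin until
+   beta is safely representable, and the scaling is undone at the end.
+*/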
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n <= 0) {
+ tau->r = 0.f, tau->i = 0.f;
+ return 0;
+ }
+
+ i__1 = *n - 1;
+ xnorm = scnrm2_(&i__1, &x[1], incx);
+ alphr = alpha->r;
+ alphi = r_imag(alpha);
+
+ if (xnorm == 0.f && alphi == 0.f) {
+
+/* H = I */
+
+ tau->r = 0.f, tau->i = 0.f;
+ } else {
+
+/* general case */
+
+ r__1 = slapy3_(&alphr, &alphi, &xnorm);
+ beta = -r_sign(&r__1, &alphr);
+ safmin = slamch_("S") / slamch_("E");
+ rsafmn = 1.f / safmin;
+
+ if (dabs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+ knt = 0;
+L10:
+ ++knt;
+ i__1 = *n - 1;
+ csscal_(&i__1, &rsafmn, &x[1], incx);
+ beta *= rsafmn;
+ alphi *= rsafmn;
+ alphr *= rsafmn;
+ if (dabs(beta) < safmin) {
+ goto L10;
+ }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+ i__1 = *n - 1;
+ xnorm = scnrm2_(&i__1, &x[1], incx);
+ q__1.r = alphr, q__1.i = alphi;
+ alpha->r = q__1.r, alpha->i = q__1.i;
+ r__1 = slapy3_(&alphr, &alphi, &xnorm);
+ beta = -r_sign(&r__1, &alphr);
+ r__1 = (beta - alphr) / beta;
+ r__2 = -alphi / beta;
+ q__1.r = r__1, q__1.i = r__2;
+ tau->r = q__1.r, tau->i = q__1.i;
+ q__2.r = alpha->r - beta, q__2.i = alpha->i;
+ cladiv_(&q__1, &c_b56, &q__2);
+ alpha->r = q__1.r, alpha->i = q__1.i;
+ i__1 = *n - 1;
+ cscal_(&i__1, alpha, &x[1], incx);
+
+/* If ALPHA is subnormal, it may lose relative accuracy */
+
+ alpha->r = beta, alpha->i = 0.f;
+ i__1 = knt;
+ for (j = 1; j <= i__1; ++j) {
+ q__1.r = safmin * alpha->r, q__1.i = safmin * alpha->i;
+ alpha->r = q__1.r, alpha->i = q__1.i;
+/* L20: */
+ }
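+
+/*
+   Undo the knt rescalings: the rescaled beta has magnitude at most 1,
+   so multiplying alpha (= beta) by safmin knt times restores it to
+   the original scale of the input data.
+*/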
+ } else {
+ r__1 = (beta - alphr) / beta;
+ r__2 = -alphi / beta;
+ q__1.r = r__1, q__1.i = r__2;
+ tau->r = q__1.r, tau->i = q__1.i;
+ q__2.r = alpha->r - beta, q__2.i = alpha->i;
+ cladiv_(&q__1, &c_b56, &q__2);
+ alpha->r = q__1.r, alpha->i = q__1.i;
+ i__1 = *n - 1;
+ cscal_(&i__1, alpha, &x[1], incx);
+ alpha->r = beta, alpha->i = 0.f;
+ }
+ }
+
+ return 0;
+
+/* End of CLARFG */
+
+} /* clarfg_ */
+
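+/*
+   Usage sketch (illustrative placeholders, not part of LAPACK):
+   annihilate the entries of an m-vector a below its first element,
+   as one step of a QR factorization would:
+
+       integer one = 1, len;
+       complex tau;
+       clarfg_(&len, &a[0], &a[1], &one, &tau);
+       // a[0] now holds beta; a[1..m-1] holds v(2:m), with v(1) == 1
+*/
+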
+/* Subroutine */ int clarft_(char *direct, char *storev, integer *n, integer *
+ k, complex *v, integer *ldv, complex *tau, complex *t, integer *ldt)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__, j;
+ static complex vii;
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+ , complex *, integer *, complex *, integer *, complex *, complex *
+ , integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrmv_(char *, char *, char *, integer *,
+ complex *, integer *, complex *, integer *), clacgv_(integer *,
+ complex *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLARFT forms the triangular factor T of a complex block reflector H
+ of order n, which is defined as a product of k elementary reflectors.
+
+ If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular;
+
+ If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular.
+
+ If STOREV = 'C', the vector which defines the elementary reflector
+ H(i) is stored in the i-th column of the array V, and
+
+ H = I - V * T * V'
+
+ If STOREV = 'R', the vector which defines the elementary reflector
+ H(i) is stored in the i-th row of the array V, and
+
+ H = I - V' * T * V
+
+ Arguments
+ =========
+
+ DIRECT (input) CHARACTER*1
+ Specifies the order in which the elementary reflectors are
+ multiplied to form the block reflector:
+ = 'F': H = H(1) H(2) . . . H(k) (Forward)
+ = 'B': H = H(k) . . . H(2) H(1) (Backward)
+
+ STOREV (input) CHARACTER*1
+ Specifies how the vectors which define the elementary
+ reflectors are stored (see also Further Details):
+ = 'C': columnwise
+ = 'R': rowwise
+
+ N (input) INTEGER
+ The order of the block reflector H. N >= 0.
+
+ K (input) INTEGER
+ The order of the triangular factor T (= the number of
+ elementary reflectors). K >= 1.
+
+ V (input/output) COMPLEX array, dimension
+ (LDV,K) if STOREV = 'C'
+ (LDV,N) if STOREV = 'R'
+ The matrix V. See further details.
+
+ LDV (input) INTEGER
+ The leading dimension of the array V.
+ If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K.
+
+ TAU (input) COMPLEX array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i).
+
+ T (output) COMPLEX array, dimension (LDT,K)
+ The k by k triangular factor T of the block reflector.
+ If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is
+ lower triangular. The rest of the array is not used.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= K.
+
+ Further Details
+ ===============
+
+ The shape of the matrix V and the storage of the vectors which define
+ the H(i) is best illustrated by the following example with n = 5 and
+ k = 3. The elements equal to 1 are not stored; the corresponding
+ array elements are modified but restored on exit. The rest of the
+ array is not used.
+
+ DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R':
+
+ V = ( 1 ) V = ( 1 v1 v1 v1 v1 )
+ ( v1 1 ) ( 1 v2 v2 v2 )
+ ( v1 v2 1 ) ( 1 v3 v3 )
+ ( v1 v2 v3 )
+ ( v1 v2 v3 )
+
+ DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R':
+
+ V = ( v1 v2 v3 ) V = ( v1 v1 1 )
+ ( v1 v2 v3 ) ( v2 v2 v2 1 )
+ ( 1 v2 v3 ) ( v3 v3 v3 v3 1 )
+ ( 1 v3 )
+ ( 1 )
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
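+
+/*
+   Forward/columnwise recurrence computed below (summary sketch):
+
+       T(i,i)     = tau(i)
+       T(1:i-1,i) = -tau(i) * V(i:n,1:i-1)' * V(i:n,i)   (cgemv_)
+       T(1:i-1,i) = T(1:i-1,1:i-1) * T(1:i-1,i)          (ctrmv_)
+
+   so that after step i, H(1)...H(i) = I - V(:,1:i)*T(1:i,1:i)*V(:,1:i)'.
+   The backward and rowwise variants follow the same pattern.
+*/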
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --tau;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+
+ /* Function Body */
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (lsame_(direct, "F")) {
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ if (tau[i__2].r == 0.f && tau[i__2].i == 0.f) {
+
+/* H(i) = I */
+
+ i__2 = i__;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = j + i__ * t_dim1;
+ t[i__3].r = 0.f, t[i__3].i = 0.f;
+/* L10: */
+ }
+ } else {
+
+/* general case */
+
+ i__2 = i__ + i__ * v_dim1;
+ vii.r = v[i__2].r, vii.i = v[i__2].i;
+ i__2 = i__ + i__ * v_dim1;
+ v[i__2].r = 1.f, v[i__2].i = 0.f;
+ if (lsame_(storev, "C")) {
+
+/* T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ i__4 = i__;
+ q__1.r = -tau[i__4].r, q__1.i = -tau[i__4].i;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &q__1, &v[i__
+ + v_dim1], ldv, &v[i__ + i__ * v_dim1], &c__1, &
+ c_b55, &t[i__ * t_dim1 + 1], &c__1);
+ } else {
+
+/* T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */
+
+ if (i__ < *n) {
+ i__2 = *n - i__;
+ clacgv_(&i__2, &v[i__ + (i__ + 1) * v_dim1], ldv);
+ }
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ i__4 = i__;
+ q__1.r = -tau[i__4].r, q__1.i = -tau[i__4].i;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &v[i__ *
+ v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, &
+ c_b55, &t[i__ * t_dim1 + 1], &c__1);
+ if (i__ < *n) {
+ i__2 = *n - i__;
+ clacgv_(&i__2, &v[i__ + (i__ + 1) * v_dim1], ldv);
+ }
+ }
+ i__2 = i__ + i__ * v_dim1;
+ v[i__2].r = vii.r, v[i__2].i = vii.i;
+
+/* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */
+
+ i__2 = i__ - 1;
+ ctrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[
+ t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ + i__ * t_dim1;
+ i__3 = i__;
+ t[i__2].r = tau[i__3].r, t[i__2].i = tau[i__3].i;
+ }
+/* L20: */
+ }
+ } else {
+ for (i__ = *k; i__ >= 1; --i__) {
+ i__1 = i__;
+ if (tau[i__1].r == 0.f && tau[i__1].i == 0.f) {
+
+/* H(i) = I */
+
+ i__1 = *k;
+ for (j = i__; j <= i__1; ++j) {
+ i__2 = j + i__ * t_dim1;
+ t[i__2].r = 0.f, t[i__2].i = 0.f;
+/* L30: */
+ }
+ } else {
+
+/* general case */
+
+ if (i__ < *k) {
+ if (lsame_(storev, "C")) {
+ i__1 = *n - *k + i__ + i__ * v_dim1;
+ vii.r = v[i__1].r, vii.i = v[i__1].i;
+ i__1 = *n - *k + i__ + i__ * v_dim1;
+ v[i__1].r = 1.f, v[i__1].i = 0.f;
+
+/*
+ T(i+1:k,i) :=
+ - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i)
+*/
+
+ i__1 = *n - *k + i__;
+ i__2 = *k - i__;
+ i__3 = i__;
+ q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
+ cgemv_("Conjugate transpose", &i__1, &i__2, &q__1, &v[
+ (i__ + 1) * v_dim1 + 1], ldv, &v[i__ * v_dim1
+ + 1], &c__1, &c_b55, &t[i__ + 1 + i__ *
+ t_dim1], &c__1);
+ i__1 = *n - *k + i__ + i__ * v_dim1;
+ v[i__1].r = vii.r, v[i__1].i = vii.i;
+ } else {
+ i__1 = i__ + (*n - *k + i__) * v_dim1;
+ vii.r = v[i__1].r, vii.i = v[i__1].i;
+ i__1 = i__ + (*n - *k + i__) * v_dim1;
+ v[i__1].r = 1.f, v[i__1].i = 0.f;
+
+/*
+ T(i+1:k,i) :=
+ - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)'
+*/
+
+ i__1 = *n - *k + i__ - 1;
+ clacgv_(&i__1, &v[i__ + v_dim1], ldv);
+ i__1 = *k - i__;
+ i__2 = *n - *k + i__;
+ i__3 = i__;
+ q__1.r = -tau[i__3].r, q__1.i = -tau[i__3].i;
+ cgemv_("No transpose", &i__1, &i__2, &q__1, &v[i__ +
+ 1 + v_dim1], ldv, &v[i__ + v_dim1], ldv, &
+ c_b55, &t[i__ + 1 + i__ * t_dim1], &c__1);
+ i__1 = *n - *k + i__ - 1;
+ clacgv_(&i__1, &v[i__ + v_dim1], ldv);
+ i__1 = i__ + (*n - *k + i__) * v_dim1;
+ v[i__1].r = vii.r, v[i__1].i = vii.i;
+ }
+
+/* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+ i__1 = *k - i__;
+ ctrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__
+ + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ *
+ t_dim1], &c__1)
+ ;
+ }
+ i__1 = i__ + i__ * t_dim1;
+ i__2 = i__;
+ t[i__1].r = tau[i__2].r, t[i__1].i = tau[i__2].i;
+ }
+/* L40: */
+ }
+ }
+ return 0;
+
+/* End of CLARFT */
+
+} /* clarft_ */
+
+/* Subroutine */ int clarfx_(char *side, integer *m, integer *n, complex *v,
+ complex *tau, complex *c__, integer *ldc, complex *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8,
+ i__9, i__10, i__11;
+ complex q__1, q__2, q__3, q__4, q__5, q__6, q__7, q__8, q__9, q__10,
+ q__11, q__12, q__13, q__14, q__15, q__16, q__17, q__18, q__19;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer j;
+ static complex t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, t10, v10, sum;
+ extern /* Subroutine */ int cgerc_(integer *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+ , complex *, integer *, complex *, integer *, complex *, complex *
+ , integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLARFX applies a complex elementary reflector H to a complex m by n
+ matrix C, from either the left or the right. H is represented in the
+ form
+
+ H = I - tau * v * v'
+
+ where tau is a complex scalar and v is a complex vector.
+
+ If tau = 0, then H is taken to be the unit matrix.
+
+ This version uses inline code if H has order < 11.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': form H * C
+ = 'R': form C * H
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ V (input) COMPLEX array, dimension (M) if SIDE = 'L'
+ or (N) if SIDE = 'R'
+ The vector v in the representation of H.
+
+ TAU (input) COMPLEX
+ The value tau in the representation of H.
+
+ C (input/output) COMPLEX array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by the matrix H * C if SIDE = 'L',
+ or C * H if SIDE = 'R'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) COMPLEX array, dimension (N) if SIDE = 'L'
+ or (M) if SIDE = 'R'
+ WORK is not referenced if H has order < 11.
+
+ =====================================================================
+*/
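+
+/*
+   Strategy below (summary sketch): for reflector order up to 10 the
+   update is fully unrolled into scalar code; each column j computes
+
+       sum     = v' * C(:,j)                (conjugated dot product)
+       C(i,j) -= sum * tau * v(i)
+
+   Orders of 11 and above fall through to the general cgemv_/cgerc_
+   path.
+*/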
+
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (tau->r == 0.f && tau->i == 0.f) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form H * C, where H has order m. */
+
+ switch (*m) {
+ case 1: goto L10;
+ case 2: goto L30;
+ case 3: goto L50;
+ case 4: goto L70;
+ case 5: goto L90;
+ case 6: goto L110;
+ case 7: goto L130;
+ case 8: goto L150;
+ case 9: goto L170;
+ case 10: goto L190;
+ }
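+
+/* This switch replaces the Fortran computed GOTO: orders 1..10 jump
+   to the unrolled cases below; anything larger uses the BLAS path. */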
+
+/*
+ Code for general M
+
+ w := C'*v
+*/
+
+ cgemv_("Conjugate transpose", m, n, &c_b56, &c__[c_offset], ldc, &v[1]
+ , &c__1, &c_b55, &work[1], &c__1);
+
+/* C := C - tau * v * w' */
+
+ q__1.r = -tau->r, q__1.i = -tau->i;
+ cgerc_(m, n, &q__1, &v[1], &c__1, &work[1], &c__1, &c__[c_offset],
+ ldc);
+ goto L410;
+L10:
+
+/* Special code for 1 x 1 Householder */
+
+ q__3.r = tau->r * v[1].r - tau->i * v[1].i, q__3.i = tau->r * v[1].i
+ + tau->i * v[1].r;
+ r_cnjg(&q__4, &v[1]);
+ q__2.r = q__3.r * q__4.r - q__3.i * q__4.i, q__2.i = q__3.r * q__4.i
+ + q__3.i * q__4.r;
+ q__1.r = 1.f - q__2.r, q__1.i = 0.f - q__2.i;
+ t1.r = q__1.r, t1.i = q__1.i;
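+
+/* t1 = 1 - tau*v(1)*conj(v(1)) is the 1-by-1 reflector itself, so
+   each column of C is simply scaled by it. */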
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__1.r = t1.r * c__[i__3].r - t1.i * c__[i__3].i, q__1.i = t1.r *
+ c__[i__3].i + t1.i * c__[i__3].r;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L20: */
+ }
+ goto L410;
+L30:
+
+/* Special code for 2 x 2 Householder */
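+
+/*
+   Pattern used here and in the larger unrolled cases: precompute
+   vi = conj(v(i)) and ti = tau*v(i); then for each column j,
+   sum = Sum_i vi*C(i,j) = v'*C(:,j) and C(i,j) -= sum*ti.
+*/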
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__2.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__2.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__3.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__3.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L40: */
+ }
+ goto L410;
+L50:
+
+/* Special code for 3 x 3 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__3.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__3.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__4.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__4.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__2.r = q__3.r + q__4.r, q__2.i = q__3.i + q__4.i;
+ i__4 = j * c_dim1 + 3;
+ q__5.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__5.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L60: */
+ }
+ goto L410;
+L70:
+
+/* Special code for 4 x 4 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__4.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__4.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__5.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__5.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__3.r = q__4.r + q__5.r, q__3.i = q__4.i + q__5.i;
+ i__4 = j * c_dim1 + 3;
+ q__6.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__6.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__2.r = q__3.r + q__6.r, q__2.i = q__3.i + q__6.i;
+ i__5 = j * c_dim1 + 4;
+ q__7.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__7.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__1.r = q__2.r + q__7.r, q__1.i = q__2.i + q__7.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L80: */
+ }
+ goto L410;
+L90:
+
+/* Special code for 5 x 5 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__5.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__5.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__6.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__6.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__4.r = q__5.r + q__6.r, q__4.i = q__5.i + q__6.i;
+ i__4 = j * c_dim1 + 3;
+ q__7.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__7.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__3.r = q__4.r + q__7.r, q__3.i = q__4.i + q__7.i;
+ i__5 = j * c_dim1 + 4;
+ q__8.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__8.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__2.r = q__3.r + q__8.r, q__2.i = q__3.i + q__8.i;
+ i__6 = j * c_dim1 + 5;
+ q__9.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__9.i = v5.r *
+ c__[i__6].i + v5.i * c__[i__6].r;
+ q__1.r = q__2.r + q__9.r, q__1.i = q__2.i + q__9.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L100: */
+ }
+ goto L410;
+L110:
+
+/* Special code for 6 x 6 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ r_cnjg(&q__1, &v[6]);
+ v6.r = q__1.r, v6.i = q__1.i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__6.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__6.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__7.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__7.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__5.r = q__6.r + q__7.r, q__5.i = q__6.i + q__7.i;
+ i__4 = j * c_dim1 + 3;
+ q__8.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__8.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__4.r = q__5.r + q__8.r, q__4.i = q__5.i + q__8.i;
+ i__5 = j * c_dim1 + 4;
+ q__9.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__9.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__3.r = q__4.r + q__9.r, q__3.i = q__4.i + q__9.i;
+ i__6 = j * c_dim1 + 5;
+ q__10.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__10.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__2.r = q__3.r + q__10.r, q__2.i = q__3.i + q__10.i;
+ i__7 = j * c_dim1 + 6;
+ q__11.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__11.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__1.r = q__2.r + q__11.r, q__1.i = q__2.i + q__11.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L120: */
+ }
+ goto L410;
+L130:
+
+/* Special code for 7 x 7 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ r_cnjg(&q__1, &v[6]);
+ v6.r = q__1.r, v6.i = q__1.i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ r_cnjg(&q__1, &v[7]);
+ v7.r = q__1.r, v7.i = q__1.i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__7.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__7.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__8.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__8.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__6.r = q__7.r + q__8.r, q__6.i = q__7.i + q__8.i;
+ i__4 = j * c_dim1 + 3;
+ q__9.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__9.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__5.r = q__6.r + q__9.r, q__5.i = q__6.i + q__9.i;
+ i__5 = j * c_dim1 + 4;
+ q__10.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__10.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__4.r = q__5.r + q__10.r, q__4.i = q__5.i + q__10.i;
+ i__6 = j * c_dim1 + 5;
+ q__11.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__11.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__3.r = q__4.r + q__11.r, q__3.i = q__4.i + q__11.i;
+ i__7 = j * c_dim1 + 6;
+ q__12.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__12.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__2.r = q__3.r + q__12.r, q__2.i = q__3.i + q__12.i;
+ i__8 = j * c_dim1 + 7;
+ q__13.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__13.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__1.r = q__2.r + q__13.r, q__1.i = q__2.i + q__13.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L140: */
+ }
+ goto L410;
+L150:
+
+/* Special code for 8 x 8 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ r_cnjg(&q__1, &v[6]);
+ v6.r = q__1.r, v6.i = q__1.i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ r_cnjg(&q__1, &v[7]);
+ v7.r = q__1.r, v7.i = q__1.i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ r_cnjg(&q__1, &v[8]);
+ v8.r = q__1.r, v8.i = q__1.i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__8.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__8.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__9.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__9.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__7.r = q__8.r + q__9.r, q__7.i = q__8.i + q__9.i;
+ i__4 = j * c_dim1 + 3;
+ q__10.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__10.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__6.r = q__7.r + q__10.r, q__6.i = q__7.i + q__10.i;
+ i__5 = j * c_dim1 + 4;
+ q__11.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__11.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__5.r = q__6.r + q__11.r, q__5.i = q__6.i + q__11.i;
+ i__6 = j * c_dim1 + 5;
+ q__12.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__12.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__4.r = q__5.r + q__12.r, q__4.i = q__5.i + q__12.i;
+ i__7 = j * c_dim1 + 6;
+ q__13.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__13.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__3.r = q__4.r + q__13.r, q__3.i = q__4.i + q__13.i;
+ i__8 = j * c_dim1 + 7;
+ q__14.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__14.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__2.r = q__3.r + q__14.r, q__2.i = q__3.i + q__14.i;
+ i__9 = j * c_dim1 + 8;
+ q__15.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__15.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__1.r = q__2.r + q__15.r, q__1.i = q__2.i + q__15.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L160: */
+ }
+ goto L410;
+L170:
+
+/* Special code for 9 x 9 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ r_cnjg(&q__1, &v[6]);
+ v6.r = q__1.r, v6.i = q__1.i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ r_cnjg(&q__1, &v[7]);
+ v7.r = q__1.r, v7.i = q__1.i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ r_cnjg(&q__1, &v[8]);
+ v8.r = q__1.r, v8.i = q__1.i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ r_cnjg(&q__1, &v[9]);
+ v9.r = q__1.r, v9.i = q__1.i;
+ r_cnjg(&q__2, &v9);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t9.r = q__1.r, t9.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__9.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__9.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__10.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__10.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ q__8.r = q__9.r + q__10.r, q__8.i = q__9.i + q__10.i;
+ i__4 = j * c_dim1 + 3;
+ q__11.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__11.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__7.r = q__8.r + q__11.r, q__7.i = q__8.i + q__11.i;
+ i__5 = j * c_dim1 + 4;
+ q__12.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__12.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__6.r = q__7.r + q__12.r, q__6.i = q__7.i + q__12.i;
+ i__6 = j * c_dim1 + 5;
+ q__13.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__13.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__5.r = q__6.r + q__13.r, q__5.i = q__6.i + q__13.i;
+ i__7 = j * c_dim1 + 6;
+ q__14.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__14.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__4.r = q__5.r + q__14.r, q__4.i = q__5.i + q__14.i;
+ i__8 = j * c_dim1 + 7;
+ q__15.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__15.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__3.r = q__4.r + q__15.r, q__3.i = q__4.i + q__15.i;
+ i__9 = j * c_dim1 + 8;
+ q__16.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__16.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__2.r = q__3.r + q__16.r, q__2.i = q__3.i + q__16.i;
+ i__10 = j * c_dim1 + 9;
+ q__17.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, q__17.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ q__1.r = q__2.r + q__17.r, q__1.i = q__2.i + q__17.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 9;
+ i__3 = j * c_dim1 + 9;
+ q__2.r = sum.r * t9.r - sum.i * t9.i, q__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L180: */
+ }
+ goto L410;
+L190:
+
+/* Special code for 10 x 10 Householder */
+
+ r_cnjg(&q__1, &v[1]);
+ v1.r = q__1.r, v1.i = q__1.i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ r_cnjg(&q__1, &v[2]);
+ v2.r = q__1.r, v2.i = q__1.i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ r_cnjg(&q__1, &v[3]);
+ v3.r = q__1.r, v3.i = q__1.i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ r_cnjg(&q__1, &v[4]);
+ v4.r = q__1.r, v4.i = q__1.i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ r_cnjg(&q__1, &v[5]);
+ v5.r = q__1.r, v5.i = q__1.i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ r_cnjg(&q__1, &v[6]);
+ v6.r = q__1.r, v6.i = q__1.i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ r_cnjg(&q__1, &v[7]);
+ v7.r = q__1.r, v7.i = q__1.i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ r_cnjg(&q__1, &v[8]);
+ v8.r = q__1.r, v8.i = q__1.i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ r_cnjg(&q__1, &v[9]);
+ v9.r = q__1.r, v9.i = q__1.i;
+ r_cnjg(&q__2, &v9);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t9.r = q__1.r, t9.i = q__1.i;
+ r_cnjg(&q__1, &v[10]);
+ v10.r = q__1.r, v10.i = q__1.i;
+ r_cnjg(&q__2, &v10);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t10.r = q__1.r, t10.i = q__1.i;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j * c_dim1 + 1;
+ q__10.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__10.i = v1.r
+ * c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j * c_dim1 + 2;
+ q__11.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__11.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ q__9.r = q__10.r + q__11.r, q__9.i = q__10.i + q__11.i;
+ i__4 = j * c_dim1 + 3;
+ q__12.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__12.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__8.r = q__9.r + q__12.r, q__8.i = q__9.i + q__12.i;
+ i__5 = j * c_dim1 + 4;
+ q__13.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__13.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__7.r = q__8.r + q__13.r, q__7.i = q__8.i + q__13.i;
+ i__6 = j * c_dim1 + 5;
+ q__14.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__14.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__6.r = q__7.r + q__14.r, q__6.i = q__7.i + q__14.i;
+ i__7 = j * c_dim1 + 6;
+ q__15.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__15.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__5.r = q__6.r + q__15.r, q__5.i = q__6.i + q__15.i;
+ i__8 = j * c_dim1 + 7;
+ q__16.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__16.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__4.r = q__5.r + q__16.r, q__4.i = q__5.i + q__16.i;
+ i__9 = j * c_dim1 + 8;
+ q__17.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__17.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__3.r = q__4.r + q__17.r, q__3.i = q__4.i + q__17.i;
+ i__10 = j * c_dim1 + 9;
+ q__18.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, q__18.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ q__2.r = q__3.r + q__18.r, q__2.i = q__3.i + q__18.i;
+ i__11 = j * c_dim1 + 10;
+ q__19.r = v10.r * c__[i__11].r - v10.i * c__[i__11].i, q__19.i =
+ v10.r * c__[i__11].i + v10.i * c__[i__11].r;
+ q__1.r = q__2.r + q__19.r, q__1.i = q__2.i + q__19.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j * c_dim1 + 1;
+ i__3 = j * c_dim1 + 1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 2;
+ i__3 = j * c_dim1 + 2;
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 3;
+ i__3 = j * c_dim1 + 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 4;
+ i__3 = j * c_dim1 + 4;
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 5;
+ i__3 = j * c_dim1 + 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 6;
+ i__3 = j * c_dim1 + 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 7;
+ i__3 = j * c_dim1 + 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 8;
+ i__3 = j * c_dim1 + 8;
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 9;
+ i__3 = j * c_dim1 + 9;
+ q__2.r = sum.r * t9.r - sum.i * t9.i, q__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j * c_dim1 + 10;
+ i__3 = j * c_dim1 + 10;
+ q__2.r = sum.r * t10.r - sum.i * t10.i, q__2.i = sum.r * t10.i +
+ sum.i * t10.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L200: */
+ }
+ goto L410;
+ } else {
+
+/* Form C * H, where H has order n. */
+
+ switch (*n) {
+ case 1: goto L210;
+ case 2: goto L230;
+ case 3: goto L250;
+ case 4: goto L270;
+ case 5: goto L290;
+ case 6: goto L310;
+ case 7: goto L330;
+ case 8: goto L350;
+ case 9: goto L370;
+ case 10: goto L390;
+ }
+
+/*
+ Code for general N
+
+ w := C * v
+*/
+
+ cgemv_("No transpose", m, n, &c_b56, &c__[c_offset], ldc, &v[1], &
+ c__1, &c_b55, &work[1], &c__1);
+
+/* C := C - tau * w * v' */
+
+ q__1.r = -tau->r, q__1.i = -tau->i;
+ cgerc_(m, n, &q__1, &work[1], &c__1, &v[1], &c__1, &c__[c_offset],
+ ldc);
+ goto L410;
+L210:
+
+/* Special code for 1 x 1 Householder */
+
+ q__3.r = tau->r * v[1].r - tau->i * v[1].i, q__3.i = tau->r * v[1].i
+ + tau->i * v[1].r;
+ r_cnjg(&q__4, &v[1]);
+ q__2.r = q__3.r * q__4.r - q__3.i * q__4.i, q__2.i = q__3.r * q__4.i
+ + q__3.i * q__4.r;
+ q__1.r = 1.f - q__2.r, q__1.i = 0.f - q__2.i;
+ t1.r = q__1.r, t1.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__1.r = t1.r * c__[i__3].r - t1.i * c__[i__3].i, q__1.i = t1.r *
+ c__[i__3].i + t1.i * c__[i__3].r;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L220: */
+ }
+ goto L410;
+L230:
+
+/* Special code for 2 x 2 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__2.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__2.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ q__3.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__3.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L240: */
+ }
+ goto L410;
+L250:
+
+/* Special code for 3 x 3 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__3.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__3.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ q__4.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__4.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__2.r = q__3.r + q__4.r, q__2.i = q__3.i + q__4.i;
+ i__4 = j + c_dim1 * 3;
+ q__5.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__5.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L260: */
+ }
+ goto L410;
+L270:
+
+/* Special code for 4 x 4 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__4.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__4.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ q__5.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__5.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__3.r = q__4.r + q__5.r, q__3.i = q__4.i + q__5.i;
+ i__4 = j + c_dim1 * 3;
+ q__6.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__6.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__2.r = q__3.r + q__6.r, q__2.i = q__3.i + q__6.i;
+ i__5 = j + ((c_dim1) << (2));
+ q__7.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__7.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__1.r = q__2.r + q__7.r, q__1.i = q__2.i + q__7.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L280: */
+ }
+ goto L410;
+L290:
+
+/* Special code for 5 x 5 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__5.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__5.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ q__6.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__6.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__4.r = q__5.r + q__6.r, q__4.i = q__5.i + q__6.i;
+ i__4 = j + c_dim1 * 3;
+ q__7.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__7.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__3.r = q__4.r + q__7.r, q__3.i = q__4.i + q__7.i;
+ i__5 = j + ((c_dim1) << (2));
+ q__8.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__8.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__2.r = q__3.r + q__8.r, q__2.i = q__3.i + q__8.i;
+ i__6 = j + c_dim1 * 5;
+ q__9.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__9.i = v5.r *
+ c__[i__6].i + v5.i * c__[i__6].r;
+ q__1.r = q__2.r + q__9.r, q__1.i = q__2.i + q__9.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L300: */
+ }
+ goto L410;
+L310:
+
+/* Special code for 6 x 6 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__6.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__6.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ q__7.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__7.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__5.r = q__6.r + q__7.r, q__5.i = q__6.i + q__7.i;
+ i__4 = j + c_dim1 * 3;
+ q__8.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__8.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__4.r = q__5.r + q__8.r, q__4.i = q__5.i + q__8.i;
+ i__5 = j + ((c_dim1) << (2));
+ q__9.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__9.i = v4.r *
+ c__[i__5].i + v4.i * c__[i__5].r;
+ q__3.r = q__4.r + q__9.r, q__3.i = q__4.i + q__9.i;
+ i__6 = j + c_dim1 * 5;
+ q__10.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__10.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__2.r = q__3.r + q__10.r, q__2.i = q__3.i + q__10.i;
+ i__7 = j + c_dim1 * 6;
+ q__11.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__11.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__1.r = q__2.r + q__11.r, q__1.i = q__2.i + q__11.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L320: */
+ }
+ goto L410;
+L330:
+
+/* Special code for 7 x 7 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__7.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__7.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ q__8.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__8.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__6.r = q__7.r + q__8.r, q__6.i = q__7.i + q__8.i;
+ i__4 = j + c_dim1 * 3;
+ q__9.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__9.i = v3.r *
+ c__[i__4].i + v3.i * c__[i__4].r;
+ q__5.r = q__6.r + q__9.r, q__5.i = q__6.i + q__9.i;
+ i__5 = j + ((c_dim1) << (2));
+ q__10.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__10.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__4.r = q__5.r + q__10.r, q__4.i = q__5.i + q__10.i;
+ i__6 = j + c_dim1 * 5;
+ q__11.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__11.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__3.r = q__4.r + q__11.r, q__3.i = q__4.i + q__11.i;
+ i__7 = j + c_dim1 * 6;
+ q__12.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__12.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__2.r = q__3.r + q__12.r, q__2.i = q__3.i + q__12.i;
+ i__8 = j + c_dim1 * 7;
+ q__13.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__13.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__1.r = q__2.r + q__13.r, q__1.i = q__2.i + q__13.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L340: */
+ }
+ goto L410;
+L350:
+
+/* Special code for 8 x 8 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__8.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__8.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ q__9.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__9.i = v2.r *
+ c__[i__3].i + v2.i * c__[i__3].r;
+ q__7.r = q__8.r + q__9.r, q__7.i = q__8.i + q__9.i;
+ i__4 = j + c_dim1 * 3;
+ q__10.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__10.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__6.r = q__7.r + q__10.r, q__6.i = q__7.i + q__10.i;
+ i__5 = j + ((c_dim1) << (2));
+ q__11.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__11.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__5.r = q__6.r + q__11.r, q__5.i = q__6.i + q__11.i;
+ i__6 = j + c_dim1 * 5;
+ q__12.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__12.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__4.r = q__5.r + q__12.r, q__4.i = q__5.i + q__12.i;
+ i__7 = j + c_dim1 * 6;
+ q__13.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__13.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__3.r = q__4.r + q__13.r, q__3.i = q__4.i + q__13.i;
+ i__8 = j + c_dim1 * 7;
+ q__14.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__14.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__2.r = q__3.r + q__14.r, q__2.i = q__3.i + q__14.i;
+ i__9 = j + ((c_dim1) << (3));
+ q__15.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__15.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__1.r = q__2.r + q__15.r, q__1.i = q__2.i + q__15.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (3));
+ i__3 = j + ((c_dim1) << (3));
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L360: */
+ }
+ goto L410;
+L370:
+
+/* Special code for 9 x 9 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ v9.r = v[9].r, v9.i = v[9].i;
+ r_cnjg(&q__2, &v9);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t9.r = q__1.r, t9.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__9.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__9.i = v1.r *
+ c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ q__10.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__10.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ q__8.r = q__9.r + q__10.r, q__8.i = q__9.i + q__10.i;
+ i__4 = j + c_dim1 * 3;
+ q__11.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__11.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__7.r = q__8.r + q__11.r, q__7.i = q__8.i + q__11.i;
+ i__5 = j + ((c_dim1) << (2));
+ q__12.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__12.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__6.r = q__7.r + q__12.r, q__6.i = q__7.i + q__12.i;
+ i__6 = j + c_dim1 * 5;
+ q__13.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__13.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__5.r = q__6.r + q__13.r, q__5.i = q__6.i + q__13.i;
+ i__7 = j + c_dim1 * 6;
+ q__14.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__14.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__4.r = q__5.r + q__14.r, q__4.i = q__5.i + q__14.i;
+ i__8 = j + c_dim1 * 7;
+ q__15.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__15.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__3.r = q__4.r + q__15.r, q__3.i = q__4.i + q__15.i;
+ i__9 = j + ((c_dim1) << (3));
+ q__16.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__16.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__2.r = q__3.r + q__16.r, q__2.i = q__3.i + q__16.i;
+ i__10 = j + c_dim1 * 9;
+ q__17.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, q__17.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ q__1.r = q__2.r + q__17.r, q__1.i = q__2.i + q__17.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (3));
+ i__3 = j + ((c_dim1) << (3));
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 9;
+ i__3 = j + c_dim1 * 9;
+ q__2.r = sum.r * t9.r - sum.i * t9.i, q__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L380: */
+ }
+ goto L410;
+L390:
+
+/* Special code for 10 x 10 Householder */
+
+ v1.r = v[1].r, v1.i = v[1].i;
+ r_cnjg(&q__2, &v1);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t1.r = q__1.r, t1.i = q__1.i;
+ v2.r = v[2].r, v2.i = v[2].i;
+ r_cnjg(&q__2, &v2);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t2.r = q__1.r, t2.i = q__1.i;
+ v3.r = v[3].r, v3.i = v[3].i;
+ r_cnjg(&q__2, &v3);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t3.r = q__1.r, t3.i = q__1.i;
+ v4.r = v[4].r, v4.i = v[4].i;
+ r_cnjg(&q__2, &v4);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t4.r = q__1.r, t4.i = q__1.i;
+ v5.r = v[5].r, v5.i = v[5].i;
+ r_cnjg(&q__2, &v5);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t5.r = q__1.r, t5.i = q__1.i;
+ v6.r = v[6].r, v6.i = v[6].i;
+ r_cnjg(&q__2, &v6);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t6.r = q__1.r, t6.i = q__1.i;
+ v7.r = v[7].r, v7.i = v[7].i;
+ r_cnjg(&q__2, &v7);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t7.r = q__1.r, t7.i = q__1.i;
+ v8.r = v[8].r, v8.i = v[8].i;
+ r_cnjg(&q__2, &v8);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t8.r = q__1.r, t8.i = q__1.i;
+ v9.r = v[9].r, v9.i = v[9].i;
+ r_cnjg(&q__2, &v9);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t9.r = q__1.r, t9.i = q__1.i;
+ v10.r = v[10].r, v10.i = v[10].i;
+ r_cnjg(&q__2, &v10);
+ q__1.r = tau->r * q__2.r - tau->i * q__2.i, q__1.i = tau->r * q__2.i
+ + tau->i * q__2.r;
+ t10.r = q__1.r, t10.i = q__1.i;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j + c_dim1;
+ q__10.r = v1.r * c__[i__2].r - v1.i * c__[i__2].i, q__10.i = v1.r
+ * c__[i__2].i + v1.i * c__[i__2].r;
+ i__3 = j + ((c_dim1) << (1));
+ q__11.r = v2.r * c__[i__3].r - v2.i * c__[i__3].i, q__11.i = v2.r
+ * c__[i__3].i + v2.i * c__[i__3].r;
+ q__9.r = q__10.r + q__11.r, q__9.i = q__10.i + q__11.i;
+ i__4 = j + c_dim1 * 3;
+ q__12.r = v3.r * c__[i__4].r - v3.i * c__[i__4].i, q__12.i = v3.r
+ * c__[i__4].i + v3.i * c__[i__4].r;
+ q__8.r = q__9.r + q__12.r, q__8.i = q__9.i + q__12.i;
+ i__5 = j + ((c_dim1) << (2));
+ q__13.r = v4.r * c__[i__5].r - v4.i * c__[i__5].i, q__13.i = v4.r
+ * c__[i__5].i + v4.i * c__[i__5].r;
+ q__7.r = q__8.r + q__13.r, q__7.i = q__8.i + q__13.i;
+ i__6 = j + c_dim1 * 5;
+ q__14.r = v5.r * c__[i__6].r - v5.i * c__[i__6].i, q__14.i = v5.r
+ * c__[i__6].i + v5.i * c__[i__6].r;
+ q__6.r = q__7.r + q__14.r, q__6.i = q__7.i + q__14.i;
+ i__7 = j + c_dim1 * 6;
+ q__15.r = v6.r * c__[i__7].r - v6.i * c__[i__7].i, q__15.i = v6.r
+ * c__[i__7].i + v6.i * c__[i__7].r;
+ q__5.r = q__6.r + q__15.r, q__5.i = q__6.i + q__15.i;
+ i__8 = j + c_dim1 * 7;
+ q__16.r = v7.r * c__[i__8].r - v7.i * c__[i__8].i, q__16.i = v7.r
+ * c__[i__8].i + v7.i * c__[i__8].r;
+ q__4.r = q__5.r + q__16.r, q__4.i = q__5.i + q__16.i;
+ i__9 = j + ((c_dim1) << (3));
+ q__17.r = v8.r * c__[i__9].r - v8.i * c__[i__9].i, q__17.i = v8.r
+ * c__[i__9].i + v8.i * c__[i__9].r;
+ q__3.r = q__4.r + q__17.r, q__3.i = q__4.i + q__17.i;
+ i__10 = j + c_dim1 * 9;
+ q__18.r = v9.r * c__[i__10].r - v9.i * c__[i__10].i, q__18.i =
+ v9.r * c__[i__10].i + v9.i * c__[i__10].r;
+ q__2.r = q__3.r + q__18.r, q__2.i = q__3.i + q__18.i;
+ i__11 = j + c_dim1 * 10;
+ q__19.r = v10.r * c__[i__11].r - v10.i * c__[i__11].i, q__19.i =
+ v10.r * c__[i__11].i + v10.i * c__[i__11].r;
+ q__1.r = q__2.r + q__19.r, q__1.i = q__2.i + q__19.i;
+ sum.r = q__1.r, sum.i = q__1.i;
+ i__2 = j + c_dim1;
+ i__3 = j + c_dim1;
+ q__2.r = sum.r * t1.r - sum.i * t1.i, q__2.i = sum.r * t1.i +
+ sum.i * t1.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (1));
+ i__3 = j + ((c_dim1) << (1));
+ q__2.r = sum.r * t2.r - sum.i * t2.i, q__2.i = sum.r * t2.i +
+ sum.i * t2.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 3;
+ i__3 = j + c_dim1 * 3;
+ q__2.r = sum.r * t3.r - sum.i * t3.i, q__2.i = sum.r * t3.i +
+ sum.i * t3.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (2));
+ i__3 = j + ((c_dim1) << (2));
+ q__2.r = sum.r * t4.r - sum.i * t4.i, q__2.i = sum.r * t4.i +
+ sum.i * t4.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 5;
+ i__3 = j + c_dim1 * 5;
+ q__2.r = sum.r * t5.r - sum.i * t5.i, q__2.i = sum.r * t5.i +
+ sum.i * t5.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 6;
+ i__3 = j + c_dim1 * 6;
+ q__2.r = sum.r * t6.r - sum.i * t6.i, q__2.i = sum.r * t6.i +
+ sum.i * t6.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 7;
+ i__3 = j + c_dim1 * 7;
+ q__2.r = sum.r * t7.r - sum.i * t7.i, q__2.i = sum.r * t7.i +
+ sum.i * t7.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + ((c_dim1) << (3));
+ i__3 = j + ((c_dim1) << (3));
+ q__2.r = sum.r * t8.r - sum.i * t8.i, q__2.i = sum.r * t8.i +
+ sum.i * t8.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 9;
+ i__3 = j + c_dim1 * 9;
+ q__2.r = sum.r * t9.r - sum.i * t9.i, q__2.i = sum.r * t9.i +
+ sum.i * t9.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+ i__2 = j + c_dim1 * 10;
+ i__3 = j + c_dim1 * 10;
+ q__2.r = sum.r * t10.r - sum.i * t10.i, q__2.i = sum.r * t10.i +
+ sum.i * t10.r;
+ q__1.r = c__[i__3].r - q__2.r, q__1.i = c__[i__3].i - q__2.i;
+ c__[i__2].r = q__1.r, c__[i__2].i = q__1.i;
+/* L400: */
+ }
+ goto L410;
+ }
+L410:
+ return 0;
+
+/* End of CLARFX */
+
+} /* clarfx_ */
+
+/* Subroutine */ int clascl_(char *type__, integer *kl, integer *ku, real *
+ cfrom, real *cto, integer *m, integer *n, complex *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__, j, k1, k2, k3, k4;
+ static real mul, cto1;
+ static logical done;
+ static real ctoc;
+ extern logical lsame_(char *, char *);
+ static integer itype;
+ static real cfrom1;
+ extern doublereal slamch_(char *);
+ static real cfromc;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static real bignum, smlnum;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ CLASCL multiplies the M by N complex matrix A by the real scalar
+ CTO/CFROM. This is done without over/underflow as long as the final
+ result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
+ A may be full, upper triangular, lower triangular, upper Hessenberg,
+ or banded.
+
+ Arguments
+ =========
+
+ TYPE (input) CHARACTER*1
+          TYPE indicates the storage type of the input matrix.
+ = 'G': A is a full matrix.
+ = 'L': A is a lower triangular matrix.
+ = 'U': A is an upper triangular matrix.
+ = 'H': A is an upper Hessenberg matrix.
+          = 'B':  A is a symmetric band matrix with lower bandwidth KL
+                  and upper bandwidth KU and with only the lower
+                  half stored.
+          = 'Q':  A is a symmetric band matrix with lower bandwidth KL
+                  and upper bandwidth KU and with only the upper
+                  half stored.
+ = 'Z': A is a band matrix with lower bandwidth KL and upper
+ bandwidth KU.
+
+ KL (input) INTEGER
+ The lower bandwidth of A. Referenced only if TYPE = 'B',
+ 'Q' or 'Z'.
+
+ KU (input) INTEGER
+ The upper bandwidth of A. Referenced only if TYPE = 'B',
+ 'Q' or 'Z'.
+
+ CFROM (input) REAL
+ CTO (input) REAL
+ The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
+ without over/underflow if the final result CTO*A(I,J)/CFROM
+ can be represented without over/underflow. CFROM must be
+ nonzero.
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+  A       (input/output) COMPLEX array, dimension (LDA,N)
+ The matrix to be multiplied by CTO/CFROM. See TYPE for the
+ storage type.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ INFO (output) INTEGER
+ 0 - successful exit
+ <0 - if INFO = -i, the i-th argument had an illegal value.
+
+ =====================================================================
+
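+   Usage sketch (illustrative caller-side code, not part of this
+   routine; m, n, lda, cfrom, cto and a are assumed to be declared by
+   the caller, with cfrom nonzero): scale a full m by n complex matrix
+   by cto/cfrom,
+
+       integer kl = 0, ku = 0, info;
+       clascl_("G", &kl, &ku, &cfrom, &cto, &m, &n, a, &lda, &info);
+
+   On success info is set to 0; KL and KU are referenced only for the
+   banded storage types.
+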
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(type__, "G")) {
+ itype = 0;
+ } else if (lsame_(type__, "L")) {
+ itype = 1;
+ } else if (lsame_(type__, "U")) {
+ itype = 2;
+ } else if (lsame_(type__, "H")) {
+ itype = 3;
+ } else if (lsame_(type__, "B")) {
+ itype = 4;
+ } else if (lsame_(type__, "Q")) {
+ itype = 5;
+ } else if (lsame_(type__, "Z")) {
+ itype = 6;
+ } else {
+ itype = -1;
+ }
+
+ if (itype == -1) {
+ *info = -1;
+ } else if (*cfrom == 0.f) {
+ *info = -4;
+ } else if (*m < 0) {
+ *info = -6;
+ } else if (((*n < 0) || (itype == 4 && *n != *m)) || (itype == 5 && *n !=
+ *m)) {
+ *info = -7;
+ } else if (itype <= 3 && *lda < max(1,*m)) {
+ *info = -9;
+ } else if (itype >= 4) {
+/* Computing MAX */
+ i__1 = *m - 1;
+ if ((*kl < 0) || (*kl > max(i__1,0))) {
+ *info = -2;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *n - 1;
+ if (((*ku < 0) || (*ku > max(i__1,0))) || (((itype == 4) || (
+ itype == 5)) && *kl != *ku)) {
+ *info = -3;
+ } else if (((itype == 4 && *lda < *kl + 1) || (itype == 5 && *lda
+ < *ku + 1)) || (itype == 6 && *lda < ((*kl) << (1)) + *ku
+ + 1)) {
+ *info = -9;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLASCL", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*n == 0) || (*m == 0)) {
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+
+ cfromc = *cfrom;
+ ctoc = *cto;
+
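+/*
+   Apply the scale factor CTO/CFROM in steps: whenever the full ratio
+   could over- or underflow, multiply by SMLNUM or BIGNUM instead,
+   fold that step into the remaining CFROMC or CTOC, and loop back to
+   L10 until the leftover ratio is safely representable.
+*/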
+L10:
+ cfrom1 = cfromc * smlnum;
+ cto1 = ctoc / bignum;
+ if (dabs(cfrom1) > dabs(ctoc) && ctoc != 0.f) {
+ mul = smlnum;
+ done = FALSE_;
+ cfromc = cfrom1;
+ } else if (dabs(cto1) > dabs(cfromc)) {
+ mul = bignum;
+ done = FALSE_;
+ ctoc = cto1;
+ } else {
+ mul = ctoc / cfromc;
+ done = TRUE_;
+ }
+
+ if (itype == 0) {
+
+/* Full matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (itype == 1) {
+
+/* Lower triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L40: */
+ }
+/* L50: */
+ }
+
+ } else if (itype == 2) {
+
+/* Upper triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L60: */
+ }
+/* L70: */
+ }
+
+ } else if (itype == 3) {
+
+/* Upper Hessenberg matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j + 1;
+ i__2 = min(i__3,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (itype == 4) {
+
+/* Lower half of a symmetric band matrix */
+
+ k3 = *kl + 1;
+ k4 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = k3, i__4 = k4 - j;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L100: */
+ }
+/* L110: */
+ }
+
+ } else if (itype == 5) {
+
+/* Upper half of a symmetric band matrix */
+
+ k1 = *ku + 2;
+ k3 = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = k1 - j;
+ i__3 = k3;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L120: */
+ }
+/* L130: */
+ }
+
+ } else if (itype == 6) {
+
+/* Band matrix */
+
+ k1 = *kl + *ku + 2;
+ k2 = *kl + 1;
+ k3 = ((*kl) << (1)) + *ku + 1;
+ k4 = *kl + *ku + 1 + *m;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = k1 - j;
+/* Computing MIN */
+ i__4 = k3, i__5 = k4 - j;
+ i__2 = min(i__4,i__5);
+ for (i__ = max(i__3,k2); i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + j * a_dim1;
+ q__1.r = mul * a[i__4].r, q__1.i = mul * a[i__4].i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L140: */
+ }
+/* L150: */
+ }
+
+ }
+
+ if (! done) {
+ goto L10;
+ }
+
+ return 0;
+
+/* End of CLASCL */
+
+} /* clascl_ */
+
+/* Subroutine */ int claset_(char *uplo, integer *m, integer *n, complex *
+ alpha, complex *beta, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ CLASET initializes a 2-D array A to BETA on the diagonal and
+ ALPHA on the offdiagonals.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies the part of the matrix A to be set.
+ = 'U': Upper triangular part is set. The lower triangle
+ is unchanged.
+ = 'L': Lower triangular part is set. The upper triangle
+ is unchanged.
+ Otherwise: All of the matrix A is set.
+
+ M (input) INTEGER
+ On entry, M specifies the number of rows of A.
+
+ N (input) INTEGER
+ On entry, N specifies the number of columns of A.
+
+ ALPHA (input) COMPLEX
+ All the offdiagonal array elements are set to ALPHA.
+
+ BETA (input) COMPLEX
+ All the diagonal array elements are set to BETA.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the m by n matrix A.
+ On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n, i.ne.j;
+ A(i,i) = BETA , 1 <= i <= min(m,n)
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ =====================================================================
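+
+   Usage sketch (illustrative caller-side code; m, n, lda and a are
+   assumed to be declared by the caller): set an m by n complex matrix
+   to the identity,
+
+       complex zero = {0.f, 0.f}, one = {1.f, 0.f};
+       claset_("A", &m, &n, &zero, &one, a, &lda);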
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+
+/*
+ Set the diagonal to BETA and the strictly upper triangular
+ part of the array to ALPHA.
+*/
+
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j - 1;
+ i__2 = min(i__3,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = alpha->r, a[i__3].i = alpha->i;
+/* L10: */
+ }
+/* L20: */
+ }
+ i__1 = min(*n,*m);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = beta->r, a[i__2].i = beta->i;
+/* L30: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+
+/*
+ Set the diagonal to BETA and the strictly lower triangular
+ part of the array to ALPHA.
+*/
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = alpha->r, a[i__3].i = alpha->i;
+/* L40: */
+ }
+/* L50: */
+ }
+ i__1 = min(*n,*m);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = beta->r, a[i__2].i = beta->i;
+/* L60: */
+ }
+
+ } else {
+
+/*
+ Set the array to BETA on the diagonal and ALPHA on the
+ offdiagonal.
+*/
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = alpha->r, a[i__3].i = alpha->i;
+/* L70: */
+ }
+/* L80: */
+ }
+ i__1 = min(*m,*n);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ a[i__2].r = beta->r, a[i__2].i = beta->i;
+/* L90: */
+ }
+ }
+
+ return 0;
+
+/* End of CLASET */
+
+} /* claset_ */
+
+/* Subroutine */ int clasr_(char *side, char *pivot, char *direct, integer *m,
+ integer *n, real *c__, real *s, complex *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ complex q__1, q__2, q__3;
+
+ /* Local variables */
+ static integer i__, j, info;
+ static complex temp;
+ extern logical lsame_(char *, char *);
+ static real ctemp, stemp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ CLASR performs the transformation
+
+ A := P*A, when SIDE = 'L' or 'l' ( Left-hand side )
+
+ A := A*P', when SIDE = 'R' or 'r' ( Right-hand side )
+
+ where A is an m by n complex matrix and P is an orthogonal matrix,
+ consisting of a sequence of plane rotations determined by the
+ parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l'
+ and z = n when SIDE = 'R' or 'r' ):
+
+ When DIRECT = 'F' or 'f' ( Forward sequence ) then
+
+ P = P( z - 1 )*...*P( 2 )*P( 1 ),
+
+ and when DIRECT = 'B' or 'b' ( Backward sequence ) then
+
+ P = P( 1 )*P( 2 )*...*P( z - 1 ),
+
+ where P( k ) is a plane rotation matrix for the following planes:
+
+ when PIVOT = 'V' or 'v' ( Variable pivot ),
+ the plane ( k, k + 1 )
+
+ when PIVOT = 'T' or 't' ( Top pivot ),
+ the plane ( 1, k + 1 )
+
+ when PIVOT = 'B' or 'b' ( Bottom pivot ),
+ the plane ( k, z )
+
+ c( k ) and s( k ) must contain the cosine and sine that define the
+ matrix P( k ). The two by two plane rotation part of the matrix
+ P( k ), R( k ), is assumed to be of the form
+
+ R( k ) = ( c( k ) s( k ) ).
+ ( -s( k ) c( k ) )
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ Specifies whether the plane rotation matrix P is applied to
+ A on the left or the right.
+ = 'L': Left, compute A := P*A
+            = 'R':  Right, compute A := A*P'
+
+    PIVOT   (input) CHARACTER*1
+            Specifies the plane for which P(k) is a plane rotation
+            matrix.
+            = 'V':  Variable pivot, the plane (k,k+1)
+            = 'T':  Top pivot, the plane (1,k+1)
+            = 'B':  Bottom pivot, the plane (k,z)
+
+    DIRECT  (input) CHARACTER*1
+            Specifies whether P is a forward or backward sequence of
+            plane rotations.
+            = 'F':  Forward, P = P( z - 1 )*...*P( 2 )*P( 1 )
+            = 'B':  Backward, P = P( 1 )*P( 2 )*...*P( z - 1 )
+
+ M (input) INTEGER
+ The number of rows of the matrix A. If m <= 1, an immediate
+ return is effected.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. If n <= 1, an
+ immediate return is effected.
+
+ C, S (input) REAL arrays, dimension
+ (M-1) if SIDE = 'L'
+ (N-1) if SIDE = 'R'
+ c(k) and s(k) contain the cosine and sine that define the
+ matrix P(k). The two by two plane rotation part of the
+ matrix P(k), R(k), is assumed to be of the form
+ R( k ) = ( c( k ) s( k ) ).
+ ( -s( k ) c( k ) )
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+            The m by n matrix A.  On exit, A is overwritten by P*A if
+            SIDE = 'L' or by A*P' if SIDE = 'R'.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ =====================================================================
+
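+   Usage sketch (illustrative caller-side code; m, n, lda, a and the
+   real arrays c and s of length m-1 are assumed to be declared by the
+   caller): apply a forward sequence of variable-pivot rotations to A
+   from the left,
+
+       clasr_("L", "V", "F", &m, &n, c, s, a, &lda);
+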
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --c__;
+ --s;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! ((lsame_(side, "L")) || (lsame_(side, "R")))) {
+ info = 1;
+ } else if (! (((lsame_(pivot, "V")) || (lsame_(
+ pivot, "T"))) || (lsame_(pivot, "B")))) {
+ info = 2;
+ } else if (! ((lsame_(direct, "F")) || (lsame_(
+ direct, "B")))) {
+ info = 3;
+ } else if (*m < 0) {
+ info = 4;
+ } else if (*n < 0) {
+ info = 5;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("CLASR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form P * A */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + 1 + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + 1 + i__ * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__4 = j + i__ * a_dim1;
+ q__3.r = stemp * a[i__4].r, q__3.i = stemp * a[
+ i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = j + i__ * a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__4 = j + i__ * a_dim1;
+ q__3.r = ctemp * a[i__4].r, q__3.i = ctemp * a[
+ i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + 1 + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + 1 + i__ * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__3 = j + i__ * a_dim1;
+ q__3.r = stemp * a[i__3].r, q__3.i = stemp * a[
+ i__3].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = j + i__ * a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__3 = j + i__ * a_dim1;
+ q__3.r = ctemp * a[i__3].r, q__3.i = ctemp * a[
+ i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L30: */
+ }
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + i__ * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__4 = i__ * a_dim1 + 1;
+ q__3.r = stemp * a[i__4].r, q__3.i = stemp * a[
+ i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = i__ * a_dim1 + 1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__4 = i__ * a_dim1 + 1;
+ q__3.r = ctemp * a[i__4].r, q__3.i = ctemp * a[
+ i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + i__ * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__3 = i__ * a_dim1 + 1;
+ q__3.r = stemp * a[i__3].r, q__3.i = stemp * a[
+ i__3].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = i__ * a_dim1 + 1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__3 = i__ * a_dim1 + 1;
+ q__3.r = ctemp * a[i__3].r, q__3.i = ctemp * a[
+ i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = j + i__ * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = j + i__ * a_dim1;
+ i__4 = *m + i__ * a_dim1;
+ q__2.r = stemp * a[i__4].r, q__2.i = stemp * a[
+ i__4].i;
+ q__3.r = ctemp * temp.r, q__3.i = ctemp * temp.i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = *m + i__ * a_dim1;
+ i__4 = *m + i__ * a_dim1;
+ q__2.r = ctemp * a[i__4].r, q__2.i = ctemp * a[
+ i__4].i;
+ q__3.r = stemp * temp.r, q__3.i = stemp * temp.i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = j + i__ * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = j + i__ * a_dim1;
+ i__3 = *m + i__ * a_dim1;
+ q__2.r = stemp * a[i__3].r, q__2.i = stemp * a[
+ i__3].i;
+ q__3.r = ctemp * temp.r, q__3.i = ctemp * temp.i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = *m + i__ * a_dim1;
+ i__3 = *m + i__ * a_dim1;
+ q__2.r = ctemp * a[i__3].r, q__2.i = ctemp * a[
+ i__3].i;
+ q__3.r = stemp * temp.r, q__3.i = stemp * temp.i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ }
+ }
+ } else if (lsame_(side, "R")) {
+
+/* Form A * P' */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + (j + 1) * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + (j + 1) * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__4 = i__ + j * a_dim1;
+ q__3.r = stemp * a[i__4].r, q__3.i = stemp * a[
+ i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = i__ + j * a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__4 = i__ + j * a_dim1;
+ q__3.r = ctemp * a[i__4].r, q__3.i = ctemp * a[
+ i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L130: */
+ }
+ }
+/* L140: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + (j + 1) * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + (j + 1) * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__3 = i__ + j * a_dim1;
+ q__3.r = stemp * a[i__3].r, q__3.i = stemp * a[
+ i__3].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = i__ + j * a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__3 = i__ + j * a_dim1;
+ q__3.r = ctemp * a[i__3].r, q__3.i = ctemp * a[
+ i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + j * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__4 = i__ + a_dim1;
+ q__3.r = stemp * a[i__4].r, q__3.i = stemp * a[
+ i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = i__ + a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__4 = i__ + a_dim1;
+ q__3.r = ctemp * a[i__4].r, q__3.i = ctemp * a[
+ i__4].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L170: */
+ }
+ }
+/* L180: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + j * a_dim1;
+ q__2.r = ctemp * temp.r, q__2.i = ctemp * temp.i;
+ i__3 = i__ + a_dim1;
+ q__3.r = stemp * a[i__3].r, q__3.i = stemp * a[
+ i__3].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = i__ + a_dim1;
+ q__2.r = stemp * temp.r, q__2.i = stemp * temp.i;
+ i__3 = i__ + a_dim1;
+ q__3.r = ctemp * a[i__3].r, q__3.i = ctemp * a[
+ i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L190: */
+ }
+ }
+/* L200: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ temp.r = a[i__3].r, temp.i = a[i__3].i;
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + *n * a_dim1;
+ q__2.r = stemp * a[i__4].r, q__2.i = stemp * a[
+ i__4].i;
+ q__3.r = ctemp * temp.r, q__3.i = ctemp * temp.i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+ i__3 = i__ + *n * a_dim1;
+ i__4 = i__ + *n * a_dim1;
+ q__2.r = ctemp * a[i__4].r, q__2.i = ctemp * a[
+ i__4].i;
+ q__3.r = stemp * temp.r, q__3.i = stemp * temp.i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__3].r = q__1.r, a[i__3].i = q__1.i;
+/* L210: */
+ }
+ }
+/* L220: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ temp.r = a[i__2].r, temp.i = a[i__2].i;
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + *n * a_dim1;
+ q__2.r = stemp * a[i__3].r, q__2.i = stemp * a[
+ i__3].i;
+ q__3.r = ctemp * temp.r, q__3.i = ctemp * temp.i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i +
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = i__ + *n * a_dim1;
+ i__3 = i__ + *n * a_dim1;
+ q__2.r = ctemp * a[i__3].r, q__2.i = ctemp * a[
+ i__3].i;
+ q__3.r = stemp * temp.r, q__3.i = stemp * temp.i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i -
+ q__3.i;
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CLASR */
+
+} /* clasr_ */
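+
+/*
+    Editor's note: an illustrative sketch, not part of LAPACK.  It applies a
+    single plane rotation R( k ) = (  c( k )  s( k ) )
+                                   ( -s( k )  c( k ) )
+    to one pair of entries, which is exactly the update the CLASR loops
+    above perform across each row or column.  The f2c typedefs (real,
+    complex) are assumed to be in scope; the function name is hypothetical.
+*/
+static void clasr_rot_sketch(real c, real s, complex *x, complex *y)
+{
+    complex tx = *x, ty = *y;
+    /* x := c*x + s*y */
+    x->r = c * tx.r + s * ty.r;
+    x->i = c * tx.i + s * ty.i;
+    /* y := -s*x + c*y */
+    y->r = c * ty.r - s * tx.r;
+    y->i = c * ty.i - s * tx.i;
+}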
+
+/* Subroutine */ int classq_(integer *n, complex *x, integer *incx, real *
+ scale, real *sumsq)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real r__1;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+
+ /* Local variables */
+ static integer ix;
+ static real temp1;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CLASSQ returns the values scl and ssq such that
+
+ ( scl**2 )*ssq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq,
+
+ where x( i ) = abs( X( 1 + ( i - 1 )*INCX ) ). The value of sumsq is
+ assumed to be at least unity and the value of ssq will then satisfy
+
+ 1.0 .le. ssq .le. ( sumsq + 2*n ).
+
+ scale is assumed to be non-negative and scl returns the value
+
+ scl = max( scale, abs( real( x( i ) ) ), abs( aimag( x( i ) ) ) ),
+ i
+
+ scale and sumsq must be supplied in SCALE and SUMSQ respectively.
+ SCALE and SUMSQ are overwritten by scl and ssq respectively.
+
+ The routine makes only one pass through the vector X.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of elements to be used from the vector X.
+
+ X (input) COMPLEX array, dimension (N)
+ The vector x as described above.
+ x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n.
+
+ INCX (input) INTEGER
+ The increment between successive values of the vector X.
+ INCX > 0.
+
+ SCALE (input/output) REAL
+ On entry, the value scale in the equation above.
+ On exit, SCALE is overwritten with the value scl .
+
+ SUMSQ (input/output) REAL
+ On entry, the value sumsq in the equation above.
+ On exit, SUMSQ is overwritten with the value ssq .
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n > 0) {
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ i__3 = ix;
+ if (x[i__3].r != 0.f) {
+ i__3 = ix;
+ temp1 = (r__1 = x[i__3].r, dabs(r__1));
+ if (*scale < temp1) {
+/* Computing 2nd power */
+ r__1 = *scale / temp1;
+ *sumsq = *sumsq * (r__1 * r__1) + 1;
+ *scale = temp1;
+ } else {
+/* Computing 2nd power */
+ r__1 = temp1 / *scale;
+ *sumsq += r__1 * r__1;
+ }
+ }
+ if (r_imag(&x[ix]) != 0.f) {
+ temp1 = (r__1 = r_imag(&x[ix]), dabs(r__1));
+ if (*scale < temp1) {
+/* Computing 2nd power */
+ r__1 = *scale / temp1;
+ *sumsq = *sumsq * (r__1 * r__1) + 1;
+ *scale = temp1;
+ } else {
+/* Computing 2nd power */
+ r__1 = temp1 / *scale;
+ *sumsq += r__1 * r__1;
+ }
+ }
+/* L10: */
+ }
+ }
+
+ return 0;
+
+/* End of CLASSQ */
+
+} /* classq_ */
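+
+/*
+    Editor's note: an illustrative sketch, not part of LAPACK.  This is the
+    per-component update CLASSQ applies above to each nonzero real and
+    imaginary part t: the pair (scale, sumsq) is maintained so that
+    scale**2 * sumsq equals the sum of squares seen so far, rescaling by
+    ratios instead of squaring large values so the running sum cannot
+    overflow.  The function name is hypothetical.
+*/
+static void ssq_update_sketch(real t, real *scale, real *sumsq)
+{
+    if (t != 0.f) {
+        if (*scale < t) {
+            /* fold the old sum into the larger scale */
+            real r = *scale / t;
+            *sumsq = *sumsq * (r * r) + 1.f;
+            *scale = t;
+        } else {
+            /* accumulate a term no larger than one */
+            real r = t / *scale;
+            *sumsq += r * r;
+        }
+    }
+}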
+
+/* Subroutine */ int claswp_(integer *n, complex *a, integer *lda, integer *
+ k1, integer *k2, integer *ipiv, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5, i__6;
+
+ /* Local variables */
+ static integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc;
+ static complex temp;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CLASWP performs a series of row interchanges on the matrix A.
+ One row interchange is initiated for each of rows K1 through K2 of A.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of columns of the matrix A.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the matrix of column dimension N to which the row
+ interchanges will be applied.
+ On exit, the permuted matrix.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+
+ K1 (input) INTEGER
+ The first element of IPIV for which a row interchange will
+ be done.
+
+ K2 (input) INTEGER
+ The last element of IPIV for which a row interchange will
+ be done.
+
+    IPIV    (input) INTEGER array, dimension (K2*abs(INCX))
+ The vector of pivot indices. Only the elements in positions
+ K1 through K2 of IPIV are accessed.
+ IPIV(K) = L implies rows K and L are to be interchanged.
+
+ INCX (input) INTEGER
+ The increment between successive values of IPIV. If IPIV
+ is negative, the pivots are applied in reverse order.
+
+ Further Details
+ ===============
+
+ Modified by
+ R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA
+
+ =====================================================================
+
+
+ Interchange row I with row IPIV(I) for each of rows K1 through K2.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ if (*incx > 0) {
+ ix0 = *k1;
+ i1 = *k1;
+ i2 = *k2;
+ inc = 1;
+ } else if (*incx < 0) {
+ ix0 = (1 - *k2) * *incx + 1;
+ i1 = *k2;
+ i2 = *k1;
+ inc = -1;
+ } else {
+ return 0;
+ }
+
+ n32 = (*n / 32) << (5);
+ if (n32 != 0) {
+ i__1 = n32;
+ for (j = 1; j <= i__1; j += 32) {
+ ix = ix0;
+ i__2 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
+ {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__4 = j + 31;
+ for (k = j; k <= i__4; ++k) {
+ i__5 = i__ + k * a_dim1;
+ temp.r = a[i__5].r, temp.i = a[i__5].i;
+ i__5 = i__ + k * a_dim1;
+ i__6 = ip + k * a_dim1;
+ a[i__5].r = a[i__6].r, a[i__5].i = a[i__6].i;
+ i__5 = ip + k * a_dim1;
+ a[i__5].r = temp.r, a[i__5].i = temp.i;
+/* L10: */
+ }
+ }
+ ix += *incx;
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ if (n32 != *n) {
+ ++n32;
+ ix = ix0;
+ i__1 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__2 = *n;
+ for (k = n32; k <= i__2; ++k) {
+ i__4 = i__ + k * a_dim1;
+ temp.r = a[i__4].r, temp.i = a[i__4].i;
+ i__4 = i__ + k * a_dim1;
+ i__5 = ip + k * a_dim1;
+ a[i__4].r = a[i__5].r, a[i__4].i = a[i__5].i;
+ i__4 = ip + k * a_dim1;
+ a[i__4].r = temp.r, a[i__4].i = temp.i;
+/* L40: */
+ }
+ }
+ ix += *incx;
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of CLASWP */
+
+} /* claswp_ */
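+
+/*
+    Editor's note: an illustrative sketch, not part of LAPACK.  It performs
+    the single row interchange CLASWP repeats for each pivot: swap rows i
+    and ip across all n columns of a column-major matrix with leading
+    dimension lda (0-based indexing here, unlike the offset-adjusted code
+    above).  Names are hypothetical.
+*/
+static void swap_rows_sketch(integer n, complex *a, integer lda, integer i,
+        integer ip)
+{
+    integer k;
+    for (k = 0; k < n; ++k) {
+        /* A(i,k) <-> A(ip,k) */
+        complex temp = a[i + k * lda];
+        a[i + k * lda] = a[ip + k * lda];
+        a[ip + k * lda] = temp;
+    }
+}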
+
+/* Subroutine */ int clatrd_(char *uplo, integer *n, integer *nb, complex *a,
+ integer *lda, real *e, complex *tau, complex *w, integer *ldw)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
+ real r__1;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Local variables */
+ static integer i__, iw;
+ static complex alpha;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+ , complex *, integer *, complex *, integer *, complex *, complex *
+ , integer *), chemv_(char *, integer *, complex *,
+ complex *, integer *, complex *, integer *, complex *, complex *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *), clarfg_(integer *, complex *,
+ complex *, integer *, complex *), clacgv_(integer *, complex *,
+ integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLATRD reduces NB rows and columns of a complex Hermitian matrix A to
+ Hermitian tridiagonal form by a unitary similarity
+ transformation Q' * A * Q, and returns the matrices V and W which are
+ needed to apply the transformation to the unreduced part of A.
+
+ If UPLO = 'U', CLATRD reduces the last NB rows and columns of a
+ matrix, of which the upper triangle is supplied;
+ if UPLO = 'L', CLATRD reduces the first NB rows and columns of a
+ matrix, of which the lower triangle is supplied.
+
+ This is an auxiliary routine called by CHETRD.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER
+ Specifies whether the upper or lower triangular part of the
+ Hermitian matrix A is stored:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the matrix A.
+
+ NB (input) INTEGER
+ The number of rows and columns to be reduced.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the Hermitian matrix A. If UPLO = 'U', the leading
+ n-by-n upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n-by-n lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+ On exit:
+ if UPLO = 'U', the last NB columns have been reduced to
+ tridiagonal form, with the diagonal elements overwriting
+          the diagonal elements of A; the elements above the diagonal,
+ with the array TAU, represent the unitary matrix Q as a
+ product of elementary reflectors;
+ if UPLO = 'L', the first NB columns have been reduced to
+ tridiagonal form, with the diagonal elements overwriting
+          the diagonal elements of A; the elements below the diagonal,
+ with the array TAU, represent the unitary matrix Q as a
+ product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ E (output) REAL array, dimension (N-1)
+ If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal
+ elements of the last NB columns of the reduced matrix;
+ if UPLO = 'L', E(1:nb) contains the subdiagonal elements of
+ the first NB columns of the reduced matrix.
+
+ TAU (output) COMPLEX array, dimension (N-1)
+ The scalar factors of the elementary reflectors, stored in
+ TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'.
+ See Further Details.
+
+ W (output) COMPLEX array, dimension (LDW,NB)
+ The n-by-nb matrix W required to update the unreduced part
+ of A.
+
+ LDW (input) INTEGER
+ The leading dimension of the array W. LDW >= max(1,N).
+
+ Further Details
+ ===============
+
+ If UPLO = 'U', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(n) H(n-1) . . . H(n-nb+1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i),
+ and tau in TAU(i-1).
+
+ If UPLO = 'L', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(1) H(2) . . . H(nb).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a complex scalar, and v is a complex vector with
+ v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i),
+ and tau in TAU(i).
+
+ The elements of the vectors v together form the n-by-nb matrix V
+ which is needed, with W, to apply the transformation to the unreduced
+ part of the matrix, using a Hermitian rank-2k update of the form:
+ A := A - V*W' - W*V'.
+
+ The contents of A on exit are illustrated by the following examples
+ with n = 5 and nb = 2:
+
+ if UPLO = 'U': if UPLO = 'L':
+
+ ( a a a v4 v5 ) ( d )
+ ( a a v4 v5 ) ( 1 d )
+ ( a 1 v5 ) ( v1 1 a )
+ ( d 1 ) ( v1 v2 a a )
+ ( d ) ( v1 v2 a a a )
+
+ where d denotes a diagonal element of the reduced matrix, a denotes
+ an element of the original matrix that is unchanged, and vi denotes
+ an element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --e;
+ --tau;
+ w_dim1 = *ldw;
+ w_offset = 1 + w_dim1;
+ w -= w_offset;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (lsame_(uplo, "U")) {
+
+/* Reduce last NB columns of upper triangle */
+
+ i__1 = *n - *nb + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ iw = i__ - *n + *nb;
+ if (i__ < *n) {
+
+/* Update A(1:i,i) */
+
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__ + i__ * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ i__2 = *n - i__;
+ clacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
+ i__2 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__, &i__2, &q__1, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
+ c_b56, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ clacgv_(&i__2, &w[i__ + (iw + 1) * w_dim1], ldw);
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__, &i__2, &q__1, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b56, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__ + i__ * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ }
+ if (i__ > 1) {
+
+/*
+ Generate elementary reflector H(i) to annihilate
+ A(1:i-2,i)
+*/
+
+ i__2 = i__ - 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = i__ - 1;
+ clarfg_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &tau[i__
+ - 1]);
+ i__2 = i__ - 1;
+ e[i__2] = alpha.r;
+ i__2 = i__ - 1 + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute W(1:i-1,i) */
+
+ i__2 = i__ - 1;
+ chemv_("Upper", &i__2, &c_b56, &a[a_offset], lda, &a[i__ *
+ a_dim1 + 1], &c__1, &c_b55, &w[iw * w_dim1 + 1], &
+ c__1);
+ if (i__ < *n) {
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &w[(
+ iw + 1) * w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1],
+ &c__1, &c_b55, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1], &
+ c__1, &c_b56, &w[iw * w_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &a[(
+ i__ + 1) * a_dim1 + 1], lda, &a[i__ * a_dim1 + 1],
+ &c__1, &c_b55, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
+ c__1, &c_b56, &w[iw * w_dim1 + 1], &c__1);
+ }
+ i__2 = i__ - 1;
+ cscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
+ q__3.r = -.5f, q__3.i = -0.f;
+ i__2 = i__ - 1;
+ q__2.r = q__3.r * tau[i__2].r - q__3.i * tau[i__2].i, q__2.i =
+ q__3.r * tau[i__2].i + q__3.i * tau[i__2].r;
+ i__3 = i__ - 1;
+ cdotc_(&q__4, &i__3, &w[iw * w_dim1 + 1], &c__1, &a[i__ *
+ a_dim1 + 1], &c__1);
+ q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r *
+ q__4.i + q__2.i * q__4.r;
+ alpha.r = q__1.r, alpha.i = q__1.i;
+ i__2 = i__ - 1;
+ caxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw *
+ w_dim1 + 1], &c__1);
+ }
+
+/* L10: */
+ }
+ } else {
+
+/* Reduce first NB columns of lower triangle */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:n,i) */
+
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__ + i__ * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &w[i__ + w_dim1], ldw);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + a_dim1], lda,
+ &w[i__ + w_dim1], ldw, &c_b56, &a[i__ + i__ * a_dim1], &
+ c__1);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &w[i__ + w_dim1], ldw);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[i__ + a_dim1], lda);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &w[i__ + w_dim1], ldw,
+ &a[i__ + a_dim1], lda, &c_b56, &a[i__ + i__ * a_dim1], &
+ c__1);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[i__ + a_dim1], lda);
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = i__ + i__ * a_dim1;
+ r__1 = a[i__3].r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ if (i__ < *n) {
+
+/*
+ Generate elementary reflector H(i) to annihilate
+ A(i+2:n,i)
+*/
+
+ i__2 = i__ + 1 + i__ * a_dim1;
+ alpha.r = a[i__2].r, alpha.i = a[i__2].i;
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ clarfg_(&i__2, &alpha, &a[min(i__3,*n) + i__ * a_dim1], &c__1,
+ &tau[i__]);
+ i__2 = i__;
+ e[i__2] = alpha.r;
+ i__2 = i__ + 1 + i__ * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+
+/* Compute W(i+1:n,i) */
+
+ i__2 = *n - i__;
+ chemv_("Lower", &i__2, &c_b56, &a[i__ + 1 + (i__ + 1) *
+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b55, &w[i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &w[i__ +
+ 1 + w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b55, &w[i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[i__ + 1 +
+ a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b56, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &a[i__ +
+ 1 + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b55, &w[i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &w[i__ + 1 +
+ w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b56, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ cscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
+ q__3.r = -.5f, q__3.i = -0.f;
+ i__2 = i__;
+ q__2.r = q__3.r * tau[i__2].r - q__3.i * tau[i__2].i, q__2.i =
+ q__3.r * tau[i__2].i + q__3.i * tau[i__2].r;
+ i__3 = *n - i__;
+ cdotc_(&q__4, &i__3, &w[i__ + 1 + i__ * w_dim1], &c__1, &a[
+ i__ + 1 + i__ * a_dim1], &c__1);
+ q__1.r = q__2.r * q__4.r - q__2.i * q__4.i, q__1.i = q__2.r *
+ q__4.i + q__2.i * q__4.r;
+ alpha.r = q__1.r, alpha.i = q__1.i;
+ i__2 = *n - i__;
+ caxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ }
+
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of CLATRD */
+
+} /* clatrd_ */
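+
+/*
+    Editor's note: an illustrative sketch, not part of LAPACK.  It applies
+    one elementary reflector H = I - tau*v*v' (the building block whose
+    factors CLATRD stores in A and TAU above) to a vector x of length n,
+    where v' denotes the conjugate transpose.  Names are hypothetical.
+*/
+static void apply_reflector_sketch(integer n, complex tau, const complex *v,
+        complex *x)
+{
+    integer i;
+    complex dot;
+    real wr, wi;
+    dot.r = 0.f, dot.i = 0.f;
+    /* dot = v' * x = sum conj(v(i)) * x(i) */
+    for (i = 0; i < n; ++i) {
+        dot.r += v[i].r * x[i].r + v[i].i * x[i].i;
+        dot.i += v[i].r * x[i].i - v[i].i * x[i].r;
+    }
+    /* w = tau * dot */
+    wr = tau.r * dot.r - tau.i * dot.i;
+    wi = tau.r * dot.i + tau.i * dot.r;
+    /* x := x - w * v */
+    for (i = 0; i < n; ++i) {
+        x[i].r -= wr * v[i].r - wi * v[i].i;
+        x[i].i -= wr * v[i].i + wi * v[i].r;
+    }
+}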
+
+/* Subroutine */ int clatrs_(char *uplo, char *trans, char *diag, char *
+ normin, integer *n, complex *a, integer *lda, complex *x, real *scale,
+ real *cnorm, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3, r__4;
+ complex q__1, q__2, q__3, q__4;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j;
+ static real xj, rec, tjj;
+ static integer jinc;
+ static real xbnd;
+ static integer imax;
+ static real tmax;
+ static complex tjjs;
+ static real xmax, grow;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ static real tscal;
+ static complex uscal;
+ static integer jlast;
+ extern /* Complex */ VOID cdotu_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ static complex csumj;
+ extern /* Subroutine */ int caxpy_(integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int ctrsv_(char *, char *, char *, integer *,
+ complex *, integer *, complex *, integer *), slabad_(real *, real *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *);
+ static real bignum;
+ extern integer isamax_(integer *, real *, integer *);
+ extern doublereal scasum_(integer *, complex *, integer *);
+ static logical notran;
+ static integer jfirst;
+ static real smlnum;
+ static logical nounit;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1992
+
+
+ Purpose
+ =======
+
+ CLATRS solves one of the triangular systems
+
+ A * x = s*b, A**T * x = s*b, or A**H * x = s*b,
+
+ with scaling to prevent overflow. Here A is an upper or lower
+ triangular matrix, A**T denotes the transpose of A, A**H denotes the
+ conjugate transpose of A, x and b are n-element vectors, and s is a
+ scaling factor, usually less than or equal to 1, chosen so that the
+ components of x will be less than the overflow threshold. If the
+ unscaled problem will not cause overflow, the Level 2 BLAS routine
+ CTRSV is called. If the matrix A is singular (A(j,j) = 0 for some j),
+ then s is set to 0 and a non-trivial solution to A*x = 0 is returned.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the matrix A is upper or lower triangular.
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ TRANS (input) CHARACTER*1
+ Specifies the operation applied to A.
+ = 'N': Solve A * x = s*b (No transpose)
+ = 'T': Solve A**T * x = s*b (Transpose)
+ = 'C': Solve A**H * x = s*b (Conjugate transpose)
+
+ DIAG (input) CHARACTER*1
+ Specifies whether or not the matrix A is unit triangular.
+ = 'N': Non-unit triangular
+ = 'U': Unit triangular
+
+ NORMIN (input) CHARACTER*1
+ Specifies whether CNORM has been set or not.
+ = 'Y': CNORM contains the column norms on entry
+ = 'N': CNORM is not set on entry. On exit, the norms will
+ be computed and stored in CNORM.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input) COMPLEX array, dimension (LDA,N)
+ The triangular matrix A. If UPLO = 'U', the leading n by n
+ upper triangular part of the array A contains the upper
+ triangular matrix, and the strictly lower triangular part of
+ A is not referenced. If UPLO = 'L', the leading n by n lower
+ triangular part of the array A contains the lower triangular
+ matrix, and the strictly upper triangular part of A is not
+ referenced. If DIAG = 'U', the diagonal elements of A are
+ also not referenced and are assumed to be 1.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max (1,N).
+
+ X (input/output) COMPLEX array, dimension (N)
+ On entry, the right hand side b of the triangular system.
+ On exit, X is overwritten by the solution vector x.
+
+ SCALE (output) REAL
+ The scaling factor s for the triangular system
+ A * x = s*b, A**T * x = s*b, or A**H * x = s*b.
+ If SCALE = 0, the matrix A is singular or badly scaled, and
+ the vector x is an exact or approximate solution to A*x = 0.
+
+ CNORM (input or output) REAL array, dimension (N)
+
+ If NORMIN = 'Y', CNORM is an input argument and CNORM(j)
+ contains the norm of the off-diagonal part of the j-th column
+ of A. If TRANS = 'N', CNORM(j) must be greater than or equal
+ to the infinity-norm, and if TRANS = 'T' or 'C', CNORM(j)
+ must be greater than or equal to the 1-norm.
+
+ If NORMIN = 'N', CNORM is an output argument and CNORM(j)
+            returns the 1-norm of the off-diagonal part of the j-th column
+ of A.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ Further Details
+    ===============
+
+ A rough bound on x is computed; if that is less than overflow, CTRSV
+ is called, otherwise, specific code is used which checks for possible
+ overflow or divide-by-zero at every operation.
+
+ A columnwise scheme is used for solving A*x = b. The basic algorithm
+ if A is lower triangular is
+
+ x[1:n] := b[1:n]
+ for j = 1, ..., n
+ x(j) := x(j) / A(j,j)
+ x[j+1:n] := x[j+1:n] - x(j) * A[j+1:n,j]
+ end
+
+ Define bounds on the components of x after j iterations of the loop:
+ M(j) = bound on x[1:j]
+ G(j) = bound on x[j+1:n]
+ Initially, let M(0) = 0 and G(0) = max{x(i), i=1,...,n}.
+
+ Then for iteration j+1 we have
+ M(j+1) <= G(j) / | A(j+1,j+1) |
+ G(j+1) <= G(j) + M(j+1) * | A[j+2:n,j+1] |
+ <= G(j) ( 1 + CNORM(j+1) / | A(j+1,j+1) | )
+
+ where CNORM(j+1) is greater than or equal to the infinity-norm of
+ column j+1 of A, not counting the diagonal. Hence
+
+ G(j) <= G(0) product ( 1 + CNORM(i) / | A(i,i) | )
+ 1<=i<=j
+ and
+
+ |x(j)| <= ( G(0) / |A(j,j)| ) product ( 1 + CNORM(i) / |A(i,i)| )
+ 1<=i< j
+
+ Since |x(j)| <= M(j), we use the Level 2 BLAS routine CTRSV if the
+ reciprocal of the largest M(j), j=1,..,n, is larger than
+ max(underflow, 1/overflow).
+
+ The bound on x(j) is also used to determine when a step in the
+ columnwise method can be performed without fear of overflow. If
+ the computed bound is greater than a large constant, x is scaled to
+ prevent overflow, but if the bound overflows, x is set to 0, x(j) to
+ 1, and scale to 0, and a non-trivial solution to A*x = 0 is found.
+
+ Similarly, a row-wise scheme is used to solve A**T *x = b or
+ A**H *x = b. The basic algorithm for A upper triangular is
+
+ for j = 1, ..., n
+ x(j) := ( b(j) - A[1:j-1,j]' * x[1:j-1] ) / A(j,j)
+ end
+
+ We simultaneously compute two bounds
+ G(j) = bound on ( b(i) - A[1:i-1,i]' * x[1:i-1] ), 1<=i<=j
+ M(j) = bound on x(i), 1<=i<=j
+
+ The initial values are G(0) = 0, M(0) = max{b(i), i=1,..,n}, and we
+ add the constraint G(j) >= G(j-1) and M(j) >= M(j-1) for j >= 1.
+ Then the bound on x(j) is
+
+ M(j) <= M(j-1) * ( 1 + CNORM(j) ) / | A(j,j) |
+
+ <= M(0) * product ( ( 1 + CNORM(i) ) / |A(i,i)| )
+ 1<=i<=j
+
+ and we can safely call CTRSV if 1/M(n) and 1/G(n) are both greater
+ than max(underflow, 1/overflow).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --x;
+ --cnorm;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ notran = lsame_(trans, "N");
+ nounit = lsame_(diag, "N");
+
+/* Test the input parameters. */
+
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
+ *info = -2;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -3;
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLATRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine machine dependent parameters to control overflow. */
+
+ smlnum = slamch_("Safe minimum");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum /= slamch_("Precision");
+ bignum = 1.f / smlnum;
+ *scale = 1.f;
+
+ if (lsame_(normin, "N")) {
+
+/* Compute the 1-norm of each column, not including the diagonal. */
+
+ if (upper) {
+
+/* A is upper triangular. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ cnorm[j] = scasum_(&i__2, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* A is lower triangular. */
+
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ cnorm[j] = scasum_(&i__2, &a[j + 1 + j * a_dim1], &c__1);
+/* L20: */
+ }
+ cnorm[*n] = 0.f;
+ }
+ }
+
+/*
+ Scale the column norms by TSCAL if the maximum element in CNORM is
+ greater than BIGNUM/2.
+*/
+
+ imax = isamax_(n, &cnorm[1], &c__1);
+ tmax = cnorm[imax];
+ if (tmax <= bignum * .5f) {
+ tscal = 1.f;
+ } else {
+ tscal = .5f / (smlnum * tmax);
+ sscal_(n, &tscal, &cnorm[1], &c__1);
+ }
+
+/*
+ Compute a bound on the computed solution vector to see if the
+ Level 2 BLAS routine CTRSV can be used.
+*/
+
+ xmax = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__2].r / 2.f, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]) / 2.f, dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L30: */
+ }
+ xbnd = xmax;
+
+ if (notran) {
+
+/* Compute the growth in A * x = b. */
+
+ if (upper) {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ } else {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L60;
+ }
+
+ if (nounit) {
+
+/*
+ A is non-unit triangular.
+
+ Compute GROW = 1/G(j) and XBND = 1/M(j).
+ Initially, G(0) = max{x(i), i=1,...,n}.
+*/
+
+ grow = .5f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+ i__3 = j + j * a_dim1;
+ tjjs.r = a[i__3].r, tjjs.i = a[i__3].i;
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+
+ if (tjj >= smlnum) {
+
+/*
+ M(j) = G(j-1) / abs(A(j,j))
+
+ Computing MIN
+*/
+ r__1 = xbnd, r__2 = dmin(1.f,tjj) * grow;
+ xbnd = dmin(r__1,r__2);
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.f;
+ }
+
+ if (tjj + cnorm[j] >= smlnum) {
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) */
+
+ grow *= tjj / (tjj + cnorm[j]);
+ } else {
+
+/* G(j) could overflow, set GROW to 0. */
+
+ grow = 0.f;
+ }
+/* L40: */
+ }
+ grow = xbnd;
+ } else {
+
+/*
+ A is unit triangular.
+
+ Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}.
+
+ Computing MIN
+*/
+ r__1 = 1.f, r__2 = .5f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L60;
+ }
+
+/* G(j) = G(j-1)*( 1 + CNORM(j) ) */
+
+ grow *= 1.f / (cnorm[j] + 1.f);
+/* L50: */
+ }
+ }
+L60:
+
+ ;
+ } else {
+
+/* Compute the growth in A**T * x = b or A**H * x = b. */
+
+ if (upper) {
+ jfirst = 1;
+ jlast = *n;
+ jinc = 1;
+ } else {
+ jfirst = *n;
+ jlast = 1;
+ jinc = -1;
+ }
+
+ if (tscal != 1.f) {
+ grow = 0.f;
+ goto L90;
+ }
+
+ if (nounit) {
+
+/*
+ A is non-unit triangular.
+
+ Compute GROW = 1/G(j) and XBND = 1/M(j).
+ Initially, M(0) = max{x(i), i=1,...,n}.
+*/
+
+ grow = .5f / dmax(xbnd,smlnum);
+ xbnd = grow;
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) */
+
+ xj = cnorm[j] + 1.f;
+/* Computing MIN */
+ r__1 = grow, r__2 = xbnd / xj;
+ grow = dmin(r__1,r__2);
+
+ i__3 = j + j * a_dim1;
+ tjjs.r = a[i__3].r, tjjs.i = a[i__3].i;
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+
+ if (tjj >= smlnum) {
+
+/* M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) */
+
+ if (xj > tjj) {
+ xbnd *= tjj / xj;
+ }
+ } else {
+
+/* M(j) could overflow, set XBND to 0. */
+
+ xbnd = 0.f;
+ }
+/* L70: */
+ }
+ grow = dmin(grow,xbnd);
+ } else {
+
+/*
+ A is unit triangular.
+
+ Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}.
+
+ Computing MIN
+*/
+ r__1 = 1.f, r__2 = .5f / dmax(xbnd,smlnum);
+ grow = dmin(r__1,r__2);
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/* Exit the loop if the growth factor is too small. */
+
+ if (grow <= smlnum) {
+ goto L90;
+ }
+
+/* G(j) = ( 1 + CNORM(j) )*G(j-1) */
+
+ xj = cnorm[j] + 1.f;
+ grow /= xj;
+/* L80: */
+ }
+ }
+L90:
+ ;
+ }
+
+ if (grow * tscal > smlnum) {
+
+/*
+ Use the Level 2 BLAS solve if the reciprocal of the bound on
+ elements of X is not too small.
+*/
+
+ ctrsv_(uplo, trans, diag, n, &a[a_offset], lda, &x[1], &c__1);
+ } else {
+
+/* Use a Level 1 BLAS solve, scaling intermediate results. */
+
+ if (xmax > bignum * .5f) {
+
+/*
+ Scale X so that its components are less than or equal to
+ BIGNUM in absolute value.
+*/
+
+ *scale = bignum * .5f / xmax;
+ csscal_(n, scale, &x[1], &c__1);
+ xmax = bignum;
+ } else {
+ xmax *= 2.f;
+ }
+
+ if (notran) {
+
+/* Solve A * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/* Compute x(j) = b(j) / A(j,j), scaling x if necessary. */
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ q__1.r = tscal * a[i__3].r, q__1.i = tscal * a[i__3].i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L105;
+ }
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale x by 1/b(j). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/*
+ Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM
+ to avoid overflow when dividing by A(j,j).
+*/
+
+ rec = tjj * bignum / xj;
+ if (cnorm[j] > 1.f) {
+
+/*
+ Scale by 1/CNORM(j) to avoid overflow when
+ multiplying x(j) times column j.
+*/
+
+ rec /= cnorm[j];
+ }
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ } else {
+
+/*
+ A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and
+ scale = 0, and compute a solution to A*x = 0.
+*/
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L100: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ xj = 1.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L105:
+
+/*
+ Scale x if necessary to avoid overflow when adding a
+ multiple of column j of A.
+*/
+
+ if (xj > 1.f) {
+ rec = 1.f / xj;
+ if (cnorm[j] > (bignum - xmax) * rec) {
+
+/* Scale x by 1/(2*abs(x(j))). */
+
+ rec *= .5f;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ }
+ } else if (xj * cnorm[j] > bignum - xmax) {
+
+/* Scale x by 1/2. */
+
+ csscal_(n, &c_b2206, &x[1], &c__1);
+ *scale *= .5f;
+ }
+
+ if (upper) {
+ if (j > 1) {
+
+/*
+ Compute the update
+ x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j)
+*/
+
+ i__3 = j - 1;
+ i__4 = j;
+ q__2.r = -x[i__4].r, q__2.i = -x[i__4].i;
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ caxpy_(&i__3, &q__1, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ i__3 = j - 1;
+ i__ = icamax_(&i__3, &x[1], &c__1);
+ i__3 = i__;
+ xmax = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__]), dabs(r__2));
+ }
+ } else {
+ if (j < *n) {
+
+/*
+ Compute the update
+ x(j+1:n) := x(j+1:n) - x(j) * A(j+1:n,j)
+*/
+
+ i__3 = *n - j;
+ i__4 = j;
+ q__2.r = -x[i__4].r, q__2.i = -x[i__4].i;
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ caxpy_(&i__3, &q__1, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ i__3 = *n - j;
+ i__ = j + icamax_(&i__3, &x[j + 1], &c__1);
+ i__3 = i__;
+ xmax = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[i__]), dabs(r__2));
+ }
+ }
+/* L110: */
+ }
+
+ } else if (lsame_(trans, "T")) {
+
+/* Solve A**T * x = b */
+
+ i__2 = jlast;
+ i__1 = jinc;
+ for (j = jfirst; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/*
+ Compute x(j) = b(j) - sum A(k,j)*x(k).
+ k<>j
+*/
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ uscal.r = tscal, uscal.i = 0.f;
+ rec = 1.f / dmax(xmax,1.f);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5f;
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ q__1.r = tscal * a[i__3].r, q__1.i = tscal * a[i__3]
+ .i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > 1.f) {
+
+/*
+ Divide by A(j,j) when scaling x if A(j,j) > 1.
+
+ Computing MIN
+*/
+ r__1 = 1.f, r__2 = rec * tjj;
+ rec = dmin(r__1,r__2);
+ cladiv_(&q__1, &uscal, &tjjs);
+ uscal.r = q__1.r, uscal.i = q__1.i;
+ }
+ if (rec < 1.f) {
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0.f, csumj.i = 0.f;
+ if (uscal.r == 1.f && uscal.i == 0.f) {
+
+/*
+ If the scaling needed for A in the dot product is 1,
+ call CDOTU to perform the dot product.
+*/
+
+ if (upper) {
+ i__3 = j - 1;
+ cdotu_(&q__1, &i__3, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ cdotu_(&q__1, &i__3, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * a_dim1;
+ q__3.r = a[i__4].r * uscal.r - a[i__4].i *
+ uscal.i, q__3.i = a[i__4].r * uscal.i + a[
+ i__4].i * uscal.r;
+ i__5 = i__;
+ q__2.r = q__3.r * x[i__5].r - q__3.i * x[i__5].i,
+ q__2.i = q__3.r * x[i__5].i + q__3.i * x[
+ i__5].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L120: */
+ }
+ } else if (j < *n) {
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ i__4 = i__ + j * a_dim1;
+ q__3.r = a[i__4].r * uscal.r - a[i__4].i *
+ uscal.i, q__3.i = a[i__4].r * uscal.i + a[
+ i__4].i * uscal.r;
+ i__5 = i__;
+ q__2.r = q__3.r * x[i__5].r - q__3.i * x[i__5].i,
+ q__2.i = q__3.r * x[i__5].i + q__3.i * x[
+ i__5].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L130: */
+ }
+ }
+ }
+
+ q__1.r = tscal, q__1.i = 0.f;
+ if (uscal.r == q__1.r && uscal.i == q__1.i) {
+
+/*
+ Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j)
+                    was not used to scale the dot product.
+*/
+
+ i__3 = j;
+ i__4 = j;
+ q__1.r = x[i__4].r - csumj.r, q__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ if (nounit) {
+ i__3 = j + j * a_dim1;
+ q__1.r = tscal * a[i__3].r, q__1.i = tscal * a[i__3]
+ .i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L145;
+ }
+ }
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else {
+
+/*
+ A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and
+ scale = 0 and compute a solution to A**T *x = 0.
+*/
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L140: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L145:
+ ;
+ } else {
+
+/*
+ Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot
+ product has already been divided by 1/A(j,j).
+*/
+
+ i__3 = j;
+ cladiv_(&q__2, &x[j], &tjjs);
+ q__1.r = q__2.r - csumj.r, q__1.i = q__2.i - csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]), dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L150: */
+ }
+
+ } else {
+
+/* Solve A**H * x = b */
+
+ i__1 = jlast;
+ i__2 = jinc;
+ for (j = jfirst; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/*
+ Compute x(j) = b(j) - sum A(k,j)*x(k).
+ k<>j
+*/
+
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]),
+ dabs(r__2));
+ uscal.r = tscal, uscal.i = 0.f;
+ rec = 1.f / dmax(xmax,1.f);
+ if (cnorm[j] > (bignum - xj) * rec) {
+
+/* If x(j) could overflow, scale x by 1/(2*XMAX). */
+
+ rec *= .5f;
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ }
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > 1.f) {
+
+/*
+ Divide by A(j,j) when scaling x if A(j,j) > 1.
+
+ Computing MIN
+*/
+ r__1 = 1.f, r__2 = rec * tjj;
+ rec = dmin(r__1,r__2);
+ cladiv_(&q__1, &uscal, &tjjs);
+ uscal.r = q__1.r, uscal.i = q__1.i;
+ }
+ if (rec < 1.f) {
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+
+ csumj.r = 0.f, csumj.i = 0.f;
+ if (uscal.r == 1.f && uscal.i == 0.f) {
+
+/*
+ If the scaling needed for A in the dot product is 1,
+ call CDOTC to perform the dot product.
+*/
+
+ if (upper) {
+ i__3 = j - 1;
+ cdotc_(&q__1, &i__3, &a[j * a_dim1 + 1], &c__1, &x[1],
+ &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ } else if (j < *n) {
+ i__3 = *n - j;
+ cdotc_(&q__1, &i__3, &a[j + 1 + j * a_dim1], &c__1, &
+ x[j + 1], &c__1);
+ csumj.r = q__1.r, csumj.i = q__1.i;
+ }
+ } else {
+
+/* Otherwise, use in-line code for the dot product. */
+
+ if (upper) {
+ i__3 = j - 1;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ r_cnjg(&q__4, &a[i__ + j * a_dim1]);
+ q__3.r = q__4.r * uscal.r - q__4.i * uscal.i,
+ q__3.i = q__4.r * uscal.i + q__4.i *
+ uscal.r;
+ i__4 = i__;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i,
+ q__2.i = q__3.r * x[i__4].i + q__3.i * x[
+ i__4].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L160: */
+ }
+ } else if (j < *n) {
+ i__3 = *n;
+ for (i__ = j + 1; i__ <= i__3; ++i__) {
+ r_cnjg(&q__4, &a[i__ + j * a_dim1]);
+ q__3.r = q__4.r * uscal.r - q__4.i * uscal.i,
+ q__3.i = q__4.r * uscal.i + q__4.i *
+ uscal.r;
+ i__4 = i__;
+ q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i,
+ q__2.i = q__3.r * x[i__4].i + q__3.i * x[
+ i__4].r;
+ q__1.r = csumj.r + q__2.r, q__1.i = csumj.i +
+ q__2.i;
+ csumj.r = q__1.r, csumj.i = q__1.i;
+/* L170: */
+ }
+ }
+ }
+
+ q__1.r = tscal, q__1.i = 0.f;
+ if (uscal.r == q__1.r && uscal.i == q__1.i) {
+
+/*
+ Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j)
+                    was not used to scale the dot product.
+*/
+
+ i__3 = j;
+ i__4 = j;
+ q__1.r = x[i__4].r - csumj.r, q__1.i = x[i__4].i -
+ csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ i__3 = j;
+ xj = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 = r_imag(&x[j]
+ ), dabs(r__2));
+ if (nounit) {
+ r_cnjg(&q__2, &a[j + j * a_dim1]);
+ q__1.r = tscal * q__2.r, q__1.i = tscal * q__2.i;
+ tjjs.r = q__1.r, tjjs.i = q__1.i;
+ } else {
+ tjjs.r = tscal, tjjs.i = 0.f;
+ if (tscal == 1.f) {
+ goto L185;
+ }
+ }
+
+/* Compute x(j) = x(j) / A(j,j), scaling if necessary. */
+
+ tjj = (r__1 = tjjs.r, dabs(r__1)) + (r__2 = r_imag(&tjjs),
+ dabs(r__2));
+ if (tjj > smlnum) {
+
+/* abs(A(j,j)) > SMLNUM: */
+
+ if (tjj < 1.f) {
+ if (xj > tjj * bignum) {
+
+/* Scale X by 1/abs(x(j)). */
+
+ rec = 1.f / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else if (tjj > 0.f) {
+
+/* 0 < abs(A(j,j)) <= SMLNUM: */
+
+ if (xj > tjj * bignum) {
+
+/* Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. */
+
+ rec = tjj * bignum / xj;
+ csscal_(n, &rec, &x[1], &c__1);
+ *scale *= rec;
+ xmax *= rec;
+ }
+ i__3 = j;
+ cladiv_(&q__1, &x[j], &tjjs);
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ } else {
+
+/*
+ A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and
+ scale = 0 and compute a solution to A**H *x = 0.
+*/
+
+ i__3 = *n;
+ for (i__ = 1; i__ <= i__3; ++i__) {
+ i__4 = i__;
+ x[i__4].r = 0.f, x[i__4].i = 0.f;
+/* L180: */
+ }
+ i__3 = j;
+ x[i__3].r = 1.f, x[i__3].i = 0.f;
+ *scale = 0.f;
+ xmax = 0.f;
+ }
+L185:
+ ;
+ } else {
+
+/*
+ Compute x(j) := x(j) / A(j,j) - CSUMJ if the dot
+ product has already been divided by 1/A(j,j).
+*/
+
+ i__3 = j;
+ cladiv_(&q__2, &x[j], &tjjs);
+ q__1.r = q__2.r - csumj.r, q__1.i = q__2.i - csumj.i;
+ x[i__3].r = q__1.r, x[i__3].i = q__1.i;
+ }
+/* Computing MAX */
+ i__3 = j;
+ r__3 = xmax, r__4 = (r__1 = x[i__3].r, dabs(r__1)) + (r__2 =
+ r_imag(&x[j]), dabs(r__2));
+ xmax = dmax(r__3,r__4);
+/* L190: */
+ }
+ }
+ *scale /= tscal;
+ }
+
+/* Scale the column norms by 1/TSCAL for return. */
+
+ if (tscal != 1.f) {
+ r__1 = 1.f / tscal;
+ sscal_(n, &r__1, &cnorm[1], &c__1);
+ }
+
+ return 0;
+
+/* End of CLATRS */
+
+} /* clatrs_ */
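+
+/*
+    Editor's note: an illustrative sketch, not part of LAPACK, of a
+    simplified form of the growth estimate CLATRS computes above for the
+    non-unit, no-transpose case.  grow tracks 1/G(j): each column multiplies
+    it by tjj[j] / (tjj[j] + cnorm[j]), where tjj[j] = abs(A(j,j)) and
+    cnorm[j] is the off-diagonal column norm; if the final value times TSCAL
+    stays above smlnum, the unscaled CTRSV solve is safe.  Real arithmetic
+    only; names are hypothetical, and the xbnd/M(j) bound kept by the real
+    routine is omitted.
+*/
+static real growth_bound_sketch(integer n, const real *tjj, const real *cnorm,
+        real x0, real smlnum)
+{
+    integer j;
+    real grow = .5f / (x0 > smlnum ? x0 : smlnum);  /* 1/G(0) */
+    for (j = 0; j < n; ++j) {
+        if (grow <= smlnum) {
+            break;                  /* growth factor too small: must scale */
+        }
+        grow *= tjj[j] / (tjj[j] + cnorm[j]);
+    }
+    return grow;
+}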
+
+/* Subroutine */ int clauu2_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__;
+ static real aii;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+ , complex *, integer *, complex *, integer *, complex *, complex *
+ , integer *);
+ static logical upper;
+ extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+ csscal_(integer *, real *, complex *, integer *), xerbla_(char *,
+ integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLAUU2 computes the product U * U' or L' * L, where the triangular
+ factor U or L is stored in the upper or lower triangular part of
+ the array A.
+
+ If UPLO = 'U' or 'u' then the upper triangle of the result is stored,
+ overwriting the factor U in A.
+ If UPLO = 'L' or 'l' then the lower triangle of the result is stored,
+ overwriting the factor L in A.
+
+ This is the unblocked form of the algorithm, calling Level 2 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the triangular factor stored in the array A
+ is upper or lower triangular:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the triangular factor U or L. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the triangular factor U or L.
+ On exit, if UPLO = 'U', the upper triangle of A is
+ overwritten with the upper triangle of the product U * U';
+ if UPLO = 'L', the lower triangle of A is overwritten with
+ the lower triangle of the product L' * L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLAUU2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the product U * U'. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ aii = a[i__2].r;
+ if (i__ < *n) {
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = *n - i__;
+ cdotc_(&q__1, &i__3, &a[i__ + (i__ + 1) * a_dim1], lda, &a[
+ i__ + (i__ + 1) * a_dim1], lda);
+ r__1 = aii * aii + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ q__1.r = aii, q__1.i = 0.f;
+ cgemv_("No transpose", &i__2, &i__3, &c_b56, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ q__1, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ clacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ } else {
+ csscal_(&i__, &aii, &a[i__ * a_dim1 + 1], &c__1);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the product L' * L. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ aii = a[i__2].r;
+ if (i__ < *n) {
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = *n - i__;
+ cdotc_(&q__1, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &a[
+ i__ + 1 + i__ * a_dim1], &c__1);
+ r__1 = aii * aii + q__1.r;
+ a[i__2].r = r__1, a[i__2].i = 0.f;
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[i__ + a_dim1], lda);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ q__1.r = aii, q__1.i = 0.f;
+ cgemv_("Conjugate transpose", &i__2, &i__3, &c_b56, &a[i__ +
+ 1 + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ q__1, &a[i__ + a_dim1], lda);
+ i__2 = i__ - 1;
+ clacgv_(&i__2, &a[i__ + a_dim1], lda);
+ } else {
+ csscal_(&i__, &aii, &a[i__ + a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of CLAUU2 */
+
+} /* clauu2_ */
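+/*
+   Added reference note for the UPLO = 'U' loop above (commentary only):
+   step i forms row/column i of U * U' in place, with ' the conjugate
+   transpose and U(i,i) taken as real,
+
+       A(i,i)     := U(i,i)**2 + U(i,i+1:n) * U(i,i+1:n)'
+       A(1:i-1,i) := U(1:i-1,i)*U(i,i) + U(1:i-1,i+1:n) * U(i,i+1:n)'
+
+   cdotc_ supplies the squared row norm and cgemv_ the column update.
+*/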
+
+/* Subroutine */ int clauum_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, ib, nb;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *), cherk_(char *,
+ char *, integer *, integer *, real *, complex *, integer *, real *
+ , complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ static logical upper;
+ extern /* Subroutine */ int clauu2_(char *, integer *, complex *, integer
+ *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CLAUUM computes the product U * U' or L' * L, where the triangular
+ factor U or L is stored in the upper or lower triangular part of
+ the array A.
+
+ If UPLO = 'U' or 'u' then the upper triangle of the result is stored,
+ overwriting the factor U in A.
+ If UPLO = 'L' or 'l' then the lower triangle of the result is stored,
+ overwriting the factor L in A.
+
+ This is the blocked form of the algorithm, calling Level 3 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the triangular factor stored in the array A
+ is upper or lower triangular:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the triangular factor U or L. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the triangular factor U or L.
+ On exit, if UPLO = 'U', the upper triangle of A is
+ overwritten with the upper triangle of the product U * U';
+ if UPLO = 'L', the lower triangle of A is overwritten with
+ the lower triangle of the product L' * L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CLAUUM", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "CLAUUM", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+
+ if ((nb <= 1) || (nb >= *n)) {
+
+/* Use unblocked code */
+
+ clauu2_(uplo, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute the product U * U'. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - i__ + 1;
+ ib = min(i__3,i__4);
+ i__3 = i__ - 1;
+ ctrmm_("Right", "Upper", "Conjugate transpose", "Non-unit", &
+ i__3, &ib, &c_b56, &a[i__ + i__ * a_dim1], lda, &a[
+ i__ * a_dim1 + 1], lda);
+ clauu2_("Upper", &ib, &a[i__ + i__ * a_dim1], lda, info);
+ if (i__ + ib <= *n) {
+ i__3 = i__ - 1;
+ i__4 = *n - i__ - ib + 1;
+ cgemm_("No transpose", "Conjugate transpose", &i__3, &ib,
+ &i__4, &c_b56, &a[(i__ + ib) * a_dim1 + 1], lda, &
+ a[i__ + (i__ + ib) * a_dim1], lda, &c_b56, &a[i__
+ * a_dim1 + 1], lda);
+ i__3 = *n - i__ - ib + 1;
+ cherk_("Upper", "No transpose", &ib, &i__3, &c_b1011, &a[
+ i__ + (i__ + ib) * a_dim1], lda, &c_b1011, &a[i__
+ + i__ * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the product L' * L. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - i__ + 1;
+ ib = min(i__3,i__4);
+ i__3 = i__ - 1;
+ ctrmm_("Left", "Lower", "Conjugate transpose", "Non-unit", &
+ ib, &i__3, &c_b56, &a[i__ + i__ * a_dim1], lda, &a[
+ i__ + a_dim1], lda);
+ clauu2_("Lower", &ib, &a[i__ + i__ * a_dim1], lda, info);
+ if (i__ + ib <= *n) {
+ i__3 = i__ - 1;
+ i__4 = *n - i__ - ib + 1;
+ cgemm_("Conjugate transpose", "No transpose", &ib, &i__3,
+ &i__4, &c_b56, &a[i__ + ib + i__ * a_dim1], lda, &
+ a[i__ + ib + a_dim1], lda, &c_b56, &a[i__ +
+ a_dim1], lda);
+ i__3 = *n - i__ - ib + 1;
+ cherk_("Lower", "Conjugate transpose", &ib, &i__3, &
+ c_b1011, &a[i__ + ib + i__ * a_dim1], lda, &
+ c_b1011, &a[i__ + i__ * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CLAUUM */
+
+} /* clauum_ */
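+/*
+   Added usage sketch (compiled out; helper name is hypothetical):
+   CLAUUM is the blocked driver for the CLAUU2 kernel above; both
+   overwrite the stored triangle of A with U*U' or L'*L in place.
+*/
+#if 0
+static integer example_clauum(integer n, complex *a, integer lda)
+{
+    integer info;
+    clauum_("Upper", &n, a, &lda, &info);  /* upper triangle := U * U' */
+    return info;
+}
+#endif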
+
+/* Subroutine */ int cpotf2_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer j;
+ static real ajj;
+ extern /* Complex */ VOID cdotc_(complex *, integer *, complex *, integer
+ *, complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+ , complex *, integer *, complex *, integer *, complex *, complex *
+ , integer *);
+ static logical upper;
+ extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+ csscal_(integer *, real *, complex *, integer *), xerbla_(char *,
+ integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CPOTF2 computes the Cholesky factorization of a complex Hermitian
+ positive definite matrix A.
+
+ The factorization has the form
+ A = U' * U , if UPLO = 'U', or
+ A = L * L', if UPLO = 'L',
+ where U is an upper triangular matrix and L is lower triangular.
+
+ This is the unblocked version of the algorithm, calling Level 2 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the upper or lower triangular part of the
+ Hermitian matrix A is stored.
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the Hermitian matrix A. If UPLO = 'U', the leading
+ n by n upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n by n lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+
+ On exit, if INFO = 0, the factor U or L from the Cholesky
+ factorization A = U'*U or A = L*L'.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+ > 0: if INFO = k, the leading minor of order k is not
+ positive definite, and the factorization could not be
+ completed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CPOTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j + j * a_dim1;
+ r__1 = a[i__2].r;
+ i__3 = j - 1;
+ cdotc_(&q__2, &i__3, &a[j * a_dim1 + 1], &c__1, &a[j * a_dim1 + 1]
+ , &c__1);
+ q__1.r = r__1 - q__2.r, q__1.i = -q__2.i;
+ ajj = q__1.r;
+ if (ajj <= 0.f) {
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.f;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.f;
+
+/* Compute elements J+1:N of row J. */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ clacgv_(&i__2, &a[j * a_dim1 + 1], &c__1);
+ i__2 = j - 1;
+ i__3 = *n - j;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("Transpose", &i__2, &i__3, &q__1, &a[(j + 1) * a_dim1
+ + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b56, &a[j + (
+ j + 1) * a_dim1], lda);
+ i__2 = j - 1;
+ clacgv_(&i__2, &a[j * a_dim1 + 1], &c__1);
+ i__2 = *n - j;
+ r__1 = 1.f / ajj;
+ csscal_(&i__2, &r__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j + j * a_dim1;
+ r__1 = a[i__2].r;
+ i__3 = j - 1;
+ cdotc_(&q__2, &i__3, &a[j + a_dim1], lda, &a[j + a_dim1], lda);
+ q__1.r = r__1 - q__2.r, q__1.i = -q__2.i;
+ ajj = q__1.r;
+ if (ajj <= 0.f) {
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.f;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ i__2 = j + j * a_dim1;
+ a[i__2].r = ajj, a[i__2].i = 0.f;
+
+/* Compute elements J+1:N of column J. */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ clacgv_(&i__2, &a[j + a_dim1], lda);
+ i__2 = *n - j;
+ i__3 = j - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemv_("No transpose", &i__2, &i__3, &q__1, &a[j + 1 + a_dim1]
+ , lda, &a[j + a_dim1], lda, &c_b56, &a[j + 1 + j *
+ a_dim1], &c__1);
+ i__2 = j - 1;
+ clacgv_(&i__2, &a[j + a_dim1], lda);
+ i__2 = *n - j;
+ r__1 = 1.f / ajj;
+ csscal_(&i__2, &r__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ goto L40;
+
+L30:
+ *info = j;
+
+L40:
+ return 0;
+
+/* End of CPOTF2 */
+
+} /* cpotf2_ */
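+/*
+   Added reference note (commentary only): for UPLO = 'U' the loop
+   above implements the column recurrence of the Cholesky factorization
+   A = U'*U,
+
+       U(j,j)     = sqrt( A(j,j) - U(1:j-1,j)' * U(1:j-1,j) )
+       U(j,j+1:n) = ( A(j,j+1:n) - U(1:j-1,j)' * U(1:j-1,j+1:n) ) / U(j,j)
+
+   with cdotc_ forming the inner product, cgemv_ the row update, and
+   csscal_ the division by the real diagonal entry U(j,j).
+*/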
+
+/* Subroutine */ int cpotrf_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ complex q__1;
+
+ /* Local variables */
+ static integer j, jb, nb;
+ extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *,
+ integer *, complex *, complex *, integer *, complex *, integer *,
+ complex *, complex *, integer *), cherk_(char *,
+ char *, integer *, integer *, real *, complex *, integer *, real *
+ , complex *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ static logical upper;
+ extern /* Subroutine */ int cpotf2_(char *, integer *, complex *, integer
+ *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CPOTRF computes the Cholesky factorization of a complex Hermitian
+ positive definite matrix A.
+
+ The factorization has the form
+ A = U**H * U, if UPLO = 'U', or
+ A = L * L**H, if UPLO = 'L',
+ where U is an upper triangular matrix and L is lower triangular.
+
+ This is the block version of the algorithm, calling Level 3 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the Hermitian matrix A. If UPLO = 'U', the leading
+ N-by-N upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading N-by-N lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+
+ On exit, if INFO = 0, the factor U or L from the Cholesky
+ factorization A = U**H*U or A = L*L**H.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, the leading minor of order i is not
+ positive definite, and the factorization could not be
+ completed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CPOTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "CPOTRF", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ if ((nb <= 1) || (nb >= *n)) {
+
+/* Use unblocked code. */
+
+ cpotf2_(uplo, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code. */
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/*
+ Update and factorize the current diagonal block and test
+ for non-positive-definiteness.
+
+ Computing MIN
+*/
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ cherk_("Upper", "Conjugate transpose", &jb, &i__3, &c_b1290, &
+ a[j * a_dim1 + 1], lda, &c_b1011, &a[j + j * a_dim1],
+ lda);
+ cpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block row. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("Conjugate transpose", "No transpose", &jb, &i__3,
+ &i__4, &q__1, &a[j * a_dim1 + 1], lda, &a[(j + jb)
+ * a_dim1 + 1], lda, &c_b56, &a[j + (j + jb) *
+ a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ ctrsm_("Left", "Upper", "Conjugate transpose", "Non-unit",
+ &jb, &i__3, &c_b56, &a[j + j * a_dim1], lda, &a[
+ j + (j + jb) * a_dim1], lda);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/*
+ Update and factorize the current diagonal block and test
+ for non-positive-definiteness.
+
+ Computing MIN
+*/
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ cherk_("Lower", "No transpose", &jb, &i__3, &c_b1290, &a[j +
+ a_dim1], lda, &c_b1011, &a[j + j * a_dim1], lda);
+ cpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block column. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ cgemm_("No transpose", "Conjugate transpose", &i__3, &jb,
+ &i__4, &q__1, &a[j + jb + a_dim1], lda, &a[j +
+ a_dim1], lda, &c_b56, &a[j + jb + j * a_dim1],
+ lda);
+ i__3 = *n - j - jb + 1;
+ ctrsm_("Right", "Lower", "Conjugate transpose", "Non-unit"
+ , &i__3, &jb, &c_b56, &a[j + j * a_dim1], lda, &a[
+ j + jb + j * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+ goto L40;
+
+L30:
+ *info = *info + j - 1;
+
+L40:
+ return 0;
+
+/* End of CPOTRF */
+
+} /* cpotrf_ */
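+/*
+   Added usage sketch (compiled out; helper name is hypothetical):
+   factor a Hermitian positive definite matrix in place with CPOTRF.
+   INFO > 0 reports the order of the first non-positive leading minor.
+*/
+#if 0
+static integer example_cpotrf(integer n, complex *a, integer lda)
+{
+    integer info;
+    cpotrf_("Upper", &n, a, &lda, &info);  /* upper triangle := U */
+    return info;                           /* 0 on success */
+}
+#endif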
+
+/* Subroutine */ int cpotri_(char *uplo, integer *n, complex *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), clauum_(
+ char *, integer *, complex *, integer *, integer *),
+ ctrtri_(char *, char *, integer *, complex *, integer *, integer *
+ );
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ CPOTRI computes the inverse of a complex Hermitian positive definite
+ matrix A using the Cholesky factorization A = U**H*U or A = L*L**H
+ computed by CPOTRF.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the triangular factor U or L from the Cholesky
+ factorization A = U**H*U or A = L*L**H, as computed by
+ CPOTRF.
+ On exit, the upper or lower triangle of the (Hermitian)
+ inverse of A, overwriting the input factor U or L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, the (i,i) element of the factor U or L is
+ zero, and the inverse could not be computed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CPOTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+ ctrtri_(uplo, "Non-unit", n, &a[a_offset], lda, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+/* Form inv(U)*inv(U)' or inv(L)'*inv(L). */
+
+ clauum_(uplo, n, &a[a_offset], lda, info);
+
+ return 0;
+
+/* End of CPOTRI */
+
+} /* cpotri_ */
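+/*
+   Added usage sketch (compiled out; helper name is hypothetical): the
+   usual pairing is CPOTRF followed by CPOTRI, which replaces the stored
+   triangle of the factor with the same triangle of inv(A).
+*/
+#if 0
+static integer example_cpo_inverse(integer n, complex *a, integer lda)
+{
+    integer info;
+    cpotrf_("Lower", &n, a, &lda, &info);       /* A = L * L**H */
+    if (info == 0) {
+        cpotri_("Lower", &n, a, &lda, &info);   /* lower tri := inv(A) */
+    }
+    return info;
+}
+#endif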
+
+/* Subroutine */ int cpotrs_(char *uplo, integer *n, integer *nrhs, complex *
+ a, integer *lda, complex *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *);
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CPOTRS solves a system of linear equations A*X = B with a Hermitian
+ positive definite matrix A using the Cholesky factorization
+ A = U**H*U or A = L*L**H computed by CPOTRF.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrix B. NRHS >= 0.
+
+ A (input) COMPLEX array, dimension (LDA,N)
+ The triangular factor U or L from the Cholesky factorization
+ A = U**H*U or A = L*L**H, as computed by CPOTRF.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ B (input/output) COMPLEX array, dimension (LDB,NRHS)
+ On entry, the right hand side matrix B.
+ On exit, the solution matrix X.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CPOTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*n == 0) || (*nrhs == 0)) {
+ return 0;
+ }
+
+ if (upper) {
+
+/*
+ Solve A*X = B where A = U'*U.
+
+ Solve U'*X = B, overwriting B with X.
+*/
+
+ ctrsm_("Left", "Upper", "Conjugate transpose", "Non-unit", n, nrhs, &
+ c_b56, &a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b56, &
+ a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/*
+ Solve A*X = B where A = L*L'.
+
+ Solve L*X = B, overwriting B with X.
+*/
+
+ ctrsm_("Left", "Lower", "No transpose", "Non-unit", n, nrhs, &c_b56, &
+ a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ ctrsm_("Left", "Lower", "Conjugate transpose", "Non-unit", n, nrhs, &
+ c_b56, &a[a_offset], lda, &b[b_offset], ldb);
+ }
+
+ return 0;
+
+/* End of CPOTRS */
+
+} /* cpotrs_ */
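+/*
+   Added end-to-end sketch (compiled out; helper name is hypothetical):
+   solve A*X = B for Hermitian positive definite A by factoring once
+   with CPOTRF and back-solving with CPOTRS, which performs the two
+   ctrsm_ sweeps shown above.
+*/
+#if 0
+static integer example_cpo_solve(integer n, integer nrhs, complex *a,
+                                 integer lda, complex *b, integer ldb)
+{
+    integer info;
+    cpotrf_("Upper", &n, a, &lda, &info);
+    if (info == 0) {
+        cpotrs_("Upper", &n, &nrhs, a, &lda, b, &ldb, &info);  /* B := X */
+    }
+    return info;
+}
+#endif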
+
+/* Subroutine */ int csrot_(integer *n, complex *cx, integer *incx, complex *
+ cy, integer *incy, real *c__, real *s)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3, i__4;
+ complex q__1, q__2, q__3;
+
+ /* Local variables */
+ static integer i__, ix, iy;
+ static complex ctemp;
+
+
+/*
+ applies a plane rotation, where the cos and sin (c and s) are real
+ and the vectors cx and cy are complex.
+ jack dongarra, linpack, 3/11/78.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --cy;
+ --cx;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+ if (*incx == 1 && *incy == 1) {
+ goto L20;
+ }
+
+/*
+ code for unequal increments or equal increments not equal
+ to 1
+*/
+
+ ix = 1;
+ iy = 1;
+ if (*incx < 0) {
+ ix = (-(*n) + 1) * *incx + 1;
+ }
+ if (*incy < 0) {
+ iy = (-(*n) + 1) * *incy + 1;
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = ix;
+ q__2.r = *c__ * cx[i__2].r, q__2.i = *c__ * cx[i__2].i;
+ i__3 = iy;
+ q__3.r = *s * cy[i__3].r, q__3.i = *s * cy[i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ i__2 = iy;
+ i__3 = iy;
+ q__2.r = *c__ * cy[i__3].r, q__2.i = *c__ * cy[i__3].i;
+ i__4 = ix;
+ q__3.r = *s * cx[i__4].r, q__3.i = *s * cx[i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+ i__2 = ix;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+ ix += *incx;
+ iy += *incy;
+/* L10: */
+ }
+ return 0;
+
+/* code for both increments equal to 1 */
+
+L20:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__;
+ q__2.r = *c__ * cx[i__2].r, q__2.i = *c__ * cx[i__2].i;
+ i__3 = i__;
+ q__3.r = *s * cy[i__3].r, q__3.i = *s * cy[i__3].i;
+ q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
+ ctemp.r = q__1.r, ctemp.i = q__1.i;
+ i__2 = i__;
+ i__3 = i__;
+ q__2.r = *c__ * cy[i__3].r, q__2.i = *c__ * cy[i__3].i;
+ i__4 = i__;
+ q__3.r = *s * cx[i__4].r, q__3.i = *s * cx[i__4].i;
+ q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
+ cy[i__2].r = q__1.r, cy[i__2].i = q__1.i;
+ i__2 = i__;
+ cx[i__2].r = ctemp.r, cx[i__2].i = ctemp.i;
+/* L30: */
+ }
+ return 0;
+} /* csrot_ */
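+/*
+   Added usage sketch (compiled out; helper name is hypothetical):
+   CSROT applies the real plane rotation
+       [ cx(i) ]    [  c  s ] [ cx(i) ]
+       [ cy(i) ] := [ -s  c ] [ cy(i) ]
+   elementwise, as in the unit-stride loop above.
+*/
+#if 0
+static void example_csrot(integer n, complex *cx, complex *cy,
+                          real c, real s)
+{
+    integer one = 1;
+    csrot_(&n, cx, &one, cy, &one, &c, &s);
+}
+#endif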
+
+/* Subroutine */ int cstedc_(char *compz, integer *n, real *d__, real *e,
+ complex *z__, integer *ldz, complex *work, integer *lwork, real *
+ rwork, integer *lrwork, integer *iwork, integer *liwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j, k, m;
+ static real p;
+ static integer ii, ll, end, lgn;
+ static real eps, tiny;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ static integer lwmin;
+ extern /* Subroutine */ int claed0_(integer *, integer *, real *, real *,
+ complex *, integer *, complex *, integer *, real *, integer *,
+ integer *);
+ static integer start;
+ extern /* Subroutine */ int clacrm_(integer *, integer *, complex *,
+ integer *, real *, integer *, complex *, integer *, real *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex
+ *, integer *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), sstedc_(char *, integer *, real *, real *, real *,
+ integer *, real *, integer *, integer *, integer *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *);
+ static integer liwmin, icompz;
+ extern /* Subroutine */ int csteqr_(char *, integer *, real *, real *,
+ complex *, integer *, real *, integer *);
+ static real orgnrm;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+ static integer lrwmin;
+ static logical lquery;
+ static integer smlsiz;
+ extern /* Subroutine */ int ssteqr_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CSTEDC computes all eigenvalues and, optionally, eigenvectors of a
+ symmetric tridiagonal matrix using the divide and conquer method.
+ The eigenvectors of a full or band complex Hermitian matrix can also
+ be found if CHETRD or CHPTRD or CHBTRD has been used to reduce this
+ matrix to tridiagonal form.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none. See SLAED3 for details.
+
+ Arguments
+ =========
+
+ COMPZ (input) CHARACTER*1
+ = 'N': Compute eigenvalues only.
+ = 'I': Compute eigenvectors of tridiagonal matrix also.
+ = 'V': Compute eigenvectors of original Hermitian matrix
+ also. On entry, Z contains the unitary matrix used
+ to reduce the original matrix to tridiagonal form.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the diagonal elements of the tridiagonal matrix.
+ On exit, if INFO = 0, the eigenvalues in ascending order.
+
+ E (input/output) REAL array, dimension (N-1)
+ On entry, the subdiagonal elements of the tridiagonal matrix.
+ On exit, E has been destroyed.
+
+ Z (input/output) COMPLEX array, dimension (LDZ,N)
+ On entry, if COMPZ = 'V', then Z contains the unitary
+ matrix used in the reduction to tridiagonal form.
+ On exit, if INFO = 0, then if COMPZ = 'V', Z contains the
+ orthonormal eigenvectors of the original Hermitian matrix,
+ and if COMPZ = 'I', Z contains the orthonormal eigenvectors
+ of the symmetric tridiagonal matrix.
+ If COMPZ = 'N', then Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= 1.
+ If eigenvectors are desired, then LDZ >= max(1,N).
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If COMPZ = 'N' or 'I', or N <= 1, LWORK must be at least 1.
+ If COMPZ = 'V' and N > 1, LWORK must be at least N*N.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ RWORK (workspace/output) REAL array,
+ dimension (LRWORK)
+ On exit, if INFO = 0, RWORK(1) returns the optimal LRWORK.
+
+ LRWORK (input) INTEGER
+ The dimension of the array RWORK.
+ If COMPZ = 'N' or N <= 1, LRWORK must be at least 1.
+ If COMPZ = 'V' and N > 1, LRWORK must be at least
+ 1 + 3*N + 2*N*lg N + 3*N**2 ,
+ where lg( N ) = smallest integer k such
+ that 2**k >= N.
+ If COMPZ = 'I' and N > 1, LRWORK must be at least
+ 1 + 4*N + 2*N**2 .
+
+ If LRWORK = -1, then a workspace query is assumed; the
+ routine only calculates the optimal size of the RWORK array,
+ returns this value as the first entry of the RWORK array, and
+ no error message related to LRWORK is issued by XERBLA.
+
+ IWORK (workspace/output) INTEGER array, dimension (LIWORK)
+ On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
+
+ LIWORK (input) INTEGER
+ The dimension of the array IWORK.
+ If COMPZ = 'N' or N <= 1, LIWORK must be at least 1.
+          If COMPZ = 'V' and N > 1, LIWORK must be at least
+ 6 + 6*N + 5*N*lg N.
+          If COMPZ = 'I' and N > 1, LIWORK must be at least
+ 3 + 5*N .
+
+ If LIWORK = -1, then a workspace query is assumed; the
+ routine only calculates the optimal size of the IWORK array,
+ returns this value as the first entry of the IWORK array, and
+ no error message related to LIWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute an eigenvalue while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through mod(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --rwork;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = ((*lwork == -1) || (*lrwork == -1)) || (*liwork == -1);
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if ((*n <= 1) || (icompz <= 0)) {
+ lwmin = 1;
+ liwmin = 1;
+ lrwmin = 1;
+ } else {
+ lgn = (integer) (log((real) (*n)) / log(2.f));
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (icompz == 1) {
+ lwmin = *n * *n;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = *n * 3 + 1 + ((*n) << (1)) * lgn + i__1 * i__1 * 3;
+ liwmin = *n * 6 + 6 + *n * 5 * lgn;
+ } else if (icompz == 2) {
+ lwmin = 1;
+/* Computing 2nd power */
+ i__1 = *n;
+ lrwmin = ((*n) << (2)) + 1 + ((i__1 * i__1) << (1));
+ liwmin = *n * 5 + 3;
+ }
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if ((*ldz < 1) || (icompz > 0 && *ldz < max(1,*n))) {
+ *info = -6;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -8;
+ } else if (*lrwork < lrwmin && ! lquery) {
+ *info = -10;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+ rwork[1] = (real) lrwmin;
+ iwork[1] = liwmin;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSTEDC", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ if (icompz != 0) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+ }
+ return 0;
+ }
+
+ smlsiz = ilaenv_(&c__9, "CSTEDC", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+
+/*
+ If the following conditional clause is removed, then the routine
+ will use the Divide and Conquer routine to compute only the
+ eigenvalues, which requires (3N + 3N**2) real workspace and
+ (2 + 5N + 2N lg(N)) integer workspace.
+ Since on many architectures SSTERF is much faster than any other
+ algorithm for finding eigenvalues only, it is used here
+ as the default.
+
+ If COMPZ = 'N', use SSTERF to compute the eigenvalues.
+*/
+
+ if (icompz == 0) {
+ ssterf_(n, &d__[1], &e[1], info);
+ return 0;
+ }
+
+/*
+ If N is smaller than the minimum divide size (SMLSIZ+1), then
+ solve the problem with another solver.
+*/
+
+ if (*n <= smlsiz) {
+ if (icompz == 0) {
+ ssterf_(n, &d__[1], &e[1], info);
+ return 0;
+ } else if (icompz == 2) {
+ csteqr_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &rwork[1],
+ info);
+ return 0;
+ } else {
+ csteqr_("V", n, &d__[1], &e[1], &z__[z_offset], ldz, &rwork[1],
+ info);
+ return 0;
+ }
+ }
+
+/* If COMPZ = 'I', we simply call SSTEDC instead. */
+
+ if (icompz == 2) {
+ slaset_("Full", n, n, &c_b320, &c_b1011, &rwork[1], n);
+ ll = *n * *n + 1;
+ i__1 = *lrwork - ll + 1;
+ sstedc_("I", n, &d__[1], &e[1], &rwork[1], n, &rwork[ll], &i__1, &
+ iwork[1], liwork, info);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * z_dim1;
+ i__4 = (j - 1) * *n + i__;
+ z__[i__3].r = rwork[i__4], z__[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ return 0;
+ }
+
+/*
+ From now on, only option left to be handled is COMPZ = 'V',
+ i.e. ICOMPZ = 1.
+
+ Scale.
+*/
+
+ orgnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.f) {
+ return 0;
+ }
+
+ eps = slamch_("Epsilon");
+
+ start = 1;
+
+/* while ( START <= N ) */
+
+L30:
+ if (start <= *n) {
+
+/*
+ Let END be the position of the next subdiagonal entry such that
+ E( END ) <= TINY or END = N if no such subdiagonal exists. The
+ matrix identified by the elements between START and END
+ constitutes an independent sub-problem.
+*/
+
+ end = start;
+L40:
+ if (end < *n) {
+ tiny = eps * sqrt((r__1 = d__[end], dabs(r__1))) * sqrt((r__2 =
+ d__[end + 1], dabs(r__2)));
+ if ((r__1 = e[end], dabs(r__1)) > tiny) {
+ ++end;
+ goto L40;
+ }
+ }
+
+/* (Sub) Problem determined. Compute its size and solve it. */
+
+ m = end - start + 1;
+ if (m > smlsiz) {
+ *info = smlsiz;
+
+/* Scale. */
+
+ orgnrm = slanst_("M", &m, &d__[start], &e[start]);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, &m, &c__1, &d__[
+ start], &m, info);
+ i__1 = m - 1;
+ i__2 = m - 1;
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, &i__1, &c__1, &e[
+ start], &i__2, info);
+
+ claed0_(n, &m, &d__[start], &e[start], &z__[start * z_dim1 + 1],
+ ldz, &work[1], n, &rwork[1], &iwork[1], info);
+ if (*info > 0) {
+ *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info % (m
+ + 1) + start - 1;
+ return 0;
+ }
+
+/* Scale back. */
+
+ slascl_("G", &c__0, &c__0, &c_b1011, &orgnrm, &m, &c__1, &d__[
+ start], &m, info);
+
+ } else {
+ ssteqr_("I", &m, &d__[start], &e[start], &rwork[1], &m, &rwork[m *
+ m + 1], info);
+ clacrm_(n, &m, &z__[start * z_dim1 + 1], ldz, &rwork[1], &m, &
+ work[1], n, &rwork[m * m + 1]);
+ clacpy_("A", n, &m, &work[1], n, &z__[start * z_dim1 + 1], ldz);
+ if (*info > 0) {
+ *info = start * (*n + 1) + end;
+ return 0;
+ }
+ }
+
+ start = end + 1;
+ goto L30;
+ }
+
+/*
+ endwhile
+
+ If the problem split any number of times, then the eigenvalues
+ will not be properly ordered. Here we permute the eigenvalues
+ (and the associated eigenvectors) into ascending order.
+*/
+
+ if (m != *n) {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L50: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
+ &c__1);
+ }
+/* L60: */
+ }
+ }
+
+ work[1].r = (real) lwmin, work[1].i = 0.f;
+ rwork[1] = (real) lrwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of CSTEDC */
+
+} /* cstedc_ */
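+/*
+   Added usage sketch (compiled out; helper name is hypothetical): the
+   LWORK = LRWORK = LIWORK = -1 workspace query documented above returns
+   the required sizes in WORK(1), RWORK(1) and IWORK(1) without
+   computing anything.
+*/
+#if 0
+static void example_cstedc_query(integer n, real *d, real *e,
+                                 complex *z, integer ldz)
+{
+    complex wq;
+    real rwq;
+    integer iwq, lm1 = -1, info;
+    cstedc_("I", &n, d, e, z, &ldz, &wq, &lm1, &rwq, &lm1, &iwq, &lm1,
+            &info);
+    /* wq.r, rwq and iwq now hold the minimal LWORK, LRWORK, LIWORK */
+}
+#endif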
+
+/* Subroutine */ int csteqr_(char *compz, integer *n, real *d__, real *e,
+ complex *z__, integer *ldz, real *work, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ static real b, c__, f, g;
+ static integer i__, j, k, l, m;
+ static real p, r__, s;
+ static integer l1, ii, mm, lm1, mm1, nm1;
+ static real rt1, rt2, eps;
+ static integer lsv;
+ static real tst, eps2;
+ static integer lend, jtot;
+ extern /* Subroutine */ int slae2_(real *, real *, real *, real *, real *)
+ ;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int clasr_(char *, char *, char *, integer *,
+ integer *, real *, real *, complex *, integer *);
+ static real anorm;
+ extern /* Subroutine */ int cswap_(integer *, complex *, integer *,
+ complex *, integer *);
+ static integer lendm1, lendp1;
+ extern /* Subroutine */ int slaev2_(real *, real *, real *, real *, real *
+ , real *, real *);
+ extern doublereal slapy2_(real *, real *);
+ static integer iscale;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
+ *, complex *, complex *, integer *);
+ static real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static real safmax;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ static integer lendsv;
+ extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
+ );
+ static real ssfmin;
+ static integer nmaxit, icompz;
+ static real ssfmax;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CSTEQR computes all eigenvalues and, optionally, eigenvectors of a
+ symmetric tridiagonal matrix using the implicit QL or QR method.
+ The eigenvectors of a full or band complex Hermitian matrix can also
+ be found if CHETRD or CHPTRD or CHBTRD has been used to reduce this
+ matrix to tridiagonal form.
+
+ Arguments
+ =========
+
+ COMPZ (input) CHARACTER*1
+ = 'N': Compute eigenvalues only.
+ = 'V': Compute eigenvalues and eigenvectors of the original
+ Hermitian matrix. On entry, Z must contain the
+ unitary matrix used to reduce the original matrix
+ to tridiagonal form.
+ = 'I': Compute eigenvalues and eigenvectors of the
+ tridiagonal matrix. Z is initialized to the identity
+ matrix.
+
+ N (input) INTEGER
+ The order of the matrix. N >= 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the diagonal elements of the tridiagonal matrix.
+ On exit, if INFO = 0, the eigenvalues in ascending order.
+
+ E (input/output) REAL array, dimension (N-1)
+ On entry, the (n-1) subdiagonal elements of the tridiagonal
+ matrix.
+ On exit, E has been destroyed.
+
+ Z (input/output) COMPLEX array, dimension (LDZ, N)
+ On entry, if COMPZ = 'V', then Z contains the unitary
+ matrix used in the reduction to tridiagonal form.
+ On exit, if INFO = 0, then if COMPZ = 'V', Z contains the
+ orthonormal eigenvectors of the original Hermitian matrix,
+ and if COMPZ = 'I', Z contains the orthonormal eigenvectors
+ of the symmetric tridiagonal matrix.
+ If COMPZ = 'N', then Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= 1, and if
+ eigenvectors are desired, then LDZ >= max(1,N).
+
+ WORK (workspace) REAL array, dimension (max(1,2*N-2))
+ If COMPZ = 'N', then WORK is not referenced.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: the algorithm has failed to find all the eigenvalues in
+ a total of 30*N iterations; if INFO = i, then i
+ elements of E have not converged to zero; on exit, D
+ and E contain the elements of a symmetric tridiagonal
+ matrix which is unitarily similar to the original
+ matrix.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if ((*ldz < 1) || (icompz > 0 && *ldz < max(1,*n))) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CSTEQR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (icompz == 2) {
+ i__1 = z_dim1 + 1;
+ z__[i__1].r = 1.f, z__[i__1].i = 0.f;
+ }
+ return 0;
+ }
+
+/* Determine the unit roundoff and over/underflow thresholds. */
+
+ eps = slamch_("E");
+/* Computing 2nd power */
+ r__1 = eps;
+ eps2 = r__1 * r__1;
+ safmin = slamch_("S");
+ safmax = 1.f / safmin;
+ ssfmax = sqrt(safmax) / 3.f;
+ ssfmin = sqrt(safmin) / eps2;
+
+/*
+ Compute the eigenvalues and eigenvectors of the tridiagonal
+ matrix.
+*/
+
+ if (icompz == 2) {
+ claset_("Full", n, n, &c_b55, &c_b56, &z__[z_offset], ldz);
+ }
+
+ nmaxit = *n * 30;
+ jtot = 0;
+
+/*
+ Determine where the matrix splits and choose QL or QR iteration
+ for each block, according to whether top or bottom diagonal
+ element is smaller.
+*/
+
+ l1 = 1;
+ nm1 = *n - 1;
+
+L10:
+ if (l1 > *n) {
+ goto L160;
+ }
+ if (l1 > 1) {
+ e[l1 - 1] = 0.f;
+ }
+ if (l1 <= nm1) {
+ i__1 = nm1;
+ for (m = l1; m <= i__1; ++m) {
+ tst = (r__1 = e[m], dabs(r__1));
+ if (tst == 0.f) {
+ goto L30;
+ }
+ if (tst <= sqrt((r__1 = d__[m], dabs(r__1))) * sqrt((r__2 = d__[m
+ + 1], dabs(r__2))) * eps) {
+ e[m] = 0.f;
+ goto L30;
+ }
+/* L20: */
+ }
+ }
+ m = *n;
+
+L30:
+ l = l1;
+ lsv = l;
+ lend = m;
+ lendsv = lend;
+ l1 = m + 1;
+ if (lend == l) {
+ goto L10;
+ }
+
+/* Scale submatrix in rows and columns L to LEND */
+
+ i__1 = lend - l + 1;
+ anorm = slanst_("I", &i__1, &d__[l], &e[l]);
+ iscale = 0;
+ if (anorm == 0.f) {
+ goto L10;
+ }
+ if (anorm > ssfmax) {
+ iscale = 1;
+ i__1 = lend - l + 1;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+ info);
+ } else if (anorm < ssfmin) {
+ iscale = 2;
+ i__1 = lend - l + 1;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+ info);
+ }
+
+/* Choose between QL and QR iteration */
+
+ if ((r__1 = d__[lend], dabs(r__1)) < (r__2 = d__[l], dabs(r__2))) {
+ lend = lsv;
+ l = lendsv;
+ }
+
+ if (lend > l) {
+
+/*
+ QL Iteration
+
+ Look for small subdiagonal element.
+*/
+
+L40:
+ if (l != lend) {
+ lendm1 = lend - 1;
+ i__1 = lendm1;
+ for (m = l; m <= i__1; ++m) {
+/* Computing 2nd power */
+ r__2 = (r__1 = e[m], dabs(r__1));
+ tst = r__2 * r__2;
+ if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 = d__[m
+ + 1], dabs(r__2)) + safmin) {
+ goto L60;
+ }
+/* L50: */
+ }
+ }
+
+ m = lend;
+
+L60:
+ if (m < lend) {
+ e[m] = 0.f;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L80;
+ }
+
+/*
+ If remaining matrix is 2-by-2, use SLAE2 or SLAEV2
+ to compute its eigensystem.
+*/
+
+ if (m == l + 1) {
+ if (icompz > 0) {
+ slaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
+ work[l] = c__;
+ work[*n - 1 + l] = s;
+ clasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
+ z__[l * z_dim1 + 1], ldz);
+ } else {
+ slae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
+ }
+ d__[l] = rt1;
+ d__[l + 1] = rt2;
+ e[l] = 0.f;
+ l += 2;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l + 1] - p) / (e[l] * 2.f);
+ r__ = slapy2_(&g, &c_b1011);
+ g = d__[m] - p + e[l] / (g + r_sign(&r__, &g));
+
+ s = 1.f;
+ c__ = 1.f;
+ p = 0.f;
+
+/* Inner loop */
+
+ mm1 = m - 1;
+ i__1 = l;
+ for (i__ = mm1; i__ >= i__1; --i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ slartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m - 1) {
+ e[i__ + 1] = r__;
+ }
+ g = d__[i__ + 1] - p;
+ r__ = (d__[i__] - g) * s + c__ * 2.f * b;
+ p = s * r__;
+ d__[i__ + 1] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = -s;
+ }
+
+/* L70: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = m - l + 1;
+ clasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[l] = g;
+ goto L40;
+
+/* Eigenvalue found. */
+
+L80:
+ d__[l] = p;
+
+ ++l;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+
+ } else {
+
+/*
+ QR Iteration
+
+ Look for small superdiagonal element.
+*/
+
+L90:
+ if (l != lend) {
+ lendp1 = lend + 1;
+ i__1 = lendp1;
+ for (m = l; m >= i__1; --m) {
+/* Computing 2nd power */
+ r__2 = (r__1 = e[m - 1], dabs(r__1));
+ tst = r__2 * r__2;
+ if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 = d__[m
+ - 1], dabs(r__2)) + safmin) {
+ goto L110;
+ }
+/* L100: */
+ }
+ }
+
+ m = lend;
+
+L110:
+ if (m > lend) {
+ e[m - 1] = 0.f;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L130;
+ }
+
+/*
+ If remaining matrix is 2-by-2, use SLAE2 or SLAEV2
+ to compute its eigensystem.
+*/
+
+ if (m == l - 1) {
+ if (icompz > 0) {
+ slaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s)
+ ;
+ work[m] = c__;
+ work[*n - 1 + m] = s;
+ clasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
+ z__[(l - 1) * z_dim1 + 1], ldz);
+ } else {
+ slae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
+ }
+ d__[l - 1] = rt1;
+ d__[l] = rt2;
+ e[l - 1] = 0.f;
+ l += -2;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l - 1] - p) / (e[l - 1] * 2.f);
+ r__ = slapy2_(&g, &c_b1011);
+ g = d__[m] - p + e[l - 1] / (g + r_sign(&r__, &g));
+
+ s = 1.f;
+ c__ = 1.f;
+ p = 0.f;
+
+/* Inner loop */
+
+ lm1 = l - 1;
+ i__1 = lm1;
+ for (i__ = m; i__ <= i__1; ++i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ slartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m) {
+ e[i__ - 1] = r__;
+ }
+ g = d__[i__] - p;
+ r__ = (d__[i__ + 1] - g) * s + c__ * 2.f * b;
+ p = s * r__;
+ d__[i__] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = s;
+ }
+
+/* L120: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = l - m + 1;
+ clasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[lm1] = g;
+ goto L90;
+
+/* Eigenvalue found. */
+
+L130:
+ d__[l] = p;
+
+ --l;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+
+ }
+
+/* Undo scaling if necessary */
+
+L140:
+ if (iscale == 1) {
+ i__1 = lendsv - lsv + 1;
+ slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ } else if (iscale == 2) {
+ i__1 = lendsv - lsv + 1;
+ slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ }
+
+/*
+ Check for no convergence to an eigenvalue after a total
+ of N*MAXIT iterations.
+*/
+
+ if (jtot == nmaxit) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.f) {
+ ++(*info);
+ }
+/* L150: */
+ }
+ return 0;
+ }
+ goto L10;
+
+/* Order eigenvalues and eigenvectors. */
+
+L160:
+ if (icompz == 0) {
+
+/* Use Quick Sort */
+
+ slasrt_("I", n, &d__[1], info);
+
+ } else {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L170: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ cswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
+ &c__1);
+ }
+/* L180: */
+ }
+ }
+ return 0;
+
+/* End of CSTEQR */
+
+} /* csteqr_ */
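+/*
+   Added usage sketch (compiled out; helper name is hypothetical):
+   with COMPZ = 'I' CSTEQR initializes Z to the identity and returns
+   eigenvalues in D and eigenvectors in Z; WORK needs max(1,2*N-2)
+   reals, as documented above.
+*/
+#if 0
+static integer example_csteqr(integer n, real *d, real *e,
+                              complex *z, integer ldz, real *work)
+{
+    integer info;
+    csteqr_("I", &n, d, e, z, &ldz, work, &info);
+    return info;  /* > 0: QL/QR iteration failed to converge */
+}
+#endif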
+
+/* Subroutine */ int ctrevc_(char *side, char *howmny, logical *select,
+ integer *n, complex *t, integer *ldt, complex *vl, integer *ldvl,
+ complex *vr, integer *ldvr, integer *mm, integer *m, complex *work,
+ real *rwork, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3, i__4, i__5;
+ real r__1, r__2, r__3;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ double r_imag(complex *);
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, k, ii, ki, is;
+ static real ulp;
+ static logical allv;
+ static real unfl, ovfl, smin;
+ static logical over;
+ static real scale;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int cgemv_(char *, integer *, integer *, complex *
+ , complex *, integer *, complex *, integer *, complex *, complex *
+ , integer *);
+ static real remax;
+ extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
+ complex *, integer *);
+ static logical leftv, bothv, somev;
+ extern /* Subroutine */ int slabad_(real *, real *);
+ extern integer icamax_(integer *, complex *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int csscal_(integer *, real *, complex *, integer
+ *), xerbla_(char *, integer *), clatrs_(char *, char *,
+ char *, char *, integer *, complex *, integer *, complex *, real *
+ , real *, integer *);
+ extern doublereal scasum_(integer *, complex *, integer *);
+ static logical rightv;
+ static real smlnum;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CTREVC computes some or all of the right and/or left eigenvectors of
+ a complex upper triangular matrix T.
+
+ The right eigenvector x and the left eigenvector y of T corresponding
+ to an eigenvalue w are defined by:
+
+ T*x = w*x, y'*T = w*y'
+
+ where y' denotes the conjugate transpose of the vector y.
+
+ If all eigenvectors are requested, the routine may either return the
+ matrices X and/or Y of right or left eigenvectors of T, or the
+ products Q*X and/or Q*Y, where Q is an input unitary
+ matrix. If T was obtained from the Schur factorization of an
+ original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of
+ right or left eigenvectors of A.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'R': compute right eigenvectors only;
+ = 'L': compute left eigenvectors only;
+ = 'B': compute both right and left eigenvectors.
+
+ HOWMNY (input) CHARACTER*1
+ = 'A': compute all right and/or left eigenvectors;
+ = 'B': compute all right and/or left eigenvectors,
+ and backtransform them using the input matrices
+ supplied in VR and/or VL;
+ = 'S': compute selected right and/or left eigenvectors,
+ specified by the logical array SELECT.
+
+ SELECT (input) LOGICAL array, dimension (N)
+ If HOWMNY = 'S', SELECT specifies the eigenvectors to be
+ computed.
+ If HOWMNY = 'A' or 'B', SELECT is not referenced.
+ To select the eigenvector corresponding to the j-th
+ eigenvalue, SELECT(j) must be set to .TRUE..
+
+ N (input) INTEGER
+ The order of the matrix T. N >= 0.
+
+ T (input/output) COMPLEX array, dimension (LDT,N)
+ The upper triangular matrix T. T is modified, but restored
+ on exit.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= max(1,N).
+
+ VL (input/output) COMPLEX array, dimension (LDVL,MM)
+ On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must
+ contain an N-by-N matrix Q (usually the unitary matrix Q of
+ Schur vectors returned by CHSEQR).
+ On exit, if SIDE = 'L' or 'B', VL contains:
+ if HOWMNY = 'A', the matrix Y of left eigenvectors of T;
+ VL is lower triangular. The i-th column
+ VL(i) of VL is the eigenvector corresponding
+ to T(i,i).
+ if HOWMNY = 'B', the matrix Q*Y;
+ if HOWMNY = 'S', the left eigenvectors of T specified by
+ SELECT, stored consecutively in the columns
+ of VL, in the same order as their
+ eigenvalues.
+ If SIDE = 'R', VL is not referenced.
+
+ LDVL (input) INTEGER
+ The leading dimension of the array VL. LDVL >= max(1,N) if
+ SIDE = 'L' or 'B'; LDVL >= 1 otherwise.
+
+ VR (input/output) COMPLEX array, dimension (LDVR,MM)
+ On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must
+ contain an N-by-N matrix Q (usually the unitary matrix Q of
+ Schur vectors returned by CHSEQR).
+ On exit, if SIDE = 'R' or 'B', VR contains:
+ if HOWMNY = 'A', the matrix X of right eigenvectors of T;
+ VR is upper triangular. The i-th column
+ VR(i) of VR is the eigenvector corresponding
+ to T(i,i).
+ if HOWMNY = 'B', the matrix Q*X;
+ if HOWMNY = 'S', the right eigenvectors of T specified by
+ SELECT, stored consecutively in the columns
+ of VR, in the same order as their
+ eigenvalues.
+ If SIDE = 'L', VR is not referenced.
+
+ LDVR (input) INTEGER
+ The leading dimension of the array VR. LDVR >= max(1,N) if
+ SIDE = 'R' or 'B'; LDVR >= 1 otherwise.
+
+ MM (input) INTEGER
+ The number of columns in the arrays VL and/or VR. MM >= M.
+
+ M (output) INTEGER
+ The number of columns in the arrays VL and/or VR actually
+ used to store the eigenvectors. If HOWMNY = 'A' or 'B', M
+ is set to N. Each selected eigenvector occupies one
+ column.
+
+ WORK (workspace) COMPLEX array, dimension (2*N)
+
+ RWORK (workspace) REAL array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The algorithm used in this program is basically backward (forward)
+ substitution, with scaling to make the code robust against
+ possible overflow.
+
+ Each eigenvector is normalized so that the element of largest
+ magnitude has magnitude 1; here the magnitude of a complex number
+ (x,y) is taken to be |x| + |y|.
+
+ =====================================================================
+
+
+ Decode and test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+ --rwork;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = (lsame_(side, "R")) || (bothv);
+ leftv = (lsame_(side, "L")) || (bothv);
+
+ allv = lsame_(howmny, "A");
+ over = lsame_(howmny, "B");
+ somev = lsame_(howmny, "S");
+
+/*
+ Set M to the number of columns required to store the selected
+ eigenvectors.
+*/
+
+ if (somev) {
+ *m = 0;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (select[j]) {
+ ++(*m);
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ *info = 0;
+ if (! rightv && ! leftv) {
+ *info = -1;
+ } else if (! allv && ! over && ! somev) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if ((*ldvl < 1) || (leftv && *ldvl < *n)) {
+ *info = -8;
+ } else if ((*ldvr < 1) || (rightv && *ldvr < *n)) {
+ *info = -10;
+ } else if (*mm < *m) {
+ *info = -11;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTREVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set the constants to control overflow. */
+
+ unfl = slamch_("Safe minimum");
+ ovfl = 1.f / unfl;
+ slabad_(&unfl, &ovfl);
+ ulp = slamch_("Precision");
+ smlnum = unfl * (*n / ulp);
+
+/* Store the diagonal elements of T in working array WORK. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + *n;
+ i__3 = i__ + i__ * t_dim1;
+ work[i__2].r = t[i__3].r, work[i__2].i = t[i__3].i;
+/* L20: */
+ }
+
+/*
+ Compute 1-norm of each column of strictly upper triangular
+ part of T to control overflow in triangular solver.
+*/
+
+ rwork[1] = 0.f;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ rwork[j] = scasum_(&i__2, &t[j * t_dim1 + 1], &c__1);
+/* L30: */
+ }
+
+ if (rightv) {
+
+/* Compute right eigenvectors. */
+
+ is = *m;
+ for (ki = *n; ki >= 1; --ki) {
+
+ if (somev) {
+ if (! select[ki]) {
+ goto L80;
+ }
+ }
+/* Computing MAX */
+ i__1 = ki + ki * t_dim1;
+ r__3 = ulp * ((r__1 = t[i__1].r, dabs(r__1)) + (r__2 = r_imag(&t[
+ ki + ki * t_dim1]), dabs(r__2)));
+ smin = dmax(r__3,smlnum);
+
+ work[1].r = 1.f, work[1].i = 0.f;
+
+/* Form right-hand side. */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k;
+ i__3 = k + ki * t_dim1;
+ q__1.r = -t[i__3].r, q__1.i = -t[i__3].i;
+ work[i__2].r = q__1.r, work[i__2].i = q__1.i;
+/* L40: */
+ }
+
+/*
+ Solve the triangular system:
+ (T(1:KI-1,1:KI-1) - T(KI,KI))*X = SCALE*WORK.
+*/
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + k * t_dim1;
+ i__3 = k + k * t_dim1;
+ i__4 = ki + ki * t_dim1;
+ q__1.r = t[i__3].r - t[i__4].r, q__1.i = t[i__3].i - t[i__4]
+ .i;
+ t[i__2].r = q__1.r, t[i__2].i = q__1.i;
+ i__2 = k + k * t_dim1;
+ if ((r__1 = t[i__2].r, dabs(r__1)) + (r__2 = r_imag(&t[k + k *
+ t_dim1]), dabs(r__2)) < smin) {
+ i__3 = k + k * t_dim1;
+ t[i__3].r = smin, t[i__3].i = 0.f;
+ }
+/* L50: */
+ }
+
+ if (ki > 1) {
+ i__1 = ki - 1;
+ clatrs_("Upper", "No transpose", "Non-unit", "Y", &i__1, &t[
+ t_offset], ldt, &work[1], &scale, &rwork[1], info);
+ i__1 = ki;
+ work[i__1].r = scale, work[i__1].i = 0.f;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ ccopy_(&ki, &work[1], &c__1, &vr[is * vr_dim1 + 1], &c__1);
+
+ ii = icamax_(&ki, &vr[is * vr_dim1 + 1], &c__1);
+ i__1 = ii + is * vr_dim1;
+ remax = 1.f / ((r__1 = vr[i__1].r, dabs(r__1)) + (r__2 =
+ r_imag(&vr[ii + is * vr_dim1]), dabs(r__2)));
+ csscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ i__2 = k + is * vr_dim1;
+ vr[i__2].r = 0.f, vr[i__2].i = 0.f;
+/* L60: */
+ }
+ } else {
+ if (ki > 1) {
+ i__1 = ki - 1;
+ q__1.r = scale, q__1.i = 0.f;
+ cgemv_("N", n, &i__1, &c_b56, &vr[vr_offset], ldvr, &work[
+ 1], &c__1, &q__1, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+ ii = icamax_(n, &vr[ki * vr_dim1 + 1], &c__1);
+ i__1 = ii + ki * vr_dim1;
+ remax = 1.f / ((r__1 = vr[i__1].r, dabs(r__1)) + (r__2 =
+ r_imag(&vr[ii + ki * vr_dim1]), dabs(r__2)));
+ csscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+/* Set back the original diagonal elements of T. */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = k + k * t_dim1;
+ i__3 = k + *n;
+ t[i__2].r = work[i__3].r, t[i__2].i = work[i__3].i;
+/* L70: */
+ }
+
+ --is;
+L80:
+ ;
+ }
+ }
+
+ if (leftv) {
+
+/* Compute left eigenvectors. */
+
+ is = 1;
+ i__1 = *n;
+ for (ki = 1; ki <= i__1; ++ki) {
+
+ if (somev) {
+ if (! select[ki]) {
+ goto L130;
+ }
+ }
+/* Computing MAX */
+ i__2 = ki + ki * t_dim1;
+ r__3 = ulp * ((r__1 = t[i__2].r, dabs(r__1)) + (r__2 = r_imag(&t[
+ ki + ki * t_dim1]), dabs(r__2)));
+ smin = dmax(r__3,smlnum);
+
+ i__2 = *n;
+ work[i__2].r = 1.f, work[i__2].i = 0.f;
+
+/* Form right-hand side. */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k;
+ r_cnjg(&q__2, &t[ki + k * t_dim1]);
+ q__1.r = -q__2.r, q__1.i = -q__2.i;
+ work[i__3].r = q__1.r, work[i__3].i = q__1.i;
+/* L90: */
+ }
+
+/*
+ Solve the triangular system:
+ (T(KI+1:N,KI+1:N) - T(KI,KI))'*X = SCALE*WORK.
+*/
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k + k * t_dim1;
+ i__4 = k + k * t_dim1;
+ i__5 = ki + ki * t_dim1;
+ q__1.r = t[i__4].r - t[i__5].r, q__1.i = t[i__4].i - t[i__5]
+ .i;
+ t[i__3].r = q__1.r, t[i__3].i = q__1.i;
+ i__3 = k + k * t_dim1;
+ if ((r__1 = t[i__3].r, dabs(r__1)) + (r__2 = r_imag(&t[k + k *
+ t_dim1]), dabs(r__2)) < smin) {
+ i__4 = k + k * t_dim1;
+ t[i__4].r = smin, t[i__4].i = 0.f;
+ }
+/* L100: */
+ }
+
+ if (ki < *n) {
+ i__2 = *n - ki;
+ clatrs_("Upper", "Conjugate transpose", "Non-unit", "Y", &
+ i__2, &t[ki + 1 + (ki + 1) * t_dim1], ldt, &work[ki +
+ 1], &scale, &rwork[1], info);
+ i__2 = ki;
+ work[i__2].r = scale, work[i__2].i = 0.f;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+ if (! over) {
+ i__2 = *n - ki + 1;
+ ccopy_(&i__2, &work[ki], &c__1, &vl[ki + is * vl_dim1], &c__1)
+ ;
+
+ i__2 = *n - ki + 1;
+ ii = icamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki - 1;
+ i__2 = ii + is * vl_dim1;
+ remax = 1.f / ((r__1 = vl[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&vl[ii + is * vl_dim1]), dabs(r__2)));
+ i__2 = *n - ki + 1;
+ csscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ i__3 = k + is * vl_dim1;
+ vl[i__3].r = 0.f, vl[i__3].i = 0.f;
+/* L110: */
+ }
+ } else {
+ if (ki < *n) {
+ i__2 = *n - ki;
+ q__1.r = scale, q__1.i = 0.f;
+ cgemv_("N", n, &i__2, &c_b56, &vl[(ki + 1) * vl_dim1 + 1],
+ ldvl, &work[ki + 1], &c__1, &q__1, &vl[ki *
+ vl_dim1 + 1], &c__1);
+ }
+
+ ii = icamax_(n, &vl[ki * vl_dim1 + 1], &c__1);
+ i__2 = ii + ki * vl_dim1;
+ remax = 1.f / ((r__1 = vl[i__2].r, dabs(r__1)) + (r__2 =
+ r_imag(&vl[ii + ki * vl_dim1]), dabs(r__2)));
+ csscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+ }
+
+/* Set back the original diagonal elements of T. */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ i__3 = k + k * t_dim1;
+ i__4 = k + *n;
+ t[i__3].r = work[i__4].r, t[i__3].i = work[i__4].i;
+/* L120: */
+ }
+
+ ++is;
+L130:
+ ;
+ }
+ }
+
+ return 0;
+
+/* End of CTREVC */
+
+} /* ctrevc_ */
+
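+/*
+   A minimal usage sketch for ctrevc_ (illustrative only; it assumes the
+   f2c.h integer/logical/complex/real typedefs used throughout this file
+   and column-major storage).  Given the Schur factorization A = Q*T*Q'
+   (e.g. from chseqr_), the call below overwrites Q with the right
+   eigenvectors of A:
+
+       integer n = 4, ldt = 4, ldvl = 1, ldvr = 4, mm = 4, m, info;
+       logical select[1];   // not referenced when HOWMNY = 'B'
+       complex t[16];       // upper triangular Schur factor, filled elsewhere
+       complex vl[1];       // not referenced when SIDE = 'R'
+       complex vr[16];      // on entry: Q; on exit: right eigenvectors of A
+       complex work[8];     // workspace, dimension 2*N
+       real rwork[4];       // workspace, dimension N
+       ctrevc_("R", "B", select, &n, t, &ldt, vl, &ldvl, vr, &ldvr,
+               &mm, &m, work, rwork, &info);
+       // info == 0 on success; column j of vr then holds the eigenvector
+       // for the eigenvalue t[j + j*ldt] (0-based indexing).
+*/
+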
+/* Subroutine */ int ctrti2_(char *uplo, char *diag, integer *n, complex *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ complex q__1;
+
+ /* Builtin functions */
+ void c_div(complex *, complex *, complex *);
+
+ /* Local variables */
+ static integer j;
+ static complex ajj;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ static logical upper;
+ extern /* Subroutine */ int ctrmv_(char *, char *, char *, integer *,
+ complex *, integer *, complex *, integer *), xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CTRTI2 computes the inverse of a complex upper or lower triangular
+ matrix.
+
+ This is the Level 2 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the matrix A is upper or lower triangular.
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ DIAG (input) CHARACTER*1
+ Specifies whether or not the matrix A is unit triangular.
+ = 'N': Non-unit triangular
+ = 'U': Unit triangular
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the triangular matrix A. If UPLO = 'U', the
+ leading n by n upper triangular part of the array A contains
+ the upper triangular matrix, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n by n lower triangular part of the array A contains
+ the lower triangular matrix, and the strictly upper
+ triangular part of A is not referenced. If DIAG = 'U', the
+ diagonal elements of A are also not referenced and are
+ assumed to be 1.
+
+ On exit, the (triangular) inverse of the original matrix, in
+ the same storage format.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTRTI2", &i__1);
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ c_div(&q__1, &c_b56, &a[j + j * a_dim1]);
+ a[i__2].r = q__1.r, a[i__2].i = q__1.i;
+ i__2 = j + j * a_dim1;
+ q__1.r = -a[i__2].r, q__1.i = -a[i__2].i;
+ ajj.r = q__1.r, ajj.i = q__1.i;
+ } else {
+ q__1.r = -1.f, q__1.i = -0.f;
+ ajj.r = q__1.r, ajj.i = q__1.i;
+ }
+
+/* Compute elements 1:j-1 of j-th column. */
+
+ i__2 = j - 1;
+ ctrmv_("Upper", "No transpose", diag, &i__2, &a[a_offset], lda, &
+ a[j * a_dim1 + 1], &c__1);
+ i__2 = j - 1;
+ cscal_(&i__2, &ajj, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix. */
+
+ for (j = *n; j >= 1; --j) {
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ c_div(&q__1, &c_b56, &a[j + j * a_dim1]);
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+ i__1 = j + j * a_dim1;
+ q__1.r = -a[i__1].r, q__1.i = -a[i__1].i;
+ ajj.r = q__1.r, ajj.i = q__1.i;
+ } else {
+ q__1.r = -1.f, q__1.i = -0.f;
+ ajj.r = q__1.r, ajj.i = q__1.i;
+ }
+ if (j < *n) {
+
+/* Compute elements j+1:n of j-th column. */
+
+ i__1 = *n - j;
+ ctrmv_("Lower", "No transpose", diag, &i__1, &a[j + 1 + (j +
+ 1) * a_dim1], lda, &a[j + 1 + j * a_dim1], &c__1);
+ i__1 = *n - j;
+ cscal_(&i__1, &ajj, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of CTRTI2 */
+
+} /* ctrti2_ */
+
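+/*
+   A minimal usage sketch for ctrti2_ (illustrative values; assumes the
+   f2c.h typedefs and column-major storage).  It inverts a small non-unit
+   upper triangular matrix in place:
+
+       integer n = 2, lda = 2, info;
+       complex a[4];
+       a[0].r = 2.f, a[0].i = 0.f;   // A(1,1)
+       a[1].r = 0.f, a[1].i = 0.f;   // A(2,1), not referenced for UPLO = 'U'
+       a[2].r = 1.f, a[2].i = 0.f;   // A(1,2)
+       a[3].r = 4.f, a[3].i = 0.f;   // A(2,2)
+       ctrti2_("U", "N", &n, a, &lda, &info);
+       // On success info == 0 and the upper triangle of a holds inv(A):
+       // [ 0.5  -0.125 ; 0  0.25 ].
+*/
+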
+/* Subroutine */ int ctrtri_(char *uplo, char *diag, integer *n, complex *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, i__1, i__2, i__3[2], i__4, i__5;
+ complex q__1;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer j, jb, nb, nn;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int ctrmm_(char *, char *, char *, char *,
+ integer *, integer *, complex *, complex *, integer *, complex *,
+ integer *), ctrsm_(char *, char *,
+ char *, char *, integer *, integer *, complex *, complex *,
+ integer *, complex *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int ctrti2_(char *, char *, integer *, complex *,
+ integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical nounit;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CTRTRI computes the inverse of a complex upper or lower triangular
+ matrix A.
+
+ This is the Level 3 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': A is upper triangular;
+ = 'L': A is lower triangular.
+
+ DIAG (input) CHARACTER*1
+ = 'N': A is non-unit triangular;
+ = 'U': A is unit triangular.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the triangular matrix A. If UPLO = 'U', the
+ leading N-by-N upper triangular part of the array A contains
+ the upper triangular matrix, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading N-by-N lower triangular part of the array A contains
+ the lower triangular matrix, and the strictly upper
+ triangular part of A is not referenced. If DIAG = 'U', the
+ diagonal elements of A are also not referenced and are
+ assumed to be 1.
+ On exit, the (triangular) inverse of the original matrix, in
+ the same storage format.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, A(i,i) is exactly zero. The triangular
+ matrix is singular and its inverse cannot be computed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CTRTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity if non-unit. */
+
+ if (nounit) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ i__2 = *info + *info * a_dim1;
+ if (a[i__2].r == 0.f && a[i__2].i == 0.f) {
+ return 0;
+ }
+/* L10: */
+ }
+ *info = 0;
+ }
+
+/*
+ Determine the block size for this environment.
+
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = uplo;
+ i__3[1] = 1, a__1[1] = diag;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "CTRTRI", ch__1, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+ if ((nb <= 1) || (nb >= *n)) {
+
+/* Use unblocked code */
+
+ ctrti2_(uplo, diag, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *n - j + 1;
+ jb = min(i__4,i__5);
+
+/* Compute rows 1:j-1 of current block column */
+
+ i__4 = j - 1;
+ ctrmm_("Left", "Upper", "No transpose", diag, &i__4, &jb, &
+ c_b56, &a[a_offset], lda, &a[j * a_dim1 + 1], lda);
+ i__4 = j - 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ ctrsm_("Right", "Upper", "No transpose", diag, &i__4, &jb, &
+ q__1, &a[j + j * a_dim1], lda, &a[j * a_dim1 + 1],
+ lda);
+
+/* Compute inverse of current diagonal block */
+
+ ctrti2_("Upper", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L20: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix */
+
+ nn = (*n - 1) / nb * nb + 1;
+ i__2 = -nb;
+ for (j = nn; i__2 < 0 ? j >= 1 : j <= 1; j += i__2) {
+/* Computing MIN */
+ i__1 = nb, i__4 = *n - j + 1;
+ jb = min(i__1,i__4);
+ if (j + jb <= *n) {
+
+/* Compute rows j+jb:n of current block column */
+
+ i__1 = *n - j - jb + 1;
+ ctrmm_("Left", "Lower", "No transpose", diag, &i__1, &jb,
+ &c_b56, &a[j + jb + (j + jb) * a_dim1], lda, &a[j
+ + jb + j * a_dim1], lda);
+ i__1 = *n - j - jb + 1;
+ q__1.r = -1.f, q__1.i = -0.f;
+ ctrsm_("Right", "Lower", "No transpose", diag, &i__1, &jb,
+ &q__1, &a[j + j * a_dim1], lda, &a[j + jb + j *
+ a_dim1], lda);
+ }
+
+/* Compute inverse of current diagonal block */
+
+ ctrti2_("Lower", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L30: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of CTRTRI */
+
+} /* ctrtri_ */
+
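+/*
+   A usage sketch for ctrtri_, the blocked driver over ctrti2_
+   (illustrative; same assumptions as the sketch above).  Unlike the
+   unblocked routine it also reports singularity through INFO:
+
+       integer n = 100, lda = 100, info;
+       complex a[100 * 100];        // upper triangle filled elsewhere
+       ctrtri_("U", "N", &n, a, &lda, &info);
+       if (info > 0) {
+           // A(info,info) is exactly zero; the matrix has no inverse.
+       }
+*/
+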
+/* Subroutine */ int cung2r_(integer *m, integer *n, integer *k, complex *a,
+ integer *lda, complex *tau, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), clarf_(char *, integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, complex *),
+ xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CUNG2R generates an m by n complex matrix Q with orthonormal columns,
+ which is defined as the first n columns of a product of k elementary
+ reflectors of order m
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by CGEQRF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. M >= N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. N >= K >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the i-th column must contain the vector which
+ defines the elementary reflector H(i), for i = 1,2,...,k, as
+ returned by CGEQRF in the first k columns of its array
+ argument A.
+ On exit, the m by n matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) COMPLEX array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CGEQRF.
+
+ WORK (workspace) COMPLEX array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if ((*n < 0) || (*n > *m)) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNG2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Initialise columns k+1:n to columns of the unit matrix */
+
+ i__1 = *n;
+ for (j = *k + 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = 1; l <= i__2; ++l) {
+ i__3 = l + j * a_dim1;
+ a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+ }
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+/* L20: */
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the left */
+
+ if (i__ < *n) {
+ i__1 = i__ + i__ * a_dim1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+ i__1 = *m - i__ + 1;
+ i__2 = *n - i__;
+ clarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ }
+ if (i__ < *m) {
+ i__1 = *m - i__;
+ i__2 = i__;
+ q__1.r = -tau[i__2].r, q__1.i = -tau[i__2].i;
+ cscal_(&i__1, &q__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+ }
+ i__1 = i__ + i__ * a_dim1;
+ i__2 = i__;
+ q__1.r = 1.f - tau[i__2].r, q__1.i = 0.f - tau[i__2].i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+
+/* Set A(1:i-1,i) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ i__2 = l + i__ * a_dim1;
+ a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of CUNG2R */
+
+} /* cung2r_ */
+
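+/*
+   A usage sketch for cung2r_ (illustrative; a and tau are assumed to be
+   the output of a prior cgeqrf_ call on a 5-by-3 matrix, so k = 3
+   reflectors are available):
+
+       integer m = 5, n = 3, k = 3, lda = 5, info;
+       complex a[15];               // QR factorization output from cgeqrf_
+       complex tau[3];              // reflector scales from cgeqrf_
+       complex work[3];             // workspace, dimension N
+       cung2r_(&m, &n, &k, a, &lda, tau, work, &info);
+       // On exit a holds the explicit 5-by-3 Q with orthonormal columns.
+*/
+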
+/* Subroutine */ int cungbr_(char *vect, integer *m, integer *n, integer *k,
+ complex *a, integer *lda, complex *tau, complex *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, nb, mn;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ static logical wantq;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int cunglq_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *),
+ cungqr_(integer *, integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *, integer *);
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CUNGBR generates one of the complex unitary matrices Q or P**H
+ determined by CGEBRD when reducing a complex matrix A to bidiagonal
+ form: A = Q * B * P**H. Q and P**H are defined as products of
+ elementary reflectors H(i) or G(i) respectively.
+
+ If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q
+ is of order M:
+ if m >= k, Q = H(1) H(2) . . . H(k) and CUNGBR returns the first n
+ columns of Q, where m >= n >= k;
+ if m < k, Q = H(1) H(2) . . . H(m-1) and CUNGBR returns Q as an
+ M-by-M matrix.
+
+ If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**H
+ is of order N:
+ if k < n, P**H = G(k) . . . G(2) G(1) and CUNGBR returns the first m
+ rows of P**H, where n >= m >= k;
+ if k >= n, P**H = G(n-1) . . . G(2) G(1) and CUNGBR returns P**H as
+ an N-by-N matrix.
+
+ Arguments
+ =========
+
+ VECT (input) CHARACTER*1
+ Specifies whether the matrix Q or the matrix P**H is
+ required, as defined in the transformation applied by CGEBRD:
+ = 'Q': generate Q;
+ = 'P': generate P**H.
+
+ M (input) INTEGER
+ The number of rows of the matrix Q or P**H to be returned.
+ M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q or P**H to be returned.
+ N >= 0.
+ If VECT = 'Q', M >= N >= min(M,K);
+ if VECT = 'P', N >= M >= min(N,K).
+
+ K (input) INTEGER
+ If VECT = 'Q', the number of columns in the original M-by-K
+ matrix reduced by CGEBRD.
+ If VECT = 'P', the number of rows in the original K-by-N
+ matrix reduced by CGEBRD.
+ K >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the vectors which define the elementary reflectors,
+ as returned by CGEBRD.
+ On exit, the M-by-N matrix Q or P**H.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= M.
+
+ TAU (input) COMPLEX array, dimension
+ (min(M,K)) if VECT = 'Q'
+ (min(N,K)) if VECT = 'P'
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i) or G(i), which determines Q or P**H, as
+ returned by CGEBRD in its array argument TAUQ or TAUP.
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,min(M,N)).
+ For optimum performance LWORK >= min(M,N)*NB, where NB
+ is the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ wantq = lsame_(vect, "Q");
+ mn = min(*m,*n);
+ lquery = *lwork == -1;
+ if (! wantq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (((*n < 0) || (wantq && ((*n > *m) || (*n < min(*m,*k))))) || (!
+ wantq && ((*m > *n) || (*m < min(*n,*k))))) {
+ *info = -3;
+ } else if (*k < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*lwork < max(1,mn) && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+ if (wantq) {
+ nb = ilaenv_(&c__1, "CUNGQR", " ", m, n, k, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ } else {
+ nb = ilaenv_(&c__1, "CUNGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ }
+ lwkopt = max(1,mn) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNGBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ if (wantq) {
+
+/*
+ Form Q, determined by a call to CGEBRD to reduce an m-by-k
+ matrix
+*/
+
+ if (*m >= *k) {
+
+/* If m >= k, assume m >= n >= k */
+
+ cungqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/*
+ If m < k, assume m = n
+
+ Shift the vectors which define the elementary reflectors one
+ column to the right, and set the first row and column of Q
+ to those of the unit matrix
+*/
+
+ for (j = *m; j >= 2; --j) {
+ i__1 = j * a_dim1 + 1;
+ a[i__1].r = 0.f, a[i__1].i = 0.f;
+ i__1 = *m;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ + (j - 1) * a_dim1;
+ a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+/* L10: */
+ }
+/* L20: */
+ }
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+ i__1 = *m;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + a_dim1;
+ a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L30: */
+ }
+ if (*m > 1) {
+
+/* Form Q(2:m,2:m) */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ cungqr_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
+ tau[1], &work[1], lwork, &iinfo);
+ }
+ }
+ } else {
+
+/*
+ Form P', determined by a call to CGEBRD to reduce a k-by-n
+ matrix
+*/
+
+ if (*k < *n) {
+
+/* If k < n, assume k <= m <= n */
+
+ cunglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/*
+ If k >= n, assume m = n
+
+ Shift the vectors which define the elementary reflectors one
+ row downward, and set the first row and column of P' to
+ those of the unit matrix
+*/
+
+ i__1 = a_dim1 + 1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ i__2 = i__ + a_dim1;
+ a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L40: */
+ }
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ for (i__ = j - 1; i__ >= 2; --i__) {
+ i__2 = i__ + j * a_dim1;
+ i__3 = i__ - 1 + j * a_dim1;
+ a[i__2].r = a[i__3].r, a[i__2].i = a[i__3].i;
+/* L50: */
+ }
+ i__2 = j * a_dim1 + 1;
+ a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Form P'(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ cunglq_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
+ tau[1], &work[1], lwork, &iinfo);
+ }
+ }
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNGBR */
+
+} /* cungbr_ */
+
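+/*
+   A usage sketch for cungbr_ (illustrative; a and tau are assumed to come
+   from a prior cgebrd_ call reducing a 6-by-4 matrix to bidiagonal form
+   A = Q*B*P**H).  The LWORK = -1 query shown first is the standard way
+   to size the workspace:
+
+       integer m = 6, n = 4, k = 4, lda = 6, lwork, info;
+       complex a[24], tau[4], wkopt;
+       lwork = -1;                                  // workspace query
+       cungbr_("Q", &m, &n, &k, a, &lda, tau, &wkopt, &lwork, &info);
+       lwork = (integer) wkopt.r;                   // optimal LWORK
+       // allocate work[lwork], then repeat the call with work in place
+       // of &wkopt to overwrite a with the m-by-n matrix Q.
+*/
+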
+/* Subroutine */ int cunghr_(integer *n, integer *ilo, integer *ihi, complex *
+ a, integer *lda, complex *tau, complex *work, integer *lwork, integer
+ *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j, nb, nh, iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int cungqr_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *, integer *);
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CUNGHR generates a complex unitary matrix Q which is defined as the
+ product of IHI-ILO elementary reflectors of order N, as returned by
+ CGEHRD:
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix Q. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ ILO and IHI must have the same values as in the previous call
+ of CGEHRD. Q is equal to the unit matrix except in the
+ submatrix Q(ilo+1:ihi,ilo+1:ihi).
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the vectors which define the elementary reflectors,
+ as returned by CGEHRD.
+ On exit, the N-by-N unitary matrix Q.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (input) COMPLEX array, dimension (N-1)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CGEHRD.
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= IHI-ILO.
+ For optimum performance LWORK >= (IHI-ILO)*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nh = *ihi - *ilo;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -2;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < max(1,nh) && ! lquery) {
+ *info = -8;
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "CUNGQR", " ", &nh, &nh, &nh, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ lwkopt = max(1,nh) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNGHR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+/*
+ Shift the vectors which define the elementary reflectors one
+ column to the right, and set the first ilo and the last n-ihi
+ rows and columns to those of the unit matrix
+*/
+
+ i__1 = *ilo + 1;
+ for (j = *ihi; j >= i__1; --j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+ }
+ i__2 = *ihi;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ i__4 = i__ + (j - 1) * a_dim1;
+ a[i__3].r = a[i__4].r, a[i__3].i = a[i__4].i;
+/* L20: */
+ }
+ i__2 = *n;
+ for (i__ = *ihi + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ i__1 = *ilo;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L50: */
+ }
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+/* L60: */
+ }
+ i__1 = *n;
+ for (j = *ihi + 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L70: */
+ }
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+/* L80: */
+ }
+
+ if (nh > 0) {
+
+/* Generate Q(ilo+1:ihi,ilo+1:ihi) */
+
+ cungqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[*
+ ilo], &work[1], lwork, &iinfo);
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNGHR */
+
+} /* cunghr_ */
+
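+/*
+   A usage sketch for cunghr_ (illustrative; a and tau are assumed to come
+   from a prior cgehrd_ call with the same ILO/IHI, here the common
+   unbalanced case ilo = 1, ihi = n):
+
+       integer n = 4, ilo = 1, ihi = 4, lda = 4, lwork = 16, info;
+       complex a[16];               // Hessenberg reduction from cgehrd_
+       complex tau[3];              // reflector scales, dimension N-1
+       complex work[16];            // LWORK >= IHI-ILO suffices
+       cunghr_(&n, &ilo, &ihi, a, &lda, tau, work, &lwork, &info);
+       // On exit a holds the n-by-n unitary Q of A = Q*H*Q'.
+*/
+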
+/* Subroutine */ int cungl2_(integer *m, integer *n, integer *k, complex *a,
+ integer *lda, complex *tau, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ complex q__1, q__2;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
+ integer *), clarf_(char *, integer *, integer *, complex *,
+ integer *, complex *, complex *, integer *, complex *),
+ clacgv_(integer *, complex *, integer *), xerbla_(char *, integer
+ *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CUNGL2 generates an m-by-n complex matrix Q with orthonormal rows,
+ which is defined as the first m rows of a product of k elementary
+ reflectors of order n
+
+ Q = H(k)' . . . H(2)' H(1)'
+
+ as returned by CGELQF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. N >= M.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. M >= K >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the i-th row must contain the vector which defines
+ the elementary reflector H(i), for i = 1,2,...,k, as returned
+ by CGELQF in the first k rows of its array argument A.
+ On exit, the m by n matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) COMPLEX array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CGELQF.
+
+ WORK (workspace) COMPLEX array, dimension (M)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *m)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNGL2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ return 0;
+ }
+
+ if (*k < *m) {
+
+/* Initialise rows k+1:m to rows of the unit matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = *k + 1; l <= i__2; ++l) {
+ i__3 = l + j * a_dim1;
+ a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+ }
+ if (j > *k && j <= *m) {
+ i__2 = j + j * a_dim1;
+ a[i__2].r = 1.f, a[i__2].i = 0.f;
+ }
+/* L20: */
+ }
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i)' to A(i:m,i:n) from the right */
+
+ if (i__ < *n) {
+ i__1 = *n - i__;
+ clacgv_(&i__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ if (i__ < *m) {
+ i__1 = i__ + i__ * a_dim1;
+ a[i__1].r = 1.f, a[i__1].i = 0.f;
+ i__1 = *m - i__;
+ i__2 = *n - i__ + 1;
+ r_cnjg(&q__1, &tau[i__]);
+ clarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, &
+ q__1, &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ }
+ i__1 = *n - i__;
+ i__2 = i__;
+ q__1.r = -tau[i__2].r, q__1.i = -tau[i__2].i;
+ cscal_(&i__1, &q__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__1 = *n - i__;
+ clacgv_(&i__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+ i__1 = i__ + i__ * a_dim1;
+ r_cnjg(&q__2, &tau[i__]);
+ q__1.r = 1.f - q__2.r, q__1.i = 0.f - q__2.i;
+ a[i__1].r = q__1.r, a[i__1].i = q__1.i;
+
+/* Set A(i,1:i-1) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ i__2 = i__ + l * a_dim1;
+ a[i__2].r = 0.f, a[i__2].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of CUNGL2 */
+
+} /* cungl2_ */
+
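+/*
+   A usage sketch for cungl2_, the row-oriented (LQ) counterpart of
+   cung2r_ (illustrative; a and tau are assumed to come from a prior
+   cgelqf_ call on a 3-by-5 matrix):
+
+       integer m = 3, n = 5, k = 3, lda = 3, info;
+       complex a[15];               // LQ factorization output from cgelqf_
+       complex tau[3];              // reflector scales from cgelqf_
+       complex work[3];             // workspace, dimension M
+       cungl2_(&m, &n, &k, a, &lda, tau, work, &info);
+       // On exit a holds the 3-by-5 Q with orthonormal rows.
+*/
+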
+/* Subroutine */ int cunglq_(integer *m, integer *n, integer *k, complex *a,
+ integer *lda, complex *tau, complex *work, integer *lwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int cungl2_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *), clarfb_(
+ char *, char *, char *, char *, integer *, integer *, integer *,
+ complex *, integer *, complex *, integer *, complex *, integer *,
+ complex *, integer *), clarft_(
+ char *, char *, integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CUNGLQ generates an M-by-N complex matrix Q with orthonormal rows,
+ which is defined as the first M rows of a product of K elementary
+ reflectors of order N
+
+ Q = H(k)' . . . H(2)' H(1)'
+
+ as returned by CGELQF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. N >= M.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. M >= K >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the i-th row must contain the vector which defines
+ the elementary reflector H(i), for i = 1,2,...,k, as returned
+ by CGELQF in the first k rows of its array argument A.
+ On exit, the M-by-N matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) COMPLEX array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CGELQF.
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,M).
+ For optimum performance LWORK >= M*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit;
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "CUNGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
+ lwkopt = max(1,*m) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *m)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*lwork < max(1,*m) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNGLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if (nb > 1 && nb < *k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "CUNGLQ", " ", m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CUNGLQ", " ", m, n, k, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/*
+ Use blocked code after the last block.
+ The first kk rows are handled by the block method.
+*/
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(kk+1:m,1:kk) to zero. */
+
+ i__1 = kk;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = kk + 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *m) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ cungl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *m) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__2 = *n - i__ + 1;
+ clarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i+ib:m,i:n) from the right */
+
+ i__2 = *m - i__ - ib + 1;
+ i__3 = *n - i__ + 1;
+ clarfb_("Right", "Conjugate transpose", "Forward", "Rowwise",
+ &i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+ 1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[
+ ib + 1], &ldwork);
+ }
+
+/* Apply H' to columns i:n of current block */
+
+ i__2 = *n - i__ + 1;
+ cungl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set columns 1:i-1 of current block to zero */
+
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + ib - 1;
+ for (l = i__; l <= i__3; ++l) {
+ i__4 = l + j * a_dim1;
+ a[i__4].r = 0.f, a[i__4].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1].r = (real) iws, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNGLQ */
+
+} /* cunglq_ */
+
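+/*
+   A usage sketch for cunglq_, the blocked driver over cungl2_
+   (illustrative; the workspace query mirrors the cungbr_ sketch above):
+
+       integer m = 3, n = 5, k = 3, lda = 3, lwork, info;
+       complex a[15], tau[3], wkopt;
+       lwork = -1;                                  // workspace query
+       cunglq_(&m, &n, &k, a, &lda, tau, &wkopt, &lwork, &info);
+       lwork = (integer) wkopt.r;
+       // allocate work[lwork], then repeat the call with work in place
+       // of &wkopt to overwrite a with Q.
+*/
+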
+/* Subroutine */ int cungqr_(integer *m, integer *n, integer *k, complex *a,
+ integer *lda, complex *tau, complex *work, integer *lwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int cung2r_(integer *, integer *, integer *,
+ complex *, integer *, complex *, complex *, integer *), clarfb_(
+ char *, char *, char *, char *, integer *, integer *, integer *,
+ complex *, integer *, complex *, integer *, complex *, integer *,
+ complex *, integer *), clarft_(
+ char *, char *, integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CUNGQR generates an M-by-N complex matrix Q with orthonormal columns,
+ which is defined as the first N columns of a product of K elementary
+ reflectors of order M
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by CGEQRF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. M >= N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. N >= K >= 0.
+
+ A (input/output) COMPLEX array, dimension (LDA,N)
+ On entry, the i-th column must contain the vector which
+ defines the elementary reflector H(i), for i = 1,2,...,k, as
+ returned by CGEQRF in the first k columns of its array
+ argument A.
+ On exit, the M-by-N matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) COMPLEX array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CGEQRF.
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "CUNGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
+ lwkopt = max(1,*n) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if ((*n < 0) || (*n > *m)) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNGQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < *k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "CUNGQR", " ", m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CUNGQR", " ", m, n, k, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/*
+ Use blocked code after the last block.
+ The first kk columns are handled by the block method.
+*/
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(1:kk,kk+1:n) to zero. */
+
+ i__1 = *n;
+ for (j = kk + 1; j <= i__1; ++j) {
+ i__2 = kk;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ i__3 = i__ + j * a_dim1;
+ a[i__3].r = 0.f, a[i__3].i = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *n) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ cung2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *n) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__2 = *m - i__ + 1;
+ clarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i:m,i+ib:n) from the left */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__ - ib + 1;
+ clarfb_("Left", "No transpose", "Forward", "Columnwise", &
+ i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+ 1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &
+ work[ib + 1], &ldwork);
+ }
+
+/* Apply H to rows i:m of current block */
+
+ i__2 = *m - i__ + 1;
+ cung2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set rows 1:i-1 of current block to zero */
+
+ i__2 = i__ + ib - 1;
+ for (j = i__; j <= i__2; ++j) {
+ i__3 = i__ - 1;
+ for (l = 1; l <= i__3; ++l) {
+ i__4 = l + j * a_dim1;
+ a[i__4].r = 0.f, a[i__4].i = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1].r = (real) iws, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNGQR */
+
+} /* cungqr_ */
+
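+/*
+   A usage sketch for cungqr_, the blocked driver over cung2r_
+   (illustrative; a and tau come from cgeqrf_ as in the cung2r_ sketch,
+   and the workspace query follows the same LWORK = -1 pattern):
+
+       integer m = 5, n = 3, k = 3, lda = 5, lwork, info;
+       complex a[15], tau[3], wkopt;
+       lwork = -1;                                  // workspace query
+       cungqr_(&m, &n, &k, a, &lda, tau, &wkopt, &lwork, &info);
+       lwork = (integer) wkopt.r;
+       // allocate work[lwork] and call again to overwrite a with Q.
+*/
+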
+/* Subroutine */ int cunm2l_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, i1, i2, i3, mi, ni, nq;
+ static complex aii;
+ static logical left;
+ static complex taui;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+ , integer *, complex *, complex *, integer *, complex *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CUNM2L overwrites the general complex m-by-n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q'* C if SIDE = 'L' and TRANS = 'C', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'C',
+
+ where Q is a complex unitary matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by CGEQLF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'C': apply Q' (Conjugate transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) COMPLEX array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ CGEQLF in the last k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) COMPLEX array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CGEQLF.
+
+ C (input/output) COMPLEX array, dimension (LDC,N)
+ On entry, the m-by-n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) COMPLEX array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNM2L", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ return 0;
+ }
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ r_cnjg(&q__1, &tau[i__]);
+ taui.r = q__1.r, taui.i = q__1.i;
+ }
+ i__3 = nq - *k + i__ + i__ * a_dim1;
+ aii.r = a[i__3].r, aii.i = a[i__3].i;
+ i__3 = nq - *k + i__ + i__ * a_dim1;
+ a[i__3].r = 1.f, a[i__3].i = 0.f;
+ clarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &taui, &c__[
+ c_offset], ldc, &work[1]);
+ i__3 = nq - *k + i__ + i__ * a_dim1;
+ a[i__3].r = aii.r, a[i__3].i = aii.i;
+/* L10: */
+ }
+ return 0;
+
+/* End of CUNM2L */
+
+} /* cunm2l_ */
+
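+/*
+   A usage sketch for cunm2l_ (illustrative; a and tau are assumed to come
+   from a prior cgeqlf_ call).  It applies Q' from the left to a 5-by-2
+   right-hand-side matrix C without ever forming Q explicitly:
+
+       integer m = 5, n = 2, k = 3, lda = 5, ldc = 5, info;
+       complex a[15];               // QL factorization output from cgeqlf_
+       complex tau[3];              // reflector scales from cgeqlf_
+       complex c[10];               // right-hand sides, overwritten
+       complex work[2];             // dimension N for SIDE = 'L'
+       cunm2l_("L", "C", &m, &n, &k, a, &lda, tau, c, &ldc, work, &info);
+       // On exit c holds Q' * C.
+*/
+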
+/* Subroutine */ int cunm2r_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ static complex aii;
+ static logical left;
+ static complex taui;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+ , integer *, complex *, complex *, integer *, complex *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CUNM2R overwrites the general complex m-by-n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q'* C if SIDE = 'L' and TRANS = 'C', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'C',
+
+ where Q is a complex unitary matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by CGEQRF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'C': apply Q' (Conjugate transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) COMPLEX array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ CGEQRF in the first k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) COMPLEX array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CGEQRF.
+
+ C (input/output) COMPLEX array, dimension (LDC,N)
+ On entry, the m-by-n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) COMPLEX array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNM2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ return 0;
+ }
+
+ if ((left && ! notran) || (! left && notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ } else {
+ r_cnjg(&q__1, &tau[i__]);
+ taui.r = q__1.r, taui.i = q__1.i;
+ }
+ i__3 = i__ + i__ * a_dim1;
+ aii.r = a[i__3].r, aii.i = a[i__3].i;
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = 1.f, a[i__3].i = 0.f;
+ clarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &taui, &c__[ic
+ + jc * c_dim1], ldc, &work[1]);
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = aii.r, a[i__3].i = aii.i;
+/* L10: */
+ }
+ return 0;
+
+/* End of CUNM2R */
+
+} /* cunm2r_ */
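+
+/*
+    Illustrative sketch, not part of the LAPACK translation and compiled
+    out with #if 0: applying Q from a CGEQRF factorization from the right,
+    C := C*Q, via the unblocked routine cunm2r_ above.  The fixed sizes,
+    the hypothetical helper name, and the availability of cgeqrf_ with its
+    usual f2c signature are assumptions made only for this example.
+*/
+#if 0
+static integer example_cunm2r_usage(void)
+{
+    extern int cgeqrf_(integer *, integer *, complex *, integer *,
+            complex *, complex *, integer *, integer *);
+    integer m = 3, n = 4, k = 2, lda = 4, ldc = 3, lwork = 64, info;
+    complex a[4 * 2], cmat[3 * 4], tau[2], qrwork[64], work[3];
+
+    /* ... fill a (4-by-2) and cmat (3-by-4) with caller data here ... */
+
+    /* Factor the n-by-k panel A = Q*R (Q has order n = 4 here). */
+    cgeqrf_(&n, &k, a, &lda, tau, qrwork, &lwork, &info);
+    if (info == 0) {
+        /* SIDE = 'R', TRANS = 'N': C := C*Q; WORK needs M entries. */
+        cunm2r_("R", "N", &m, &n, &k, a, &lda, tau, cmat, &ldc, work,
+                &info);
+    }
+    return info;
+}
+#endif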
+
+/* Subroutine */ int cunmbr_(char *vect, char *side, char *trans, integer *m,
+ integer *n, integer *k, complex *a, integer *lda, complex *tau,
+ complex *c__, integer *ldc, complex *work, integer *lwork, integer *
+ info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2];
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i1, i2, nb, mi, ni, nq, nw;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int cunmlq_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ static logical notran;
+ extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *);
+ static logical applyq;
+ static char transt[1];
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ If VECT = 'Q', CUNMBR overwrites the general complex M-by-N matrix C
+ with
+                SIDE = 'L'     SIDE = 'R'
+ TRANS = 'N':   Q * C          C * Q
+ TRANS = 'C':   Q**H * C       C * Q**H
+
+ If VECT = 'P', CUNMBR overwrites the general complex M-by-N matrix C
+ with
+                SIDE = 'L'     SIDE = 'R'
+ TRANS = 'N':   P * C          C * P
+ TRANS = 'C':   P**H * C       C * P**H
+
+ Here Q and P**H are the unitary matrices determined by CGEBRD when
+ reducing a complex matrix A to bidiagonal form: A = Q * B * P**H. Q
+ and P**H are defined as products of elementary reflectors H(i) and
+ G(i) respectively.
+
+ Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the
+ order of the unitary matrix Q or P**H that is applied.
+
+ If VECT = 'Q', A is assumed to have been an NQ-by-K matrix:
+ if nq >= k, Q = H(1) H(2) . . . H(k);
+ if nq < k, Q = H(1) H(2) . . . H(nq-1).
+
+ If VECT = 'P', A is assumed to have been a K-by-NQ matrix:
+ if k < nq, P = G(1) G(2) . . . G(k);
+ if k >= nq, P = G(1) G(2) . . . G(nq-1).
+
+ Arguments
+ =========
+
+ VECT (input) CHARACTER*1
+ = 'Q': apply Q or Q**H;
+ = 'P': apply P or P**H.
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q, Q**H, P or P**H from the Left;
+ = 'R': apply Q, Q**H, P or P**H from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q or P;
+ = 'C': Conjugate transpose, apply Q**H or P**H.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ If VECT = 'Q', the number of columns in the original
+ matrix reduced by CGEBRD.
+ If VECT = 'P', the number of rows in the original
+ matrix reduced by CGEBRD.
+ K >= 0.
+
+ A (input) COMPLEX array, dimension
+ (LDA,min(nq,K)) if VECT = 'Q'
+ (LDA,nq) if VECT = 'P'
+ The vectors which define the elementary reflectors H(i) and
+ G(i), whose products determine the matrices Q and P, as
+ returned by CGEBRD.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If VECT = 'Q', LDA >= max(1,nq);
+ if VECT = 'P', LDA >= max(1,min(nq,K)).
+
+ TAU (input) COMPLEX array, dimension (min(nq,K))
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i) or G(i) which determines Q or P, as returned
+ by CGEBRD in the array argument TAUQ or TAUP.
+
+ C (input/output) COMPLEX array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q
+ or P*C or P**H*C or C*P or C*P**H.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ applyq = lsame_(vect, "Q");
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q or P and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! applyq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (! left && ! lsame_(side, "R")) {
+ *info = -2;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*k < 0) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = min(nq,*k);
+ if ((applyq && *lda < max(1,nq)) || (! applyq && *lda < max(i__1,i__2)
+ )) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info == 0) {
+ if (applyq) {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "CUNMQR", ch__1, &i__1, n, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "CUNMQR", ch__1, m, &i__1, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "CUNMLQ", ch__1, &i__1, n, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "CUNMLQ", ch__1, m, &i__1, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMBR", &i__1);
+ return 0;
+    } else if (lquery) {
+        /* Workspace query only: WORK(1) already holds the optimal LWORK. */
+        return 0;
+    }
+
+/* Quick return if possible */
+
+ work[1].r = 1.f, work[1].i = 0.f;
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+
+ if (applyq) {
+
+/* Apply Q */
+
+ if (nq >= *k) {
+
+/* Q was determined by a call to CGEBRD with nq >= k */
+
+ cunmqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* Q was determined by a call to CGEBRD with nq < k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ cunmqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1]
+ , &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ } else {
+
+/* Apply P */
+
+ if (notran) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+ if (nq > *k) {
+
+/* P was determined by a call to CGEBRD with nq > k */
+
+ cunmlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* P was determined by a call to CGEBRD with nq <= k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ cunmlq_(side, transt, &mi, &ni, &i__1, &a[((a_dim1) << (1)) + 1],
+ lda, &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1],
+ lwork, &iinfo);
+ }
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMBR */
+
+} /* cunmbr_ */
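+
+/*
+    Illustrative sketch, not part of the LAPACK translation and compiled
+    out with #if 0: forming Q**H * C with the Q from a CGEBRD bidiagonal
+    reduction, including the LWORK = -1 workspace query described above.
+    The fixed sizes, the hypothetical helper name, and the availability of
+    cgebrd_ with its usual f2c signature are assumptions for this example.
+*/
+#if 0
+static integer example_cunmbr_usage(void)
+{
+    extern int cgebrd_(integer *, integer *, complex *, integer *, real *,
+            real *, complex *, complex *, complex *, integer *, integer *);
+    integer m = 4, n = 3, lda = 4, ldc = 4, lwork = 64, info;
+    complex a[4 * 3], cmat[4 * 3], tauq[3], taup[3], work[64];
+    real d__[3], e[2];
+
+    /* ... fill a (4-by-3) and cmat (4-by-3) with caller data here ... */
+
+    /* Reduce A to bidiagonal form: A = Q * B * P**H. */
+    cgebrd_(&m, &n, a, &lda, d__, e, tauq, taup, work, &lwork, &info);
+    if (info != 0) {
+        return info;
+    }
+
+    /* Workspace query: the optimal LWORK is returned in WORK(1). */
+    lwork = -1;
+    cunmbr_("Q", "L", "C", &m, &n, &n, a, &lda, tauq, cmat, &ldc, work,
+            &lwork, &info);
+
+    /* A real caller would allocate (integer) work[0].r entries; this
+       sketch simply reuses its fixed 64-entry buffer. */
+    lwork = 64;
+    cunmbr_("Q", "L", "C", &m, &n, &n, a, &lda, tauq, cmat, &ldc, work,
+            &lwork, &info);
+    return info;
+}
+#endif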
+
+/* Subroutine */ int cunml2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3;
+ complex q__1;
+
+ /* Builtin functions */
+ void r_cnjg(complex *, complex *);
+
+ /* Local variables */
+ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ static complex aii;
+ static logical left;
+ static complex taui;
+ extern /* Subroutine */ int clarf_(char *, integer *, integer *, complex *
+ , integer *, complex *, complex *, integer *, complex *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int clacgv_(integer *, complex *, integer *),
+ xerbla_(char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ CUNML2 overwrites the general complex m-by-n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q'* C if SIDE = 'L' and TRANS = 'C', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'C',
+
+ where Q is a complex unitary matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k)' . . . H(2)' H(1)'
+
+ as returned by CGELQF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'C': apply Q' (Conjugate transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) COMPLEX array, dimension
+ (LDA,M) if SIDE = 'L',
+ (LDA,N) if SIDE = 'R'
+ The i-th row must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ CGELQF in the first k rows of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,K).
+
+ TAU (input) COMPLEX array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CGELQF.
+
+ C (input/output) COMPLEX array, dimension (LDC,N)
+ On entry, the m-by-n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) COMPLEX array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNML2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ return 0;
+ }
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) or H(i)' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) or H(i)' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) or H(i)' */
+
+ if (notran) {
+ r_cnjg(&q__1, &tau[i__]);
+ taui.r = q__1.r, taui.i = q__1.i;
+ } else {
+ i__3 = i__;
+ taui.r = tau[i__3].r, taui.i = tau[i__3].i;
+ }
+ if (i__ < nq) {
+ i__3 = nq - i__;
+ clacgv_(&i__3, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+ i__3 = i__ + i__ * a_dim1;
+ aii.r = a[i__3].r, aii.i = a[i__3].i;
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = 1.f, a[i__3].i = 0.f;
+ clarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &taui, &c__[ic +
+ jc * c_dim1], ldc, &work[1]);
+ i__3 = i__ + i__ * a_dim1;
+ a[i__3].r = aii.r, a[i__3].i = aii.i;
+ if (i__ < nq) {
+ i__3 = nq - i__;
+ clacgv_(&i__3, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of CUNML2 */
+
+} /* cunml2_ */
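+
+/*
+    Illustrative sketch, not part of the LAPACK translation and compiled
+    out with #if 0: applying Q from a CGELQF factorization, C := Q*C, via
+    the unblocked routine cunml2_ above.  The reflectors live in the rows
+    of A, so LDA only needs to reach K.  The fixed sizes, the hypothetical
+    helper name, and the availability of cgelqf_ with its usual f2c
+    signature are assumptions made only for this example.
+*/
+#if 0
+static integer example_cunml2_usage(void)
+{
+    extern int cgelqf_(integer *, integer *, complex *, integer *,
+            complex *, complex *, integer *, integer *);
+    integer m = 4, n = 3, k = 2, lda = 2, ldc = 4, lwork = 64, info;
+    complex a[2 * 4], cmat[4 * 3], tau[2], lqwork[64], work[3];
+
+    /* ... fill a (2-by-4) and cmat (4-by-3) with caller data here ... */
+
+    /* Factor the k-by-m panel A = L*Q; reflectors land in the rows. */
+    cgelqf_(&k, &m, a, &lda, tau, lqwork, &lwork, &info);
+    if (info == 0) {
+        /* SIDE = 'L', TRANS = 'N': C := Q*C; WORK needs N entries. */
+        cunml2_("L", "N", &m, &n, &k, a, &lda, tau, cmat, &ldc, work,
+                &info);
+    }
+    return info;
+}
+#endif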
+
+/* Subroutine */ int cunmlq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ static complex t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int cunml2_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *), clarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+ , integer *, integer *, complex *, integer *, complex *, complex *
+ , integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical notran;
+ static integer ldwork;
+ static char transt[1];
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CUNMLQ overwrites the general complex M-by-N matrix C with
+
+                SIDE = 'L'     SIDE = 'R'
+ TRANS = 'N':   Q * C          C * Q
+ TRANS = 'C':   Q**H * C       C * Q**H
+
+ where Q is a complex unitary matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k)' . . . H(2)' H(1)'
+
+ as returned by CGELQF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**H from the Left;
+ = 'R': apply Q or Q**H from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'C': Conjugate transpose, apply Q**H.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) COMPLEX array, dimension
+ (LDA,M) if SIDE = 'L',
+ (LDA,N) if SIDE = 'R'
+ The i-th row must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ CGELQF in the first k rows of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,K).
+
+ TAU (input) COMPLEX array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CGELQF.
+
+ C (input/output) COMPLEX array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "CUNMLQ", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CUNMLQ", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if ((nb < nbmin) || (nb >= *k)) {
+
+/* Use unblocked code */
+
+ cunml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'C';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__4 = nq - i__ + 1;
+ clarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1],
+ lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ clarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__
+ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1],
+ ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMLQ */
+
+} /* cunmlq_ */
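+
+/*
+    Illustrative sketch, not part of the LAPACK translation and compiled
+    out with #if 0: the LWORK = -1 query-then-apply pattern documented
+    above, wrapped around cunmlq_.  The hypothetical helper name and the
+    caller-supplied-buffer policy are assumptions for illustration only.
+*/
+#if 0
+static integer example_cunmlq_query(integer m, integer n, integer k,
+        complex *a, integer lda, complex *tau, complex *cmat, integer ldc,
+        complex *work, integer lwork_avail)
+{
+    integer info, lwork;
+
+    /* Query: the routine stores the optimal LWORK in WORK(1) and
+       returns without touching C. */
+    lwork = -1;
+    cunmlq_("L", "C", &m, &n, &k, a, &lda, tau, cmat, &ldc, work,
+            &lwork, &info);
+    if (info != 0 || (integer) work[0].r > lwork_avail) {
+        return -1; /* the caller must supply a larger WORK buffer */
+    }
+
+    /* Apply: C := Q**H * C with the workspace the caller provided. */
+    lwork = lwork_avail;
+    cunmlq_("L", "C", &m, &n, &k, a, &lda, tau, cmat, &ldc, work,
+            &lwork, &info);
+    return info;
+}
+#endif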
+
+/* Subroutine */ int cunmql_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ static complex t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int cunm2l_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *), clarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+ , integer *, integer *, complex *, integer *, complex *, complex *
+ , integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical notran;
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CUNMQL overwrites the general complex M-by-N matrix C with
+
+                SIDE = 'L'     SIDE = 'R'
+ TRANS = 'N':   Q * C          C * Q
+ TRANS = 'C':   Q**H * C       C * Q**H
+
+ where Q is a complex unitary matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by CGEQLF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**H from the Left;
+ = 'R': apply Q or Q**H from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'C': Conjugate transpose, apply Q**H.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) COMPLEX array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ CGEQLF in the last k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) COMPLEX array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CGEQLF.
+
+ C (input/output) COMPLEX array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "CUNMQL", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMQL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CUNMQL", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if ((nb < nbmin) || (nb >= *k)) {
+
+/* Use unblocked code */
+
+ cunm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i+ib-1) . . . H(i+1) H(i)
+*/
+
+ i__4 = nq - *k + i__ + ib - 1;
+ clarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1]
+ , lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ clarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, &
+ work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMQL */
+
+} /* cunmql_ */
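+
+/*
+    Illustrative sketch, not part of the LAPACK translation and compiled
+    out with #if 0: applying Q**H from the right after a CGEQLF
+    factorization, C := C*Q**H, via the blocked driver cunmql_ above
+    (which falls back to cunm2l_ for small block sizes).  The fixed
+    sizes, the hypothetical helper name, and the availability of cgeqlf_
+    with its usual f2c signature are assumptions for this example.
+*/
+#if 0
+static integer example_cunmql_usage(void)
+{
+    extern int cgeqlf_(integer *, integer *, complex *, integer *,
+            complex *, complex *, integer *, integer *);
+    integer m = 3, n = 4, k = 2, lda = 4, ldc = 3, lwork = 64, info;
+    complex a[4 * 2], cmat[3 * 4], tau[2], work[64];
+
+    /* ... fill a (4-by-2) and cmat (3-by-4) with caller data here ... */
+
+    /* Factor the n-by-k panel A = Q*L (Q has order n = 4 here). */
+    cgeqlf_(&n, &k, a, &lda, tau, work, &lwork, &info);
+    if (info == 0) {
+        /* SIDE = 'R', TRANS = 'C': C := C*Q**H; LWORK >= max(1,M). */
+        cunmql_("R", "C", &m, &n, &k, a, &lda, tau, cmat, &ldc, work,
+                &lwork, &info);
+    }
+    return info;
+}
+#endif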
+
+/* Subroutine */ int cunmqr_(char *side, char *trans, integer *m, integer *n,
+ integer *k, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ static complex t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int cunm2r_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *), clarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, complex *,
+ integer *, complex *, integer *, complex *, integer *, complex *,
+ integer *), clarft_(char *, char *
+ , integer *, integer *, complex *, integer *, complex *, complex *
+ , integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical notran;
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CUNMQR overwrites the general complex M-by-N matrix C with
+
+                SIDE = 'L'     SIDE = 'R'
+ TRANS = 'N':   Q * C          C * Q
+ TRANS = 'C':   Q**H * C       C * Q**H
+
+ where Q is a complex unitary matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by CGEQRF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**H from the Left;
+ = 'R': apply Q or Q**H from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'C': Conjugate transpose, apply Q**H.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) COMPLEX array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ CGEQRF in the first k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) COMPLEX array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CGEQRF.
+
+ C (input/output) COMPLEX array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "C")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "CUNMQR", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("CUNMQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "CUNMQR", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if ((nb < nbmin) || (nb >= *k)) {
+
+/* Use unblocked code */
+
+ cunm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && ! notran) || (! left && notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__4 = nq - i__ + 1;
+ clarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], t, &c__65)
+ ;
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ clarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc *
+ c_dim1], ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMQR */
+
+} /* cunmqr_ */
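+
+/*
+    Illustrative sketch, not part of the LAPACK translation and compiled
+    out with #if 0: the classic least-squares building block, b := Q**H*b
+    after CGEQRF, so that the leading n entries of b can be fed to a
+    triangular solve with R.  The fixed sizes, the hypothetical helper
+    name, and the availability of cgeqrf_ with its usual f2c signature
+    are assumptions made only for this example.
+*/
+#if 0
+static integer example_cunmqr_qhb(void)
+{
+    extern int cgeqrf_(integer *, integer *, complex *, integer *,
+            complex *, complex *, integer *, integer *);
+    integer m = 5, n = 3, nrhs = 1, lda = 5, ldb = 5, lwork = 64, info;
+    complex a[5 * 3], b[5 * 1], tau[3], work[64];
+
+    /* ... fill a (5-by-3) and b (5-by-1) with caller data here ... */
+
+    /* Factor A = Q*R. */
+    cgeqrf_(&m, &n, a, &lda, tau, work, &lwork, &info);
+    if (info == 0) {
+        /* b := Q**H * b; b(1:n) is then the right-hand side for R*x. */
+        cunmqr_("L", "C", &m, &nrhs, &n, a, &lda, tau, b, &ldb, work,
+                &lwork, &info);
+    }
+    return info;
+}
+#endif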
+
+/* Subroutine */ int cunmtr_(char *side, char *uplo, char *trans, integer *m,
+ integer *n, complex *a, integer *lda, complex *tau, complex *c__,
+ integer *ldc, complex *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i1, i2, nb, mi, ni, nq, nw;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int cunmql_(char *, char *, integer *, integer *,
+ integer *, complex *, integer *, complex *, complex *, integer *,
+ complex *, integer *, integer *), cunmqr_(char *,
+ char *, integer *, integer *, integer *, complex *, integer *,
+ complex *, complex *, integer *, complex *, integer *, integer *);
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ CUNMTR overwrites the general complex M-by-N matrix C with
+
+                SIDE = 'L'     SIDE = 'R'
+ TRANS = 'N':   Q * C          C * Q
+ TRANS = 'C':   Q**H * C       C * Q**H
+
+ where Q is a complex unitary matrix of order nq, with nq = m if
+ SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of
+ nq-1 elementary reflectors, as returned by CHETRD:
+
+ if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1);
+
+ if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1).
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**H from the Left;
+ = 'R': apply Q or Q**H from the Right.
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A contains elementary reflectors
+ from CHETRD;
+ = 'L': Lower triangle of A contains elementary reflectors
+ from CHETRD.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'C': Conjugate transpose, apply Q**H.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ A (input) COMPLEX array, dimension
+ (LDA,M) if SIDE = 'L'
+ (LDA,N) if SIDE = 'R'
+ The vectors which define the elementary reflectors, as
+ returned by CHETRD.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'.
+
+ TAU (input) COMPLEX array, dimension
+ (M-1) if SIDE = 'L'
+ (N-1) if SIDE = 'R'
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by CHETRD.
+
+ C (input/output) COMPLEX array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**H*C or C*Q**H or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) COMPLEX array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ if (upper) {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "CUNMQL", ch__1, &i__2, n, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "CUNMQL", ch__1, m, &i__2, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "CUNMQR", ch__1, &i__2, n, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "CUNMQR", ch__1, m, &i__2, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("CUNMTR", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (nq == 1)) {
+ work[1].r = 1.f, work[1].i = 0.f;
+ return 0;
+ }
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to CHETRD with UPLO = 'U' */
+
+ i__2 = nq - 1;
+ cunmql_(side, trans, &mi, &ni, &i__2, &a[((a_dim1) << (1)) + 1], lda,
+ &tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo);
+ } else {
+
+/* Q was determined by a call to CHETRD with UPLO = 'L' */
+
+ if (left) {
+ i1 = 2;
+ i2 = 1;
+ } else {
+ i1 = 1;
+ i2 = 2;
+ }
+ i__2 = nq - 1;
+ cunmqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], &
+ c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ work[1].r = (real) lwkopt, work[1].i = 0.f;
+ return 0;
+
+/* End of CUNMTR */
+
+} /* cunmtr_ */
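+
+/*
+    Illustrative sketch, not part of the LAPACK translation and compiled
+    out with #if 0: applying the Q from a CHETRD tridiagonal reduction,
+    C := Q*C, e.g. to back-transform eigenvectors of the tridiagonal T.
+    The fixed sizes, the hypothetical helper name, and the availability
+    of chetrd_ with its usual f2c signature are assumptions for this
+    example.
+*/
+#if 0
+static integer example_cunmtr_usage(void)
+{
+    extern int chetrd_(char *, integer *, complex *, integer *, real *,
+            real *, complex *, complex *, integer *, integer *);
+    integer n = 4, lda = 4, ldc = 4, lwork = 64, info;
+    complex a[4 * 4], cmat[4 * 4], tau[3], work[64];
+    real d__[4], e[3];
+
+    /* ... fill the Hermitian a (lower part) and cmat (4-by-4) here ... */
+
+    /* Reduce A to real tridiagonal form: A = Q * T * Q**H. */
+    chetrd_("L", &n, a, &lda, d__, e, tau, work, &lwork, &info);
+    if (info == 0) {
+        /* UPLO must match CHETRD; SIDE = 'L', TRANS = 'N': C := Q*C. */
+        cunmtr_("L", "L", "N", &n, &n, a, &lda, tau, cmat, &ldc, work,
+                &lwork, &info);
+    }
+    return info;
+}
+#endif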
+
+/* Subroutine */ int dbdsdc_(char *uplo, char *compq, integer *n, doublereal *
+ d__, doublereal *e, doublereal *u, integer *ldu, doublereal *vt,
+ integer *ldvt, doublereal *q, integer *iq, doublereal *work, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double d_sign(doublereal *, doublereal *), log(doublereal);
+
+ /* Local variables */
+ static integer i__, j, k;
+ static doublereal p, r__;
+ static integer z__, ic, ii, kk;
+ static doublereal cs;
+ static integer is, iu;
+ static doublereal sn;
+ static integer nm1;
+ static doublereal eps;
+ static integer ivt, difl, difr, ierr, perm, mlvl, sqre;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *), dcopy_(integer *, doublereal *, integer *
+ , doublereal *, integer *), dswap_(integer *, doublereal *,
+ integer *, doublereal *, integer *);
+ static integer poles, iuplo, nsize, start;
+ extern /* Subroutine */ int dlasd0_(integer *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ integer *, integer *, doublereal *, integer *);
+
+ extern /* Subroutine */ int dlasda_(integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *), dlascl_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *), dlasdq_(char *, integer *, integer *, integer
+ *, integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *), dlaset_(char *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static integer givcol;
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ static integer icompq;
+ static doublereal orgnrm;
+ static integer givnum, givptr, qstart, smlsiz, wstart, smlszp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ December 1, 1999
+
+
+ Purpose
+ =======
+
+ DBDSDC computes the singular value decomposition (SVD) of a real
+ N-by-N (upper or lower) bidiagonal matrix B: B = U * S * VT,
+ using a divide and conquer method, where S is a diagonal matrix
+ with non-negative diagonal elements (the singular values of B), and
+ U and VT are orthogonal matrices of left and right singular vectors,
+ respectively. DBDSDC can be used to compute all singular values,
+ and optionally, singular vectors or singular vectors in compact form.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none. See DLASD3 for details.
+
+ The code currently calls DLASDQ if singular values only are desired.
+ However, it can be slightly modified to compute singular values
+ using the divide and conquer method.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': B is upper bidiagonal.
+ = 'L': B is lower bidiagonal.
+
+ COMPQ (input) CHARACTER*1
+ Specifies whether singular vectors are to be computed
+ as follows:
+ = 'N': Compute singular values only;
+ = 'P': Compute singular values and compute singular
+ vectors in compact form;
+ = 'I': Compute singular values and singular vectors.
+
+ N (input) INTEGER
+ The order of the matrix B. N >= 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the n diagonal elements of the bidiagonal matrix B.
+ On exit, if INFO=0, the singular values of B.
+
+ E (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the elements of E contain the offdiagonal
+ elements of the bidiagonal matrix whose SVD is desired.
+ On exit, E has been destroyed.
+
+ U (output) DOUBLE PRECISION array, dimension (LDU,N)
+ If COMPQ = 'I', then:
+ On exit, if INFO = 0, U contains the left singular vectors
+ of the bidiagonal matrix.
+ For other values of COMPQ, U is not referenced.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= 1.
+ If singular vectors are desired, then LDU >= max( 1, N ).
+
+ VT (output) DOUBLE PRECISION array, dimension (LDVT,N)
+ If COMPQ = 'I', then:
+ On exit, if INFO = 0, VT' contains the right singular
+ vectors of the bidiagonal matrix.
+ For other values of COMPQ, VT is not referenced.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= 1.
+ If singular vectors are desired, then LDVT >= max( 1, N ).
+
+ Q (output) DOUBLE PRECISION array, dimension (LDQ)
+ If COMPQ = 'P', then:
+ On exit, if INFO = 0, Q and IQ contain the left
+ and right singular vectors in a compact form,
+ requiring O(N log N) space instead of 2*N**2.
+ In particular, Q contains all the DOUBLE PRECISION data in
+ LDQ >= N*(11 + 2*SMLSIZ + 8*INT(LOG_2(N/(SMLSIZ+1))))
+ words of memory, where SMLSIZ is returned by ILAENV and
+ is equal to the maximum size of the subproblems at the
+ bottom of the computation tree (usually about 25).
+ For other values of COMPQ, Q is not referenced.
+
+ IQ (output) INTEGER array, dimension (LDIQ)
+ If COMPQ = 'P', then:
+ On exit, if INFO = 0, Q and IQ contain the left
+ and right singular vectors in a compact form,
+ requiring O(N log N) space instead of 2*N**2.
+ In particular, IQ contains all INTEGER data in
+ LDIQ >= N*(3 + 3*INT(LOG_2(N/(SMLSIZ+1))))
+ words of memory, where SMLSIZ is returned by ILAENV and
+ is equal to the maximum size of the subproblems at the
+ bottom of the computation tree (usually about 25).
+ For other values of COMPQ, IQ is not referenced.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (LWORK)
+ If COMPQ = 'N' then LWORK >= (4 * N).
+ If COMPQ = 'P' then LWORK >= (6 * N).
+ If COMPQ = 'I' then LWORK >= (3 * N**2 + 4 * N).
+
+ IWORK (workspace) INTEGER array, dimension (8*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute a singular value.
+ The update process of divide and conquer failed.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --q;
+ --iq;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ iuplo = 0;
+ if (lsame_(uplo, "U")) {
+ iuplo = 1;
+ }
+ if (lsame_(uplo, "L")) {
+ iuplo = 2;
+ }
+ if (lsame_(compq, "N")) {
+ icompq = 0;
+ } else if (lsame_(compq, "P")) {
+ icompq = 1;
+ } else if (lsame_(compq, "I")) {
+ icompq = 2;
+ } else {
+ icompq = -1;
+ }
+ if (iuplo == 0) {
+ *info = -1;
+ } else if (icompq < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if ((*ldu < 1) || (icompq == 2 && *ldu < *n)) {
+ *info = -7;
+ } else if ((*ldvt < 1) || (icompq == 2 && *ldvt < *n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DBDSDC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ smlsiz = ilaenv_(&c__9, "DBDSDC", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+ if (*n == 1) {
+ if (icompq == 1) {
+ q[1] = d_sign(&c_b2865, &d__[1]);
+ q[smlsiz * *n + 1] = 1.;
+ } else if (icompq == 2) {
+ u[u_dim1 + 1] = d_sign(&c_b2865, &d__[1]);
+ vt[vt_dim1 + 1] = 1.;
+ }
+ d__[1] = abs(d__[1]);
+ return 0;
+ }
+ nm1 = *n - 1;
+
+/*
+ If matrix lower bidiagonal, rotate to be upper bidiagonal
+ by applying Givens rotations on the left
+*/
+
+ wstart = 1;
+ qstart = 3;
+ if (icompq == 1) {
+ dcopy_(n, &d__[1], &c__1, &q[1], &c__1);
+ i__1 = *n - 1;
+ dcopy_(&i__1, &e[1], &c__1, &q[*n + 1], &c__1);
+ }
+ if (iuplo == 2) {
+ qstart = 5;
+ wstart = ((*n) << (1)) - 1;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (icompq == 1) {
+ q[i__ + ((*n) << (1))] = cs;
+ q[i__ + *n * 3] = sn;
+ } else if (icompq == 2) {
+ work[i__] = cs;
+ work[nm1 + i__] = -sn;
+ }
+/* L10: */
+ }
+ }
+
+/* If ICOMPQ = 0, use DLASDQ to compute the singular values. */
+
+ if (icompq == 0) {
+ dlasdq_("U", &c__0, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
+ wstart], info);
+ goto L40;
+ }
+
+/*
+ If N is smaller than the minimum divide size SMLSIZ, then solve
+ the problem with another solver.
+*/
+
+ if (*n <= smlsiz) {
+ if (icompq == 2) {
+ dlaset_("A", n, n, &c_b2879, &c_b2865, &u[u_offset], ldu);
+ dlaset_("A", n, n, &c_b2879, &c_b2865, &vt[vt_offset], ldvt);
+ dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
+ , ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
+ wstart], info);
+ } else if (icompq == 1) {
+ iu = 1;
+ ivt = iu + *n;
+ dlaset_("A", n, n, &c_b2879, &c_b2865, &q[iu + (qstart - 1) * *n],
+ n);
+ dlaset_("A", n, n, &c_b2879, &c_b2865, &q[ivt + (qstart - 1) * *n]
+ , n);
+ dlasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &q[ivt + (
+ qstart - 1) * *n], n, &q[iu + (qstart - 1) * *n], n, &q[
+ iu + (qstart - 1) * *n], n, &work[wstart], info);
+ }
+ goto L40;
+ }
+
+ if (icompq == 2) {
+ dlaset_("A", n, n, &c_b2879, &c_b2865, &u[u_offset], ldu);
+ dlaset_("A", n, n, &c_b2879, &c_b2865, &vt[vt_offset], ldvt);
+ }
+
+/* Scale. */
+
+ orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.) {
+ return 0;
+ }
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b2865, n, &c__1, &d__[1], n, &ierr);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b2865, &nm1, &c__1, &e[1], &nm1, &
+ ierr);
+
+ eps = EPSILON;
+
+ mlvl = (integer) (log((doublereal) (*n) / (doublereal) (smlsiz + 1)) /
+ log(2.)) + 1;
+ smlszp = smlsiz + 1;
+
+ if (icompq == 1) {
+ iu = 1;
+ ivt = smlsiz + 1;
+ difl = ivt + smlszp;
+ difr = difl + mlvl;
+ z__ = difr + ((mlvl) << (1));
+ ic = z__ + mlvl;
+ is = ic + 1;
+ poles = is + 1;
+ givnum = poles + ((mlvl) << (1));
+
+ k = 1;
+ givptr = 2;
+ perm = 3;
+ givcol = perm + mlvl;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = d__[i__], abs(d__1)) < eps) {
+ d__[i__] = d_sign(&eps, &d__[i__]);
+ }
+/* L20: */
+ }
+
+ start = 1;
+ sqre = 0;
+
+ i__1 = nm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (((d__1 = e[i__], abs(d__1)) < eps) || (i__ == nm1)) {
+
+/*
+ Subproblem found. First determine its size and then
+ apply divide and conquer on it.
+*/
+
+ if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+ nsize = i__ - start + 1;
+ } else if ((d__1 = e[i__], abs(d__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+ nsize = *n - start + 1;
+ } else {
+
+/*
+                A subproblem with E(NM1) small. This implies a
+ 1-by-1 subproblem at D(N). Solve this 1-by-1 problem
+ first.
+*/
+
+ nsize = i__ - start + 1;
+ if (icompq == 2) {
+ u[*n + *n * u_dim1] = d_sign(&c_b2865, &d__[*n]);
+ vt[*n + *n * vt_dim1] = 1.;
+ } else if (icompq == 1) {
+ q[*n + (qstart - 1) * *n] = d_sign(&c_b2865, &d__[*n]);
+ q[*n + (smlsiz + qstart - 1) * *n] = 1.;
+ }
+ d__[*n] = (d__1 = d__[*n], abs(d__1));
+ }
+ if (icompq == 2) {
+ dlasd0_(&nsize, &sqre, &d__[start], &e[start], &u[start +
+ start * u_dim1], ldu, &vt[start + start * vt_dim1],
+ ldvt, &smlsiz, &iwork[1], &work[wstart], info);
+ } else {
+ dlasda_(&icompq, &smlsiz, &nsize, &sqre, &d__[start], &e[
+ start], &q[start + (iu + qstart - 2) * *n], n, &q[
+ start + (ivt + qstart - 2) * *n], &iq[start + k * *n],
+ &q[start + (difl + qstart - 2) * *n], &q[start + (
+ difr + qstart - 2) * *n], &q[start + (z__ + qstart -
+ 2) * *n], &q[start + (poles + qstart - 2) * *n], &iq[
+ start + givptr * *n], &iq[start + givcol * *n], n, &
+ iq[start + perm * *n], &q[start + (givnum + qstart -
+ 2) * *n], &q[start + (ic + qstart - 2) * *n], &q[
+ start + (is + qstart - 2) * *n], &work[wstart], &
+ iwork[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+ start = i__ + 1;
+ }
+/* L30: */
+ }
+
+/* Unscale */
+
+ dlascl_("G", &c__0, &c__0, &c_b2865, &orgnrm, n, &c__1, &d__[1], n, &ierr);
+L40:
+
+/* Use Selection Sort to minimize swaps of singular vectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ kk = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] > p) {
+ kk = j;
+ p = d__[j];
+ }
+/* L50: */
+ }
+ if (kk != i__) {
+ d__[kk] = d__[i__];
+ d__[i__] = p;
+ if (icompq == 1) {
+ iq[i__] = kk;
+ } else if (icompq == 2) {
+ dswap_(n, &u[i__ * u_dim1 + 1], &c__1, &u[kk * u_dim1 + 1], &
+ c__1);
+ dswap_(n, &vt[i__ + vt_dim1], ldvt, &vt[kk + vt_dim1], ldvt);
+ }
+ } else if (icompq == 1) {
+ iq[i__] = i__;
+ }
+/* L60: */
+ }
+
+/* If ICOMPQ = 1, use IQ(N,1) as the indicator for UPLO */
+
+ if (icompq == 1) {
+ if (iuplo == 1) {
+ iq[*n] = 1;
+ } else {
+ iq[*n] = 0;
+ }
+ }
+
+/*
+ If B is lower bidiagonal, update U by those Givens rotations
+ which rotated B to be upper bidiagonal
+*/
+
+ if (iuplo == 2 && icompq == 2) {
+ dlasr_("L", "V", "B", n, n, &work[1], &work[*n], &u[u_offset], ldu);
+ }
+
+ return 0;
+
+/* End of DBDSDC */
+
+} /* dbdsdc_ */
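+
+/*
+   Editor's usage sketch -- NOT part of the translated LAPACK source.
+   A minimal, hypothetical driver showing the f2c calling convention for
+   dbdsdc_ with COMPQ = 'N' (singular values only): every argument is
+   passed by pointer, and the `integer'/`doublereal' types are assumed to
+   come from f2c.h as in the rest of this file.  Workspace sizes follow
+   the docstring above (LWORK >= 4*N, IWORK of size 8*N).  The block is
+   fenced with #if 0 so it never enters the build.
+*/
+#if 0
+static int example_dbdsdc(void)
+{
+    integer n = 3, ldu = 1, ldvt = 1, info = 0;
+    doublereal d[3] = {3., 2., 1.};   /* diagonal of the bidiagonal B */
+    doublereal e[2] = {.5, .25};      /* superdiagonal of B */
+    doublereal u[1], vt[1], q[1];     /* not referenced when COMPQ='N' */
+    integer iq[1];
+    doublereal work[12];              /* LWORK = 4*N */
+    integer iwork[24];                /* 8*N */
+
+    dbdsdc_("U", "N", &n, d, e, u, &ldu, vt, &ldvt, q, iq,
+            work, iwork, &info);
+    /* on success (info == 0) d[] holds the singular values, decreasing */
+    return (int) info;
+}
+#endif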
+
+/* Subroutine */ int dbdsqr_(char *uplo, integer *n, integer *ncvt, integer *
+ nru, integer *ncc, doublereal *d__, doublereal *e, doublereal *vt,
+ integer *ldvt, doublereal *u, integer *ldu, doublereal *c__, integer *
+ ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double pow_dd(doublereal *, doublereal *), sqrt(doublereal), d_sign(
+ doublereal *, doublereal *);
+
+ /* Local variables */
+ static doublereal f, g, h__;
+ static integer i__, j, m;
+ static doublereal r__, cs;
+ static integer ll;
+ static doublereal sn, mu;
+ static integer nm1, nm12, nm13, lll;
+ static doublereal eps, sll, tol, abse;
+ static integer idir;
+ static doublereal abss;
+ static integer oldm;
+ static doublereal cosl;
+ static integer isub, iter;
+ static doublereal unfl, sinl, cosr, smin, smax, sinr;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *), dlas2_(
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *), dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ static doublereal oldcs;
+ extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *);
+ static integer oldll;
+ static doublereal shift, sigmn, oldsn;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static integer maxit;
+ static doublereal sminl, sigmx;
+ static logical lower;
+ extern /* Subroutine */ int dlasq1_(integer *, doublereal *, doublereal *,
+ doublereal *, integer *), dlasv2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *), xerbla_(char *,
+ integer *);
+ static doublereal sminoa, thresh;
+ static logical rotate;
+ static doublereal sminlo, tolmul;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DBDSQR computes the singular value decomposition (SVD) of a real
+ N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P'
+ denotes the transpose of P), where S is a diagonal matrix with
+ non-negative diagonal elements (the singular values of B), and Q
+ and P are orthogonal matrices.
+
+ The routine computes S, and optionally computes U * Q, P' * VT,
+ or Q' * C, for given real input matrices U, VT, and C.
+
+ See "Computing Small Singular Values of Bidiagonal Matrices With
+ Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan,
+ LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11,
+ no. 5, pp. 873-912, Sept 1990) and
+ "Accurate singular values and differential qd algorithms," by
+ B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics
+ Department, University of California at Berkeley, July 1992
+ for a detailed description of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': B is upper bidiagonal;
+ = 'L': B is lower bidiagonal.
+
+ N (input) INTEGER
+ The order of the matrix B. N >= 0.
+
+ NCVT (input) INTEGER
+ The number of columns of the matrix VT. NCVT >= 0.
+
+ NRU (input) INTEGER
+ The number of rows of the matrix U. NRU >= 0.
+
+ NCC (input) INTEGER
+ The number of columns of the matrix C. NCC >= 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the n diagonal elements of the bidiagonal matrix B.
+ On exit, if INFO=0, the singular values of B in decreasing
+ order.
+
+ E (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the elements of E contain the
+             off-diagonal elements of the bidiagonal matrix whose SVD
+ is desired. On normal exit (INFO = 0), E is destroyed.
+ If the algorithm does not converge (INFO > 0), D and E
+ will contain the diagonal and superdiagonal elements of a
+ bidiagonal matrix orthogonally equivalent to the one given
+ as input. E(N) is used for workspace.
+
+ VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT)
+ On entry, an N-by-NCVT matrix VT.
+ On exit, VT is overwritten by P' * VT.
+ VT is not referenced if NCVT = 0.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT.
+ LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0.
+
+ U (input/output) DOUBLE PRECISION array, dimension (LDU, N)
+ On entry, an NRU-by-N matrix U.
+ On exit, U is overwritten by U * Q.
+ U is not referenced if NRU = 0.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= max(1,NRU).
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC)
+ On entry, an N-by-NCC matrix C.
+ On exit, C is overwritten by Q' * C.
+ C is not referenced if NCC = 0.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C.
+ LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (4*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: If INFO = -i, the i-th argument had an illegal value
+ > 0: the algorithm did not converge; D and E contain the
+ elements of a bidiagonal matrix which is orthogonally
+ similar to the input matrix B; if INFO = i, i
+ elements of E have not converged to zero.
+
+ Internal Parameters
+ ===================
+
+ TOLMUL DOUBLE PRECISION, default = max(10,min(100,EPS**(-1/8)))
+ TOLMUL controls the convergence criterion of the QR loop.
+ If it is positive, TOLMUL*EPS is the desired relative
+ precision in the computed singular values.
+ If it is negative, abs(TOLMUL*EPS*sigma_max) is the
+ desired absolute accuracy in the computed singular
+ values (corresponds to relative accuracy
+                  abs(TOLMUL*EPS) in the largest singular value).
+ abs(TOLMUL) should be between 1 and 1/EPS, and preferably
+ between 10 (for fast convergence) and .1/EPS
+ (for there to be some accuracy in the results).
+                  Default is to lose either one eighth or 2 of the
+ available decimal digits in each computed singular value
+ (whichever is smaller).
+
+ MAXITR INTEGER, default = 6
+ MAXITR controls the maximum number of passes of the
+                  algorithm through its inner loop. The algorithm stops
+ (and so fails to converge) if the number of passes
+ through the inner loop exceeds MAXITR*N**2.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lower = lsame_(uplo, "L");
+ if (! lsame_(uplo, "U") && ! lower) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ncvt < 0) {
+ *info = -3;
+ } else if (*nru < 0) {
+ *info = -4;
+ } else if (*ncc < 0) {
+ *info = -5;
+ } else if ((*ncvt == 0 && *ldvt < 1) || (*ncvt > 0 && *ldvt < max(1,*n)))
+ {
+ *info = -9;
+ } else if (*ldu < max(1,*nru)) {
+ *info = -11;
+ } else if ((*ncc == 0 && *ldc < 1) || (*ncc > 0 && *ldc < max(1,*n))) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DBDSQR", &i__1);
+ return 0;
+ }
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ goto L160;
+ }
+
+/* ROTATE is true if any singular vectors desired, false otherwise */
+
+ rotate = ((*ncvt > 0) || (*nru > 0)) || (*ncc > 0);
+
+/* If no singular vectors desired, use qd algorithm */
+
+ if (! rotate) {
+ dlasq1_(n, &d__[1], &e[1], &work[1], info);
+ return 0;
+ }
+
+ nm1 = *n - 1;
+ nm12 = nm1 + nm1;
+ nm13 = nm12 + nm1;
+ idir = 0;
+
+/* Get machine constants */
+
+ eps = EPSILON;
+ unfl = SAFEMINIMUM;
+
+/*
+ If matrix lower bidiagonal, rotate to be upper bidiagonal
+ by applying Givens rotations on the left
+*/
+
+ if (lower) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ work[i__] = cs;
+ work[nm1 + i__] = sn;
+/* L10: */
+ }
+
+/* Update singular vectors if desired */
+
+ if (*nru > 0) {
+ dlasr_("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset],
+ ldu);
+ }
+ if (*ncc > 0) {
+ dlasr_("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset],
+ ldc);
+ }
+ }
+
+/*
+ Compute singular values to relative accuracy TOL
+       (By setting TOL to be negative, the algorithm will compute
+ singular values to absolute accuracy ABS(TOL)*norm(input matrix))
+
+ Computing MAX
+ Computing MIN
+*/
+ d__3 = 100., d__4 = pow_dd(&eps, &c_b2944);
+ d__1 = 10., d__2 = min(d__3,d__4);
+ tolmul = max(d__1,d__2);
+ tol = tolmul * eps;
+
+/* Compute approximate maximum, minimum singular values */
+
+ smax = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = smax, d__3 = (d__1 = d__[i__], abs(d__1));
+ smax = max(d__2,d__3);
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = smax, d__3 = (d__1 = e[i__], abs(d__1));
+ smax = max(d__2,d__3);
+/* L30: */
+ }
+ sminl = 0.;
+ if (tol >= 0.) {
+
+/* Relative accuracy desired */
+
+ sminoa = abs(d__[1]);
+ if (sminoa == 0.) {
+ goto L50;
+ }
+ mu = sminoa;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ mu = (d__2 = d__[i__], abs(d__2)) * (mu / (mu + (d__1 = e[i__ - 1]
+ , abs(d__1))));
+ sminoa = min(sminoa,mu);
+ if (sminoa == 0.) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ sminoa /= sqrt((doublereal) (*n));
+/* Computing MAX */
+ d__1 = tol * sminoa, d__2 = *n * 6 * *n * unfl;
+ thresh = max(d__1,d__2);
+ } else {
+
+/*
+ Absolute accuracy desired
+
+ Computing MAX
+*/
+ d__1 = abs(tol) * smax, d__2 = *n * 6 * *n * unfl;
+ thresh = max(d__1,d__2);
+ }
+
+/*
+ Prepare for main iteration loop for the singular values
+ (MAXIT is the maximum number of passes through the inner
+      loop permitted before nonconvergence is signalled.)
+*/
+
+ maxit = *n * 6 * *n;
+ iter = 0;
+ oldll = -1;
+ oldm = -1;
+
+/* M points to last element of unconverged part of matrix */
+
+ m = *n;
+
+/* Begin main iteration loop */
+
+L60:
+
+/* Check for convergence or exceeding iteration count */
+
+ if (m <= 1) {
+ goto L160;
+ }
+ if (iter > maxit) {
+ goto L200;
+ }
+
+/* Find diagonal block of matrix to work on */
+
+ if (tol < 0. && (d__1 = d__[m], abs(d__1)) <= thresh) {
+ d__[m] = 0.;
+ }
+ smax = (d__1 = d__[m], abs(d__1));
+ smin = smax;
+ i__1 = m - 1;
+ for (lll = 1; lll <= i__1; ++lll) {
+ ll = m - lll;
+ abss = (d__1 = d__[ll], abs(d__1));
+ abse = (d__1 = e[ll], abs(d__1));
+ if (tol < 0. && abss <= thresh) {
+ d__[ll] = 0.;
+ }
+ if (abse <= thresh) {
+ goto L80;
+ }
+ smin = min(smin,abss);
+/* Computing MAX */
+ d__1 = max(smax,abss);
+ smax = max(d__1,abse);
+/* L70: */
+ }
+ ll = 0;
+ goto L90;
+L80:
+ e[ll] = 0.;
+
+/* Matrix splits since E(LL) = 0 */
+
+ if (ll == m - 1) {
+
+/* Convergence of bottom singular value, return to top of loop */
+
+ --m;
+ goto L60;
+ }
+L90:
+ ++ll;
+
+/* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */
+
+ if (ll == m - 1) {
+
+/* 2 by 2 block, handle separately */
+
+ dlasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr,
+ &sinl, &cosl);
+ d__[m - 1] = sigmx;
+ e[m - 1] = 0.;
+ d__[m] = sigmn;
+
+/* Compute singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ drot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, &
+ cosr, &sinr);
+ }
+ if (*nru > 0) {
+ drot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], &
+ c__1, &cosl, &sinl);
+ }
+ if (*ncc > 0) {
+ drot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, &
+ cosl, &sinl);
+ }
+ m += -2;
+ goto L60;
+ }
+
+/*
+ If working on new submatrix, choose shift direction
+ (from larger end diagonal element towards smaller)
+*/
+
+ if ((ll > oldm) || (m < oldll)) {
+ if ((d__1 = d__[ll], abs(d__1)) >= (d__2 = d__[m], abs(d__2))) {
+
+/* Chase bulge from top (big end) to bottom (small end) */
+
+ idir = 1;
+ } else {
+
+/* Chase bulge from bottom (big end) to top (small end) */
+
+ idir = 2;
+ }
+ }
+
+/* Apply convergence tests */
+
+ if (idir == 1) {
+
+/*
+ Run convergence test in forward direction
+ First apply standard test to bottom of matrix
+*/
+
+ if (((d__2 = e[m - 1], abs(d__2)) <= abs(tol) * (d__1 = d__[m], abs(
+ d__1))) || (tol < 0. && (d__3 = e[m - 1], abs(d__3)) <=
+ thresh)) {
+ e[m - 1] = 0.;
+ goto L60;
+ }
+
+ if (tol >= 0.) {
+
+/*
+ If relative accuracy desired,
+ apply convergence criterion forward
+*/
+
+ mu = (d__1 = d__[ll], abs(d__1));
+ sminl = mu;
+ i__1 = m - 1;
+ for (lll = ll; lll <= i__1; ++lll) {
+ if ((d__1 = e[lll], abs(d__1)) <= tol * mu) {
+ e[lll] = 0.;
+ goto L60;
+ }
+ sminlo = sminl;
+ mu = (d__2 = d__[lll + 1], abs(d__2)) * (mu / (mu + (d__1 = e[
+ lll], abs(d__1))));
+ sminl = min(sminl,mu);
+/* L100: */
+ }
+ }
+
+ } else {
+
+/*
+ Run convergence test in backward direction
+ First apply standard test to top of matrix
+*/
+
+ if (((d__2 = e[ll], abs(d__2)) <= abs(tol) * (d__1 = d__[ll], abs(
+ d__1))) || (tol < 0. && (d__3 = e[ll], abs(d__3)) <= thresh))
+ {
+ e[ll] = 0.;
+ goto L60;
+ }
+
+ if (tol >= 0.) {
+
+/*
+ If relative accuracy desired,
+ apply convergence criterion backward
+*/
+
+ mu = (d__1 = d__[m], abs(d__1));
+ sminl = mu;
+ i__1 = ll;
+ for (lll = m - 1; lll >= i__1; --lll) {
+ if ((d__1 = e[lll], abs(d__1)) <= tol * mu) {
+ e[lll] = 0.;
+ goto L60;
+ }
+ sminlo = sminl;
+ mu = (d__2 = d__[lll], abs(d__2)) * (mu / (mu + (d__1 = e[lll]
+ , abs(d__1))));
+ sminl = min(sminl,mu);
+/* L110: */
+ }
+ }
+ }
+ oldll = ll;
+ oldm = m;
+
+/*
+ Compute shift. First, test if shifting would ruin relative
+ accuracy, and if so set the shift to zero.
+
+ Computing MAX
+*/
+ d__1 = eps, d__2 = tol * .01;
+ if (tol >= 0. && *n * tol * (sminl / smax) <= max(d__1,d__2)) {
+
+/* Use a zero shift to avoid loss of relative accuracy */
+
+ shift = 0.;
+ } else {
+
+/* Compute the shift from 2-by-2 block at end of matrix */
+
+ if (idir == 1) {
+ sll = (d__1 = d__[ll], abs(d__1));
+ dlas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
+ } else {
+ sll = (d__1 = d__[m], abs(d__1));
+ dlas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
+ }
+
+/* Test if shift negligible, and if so set to zero */
+
+ if (sll > 0.) {
+/* Computing 2nd power */
+ d__1 = shift / sll;
+ if (d__1 * d__1 < eps) {
+ shift = 0.;
+ }
+ }
+ }
+
+/* Increment iteration count */
+
+ iter = iter + m - ll;
+
+/* If SHIFT = 0, do simplified QR iteration */
+
+ if (shift == 0.) {
+ if (idir == 1) {
+
+/*
+ Chase bulge from top to bottom
+ Save cosines and sines for later singular vector updates
+*/
+
+ cs = 1.;
+ oldcs = 1.;
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ d__1 = d__[i__] * cs;
+ dlartg_(&d__1, &e[i__], &cs, &sn, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = oldsn * r__;
+ }
+ d__1 = oldcs * r__;
+ d__2 = d__[i__ + 1] * sn;
+ dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
+ work[i__ - ll + 1] = cs;
+ work[i__ - ll + 1 + nm1] = sn;
+ work[i__ - ll + 1 + nm12] = oldcs;
+ work[i__ - ll + 1 + nm13] = oldsn;
+/* L120: */
+ }
+ h__ = d__[m] * cs;
+ d__[m] = h__ * oldcs;
+ e[m - 1] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
+ + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
+ + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) {
+ e[m - 1] = 0.;
+ }
+
+ } else {
+
+/*
+ Chase bulge from bottom to top
+ Save cosines and sines for later singular vector updates
+*/
+
+ cs = 1.;
+ oldcs = 1.;
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ d__1 = d__[i__] * cs;
+ dlartg_(&d__1, &e[i__ - 1], &cs, &sn, &r__);
+ if (i__ < m) {
+ e[i__] = oldsn * r__;
+ }
+ d__1 = oldcs * r__;
+ d__2 = d__[i__ - 1] * sn;
+ dlartg_(&d__1, &d__2, &oldcs, &oldsn, &d__[i__]);
+ work[i__ - ll] = cs;
+ work[i__ - ll + nm1] = -sn;
+ work[i__ - ll + nm12] = oldcs;
+ work[i__ - ll + nm13] = -oldsn;
+/* L130: */
+ }
+ h__ = d__[ll] * cs;
+ d__[ll] = h__ * oldcs;
+ e[ll] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
+ u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[ll], abs(d__1)) <= thresh) {
+ e[ll] = 0.;
+ }
+ }
+ } else {
+
+/* Use nonzero shift */
+
+ if (idir == 1) {
+
+/*
+ Chase bulge from top to bottom
+ Save cosines and sines for later singular vector updates
+*/
+
+ f = ((d__1 = d__[ll], abs(d__1)) - shift) * (d_sign(&c_b2865, &
+ d__[ll]) + shift / d__[ll]);
+ g = e[ll];
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ dlartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__];
+ e[i__] = cosr * e[i__] - sinr * d__[i__];
+ g = sinr * d__[i__ + 1];
+ d__[i__ + 1] = cosr * d__[i__ + 1];
+ dlartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__] + sinl * d__[i__ + 1];
+ d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
+ if (i__ < m - 1) {
+ g = sinl * e[i__ + 1];
+ e[i__ + 1] = cosl * e[i__ + 1];
+ }
+ work[i__ - ll + 1] = cosr;
+ work[i__ - ll + 1 + nm1] = sinr;
+ work[i__ - ll + 1 + nm12] = cosl;
+ work[i__ - ll + 1 + nm13] = sinl;
+/* L140: */
+ }
+ e[m - 1] = f;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
+ + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
+ + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((d__1 = e[m - 1], abs(d__1)) <= thresh) {
+ e[m - 1] = 0.;
+ }
+
+ } else {
+
+/*
+ Chase bulge from bottom to top
+ Save cosines and sines for later singular vector updates
+*/
+
+ f = ((d__1 = d__[m], abs(d__1)) - shift) * (d_sign(&c_b2865, &d__[
+ m]) + shift / d__[m]);
+ g = e[m - 1];
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ dlartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ < m) {
+ e[i__] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__ - 1];
+ e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
+ g = sinr * d__[i__ - 1];
+ d__[i__ - 1] = cosr * d__[i__ - 1];
+ dlartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
+ d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
+ if (i__ > ll + 1) {
+ g = sinl * e[i__ - 2];
+ e[i__ - 2] = cosl * e[i__ - 2];
+ }
+ work[i__ - ll] = cosr;
+ work[i__ - ll + nm1] = -sinr;
+ work[i__ - ll + nm12] = cosl;
+ work[i__ - ll + nm13] = -sinl;
+/* L150: */
+ }
+ e[ll] = f;
+
+/* Test convergence */
+
+ if ((d__1 = e[ll], abs(d__1)) <= thresh) {
+ e[ll] = 0.;
+ }
+
+/* Update singular vectors if desired */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
+ u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ dlasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+ }
+ }
+
+/* QR iteration finished, go back and check convergence */
+
+ goto L60;
+
+/* All singular values converged, so make them positive */
+
+L160:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] < 0.) {
+ d__[i__] = -d__[i__];
+
+/* Change sign of singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ dscal_(ncvt, &c_b3001, &vt[i__ + vt_dim1], ldvt);
+ }
+ }
+/* L170: */
+ }
+
+/*
+ Sort the singular values into decreasing order (insertion sort on
+ singular values, but only one transposition per singular vector)
+*/
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for smallest D(I) */
+
+ isub = 1;
+ smin = d__[1];
+ i__2 = *n + 1 - i__;
+ for (j = 2; j <= i__2; ++j) {
+ if (d__[j] <= smin) {
+ isub = j;
+ smin = d__[j];
+ }
+/* L180: */
+ }
+ if (isub != *n + 1 - i__) {
+
+/* Swap singular values and vectors */
+
+ d__[isub] = d__[*n + 1 - i__];
+ d__[*n + 1 - i__] = smin;
+ if (*ncvt > 0) {
+ dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ +
+ vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) *
+ u_dim1 + 1], &c__1);
+ }
+ if (*ncc > 0) {
+ dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ +
+ c_dim1], ldc);
+ }
+ }
+/* L190: */
+ }
+ goto L220;
+
+/* Maximum number of iterations exceeded, failure to converge */
+
+L200:
+ *info = 0;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.) {
+ ++(*info);
+ }
+/* L210: */
+ }
+L220:
+ return 0;
+
+/* End of DBDSQR */
+
+} /* dbdsqr_ */
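+
+/*
+   Editor's usage sketch (hypothetical, not part of the LAPACK source):
+   dbdsqr_ on a 2-by-2 upper bidiagonal matrix, accumulating P' into VT.
+   Per the docstring, VT must hold an N-by-NCVT matrix on entry (here the
+   identity, so VT = P' on exit), E carries one extra workspace element,
+   and WORK needs 4*N slots.  Fenced with #if 0, illustration only.
+*/
+#if 0
+static int example_dbdsqr(void)
+{
+    integer n = 2, ncvt = 2, nru = 0, ncc = 0, info = 0;
+    integer ldvt = 2, ldu = 1, ldc = 1;
+    doublereal d[2]  = {2., 1.};
+    doublereal e[2]  = {.5, 0.};          /* e[n-1] is workspace */
+    doublereal vt[4] = {1., 0., 0., 1.};  /* identity, column-major */
+    doublereal u[1], c[1];                /* unreferenced: NRU = NCC = 0 */
+    doublereal work[8];                   /* 4*N */
+
+    dbdsqr_("U", &n, &ncvt, &nru, &ncc, d, e, vt, &ldvt,
+            u, &ldu, c, &ldc, work, &info);
+    /* d[] now holds the singular values; the rows of vt are the right
+       singular vectors (VT = P') */
+    return (int) info;
+}
+#endif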
+
+/* Subroutine */ int dgebak_(char *job, char *side, integer *n, integer *ilo,
+ integer *ihi, doublereal *scale, integer *m, doublereal *v, integer *
+ ldv, integer *info)
+{
+ /* System generated locals */
+ integer v_dim1, v_offset, i__1;
+
+ /* Local variables */
+ static integer i__, k;
+ static doublereal s;
+ static integer ii;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static logical leftv;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical rightv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DGEBAK forms the right or left eigenvectors of a real general matrix
+ by backward transformation on the computed eigenvectors of the
+ balanced matrix output by DGEBAL.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ Specifies the type of backward transformation required:
+ = 'N', do nothing, return immediately;
+ = 'P', do backward transformation for permutation only;
+ = 'S', do backward transformation for scaling only;
+ = 'B', do backward transformations for both permutation and
+ scaling.
+ JOB must be the same as the argument JOB supplied to DGEBAL.
+
+ SIDE (input) CHARACTER*1
+ = 'R': V contains right eigenvectors;
+ = 'L': V contains left eigenvectors.
+
+ N (input) INTEGER
+ The number of rows of the matrix V. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ The integers ILO and IHI determined by DGEBAL.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ SCALE (input) DOUBLE PRECISION array, dimension (N)
+ Details of the permutation and scaling factors, as returned
+ by DGEBAL.
+
+ M (input) INTEGER
+ The number of columns of the matrix V. M >= 0.
+
+ V (input/output) DOUBLE PRECISION array, dimension (LDV,M)
+ On entry, the matrix of right or left eigenvectors to be
+ transformed, as returned by DHSEIN or DTREVC.
+ On exit, V is overwritten by the transformed eigenvectors.
+
+ LDV (input) INTEGER
+ The leading dimension of the array V. LDV >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ =====================================================================
+
+
+ Decode and Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --scale;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+
+ /* Function Body */
+ rightv = lsame_(side, "R");
+ leftv = lsame_(side, "L");
+
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (! rightv && ! leftv) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -4;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -5;
+ } else if (*m < 0) {
+ *info = -7;
+ } else if (*ldv < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEBAK", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*m == 0) {
+ return 0;
+ }
+ if (lsame_(job, "N")) {
+ return 0;
+ }
+
+ if (*ilo == *ihi) {
+ goto L30;
+ }
+
+/* Backward balance */
+
+ if ((lsame_(job, "S")) || (lsame_(job, "B"))) {
+
+ if (rightv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = scale[i__];
+ dscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L10: */
+ }
+ }
+
+ if (leftv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = 1. / scale[i__];
+ dscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L20: */
+ }
+ }
+
+ }
+
+/*
+ Backward permutation
+
+ For I = ILO-1 step -1 until 1,
+ IHI+1 step 1 until N do --
+*/
+
+L30:
+ if ((lsame_(job, "P")) || (lsame_(job, "B"))) {
+ if (rightv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L40;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = (integer) scale[i__];
+ if (k == i__) {
+ goto L40;
+ }
+ dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+ ;
+ }
+ }
+
+ if (leftv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L50;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = (integer) scale[i__];
+ if (k == i__) {
+ goto L50;
+ }
+ dswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L50:
+ ;
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DGEBAK */
+
+} /* dgebak_ */
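+
+/*
+   Editor's usage sketch (hypothetical): dgebak_ applied to a matrix of
+   right eigenvectors, using hand-made DGEBAL-style scaling output
+   (JOB = 'S', ILO = 1, IHI = N) purely for illustration.  In real use
+   ILO, IHI and SCALE come from a prior dgebal_ call, as in the combined
+   sketch after dgebal_ below.  Fenced with #if 0, illustration only.
+*/
+#if 0
+static int example_dgebak(void)
+{
+    integer n = 2, ilo = 1, ihi = 2, m = 2, ldv = 2, info = 0;
+    doublereal scale[2] = {2., .5};       /* diagonal balancing factors */
+    doublereal v[4] = {1., 0., 0., 1.};   /* eigenvectors, column-major */
+
+    dgebak_("S", "R", &n, &ilo, &ihi, scale, &m, v, &ldv, &info);
+    /* row i of v has been multiplied by scale[i] (right eigenvectors) */
+    return (int) info;
+}
+#endif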
+
+/* Subroutine */ int dgebal_(char *job, integer *n, doublereal *a, integer *
+ lda, integer *ilo, integer *ihi, doublereal *scale, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ static doublereal c__, f, g;
+ static integer i__, j, k, l, m;
+ static doublereal r__, s, ca, ra;
+ static integer ica, ira, iexc;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static doublereal sfmin1, sfmin2, sfmax1, sfmax2;
+
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical noconv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DGEBAL balances a general real matrix A. This involves, first,
+ permuting A by a similarity transformation to isolate eigenvalues
+ in the first 1 to ILO-1 and last IHI+1 to N elements on the
+ diagonal; and second, applying a diagonal similarity transformation
+ to rows and columns ILO to IHI to make the rows and columns as
+ close in norm as possible. Both steps are optional.
+
+ Balancing may reduce the 1-norm of the matrix, and improve the
+ accuracy of the computed eigenvalues and/or eigenvectors.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ Specifies the operations to be performed on A:
+ = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0
+ for i = 1,...,N;
+ = 'P': permute only;
+ = 'S': scale only;
+ = 'B': both permute and scale.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the input matrix A.
+ On exit, A is overwritten by the balanced matrix.
+ If JOB = 'N', A is not referenced.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ ILO (output) INTEGER
+ IHI (output) INTEGER
+ ILO and IHI are set to integers such that on exit
+             A(i,j) = 0 if i > j and j = 1,...,ILO-1 or i = IHI+1,...,N.
+ If JOB = 'N' or 'S', ILO = 1 and IHI = N.
+
+ SCALE (output) DOUBLE PRECISION array, dimension (N)
+ Details of the permutations and scaling factors applied to
+ A. If P(j) is the index of the row and column interchanged
+ with row and column j and D(j) is the scaling factor
+ applied to row and column j, then
+ SCALE(j) = P(j) for j = 1,...,ILO-1
+ = D(j) for j = ILO,...,IHI
+ = P(j) for j = IHI+1,...,N.
+ The order in which the interchanges are made is N to IHI+1,
+ then 1 to ILO-1.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The permutations consist of row and column interchanges which put
+ the matrix in the form
+
+ ( T1 X Y )
+ P A P = ( 0 B Z )
+ ( 0 0 T2 )
+
+ where T1 and T2 are upper triangular matrices whose eigenvalues lie
+ along the diagonal. The column indices ILO and IHI mark the starting
+ and ending columns of the submatrix B. Balancing consists of applying
+ a diagonal similarity transformation inv(D) * B * D to make the
+ 1-norms of each row of B and its corresponding column nearly equal.
+ The output matrix is
+
+ ( T1 X*D Y )
+ ( 0 inv(D)*B*D inv(D)*Z ).
+ ( 0 0 T2 )
+
+ Information about the permutations P and the diagonal matrix D is
+ returned in the vector SCALE.
+
+ This subroutine is based on the EISPACK routine BALANC.
+
+ Modified by Tzu-Yi Chen, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --scale;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEBAL", &i__1);
+ return 0;
+ }
+
+ k = 1;
+ l = *n;
+
+ if (*n == 0) {
+ goto L210;
+ }
+
+ if (lsame_(job, "N")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ scale[i__] = 1.;
+/* L10: */
+ }
+ goto L210;
+ }
+
+ if (lsame_(job, "S")) {
+ goto L120;
+ }
+
+/* Permutation to isolate eigenvalues if possible */
+
+ goto L50;
+
+/* Row and column exchange. */
+
+L20:
+ scale[m] = (doublereal) j;
+ if (j == m) {
+ goto L30;
+ }
+
+ dswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+ i__1 = *n - k + 1;
+ dswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+
+L30:
+ switch (iexc) {
+ case 1: goto L40;
+ case 2: goto L80;
+ }
+
+/* Search for rows isolating an eigenvalue and push them down. */
+
+L40:
+ if (l == 1) {
+ goto L210;
+ }
+ --l;
+
+L50:
+ for (j = l; j >= 1; --j) {
+
+ i__1 = l;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ == j) {
+ goto L60;
+ }
+ if (a[j + i__ * a_dim1] != 0.) {
+ goto L70;
+ }
+L60:
+ ;
+ }
+
+ m = l;
+ iexc = 1;
+ goto L20;
+L70:
+ ;
+ }
+
+ goto L90;
+
+/* Search for columns isolating an eigenvalue and push them left. */
+
+L80:
+ ++k;
+
+L90:
+ i__1 = l;
+ for (j = k; j <= i__1; ++j) {
+
+ i__2 = l;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ if (i__ == j) {
+ goto L100;
+ }
+ if (a[i__ + j * a_dim1] != 0.) {
+ goto L110;
+ }
+L100:
+ ;
+ }
+
+ m = k;
+ iexc = 2;
+ goto L20;
+L110:
+ ;
+ }
+
+L120:
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ scale[i__] = 1.;
+/* L130: */
+ }
+
+ if (lsame_(job, "P")) {
+ goto L210;
+ }
+
+/*
+ Balance the submatrix in rows K to L.
+
+ Iterative loop for norm reduction
+*/
+
+ sfmin1 = SAFEMINIMUM / PRECISION;
+ sfmax1 = 1. / sfmin1;
+ sfmin2 = sfmin1 * 8.;
+ sfmax2 = 1. / sfmin2;
+L140:
+ noconv = FALSE_;
+
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ c__ = 0.;
+ r__ = 0.;
+
+ i__2 = l;
+ for (j = k; j <= i__2; ++j) {
+ if (j == i__) {
+ goto L150;
+ }
+ c__ += (d__1 = a[j + i__ * a_dim1], abs(d__1));
+ r__ += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+L150:
+ ;
+ }
+ ica = idamax_(&l, &a[i__ * a_dim1 + 1], &c__1);
+ ca = (d__1 = a[ica + i__ * a_dim1], abs(d__1));
+ i__2 = *n - k + 1;
+ ira = idamax_(&i__2, &a[i__ + k * a_dim1], lda);
+ ra = (d__1 = a[i__ + (ira + k - 1) * a_dim1], abs(d__1));
+
+/* Guard against zero C or R due to underflow. */
+
+ if ((c__ == 0.) || (r__ == 0.)) {
+ goto L200;
+ }
+ g = r__ / 8.;
+ f = 1.;
+ s = c__ + r__;
+L160:
+/* Computing MAX */
+ d__1 = max(f,c__);
+/* Computing MIN */
+ d__2 = min(r__,g);
+ if (((c__ >= g) || (max(d__1,ca) >= sfmax2)) || (min(d__2,ra) <=
+ sfmin2)) {
+ goto L170;
+ }
+ f *= 8.;
+ c__ *= 8.;
+ ca *= 8.;
+ r__ /= 8.;
+ g /= 8.;
+ ra /= 8.;
+ goto L160;
+
+L170:
+ g = c__ / 8.;
+L180:
+/* Computing MIN */
+ d__1 = min(f,c__), d__1 = min(d__1,g);
+ if (((g < r__) || (max(r__,ra) >= sfmax2)) || (min(d__1,ca) <= sfmin2)
+ ) {
+ goto L190;
+ }
+ f /= 8.;
+ c__ /= 8.;
+ g /= 8.;
+ ca /= 8.;
+ r__ *= 8.;
+ ra *= 8.;
+ goto L180;
+
+/* Now balance. */
+
+L190:
+ if (c__ + r__ >= s * .95) {
+ goto L200;
+ }
+ if (f < 1. && scale[i__] < 1.) {
+ if (f * scale[i__] <= sfmin1) {
+ goto L200;
+ }
+ }
+ if (f > 1. && scale[i__] > 1.) {
+ if (scale[i__] >= sfmax1 / f) {
+ goto L200;
+ }
+ }
+ g = 1. / f;
+ scale[i__] *= f;
+ noconv = TRUE_;
+
+ i__2 = *n - k + 1;
+ dscal_(&i__2, &g, &a[i__ + k * a_dim1], lda);
+ dscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1);
+
+L200:
+ ;
+ }
+
+ if (noconv) {
+ goto L140;
+ }
+
+L210:
+ *ilo = k;
+ *ihi = l;
+
+ return 0;
+
+/* End of DGEBAL */
+
+} /* dgebal_ */
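+
+/*
+   Editor's usage sketch (hypothetical): balancing a small general matrix
+   with dgebal_ and later undoing the transformation on computed
+   eigenvectors with dgebak_.  The 3-by-3 matrix and the identity
+   "eigenvectors" are placeholders; a real driver computes eigenvectors
+   of the balanced matrix in between (cf. dgeev_ below).  Fenced with
+   #if 0, illustration only.
+*/
+#if 0
+static int example_dgebal_dgebak(void)
+{
+    integer n = 3, lda = 3, ldv = 3, m = 3, ilo, ihi, info = 0;
+    doublereal a[9] = {                  /* column-major 3x3 */
+        1e4, 0.,  0.,
+        1.,  1.,  1e-4,
+        0.,  1e4, 1.
+    };
+    doublereal scale[3];
+    doublereal v[9] = {1., 0., 0., 0., 1., 0., 0., 0., 1.};
+
+    dgebal_("B", &n, a, &lda, &ilo, &ihi, scale, &info);
+    if (info != 0) return (int) info;
+    /* ... compute eigenvectors of the balanced a into v here ... */
+    dgebak_("B", "R", &n, &ilo, &ihi, scale, &m, v, &ldv, &info);
+    return (int) info;
+}
+#endif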
+
+/* Subroutine */ int dgebd2_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal *
+ taup, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *), dlarfg_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DGEBD2 reduces a real general m by n matrix A to upper or lower
+ bidiagonal form B by an orthogonal transformation: Q' * A * P = B.
+
+ If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns in the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the m by n general matrix to be reduced.
+ On exit,
+ if m >= n, the diagonal and the first superdiagonal are
+ overwritten with the upper bidiagonal matrix B; the
+ elements below the diagonal, with the array TAUQ, represent
+ the orthogonal matrix Q as a product of elementary
+ reflectors, and the elements above the first superdiagonal,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors;
+ if m < n, the diagonal and the first subdiagonal are
+ overwritten with the lower bidiagonal matrix B; the
+ elements below the first subdiagonal, with the array TAUQ,
+ represent the orthogonal matrix Q as a product of
+ elementary reflectors, and the elements above the diagonal,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The diagonal elements of the bidiagonal matrix B:
+ D(i) = A(i,i).
+
+ E (output) DOUBLE PRECISION array, dimension (min(M,N)-1)
+ The off-diagonal elements of the bidiagonal matrix B:
+ if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
+ if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
+
+ TAUQ (output) DOUBLE PRECISION array dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix Q. See Further Details.
+
+ TAUP (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix P. See Further Details.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (max(M,N))
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ If m >= n,
+
+ Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors;
+ v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i);
+ u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n,
+
+ Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors;
+ v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i);
+ u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The contents of A on exit are illustrated by the following examples:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
+ ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
+ ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
+ ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
+ ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
+ ( v1 v2 v3 v4 v5 )
+
+ where d and e denote diagonal and off-diagonal elements of B, vi
+ denotes an element of the vector defining H(i), and ui an element of
+ the vector defining G(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("DGEBD2", &i__1);
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ *
+ a_dim1], &c__1, &tauq[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tauq[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ a[i__ + i__ * a_dim1] = d__[i__];
+
+ if (i__ < *n) {
+
+/*
+ Generate elementary reflector G(i) to annihilate
+ A(i,i+2:n)
+*/
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
+ i__3,*n) * a_dim1], lda, &taup[i__]);
+ e[i__] = a[i__ + (i__ + 1) * a_dim1];
+ a[i__ + (i__ + 1) * a_dim1] = 1.;
+
+/* Apply G(i) to A(i+1:m,i+1:n) from the right */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ dlarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &work[1]);
+ a[i__ + (i__ + 1) * a_dim1] = e[i__];
+ } else {
+ taup[i__] = 0.;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector G(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) *
+ a_dim1], lda, &taup[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+
+/* Apply G(i) to A(i+1:m,i:n) from the right */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+/* Computing MIN */
+ i__4 = i__ + 1;
+ dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &taup[
+ i__], &a[min(i__4,*m) + i__ * a_dim1], lda, &work[1]);
+ a[i__ + i__ * a_dim1] = d__[i__];
+
+ if (i__ < *m) {
+
+/*
+ Generate elementary reflector H(i) to annihilate
+ A(i+2:m,i)
+*/
+
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) +
+ i__ * a_dim1], &c__1, &tauq[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Apply H(i) to A(i+1:m,i+1:n) from the left */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &
+ c__1, &tauq[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &work[1]);
+ a[i__ + 1 + i__ * a_dim1] = e[i__];
+ } else {
+ tauq[i__] = 0.;
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of DGEBD2 */
+
+} /* dgebd2_ */
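+
+/*
+   Editor's usage sketch (hypothetical): unblocked bidiagonal reduction
+   of a 3-by-3 matrix with dgebd2_.  Per the docstring, D and E receive
+   the bidiagonal of B while A, TAUQ and TAUP encode Q and P as products
+   of elementary reflectors; WORK needs max(M,N) slots.  Fenced with
+   #if 0, illustration only.
+*/
+#if 0
+static int example_dgebd2(void)
+{
+    integer m = 3, n = 3, lda = 3, info = 0;
+    doublereal a[9] = {    /* column-major 3x3 */
+        4., 1., 0.,
+        2., 3., 1.,
+        0., 2., 5.
+    };
+    doublereal d[3], e[2], tauq[3], taup[3], work[3];
+
+    dgebd2_(&m, &n, a, &lda, d, e, tauq, taup, work, &info);
+    /* d[0..2] and e[0..1] now hold B; Q and P live in a/tauq/taup */
+    return (int) info;
+}
+#endif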
+
+/* Subroutine */ int dgebrd_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *d__, doublereal *e, doublereal *tauq, doublereal *
+ taup, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j, nb, nx;
+ static doublereal ws;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ static integer nbmin, iinfo, minmn;
+ extern /* Subroutine */ int dgebd2_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *), dlabrd_(integer *, integer *, integer *
+ , doublereal *, integer *, doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *)
+ , xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwrkx, ldwrky, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DGEBRD reduces a general real M-by-N matrix A to upper or lower
+ bidiagonal form B by an orthogonal transformation: Q**T * A * P = B.
+
+ If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns in the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the M-by-N general matrix to be reduced.
+ On exit,
+ if m >= n, the diagonal and the first superdiagonal are
+ overwritten with the upper bidiagonal matrix B; the
+ elements below the diagonal, with the array TAUQ, represent
+ the orthogonal matrix Q as a product of elementary
+ reflectors, and the elements above the first superdiagonal,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors;
+ if m < n, the diagonal and the first subdiagonal are
+ overwritten with the lower bidiagonal matrix B; the
+ elements below the first subdiagonal, with the array TAUQ,
+ represent the orthogonal matrix Q as a product of
+ elementary reflectors, and the elements above the diagonal,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The diagonal elements of the bidiagonal matrix B:
+ D(i) = A(i,i).
+
+ E (output) DOUBLE PRECISION array, dimension (min(M,N)-1)
+ The off-diagonal elements of the bidiagonal matrix B:
+ if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
+ if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
+
+ TAUQ (output) DOUBLE PRECISION array dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix Q. See Further Details.
+
+ TAUP (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix P. See Further Details.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The length of the array WORK. LWORK >= max(1,M,N).
+ For optimum performance LWORK >= (M+N)*NB, where NB
+ is the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ If m >= n,
+
+ Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors;
+ v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i);
+ u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n,
+
+ Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors;
+ v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i);
+ u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The contents of A on exit are illustrated by the following examples:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
+ ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
+ ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
+ ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
+ ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
+ ( v1 v2 v3 v4 v5 )
+
+ where d and e denote diagonal and off-diagonal elements of B, vi
+ denotes an element of the vector defining H(i), and ui an element of
+ the vector defining G(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* Computing MAX */
+ i__1 = 1, i__2 = ilaenv_(&c__1, "DGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = max(i__1,i__2);
+ lwkopt = (*m + *n) * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*lwork < max(i__1,*n) && ! lquery) {
+ *info = -10;
+ }
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("DGEBRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ minmn = min(*m,*n);
+ if (minmn == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ ws = (doublereal) max(*m,*n);
+ ldwrkx = *m;
+ ldwrky = *n;
+
+ if (nb > 1 && nb < minmn) {
+
+/*
+ Set the crossover point NX.
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "DGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+
+/* Determine when to switch from blocked to unblocked code. */
+
+ if (nx < minmn) {
+ ws = (doublereal) ((*m + *n) * nb);
+ if ((doublereal) (*lwork) < ws) {
+
+/*
+ Not enough work space for the optimal NB, consider using
+ a smaller block size.
+*/
+
+ nbmin = ilaenv_(&c__2, "DGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ if (*lwork >= (*m + *n) * nbmin) {
+ nb = *lwork / (*m + *n);
+ } else {
+ nb = 1;
+ nx = minmn;
+ }
+ }
+ }
+ } else {
+ nx = minmn;
+ }
+
+ i__1 = minmn - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+
+/*
+ Reduce rows and columns i:i+nb-1 to bidiagonal form and return
+ the matrices X and Y which are needed to update the unreduced
+ part of the matrix
+*/
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ + 1;
+ dlabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[
+ i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx
+ * nb + 1], &ldwrky);
+
+/*
+ Update the trailing submatrix A(i+nb:m,i+nb:n), using an update
+ of the form A := A - V*Y' - X*U'
+*/
+
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ dgemm_("No transpose", "Transpose", &i__3, &i__4, &nb, &c_b3001, &a[
+ i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb + nb + 1], &
+ ldwrky, &c_b2865, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ dgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &c_b3001, &
+ work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, &
+ c_b2865, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy diagonal and off-diagonal elements of B back into A */
+
+ if (*m >= *n) {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + j * a_dim1] = d__[j];
+ a[j + (j + 1) * a_dim1] = e[j];
+/* L10: */
+ }
+ } else {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + j * a_dim1] = d__[j];
+ a[j + 1 + j * a_dim1] = e[j];
+/* L20: */
+ }
+ }
+/* L30: */
+ }
+
+/* Use unblocked code to reduce the remainder of the matrix */
+
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ dgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &
+ tauq[i__], &taup[i__], &work[1], &iinfo);
+ work[1] = ws;
+ return 0;
+
+/* End of DGEBRD */
+
+} /* dgebrd_ */
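+
+/*
+   Editor's usage sketch (hypothetical): the LWORK = -1 workspace query
+   documented above, followed by the actual blocked reduction.  malloc
+   and the two-phase query convention are the only additions; array
+   sizing is the caller's responsibility as in the dgebd2_ sketch.
+   Fenced with #if 0, illustration only.
+*/
+#if 0
+#include <stdlib.h>
+static int example_dgebrd(integer m, integer n, doublereal *a, integer lda,
+                          doublereal *d, doublereal *e,
+                          doublereal *tauq, doublereal *taup)
+{
+    integer info = 0, lwork = -1;
+    doublereal query;
+
+    /* 1) query: the optimal LWORK comes back in work[0] */
+    dgebrd_(&m, &n, a, &lda, d, e, tauq, taup, &query, &lwork, &info);
+    if (info != 0) return (int) info;
+
+    /* 2) allocate the reported amount and run the reduction */
+    lwork = (integer) query;
+    {
+        doublereal *work = (doublereal *) malloc(lwork * sizeof(*work));
+        if (work == NULL) return -1;
+        dgebrd_(&m, &n, a, &lda, d, e, tauq, taup, work, &lwork, &info);
+        free(work);
+    }
+    return (int) info;
+}
+#endif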
+
+/* Subroutine */ int dgeev_(char *jobvl, char *jobvr, integer *n, doublereal *
+ a, integer *lda, doublereal *wr, doublereal *wi, doublereal *vl,
+ integer *ldvl, doublereal *vr, integer *ldvr, doublereal *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3, i__4;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, k;
+ static doublereal r__, cs, sn;
+ static integer ihi;
+ static doublereal scl;
+ static integer ilo;
+ static doublereal dum[1], eps;
+ static integer ibal;
+ static char side[1];
+ static integer maxb;
+ static doublereal anrm;
+ static integer ierr, itau;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ static integer iwrk, nout;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern doublereal dlapy2_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebak_(
+ char *, char *, integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *, integer *),
+ dgebal_(char *, integer *, doublereal *, integer *, integer *,
+ integer *, doublereal *, integer *);
+ static logical scalea;
+
+ static doublereal cscale;
+ extern doublereal dlange_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *);
+ extern /* Subroutine */ int dgehrd_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dlascl_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ dlartg_(doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *), xerbla_(char *, integer *);
+ static logical select[1];
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static doublereal bignum;
+ extern /* Subroutine */ int dorghr_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dhseqr_(char *, char *, integer *, integer *, integer
+ *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dtrevc_(char *, char *, logical *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, integer *, integer *, doublereal *, integer *);
+ static integer minwrk, maxwrk;
+ static logical wantvl;
+ static doublereal smlnum;
+ static integer hswork;
+ static logical lquery, wantvr;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ December 8, 1999
+
+
+ Purpose
+ =======
+
+ DGEEV computes for an N-by-N real nonsymmetric matrix A, the
+ eigenvalues and, optionally, the left and/or right eigenvectors.
+
+ The right eigenvector v(j) of A satisfies
+ A * v(j) = lambda(j) * v(j)
+ where lambda(j) is its eigenvalue.
+ The left eigenvector u(j) of A satisfies
+ u(j)**H * A = lambda(j) * u(j)**H
+ where u(j)**H denotes the conjugate transpose of u(j).
+
+ The computed eigenvectors are normalized to have Euclidean norm
+ equal to 1 and largest component real.
+
+ Arguments
+ =========
+
+ JOBVL (input) CHARACTER*1
+ = 'N': left eigenvectors of A are not computed;
+ = 'V': left eigenvectors of A are computed.
+
+ JOBVR (input) CHARACTER*1
+ = 'N': right eigenvectors of A are not computed;
+ = 'V': right eigenvectors of A are computed.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the N-by-N matrix A.
+ On exit, A has been overwritten.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ WR (output) DOUBLE PRECISION array, dimension (N)
+ WI (output) DOUBLE PRECISION array, dimension (N)
+ WR and WI contain the real and imaginary parts,
+ respectively, of the computed eigenvalues. Complex
+ conjugate pairs of eigenvalues appear consecutively
+ with the eigenvalue having the positive imaginary part
+ first.
+
+ VL (output) DOUBLE PRECISION array, dimension (LDVL,N)
+ If JOBVL = 'V', the left eigenvectors u(j) are stored one
+ after another in the columns of VL, in the same order
+ as their eigenvalues.
+ If JOBVL = 'N', VL is not referenced.
+ If the j-th eigenvalue is real, then u(j) = VL(:,j),
+ the j-th column of VL.
+ If the j-th and (j+1)-st eigenvalues form a complex
+ conjugate pair, then u(j) = VL(:,j) + i*VL(:,j+1) and
+ u(j+1) = VL(:,j) - i*VL(:,j+1).
+
+ LDVL (input) INTEGER
+ The leading dimension of the array VL. LDVL >= 1; if
+ JOBVL = 'V', LDVL >= N.
+
+ VR (output) DOUBLE PRECISION array, dimension (LDVR,N)
+ If JOBVR = 'V', the right eigenvectors v(j) are stored one
+ after another in the columns of VR, in the same order
+ as their eigenvalues.
+ If JOBVR = 'N', VR is not referenced.
+ If the j-th eigenvalue is real, then v(j) = VR(:,j),
+ the j-th column of VR.
+ If the j-th and (j+1)-st eigenvalues form a complex
+ conjugate pair, then v(j) = VR(:,j) + i*VR(:,j+1) and
+ v(j+1) = VR(:,j) - i*VR(:,j+1).
+
+ LDVR (input) INTEGER
+ The leading dimension of the array VR. LDVR >= 1; if
+ JOBVR = 'V', LDVR >= N.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,3*N), and
+ if JOBVL = 'V' or JOBVR = 'V', LWORK >= 4*N. For good
+ performance, LWORK must generally be larger.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = i, the QR algorithm failed to compute all the
+ eigenvalues, and no eigenvectors have been computed;
+ elements i+1:N of WR and WI contain eigenvalues which
+ have converged.
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --wr;
+ --wi;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ wantvl = lsame_(jobvl, "V");
+ wantvr = lsame_(jobvr, "V");
+ if (! wantvl && ! lsame_(jobvl, "N")) {
+ *info = -1;
+ } else if (! wantvr && ! lsame_(jobvr, "N")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if ((*ldvl < 1) || (wantvl && *ldvl < *n)) {
+ *info = -9;
+ } else if ((*ldvr < 1) || (wantvr && *ldvr < *n)) {
+ *info = -11;
+ }
+
+/*
+ Compute workspace
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ NB refers to the optimal block size for the immediately
+ following subroutine, as returned by ILAENV.
+ HSWORK refers to the workspace preferred by DHSEQR, as
+ calculated below. HSWORK is computed assuming ILO=1 and IHI=N,
+ the worst case.)
+*/
+
+ minwrk = 1;
+ if (*info == 0 && ((*lwork >= 1) || (lquery))) {
+ maxwrk = ((*n) << (1)) + *n * ilaenv_(&c__1, "DGEHRD", " ", n, &c__1,
+ n, &c__0, (ftnlen)6, (ftnlen)1);
+ if (! wantvl && ! wantvr) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * 3;
+ minwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = ilaenv_(&c__8, "DHSEQR", "EN", n, &c__1, n, &c_n1, (ftnlen)
+ 6, (ftnlen)2);
+ maxb = max(i__1,2);
+/*
+ Computing MIN
+ Computing MAX
+*/
+ i__3 = 2, i__4 = ilaenv_(&c__4, "DHSEQR", "EN", n, &c__1, n, &
+ c_n1, (ftnlen)6, (ftnlen)2);
+ i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
+ k = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = k * (k + 2), i__2 = (*n) << (1);
+ hswork = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *n +
+ hswork;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n) << (2);
+ minwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + (*n - 1) * ilaenv_(&c__1,
+ "DORGHR", " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = ilaenv_(&c__8, "DHSEQR", "SV", n, &c__1, n, &c_n1, (ftnlen)
+ 6, (ftnlen)2);
+ maxb = max(i__1,2);
+/*
+ Computing MIN
+ Computing MAX
+*/
+ i__3 = 2, i__4 = ilaenv_(&c__4, "DHSEQR", "SV", n, &c__1, n, &
+ c_n1, (ftnlen)6, (ftnlen)2);
+ i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
+ k = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = k * (k + 2), i__2 = (*n) << (1);
+ hswork = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *n +
+ hswork;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n) << (2);
+ maxwrk = max(i__1,i__2);
+ }
+ work[1] = (doublereal) maxwrk;
+ }
+ if (*lwork < minwrk && ! lquery) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEEV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = PRECISION;
+ smlnum = SAFEMINIMUM;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = dlange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0. && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ dlascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/*
+ Balance the matrix
+ (Workspace: need N)
+*/
+
+ ibal = 1;
+ dgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr);
+
+/*
+ Reduce to upper Hessenberg form
+ (Workspace: need 3*N, prefer 2*N+N*NB)
+*/
+
+ itau = ibal + *n;
+ iwrk = itau + *n;
+ i__1 = *lwork - iwrk + 1;
+ dgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+ &ierr);
+
+ if (wantvl) {
+
+/*
+ Want left eigenvectors
+ Copy Householder vectors to VL
+*/
+
+ *(unsigned char *)side = 'L';
+ dlacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+ ;
+
+/*
+ Generate orthogonal matrix in VL
+ (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB)
+*/
+
+ i__1 = *lwork - iwrk + 1;
+ dorghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+
+/*
+ Perform QR iteration, accumulating Schur vectors in VL
+ (Workspace: need N+1, prefer N+HSWORK (see comments) )
+*/
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+ vl[vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+ if (wantvr) {
+
+/*
+ Want left and right eigenvectors
+ Copy Schur vectors to VR
+*/
+
+ *(unsigned char *)side = 'B';
+ dlacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+ }
+
+ } else if (wantvr) {
+
+/*
+ Want right eigenvectors
+ Copy Householder vectors to VR
+*/
+
+ *(unsigned char *)side = 'R';
+ dlacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+ ;
+
+/*
+ Generate orthogonal matrix in VR
+ (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB)
+*/
+
+ i__1 = *lwork - iwrk + 1;
+ dorghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+
+/*
+ Perform QR iteration, accumulating Schur vectors in VR
+ (Workspace: need N+1, prefer N+HSWORK (see comments) )
+*/
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ dhseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+ vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+ } else {
+
+/*
+ Compute eigenvalues only
+ (Workspace: need N+1, prefer N+HSWORK (see comments) )
+*/
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ dhseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+ vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
+ }
+
+/* If INFO > 0 from DHSEQR, then quit */
+
+ if (*info > 0) {
+ goto L50;
+ }
+
+ if ((wantvl) || (wantvr)) {
+
+/*
+ Compute left and/or right eigenvectors
+ (Workspace: need 4*N)
+*/
+
+ dtrevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+ &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &ierr);
+ }
+
+ if (wantvl) {
+
+/*
+ Undo balancing of left eigenvectors
+ (Workspace: need N)
+*/
+
+ dgebak_("B", "L", n, &ilo, &ihi, &work[ibal], n, &vl[vl_offset], ldvl,
+ &ierr);
+
+/* Normalize left eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (wi[i__] == 0.) {
+ scl = 1. / dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ } else if (wi[i__] > 0.) {
+ d__1 = dnrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ d__2 = dnrm2_(n, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+ scl = 1. / dlapy2_(&d__1, &d__2);
+ dscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ dscal_(n, &scl, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+ d__1 = vl[k + i__ * vl_dim1];
+/* Computing 2nd power */
+ d__2 = vl[k + (i__ + 1) * vl_dim1];
+ work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2;
+/* L10: */
+ }
+ k = idamax_(n, &work[iwrk], &c__1);
+ dlartg_(&vl[k + i__ * vl_dim1], &vl[k + (i__ + 1) * vl_dim1],
+ &cs, &sn, &r__);
+ drot_(n, &vl[i__ * vl_dim1 + 1], &c__1, &vl[(i__ + 1) *
+ vl_dim1 + 1], &c__1, &cs, &sn);
+ vl[k + (i__ + 1) * vl_dim1] = 0.;
+ }
+/* L20: */
+ }
+ }
+
+ if (wantvr) {
+
+/*
+ Undo balancing of right eigenvectors
+ (Workspace: need N)
+*/
+
+ dgebak_("B", "R", n, &ilo, &ihi, &work[ibal], n, &vr[vr_offset], ldvr,
+ &ierr);
+
+/* Normalize right eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (wi[i__] == 0.) {
+ scl = 1. / dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ } else if (wi[i__] > 0.) {
+ d__1 = dnrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ d__2 = dnrm2_(n, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+ scl = 1. / dlapy2_(&d__1, &d__2);
+ dscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ dscal_(n, &scl, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+ d__1 = vr[k + i__ * vr_dim1];
+/* Computing 2nd power */
+ d__2 = vr[k + (i__ + 1) * vr_dim1];
+ work[iwrk + k - 1] = d__1 * d__1 + d__2 * d__2;
+/* L30: */
+ }
+ k = idamax_(n, &work[iwrk], &c__1);
+ dlartg_(&vr[k + i__ * vr_dim1], &vr[k + (i__ + 1) * vr_dim1],
+ &cs, &sn, &r__);
+ drot_(n, &vr[i__ * vr_dim1 + 1], &c__1, &vr[(i__ + 1) *
+ vr_dim1 + 1], &c__1, &cs, &sn);
+ vr[k + (i__ + 1) * vr_dim1] = 0.;
+ }
+/* L40: */
+ }
+ }
+
+/* Undo scaling if necessary */
+
+L50:
+ if (scalea) {
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info +
+ 1], &i__2, &ierr);
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info +
+ 1], &i__2, &ierr);
+ if (*info > 0) {
+ i__1 = ilo - 1;
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1],
+ n, &ierr);
+ i__1 = ilo - 1;
+ dlascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1],
+ n, &ierr);
+ }
+ }
+
+ work[1] = (doublereal) maxwrk;
+ return 0;
+
+/* End of DGEEV */
+
+} /* dgeev_ */
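+
+/*
+  Illustrative usage sketch (hand-written editor addition, not f2c output,
+  and excluded from compilation): computes the eigenvalues of a 2x2 matrix
+  with the DGEEV translation above, sizing WORK via the LWORK = -1
+  workspace query described in the comments.  The helper name
+  example_dgeev and the test matrix are hypothetical.
+*/
+#if 0
+#include <stdlib.h>
+#include <stdio.h>
+
+static void example_dgeev(void)
+{
+    /* Column-major 2x2 matrix [[0,-1],[1,0]]: eigenvalues +/- i. */
+    doublereal a[4] = { 0., 1., -1., 0. };
+    doublereal wr[2], wi[2], vl[1], vr[1], wkopt, *work;
+    integer n = 2, lda = 2, ldv = 1, lwork = -1, info;
+
+    /* Workspace query: the optimal LWORK is returned in wkopt. */
+    dgeev_("N", "N", &n, a, &lda, wr, wi, vl, &ldv, vr, &ldv,
+           &wkopt, &lwork, &info);
+    lwork = (integer) wkopt;
+    work = (doublereal *) malloc(lwork * sizeof(doublereal));
+
+    dgeev_("N", "N", &n, a, &lda, wr, wi, vl, &ldv, vr, &ldv,
+           work, &lwork, &info);
+    if (info == 0)
+        printf("lambda = %g%+gi, %g%+gi\n", wr[0], wi[0], wr[1], wi[1]);
+    free(work);
+}
+#endif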
+
+/* Subroutine */ int dgehd2_(integer *n, integer *ilo, integer *ihi,
+ doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__;
+ static doublereal aii;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *), dlarfg_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DGEHD2 reduces a real general matrix A to upper Hessenberg form H by
+ an orthogonal similarity transformation: Q' * A * Q = H .
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that A is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to DGEBAL; otherwise they should be
+ set to 1 and N respectively. See Further Details.
+ 1 <= ILO <= IHI <= max(1,N).
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the n by n general matrix to be reduced.
+ On exit, the upper triangle and the first subdiagonal of A
+ are overwritten with the upper Hessenberg matrix H, and the
+ elements below the first subdiagonal, with the array TAU,
+ represent the orthogonal matrix Q as a product of elementary
+ reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (output) DOUBLE PRECISION array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of (ihi-ilo) elementary
+ reflectors
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
+ exit in A(i+2:ihi,i), and tau in TAU(i).
+
+ The contents of A are illustrated by the following example, with
+ n = 7, ilo = 2 and ihi = 6:
+
+ on entry, on exit,
+
+ ( a a a a a a a ) ( a a h h h h a )
+ ( a a a a a a ) ( a h h h h a )
+ ( a a a a a a ) ( h h h h h h )
+ ( a a a a a a ) ( v2 h h h h h )
+ ( a a a a a a ) ( v2 v3 h h h h )
+ ( a a a a a a ) ( v2 v3 v4 h h h )
+ ( a ) ( a )
+
+ where a denotes an element of the original matrix A, h denotes a
+ modified element of the upper Hessenberg matrix H, and vi denotes an
+ element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -2;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEHD2", &i__1);
+ return 0;
+ }
+
+ i__1 = *ihi - 1;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+
+/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */
+
+ i__2 = *ihi - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ *
+ a_dim1], &c__1, &tau[i__]);
+ aii = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */
+
+ i__2 = *ihi - i__;
+ dlarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+ i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]);
+
+/* Apply H(i) to A(i+1:ihi,i+1:n) from the left */
+
+ i__2 = *ihi - i__;
+ i__3 = *n - i__;
+ dlarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]);
+
+ a[i__ + 1 + i__ * a_dim1] = aii;
+/* L10: */
+ }
+
+ return 0;
+
+/* End of DGEHD2 */
+
+} /* dgehd2_ */
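+
+/*
+  Illustrative sketch (hand-written editor addition, not f2c output, and
+  excluded from compilation): applies a single elementary reflector
+  H = I - tau*v*v' to a vector, mirroring the representation described in
+  the Further Details above.  The helper name apply_reflector is
+  hypothetical and is not used by the translated routines.
+*/
+#if 0
+static void apply_reflector(integer n, doublereal tau,
+                            const doublereal *v, doublereal *x)
+{
+    integer i;
+    doublereal dot = 0.;
+
+    for (i = 0; i < n; ++i)          /* dot = v' * x */
+        dot += v[i] * x[i];
+    for (i = 0; i < n; ++i)          /* x := (I - tau*v*v') * x */
+        x[i] -= tau * v[i] * dot;
+}
+#endif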
+
+/* Subroutine */ int dgehrd_(integer *n, integer *ilo, integer *ihi,
+ doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__;
+ static doublereal t[4160] /* was [65][64] */;
+ static integer ib;
+ static doublereal ei;
+ static integer nb, nh, nx, iws;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int dgehd2_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *),
+ dlarfb_(char *, char *, char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *), dlahrd_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DGEHRD reduces a real general matrix A to upper Hessenberg form H by
+ an orthogonal similarity transformation: Q' * A * Q = H .
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that A is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to DGEBAL; otherwise they should be
+ set to 1 and N respectively. See Further Details.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the N-by-N general matrix to be reduced.
+ On exit, the upper triangle and the first subdiagonal of A
+ are overwritten with the upper Hessenberg matrix H, and the
+ elements below the first subdiagonal, with the array TAU,
+ represent the orthogonal matrix Q as a product of elementary
+ reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (output) DOUBLE PRECISION array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to
+ zero.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The length of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of (ihi-ilo) elementary
+ reflectors
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
+ exit in A(i+2:ihi,i), and tau in TAU(i).
+
+ The contents of A are illustrated by the following example, with
+ n = 7, ilo = 2 and ihi = 6:
+
+ on entry, on exit,
+
+ ( a a a a a a a ) ( a a h h h h a )
+ ( a a a a a a ) ( a h h h h a )
+ ( a a a a a a ) ( h h h h h h )
+ ( a a a a a a ) ( v2 h h h h h )
+ ( a a a a a a ) ( v2 v3 h h h h )
+ ( a a a a a a ) ( v2 v3 v4 h h h )
+ ( a ) ( a )
+
+ where a denotes an element of the original matrix A, h denotes a
+ modified element of the upper Hessenberg matrix H, and vi denotes an
+ element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* Computing MIN */
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = min(i__1,i__2);
+ lwkopt = *n * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -2;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEHRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */
+
+ i__1 = *ilo - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ tau[i__] = 0.;
+/* L10: */
+ }
+ i__1 = *n - 1;
+ for (i__ = max(1,*ihi); i__ <= i__1; ++i__) {
+ tau[i__] = 0.;
+/* L20: */
+ }
+
+/* Quick return if possible */
+
+ nh = *ihi - *ilo + 1;
+ if (nh <= 1) {
+ work[1] = 1.;
+ return 0;
+ }
+
+/*
+ Determine the block size.
+
+ Computing MIN
+*/
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = min(i__1,i__2);
+ nbmin = 2;
+ iws = 1;
+ if (nb > 1 && nb < nh) {
+
+/*
+ Determine when to cross over from blocked to unblocked code
+ (last block is always handled by unblocked code).
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "DGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < nh) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ iws = *n * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: determine the
+ minimum value of NB, and reduce NB or force use of
+ unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DGEHRD", " ", n, ilo, ihi, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ if (*lwork >= *n * nbmin) {
+ nb = *lwork / *n;
+ } else {
+ nb = 1;
+ }
+ }
+ }
+ }
+ ldwork = *n;
+
+ if ((nb < nbmin) || (nb >= nh)) {
+
+/* Use unblocked code below */
+
+ i__ = *ilo;
+
+ } else {
+
+/* Use blocked code */
+
+ i__1 = *ihi - 1 - nx;
+ i__2 = nb;
+ for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *ihi - i__;
+ ib = min(i__3,i__4);
+
+/*
+ Reduce columns i:i+ib-1 to Hessenberg form, returning the
+ matrices V and T of the block reflector H = I - V*T*V'
+ which performs the reduction, and also the matrix Y = A*V*T
+*/
+
+ dlahrd_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, &
+ c__65, &work[1], &ldwork);
+
+/*
+ Apply the block reflector H to A(1:ihi,i+ib:ihi) from the
+ right, computing A := A - Y * V'. V(i+ib,ib-1) must be set
+ to 1.
+*/
+
+ ei = a[i__ + ib + (i__ + ib - 1) * a_dim1];
+ a[i__ + ib + (i__ + ib - 1) * a_dim1] = 1.;
+ i__3 = *ihi - i__ - ib + 1;
+ dgemm_("No transpose", "Transpose", ihi, &i__3, &ib, &c_b3001, &
+ work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, &
+ c_b2865, &a[(i__ + ib) * a_dim1 + 1], lda);
+ a[i__ + ib + (i__ + ib - 1) * a_dim1] = ei;
+
+/*
+ Apply the block reflector H to A(i+1:ihi,i+ib:n) from the
+ left
+*/
+
+ i__3 = *ihi - i__;
+ i__4 = *n - i__ - ib + 1;
+ dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
+ i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &c__65, &a[
+ i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &ldwork);
+/* L30: */
+ }
+ }
+
+/* Use unblocked code to reduce the rest of the matrix */
+
+ dgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+ work[1] = (doublereal) iws;
+
+ return 0;
+
+/* End of DGEHRD */
+
+} /* dgehrd_ */
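+
+/*
+  Illustrative usage sketch (hand-written editor addition, not f2c output,
+  and excluded from compilation): reduces a 3x3 matrix to upper Hessenberg
+  form with the DGEHRD translation above, using ILO = 1 and IHI = N as the
+  Arguments section prescribes when no prior DGEBAL call was made.  The
+  helper name example_dgehrd and the data are hypothetical.
+*/
+#if 0
+#include <stdlib.h>
+
+static void example_dgehrd(void)
+{
+    integer n = 3, ilo = 1, ihi = 3, lda = 3, info;
+    doublereal a[9] = { 1., 4., 7., 2., 5., 8., 3., 6., 10. };  /* column-major */
+    doublereal tau[2];                 /* N-1 scalar reflector factors */
+    integer lwork = 64 * n;            /* comfortably >= N*NB */
+    doublereal *work = (doublereal *) malloc(lwork * sizeof(doublereal));
+
+    dgehrd_(&n, &ilo, &ihi, a, &lda, tau, work, &lwork, &info);
+    /* H now occupies the upper triangle and first subdiagonal of a;
+       the reflectors defining Q sit below the first subdiagonal. */
+    free(work);
+}
+#endif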
+
+/* Subroutine */ int dgelq2_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *tau, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, k;
+ static doublereal aii;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *), dlarfg_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DGELQ2 computes an LQ factorization of a real m by n matrix A:
+ A = L * Q.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the m by n matrix A.
+ On exit, the elements on and below the diagonal of the array
+ contain the m by min(m,n) lower trapezoidal matrix L (L is
+ lower triangular if m <= n); the elements above the diagonal,
+ with the array TAU, represent the orthogonal matrix Q as a
+ product of elementary reflectors (see Further Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (M)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(k) . . . H(2) H(1), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGELQ2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * a_dim1]
+ , lda, &tau[i__]);
+ if (i__ < *m) {
+
+/* Apply H(i) to A(i+1:m,i:n) from the right */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ dlarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[
+ i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of DGELQ2 */
+
+} /* dgelq2_ */
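+
+/*
+  Illustrative usage sketch (hand-written editor addition, not f2c output,
+  and excluded from compilation): unblocked LQ factorization of a wide
+  2x3 matrix with the DGELQ2 translation above.  The helper name
+  example_dgelq2 and the data are hypothetical.
+*/
+#if 0
+static void example_dgelq2(void)
+{
+    integer m = 2, n = 3, lda = 2, info;
+    doublereal a[6] = { 1., 4., 2., 5., 3., 6. };   /* 2x3, column-major */
+    doublereal tau[2];                              /* min(M,N) factors */
+    doublereal work[2];                             /* dimension M */
+
+    dgelq2_(&m, &n, a, &lda, tau, work, &info);
+    /* L is now on and below the diagonal of a; the reflectors defining
+       Q are stored above the diagonal together with tau. */
+}
+#endif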
+
+/* Subroutine */ int dgelqf_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int dgelq2_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *), dlarfb_(char *,
+ char *, char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DGELQF computes an LQ factorization of a real M-by-N matrix A:
+ A = L * Q.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, the elements on and below the diagonal of the array
+ contain the m-by-min(m,n) lower trapezoidal matrix L (L is
+ lower triangular if m <= n); the elements above the diagonal,
+ with the array TAU, represent the orthogonal matrix Q as a
+ product of elementary reflectors (see Further Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,M).
+ For optimum performance LWORK >= M*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(k) . . . H(2) H(1), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ lwkopt = *m * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*m) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGELQF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if (nb > 1 && nb < k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "DGELQF", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DGELQF", " ", m, n, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+ i__1 = k - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/*
+ Compute the LQ factorization of the current block
+ A(i:i+ib-1,i:n)
+*/
+
+ i__3 = *n - i__ + 1;
+ dgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+ 1], &iinfo);
+ if (i__ + ib <= *m) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__3 = *n - i__ + 1;
+ dlarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i+ib:m,i:n) from the right */
+
+ i__3 = *m - i__ - ib + 1;
+ i__4 = *n - i__ + 1;
+ dlarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3,
+ &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+ 1], &ldwork);
+ }
+/* L10: */
+ }
+ } else {
+ i__ = 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+ if (i__ <= k) {
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ dgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+ , &iinfo);
+ }
+
+ work[1] = (doublereal) iws;
+ return 0;
+
+/* End of DGELQF */
+
+} /* dgelqf_ */
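+
+/*
+  Illustrative usage sketch (hand-written editor addition, not f2c output,
+  and excluded from compilation): the blocked DGELQF translation above
+  with WORK sized by an LWORK = -1 workspace query, the pattern its
+  Arguments section documents.  The helper name example_dgelqf and the
+  data are hypothetical.
+*/
+#if 0
+#include <stdlib.h>
+
+static void example_dgelqf(void)
+{
+    integer m = 2, n = 3, lda = 2, lwork = -1, info;
+    doublereal a[6] = { 1., 4., 2., 5., 3., 6. };   /* 2x3, column-major */
+    doublereal tau[2], wkopt, *work;
+
+    /* Query: the optimal LWORK comes back as WORK(1). */
+    dgelqf_(&m, &n, a, &lda, tau, &wkopt, &lwork, &info);
+    lwork = (integer) wkopt;
+    work = (doublereal *) malloc(lwork * sizeof(doublereal));
+    dgelqf_(&m, &n, a, &lda, tau, work, &lwork, &info);
+    free(work);
+}
+#endif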
+
+/* Subroutine */ int dgelsd_(integer *m, integer *n, integer *nrhs,
+ doublereal *a, integer *lda, doublereal *b, integer *ldb, doublereal *
+ s, doublereal *rcond, integer *rank, doublereal *work, integer *lwork,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ static integer ie, il, mm;
+ static doublereal eps, anrm, bnrm;
+ static integer itau, nlvl, iascl, ibscl;
+ static doublereal sfmin;
+ static integer minmn, maxmn, itaup, itauq, mnthr, nwork;
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *), dgebrd_(
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *);
+ extern doublereal dlamch_(char *), dlange_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *);
+ extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *),
+ dlalsd_(char *, integer *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, integer *), dlascl_(char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ integer *, doublereal *, integer *, integer *), dgeqrf_(
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, integer *), dlacpy_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static doublereal bignum;
+ extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *);
+ static integer wlalsd;
+ extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ static integer ldwork;
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ static integer minwrk, maxwrk;
+ static doublereal smlnum;
+ static logical lquery;
+ static integer smlsiz;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DGELSD computes the minimum-norm solution to a real linear least
+ squares problem:
+ minimize 2-norm(| b - A*x |)
+ using the singular value decomposition (SVD) of A. A is an M-by-N
+ matrix which may be rank-deficient.
+
+ Several right hand side vectors b and solution vectors x can be
+ handled in a single call; they are stored as the columns of the
+ M-by-NRHS right hand side matrix B and the N-by-NRHS solution
+ matrix X.
+
+ The problem is solved in three steps:
+ (1) Reduce the coefficient matrix A to bidiagonal form with
+ Householder transformations, reducing the original problem
+ into a "bidiagonal least squares problem" (BLS)
+ (2) Solve the BLS using a divide and conquer approach.
+    (3) Apply back all the Householder transformations to solve
+ the original least squares problem.
+
+ The effective rank of A is determined by treating as zero those
+ singular values which are less than RCOND times the largest singular
+ value.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrices B and X. NRHS >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, A has been destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
+ On entry, the M-by-NRHS right hand side matrix B.
+ On exit, B is overwritten by the N-by-NRHS solution
+ matrix X. If m >= n and RANK = n, the residual
+ sum-of-squares for the solution in the i-th column is given
+ by the sum of squares of elements n+1:m in that column.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,max(M,N)).
+
+ S (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The singular values of A in decreasing order.
+ The condition number of A in the 2-norm = S(1)/S(min(m,n)).
+
+ RCOND (input) DOUBLE PRECISION
+ RCOND is used to determine the effective rank of A.
+ Singular values S(i) <= RCOND*S(1) are treated as zero.
+ If RCOND < 0, machine precision is used instead.
+
+ RANK (output) INTEGER
+ The effective rank of A, i.e., the number of singular values
+ which are greater than RCOND*S(1).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK must be at least 1.
+ The exact minimum amount of workspace needed depends on M,
+ N and NRHS. As long as LWORK is at least
+ 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2,
+ if M is greater than or equal to N or
+ 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2,
+ if M is less than N, the code will execute correctly.
+ SMLSIZ is returned by ILAENV and is equal to the maximum
+ size of the subproblems at the bottom of the computation
+ tree (usually about 25), and
+ NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 )
+ For good performance, LWORK should generally be larger.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ IWORK (workspace) INTEGER array, dimension (LIWORK)
+ LIWORK >= 3 * MINMN * NLVL + 11 * MINMN,
+ where MINMN = MIN( M,N ).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: the algorithm for computing the SVD failed to converge;
+ if INFO = i, i off-diagonal elements of an intermediate
+ bidiagonal form did not converge to zero.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input arguments.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --s;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ mnthr = ilaenv_(&c__6, "DGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
+ }
+
+ smlsiz = ilaenv_(&c__9, "DGELSD", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+
+/*
+ Compute workspace.
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ NB refers to the optimal block size for the immediately
+ following subroutine, as returned by ILAENV.)
+*/
+
+ minwrk = 1;
+ minmn = max(1,minmn);
+/* Computing MAX */
+ i__1 = (integer) (log((doublereal) minmn / (doublereal) (smlsiz + 1)) /
+ log(2.)) + 1;
+ nlvl = max(i__1,0);
+
+ if (*info == 0) {
+ maxwrk = 0;
+ mm = *m;
+ if (*m >= *n && *m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m,
+ n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "DORMQR", "LT",
+ m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
+
+/*
+ Path 1 - overdetermined or exactly determined.
+
+ Computing MAX
+*/
+ i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1, "DGEBRD"
+ , " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "DORMBR",
+ "QLT", &mm, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1, "DORMBR",
+ "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ wlalsd = *n * 9 + ((*n) << (1)) * smlsiz + ((*n) << (3)) * nlvl +
+ *n * *nrhs + i__1 * i__1;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + wlalsd;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,i__2),
+ i__2 = *n * 3 + wlalsd;
+ minwrk = max(i__1,i__2);
+ }
+ if (*n > *m) {
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ wlalsd = *m * 9 + ((*m) << (1)) * smlsiz + ((*m) << (3)) * nlvl +
+ *m * *nrhs + i__1 * i__1;
+ if (*n >= mnthr) {
+
+/*
+ Path 2a - underdetermined, with many more columns
+ than rows.
+*/
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &c_n1,
+ &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + ((*m) << (1))
+ * ilaenv_(&c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + *nrhs *
+ ilaenv_(&c__1, "DORMBR", "QLT", m, nrhs, m, &c_n1, (
+ ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + (*m - 1) *
+ ilaenv_(&c__1, "DORMBR", "PLN", m, nrhs, m, &c_n1, (
+ ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (1));
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "DORMLQ",
+ "LT", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + wlalsd;
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "DGEBRD", " ", m,
+ n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1, "DORMBR"
+ , "QLT", m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR",
+ "PLN", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + wlalsd;
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *m, i__1 = max(i__1,i__2),
+ i__2 = *m * 3 + wlalsd;
+ minwrk = max(i__1,i__2);
+ }
+ minwrk = min(minwrk,maxwrk);
+ work[1] = (doublereal) maxwrk;
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGELSD", &i__1);
+ return 0;
+ } else if (lquery) {
+ goto L10;
+ }
+
+/* Quick return if possible. */
+
+ if ((*m == 0) || (*n == 0)) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters. */
+
+ eps = PRECISION;
+ sfmin = SAFEMINIMUM;
+ smlnum = sfmin / eps;
+ bignum = 1. / smlnum;
+ dlabad_(&smlnum, &bignum);
+
+/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
+
+ anrm = dlange_("M", m, n, &a[a_offset], lda, &work[1]);
+ iascl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ dlaset_("F", &i__1, nrhs, &c_b2879, &c_b2879, &b[b_offset], ldb);
+ dlaset_("F", &minmn, &c__1, &c_b2879, &c_b2879, &s[1], &c__1);
+ *rank = 0;
+ goto L10;
+ }
+
+/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
+
+ bnrm = dlange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+ ibscl = 0;
+ if (bnrm > 0. && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ dlascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ dlascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* If M < N make sure certain entries of B are zero. */
+
+ if (*m < *n) {
+ i__1 = *n - *m;
+ dlaset_("F", &i__1, nrhs, &c_b2879, &c_b2879, &b[*m + 1 + b_dim1],
+ ldb);
+ }
+
+/* Overdetermined case. */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+ mm = *n;
+ itau = 1;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R.
+ (Workspace: need 2*N, prefer N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+
+/*
+ Multiply B by transpose(Q).
+ (Workspace: need N+NRHS, prefer N+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below R. */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ dlaset_("L", &i__1, &i__2, &c_b2879, &c_b2879, &a[a_dim1 + 2],
+ lda);
+ }
+ }
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A.
+ (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of R.
+ (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ dlalsd_("U", &smlsiz, n, nrhs, &s[1], &work[ie], &b[b_offset], ldb,
+ rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of R. */
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
+ b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *m, i__2 = ((*m) << (1)) - 4, i__1 = max(i__1,i__2), i__1 =
+ max(i__1,*nrhs), i__2 = *n - *m * 3;
+ if (*n >= mnthr && *lwork >= ((*m) << (2)) + *m * *m + max(i__1,i__2))
+ {
+
+/*
+ Path 2a - underdetermined, with many more columns than rows
+ and sufficient workspace for an efficient algorithm.
+*/
+
+ ldwork = *m;
+/*
+ Computing MAX
+ Computing MAX
+*/
+ i__3 = *m, i__4 = ((*m) << (1)) - 4, i__3 = max(i__3,i__4), i__3 =
+ max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = ((*m) << (2)) + *m * *lda + max(i__3,i__4), i__2 = *m * *
+ lda + *m + *m * *nrhs;
+ if (*lwork >= max(i__1,i__2)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ nwork = *m + 1;
+
+/*
+ Compute A=L*Q.
+ (Workspace: need 2*M, prefer M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+ il = nwork;
+
+/* Copy L to WORK(IL), zeroing out above its diagonal. */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ dlaset_("U", &i__1, &i__2, &c_b2879, &c_b2879, &work[il + ldwork],
+ &ldwork);
+ ie = il + ldwork * *m;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IL).
+ (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of L.
+ (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ dlalsd_("U", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of L. */
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
+ itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below first M rows of B. */
+
+ i__1 = *n - *m;
+ dlaset_("F", &i__1, nrhs, &c_b2879, &c_b2879, &b[*m + 1 + b_dim1],
+ ldb);
+ nwork = itau + *m;
+
+/*
+ Multiply transpose(Q) by B.
+ (Workspace: need M+NRHS, prefer M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize A.
+ (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors.
+ (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ dlalsd_("L", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of A. */
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ }
+ }
+
+/* Undo scaling. */
+
+ if (iascl == 1) {
+ dlascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ dlascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ dlascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ dlascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L10:
+ work[1] = (doublereal) maxwrk;
+ return 0;
+
+/* End of DGELSD */
+
+} /* dgelsd_ */
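+
+/*
+  Illustrative usage sketch (hand-written editor addition, not f2c output,
+  and excluded from compilation): minimum-norm least-squares solve of a
+  3x2 system with the DGELSD translation above.  WORK is sized by an
+  LWORK = -1 query; IWORK uses a generous fixed bound, since for this
+  size 3*MINMN*NLVL + 11*MINMN is well under 64.  The helper name
+  example_dgelsd and the data are hypothetical.
+*/
+#if 0
+#include <stdlib.h>
+
+static void example_dgelsd(void)
+{
+    integer m = 3, n = 2, nrhs = 1, lda = 3, ldb = 3, rank, info;
+    integer lwork = -1, iwork[64];
+    doublereal a[6] = { 1., 1., 1., 1., 2., 3. };   /* 3x2, column-major */
+    doublereal b[3] = { 6., 0., 0. };               /* right-hand side */
+    doublereal s[2], rcond = -1.;        /* rcond < 0: use machine eps */
+    doublereal wkopt, *work;
+
+    dgelsd_(&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank,
+            &wkopt, &lwork, iwork, &info);          /* workspace query */
+    lwork = (integer) wkopt;
+    work = (doublereal *) malloc(lwork * sizeof(doublereal));
+    dgelsd_(&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank,
+            work, &lwork, iwork, &info);
+    /* On success, b[0..1] holds the solution x and s the singular values. */
+    free(work);
+}
+#endif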
+
+/* Subroutine */ int dgeqr2_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *tau, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, k;
+ static doublereal aii;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *), dlarfg_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DGEQR2 computes a QR factorization of a real m by n matrix A:
+ A = Q * R.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the m by n matrix A.
+ On exit, the elements on and above the diagonal of the array
+ contain the min(m,n) by n upper trapezoidal matrix R (R is
+ upper triangular if m >= n); the elements below the diagonal,
+ with the array TAU, represent the orthogonal matrix Q as a
+ product of elementary reflectors (see Further Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEQR2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1]
+ , &c__1, &tau[i__]);
+ if (i__ < *n) {
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ dlarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of DGEQR2 */
+
+} /* dgeqr2_ */
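+
+/*
+   Illustrative sketch (kept out of the build with #if 0) of the
+   elementary reflector H = I - tau*v*v' described in the dgeqr2_
+   comments above, built with the same formulas dlarfg_ uses; the
+   input vector and the helper name are hypothetical.
+*/
+#if 0
+#include <math.h>
+static void example_reflector(void)
+{
+    /* Annihilate x(2:3) of x = (3, 4, 0)'. */
+    double x[3] = {3., 4., 0.};
+    double norm = sqrt(x[0] * x[0] + x[1] * x[1] + x[2] * x[2]);
+    double beta = (x[0] >= 0.) ? -norm : norm;     /* here beta = -5  */
+    double tau  = (beta - x[0]) / beta;            /* here tau  = 1.6 */
+    double v[3] = {1., x[1] / (x[0] - beta), x[2] / (x[0] - beta)};
+
+    /* v = (1, 0.5, 0)', so v'*x = 5 and
+       H*x = x - tau*(v'*x)*v = (beta, 0, 0)' = (-5, 0, 0)'. */
+}
+#endif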
+
+/* Subroutine */ int dgeqrf_(integer *m, integer *n, doublereal *a, integer *
+ lda, doublereal *tau, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int dgeqr2_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *), dlarfb_(char *,
+ char *, char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DGEQRF computes a QR factorization of a real M-by-N matrix A:
+ A = Q * R.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, the elements on and above the diagonal of the array
+ contain the min(M,N)-by-N upper trapezoidal matrix R (R is
+ upper triangular if m >= n); the elements below the diagonal,
+ with the array TAU, represent the orthogonal matrix Q as a
+ product of min(m,n) elementary reflectors (see Further
+ Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "DGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ lwkopt = *n * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGEQRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "DGEQRF", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DGEQRF", " ", m, n, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+ i__1 = k - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/*
+ Compute the QR factorization of the current block
+ A(i:m,i:i+ib-1)
+*/
+
+ i__3 = *m - i__ + 1;
+ dgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+ 1], &iinfo);
+ if (i__ + ib <= *n) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__3 = *m - i__ + 1;
+ dlarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i:m,i+ib:n) from the left */
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ - ib + 1;
+ dlarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
+ i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &work[ib
+ + 1], &ldwork);
+ }
+/* L10: */
+ }
+ } else {
+ i__ = 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+ if (i__ <= k) {
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ dgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+ , &iinfo);
+ }
+
+ work[1] = (doublereal) iws;
+ return 0;
+
+/* End of DGEQRF */
+
+} /* dgeqrf_ */
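+
+/*
+   Illustrative sketch (kept out of the build with #if 0) of the
+   LWORK = -1 query convention documented for dgeqrf_ above; matrix
+   contents and the helper name are hypothetical.
+*/
+#if 0
+#include <stdlib.h>
+static void example_dgeqrf_usage(integer m, integer n,
+                                 doublereal *a, integer lda)
+{
+    integer k = m < n ? m : n, lwork = -1, info;
+    doublereal *tau = malloc(k * sizeof(doublereal));
+    doublereal wkopt, *work;
+
+    /* Query pass: WORK(1) returns the optimal LWORK (about N*NB). */
+    dgeqrf_(&m, &n, a, &lda, tau, &wkopt, &lwork, &info);
+    lwork = (integer) wkopt;
+    work = malloc(lwork * sizeof(doublereal));
+
+    /* Factor pass: R lands in the upper triangle of A, and the
+       Householder vectors below the diagonal, scaled by tau. */
+    dgeqrf_(&m, &n, a, &lda, tau, work, &lwork, &info);
+    free(work);
+    free(tau);
+}
+#endif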
+
+/* Subroutine */ int dgesdd_(char *jobz, integer *m, integer *n, doublereal *
+ a, integer *lda, doublereal *s, doublereal *u, integer *ldu,
+ doublereal *vt, integer *ldvt, doublereal *work, integer *lwork,
+ integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, ie, il, ir, iu, blk;
+ static doublereal dum[1], eps;
+ static integer ivt, iscl;
+ static doublereal anrm;
+ static integer idum[1], ierr, itau;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ static integer chunk, minmn, wrkbl, itaup, itauq, mnthr;
+ static logical wntqa;
+ static integer nwork;
+ static logical wntqn, wntqo, wntqs;
+ extern /* Subroutine */ int dbdsdc_(char *, char *, integer *, doublereal
+ *, doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dgebrd_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *);
+ extern doublereal dlamch_(char *), dlange_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *);
+ static integer bdspac;
+ extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *),
+ dlascl_(char *, integer *, integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, integer *, integer *),
+ dgeqrf_(integer *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *, integer *), dlacpy_(char *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *), dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *), dorgbr_(char *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static doublereal bignum;
+ extern /* Subroutine */ int dormbr_(char *, char *, char *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *), dorglq_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dorgqr_(integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *);
+ static integer ldwrkl, ldwrkr, minwrk, ldwrku, maxwrk, ldwkvt;
+ static doublereal smlnum;
+ static logical wntqas, lquery;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DGESDD computes the singular value decomposition (SVD) of a real
+ M-by-N matrix A, optionally computing the left and right singular
+ vectors. If singular vectors are desired, it uses a
+ divide-and-conquer algorithm.
+
+ The SVD is written
+
+ A = U * SIGMA * transpose(V)
+
+ where SIGMA is an M-by-N matrix which is zero except for its
+ min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and
+ V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA
+ are the singular values of A; they are real and non-negative, and
+ are returned in descending order. The first min(m,n) columns of
+ U and V are the left and right singular vectors of A.
+
+ Note that the routine returns VT = V**T, not V.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ JOBZ (input) CHARACTER*1
+ Specifies options for computing all or part of the matrix U:
+ = 'A': all M columns of U and all N rows of V**T are
+ returned in the arrays U and VT;
+ = 'S': the first min(M,N) columns of U and the first
+ min(M,N) rows of V**T are returned in the arrays U
+ and VT;
+ = 'O': If M >= N, the first N columns of U are overwritten
+ on the array A and all rows of V**T are returned in
+ the array VT;
+ otherwise, all columns of U are returned in the
+ array U and the first M rows of V**T are overwritten
+ in the array VT;
+ = 'N': no columns of U or rows of V**T are computed.
+
+ M (input) INTEGER
+ The number of rows of the input matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the input matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit,
+ if JOBZ = 'O', A is overwritten with the first N columns
+ of U (the left singular vectors, stored
+ columnwise) if M >= N;
+ A is overwritten with the first M rows
+ of V**T (the right singular vectors, stored
+ rowwise) otherwise.
+ if JOBZ .ne. 'O', the contents of A are destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ S (output) DOUBLE PRECISION array, dimension (min(M,N))
+ The singular values of A, sorted so that S(i) >= S(i+1).
+
+ U (output) DOUBLE PRECISION array, dimension (LDU,UCOL)
+ UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N;
+ UCOL = min(M,N) if JOBZ = 'S'.
+ If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M
+ orthogonal matrix U;
+ if JOBZ = 'S', U contains the first min(M,N) columns of U
+ (the left singular vectors, stored columnwise);
+ if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= 1; if
+ JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M.
+
+ VT (output) DOUBLE PRECISION array, dimension (LDVT,N)
+ If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the
+ N-by-N orthogonal matrix V**T;
+ if JOBZ = 'S', VT contains the first min(M,N) rows of
+ V**T (the right singular vectors, stored rowwise);
+ if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= 1; if
+ JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N;
+ if JOBZ = 'S', LDVT >= min(M,N).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK;
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= 1.
+ If JOBZ = 'N',
+ LWORK >= 3*min(M,N) + max(max(M,N),6*min(M,N)).
+ If JOBZ = 'O',
+ LWORK >= 3*min(M,N)*min(M,N) +
+ max(max(M,N),5*min(M,N)*min(M,N)+4*min(M,N)).
+ If JOBZ = 'S' or 'A'
+ LWORK >= 3*min(M,N)*min(M,N) +
+ max(max(M,N),4*min(M,N)*min(M,N)+4*min(M,N)).
+ For good performance, LWORK should generally be larger.
+ If LWORK < 0 but other input arguments are legal, WORK(1)
+ returns the optimal LWORK.
+
+ IWORK (workspace) INTEGER array, dimension (8*min(M,N))
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: DBDSDC did not converge, updating process failed.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ mnthr = (integer) (minmn * 11. / 6.);
+ wntqa = lsame_(jobz, "A");
+ wntqs = lsame_(jobz, "S");
+ wntqas = (wntqa) || (wntqs);
+ wntqo = lsame_(jobz, "O");
+ wntqn = lsame_(jobz, "N");
+ minwrk = 1;
+ maxwrk = 1;
+ lquery = *lwork == -1;
+
+ if (! ((((wntqa) || (wntqs)) || (wntqo)) || (wntqn))) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (((*ldu < 1) || (wntqas && *ldu < *m)) || (wntqo && *m < *n && *
+ ldu < *m)) {
+ *info = -8;
+ } else if ((((*ldvt < 1) || (wntqa && *ldvt < *n)) || (wntqs && *ldvt <
+ minmn)) || (wntqo && *m >= *n && *ldvt < *n)) {
+ *info = -10;
+ }
+
+/*
+ Compute workspace
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ NB refers to the optimal block size for the immediately
+ following subroutine, as returned by ILAENV.)
+*/
+
+ if (*info == 0 && *m > 0 && *n > 0) {
+ if (*m >= *n) {
+
+/* Compute space needed for DBDSDC */
+
+ if (wntqn) {
+ bdspac = *n * 7;
+ } else {
+ bdspac = *n * 3 * *n + ((*n) << (2));
+ }
+ if (*m >= mnthr) {
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n;
+ maxwrk = max(i__1,i__2);
+ minwrk = bdspac + *n;
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + ((*n) << (1)) * *n;
+ minwrk = bdspac + ((*n) << (1)) * *n + *n * 3;
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "DORGQR",
+ " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *n * *n;
+ minwrk = bdspac + *n * *n + *n * 3;
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "DGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "DORGQR",
+ " ", m, m, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *n * *n;
+ minwrk = bdspac + *n * *n + *n * 3;
+ }
+ } else {
+
+/* Path 5 (M at least N, but not much larger) */
+
+ wrkbl = *n * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m,
+ n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ if (wntqn) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ } else if (wntqo) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *n;
+/* Computing MAX */
+ i__1 = *m, i__2 = *n * *n + bdspac;
+ minwrk = *n * 3 + max(i__1,i__2);
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ }
+ }
+ } else {
+
+/* Compute space needed for DBDSDC */
+
+ if (wntqn) {
+ bdspac = *m * 7;
+ } else {
+ bdspac = *m * 3 * *m + ((*m) << (2));
+ }
+ if (*n >= mnthr) {
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m;
+ maxwrk = max(i__1,i__2);
+ minwrk = bdspac + *m;
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ",
+ " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + ((*m) << (1)) * *m;
+ minwrk = bdspac + ((*m) << (1)) * *m + *m * 3;
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "DORGLQ",
+ " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *m;
+ minwrk = bdspac + *m * *m + *m * 3;
+ } else if (wntqa) {
+
+/* Path 4t (N much larger than M, JOBZ='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "DGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "DORGLQ",
+ " ", n, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "DGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *m;
+ minwrk = bdspac + *m * *m + *m * 3;
+ }
+ } else {
+
+/* Path 5t (N greater than M, but not much larger) */
+
+ wrkbl = *m * 3 + (*m + *n) * ilaenv_(&c__1, "DGEBRD", " ", m,
+ n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ if (wntqn) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ } else if (wntqo) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *n;
+/* Computing MAX */
+ i__1 = *n, i__2 = *m * *m + bdspac;
+ minwrk = *m * 3 + max(i__1,i__2);
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "DORMBR"
+ , "PRT", n, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ }
+ }
+ }
+ work[1] = (doublereal) maxwrk;
+ }
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGESDD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ if (*lwork >= 1) {
+ work[1] = 1.;
+ }
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = PRECISION;
+ smlnum = sqrt(SAFEMINIMUM) / eps;
+ bignum = 1. / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = dlange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if (anrm > 0. && anrm < smlnum) {
+ iscl = 1;
+ dlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ dlascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/*
+ A has at least as many rows as columns. If A has sufficiently
+ more rows than columns, first reduce using the QR
+ decomposition (if sufficient workspace available)
+*/
+
+ if (*m >= mnthr) {
+
+ if (wntqn) {
+
+/*
+ Path 1 (M much larger than N, JOBZ='N')
+ No singular vectors to be computed
+*/
+
+ itau = 1;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (Workspace: need 2*N, prefer N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out below R */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ dlaset_("L", &i__1, &i__2, &c_b2879, &c_b2879, &a[a_dim1 + 2],
+ lda);
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A
+ (Workspace: need 4*N, prefer 3*N+2*N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nwork = ie + *n;
+
+/*
+ Perform bidiagonal SVD, computing singular values only
+ (Workspace: need N+BDSPAC)
+*/
+
+ dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/*
+ Path 2 (M much larger than N, JOBZ = 'O')
+ N left singular vectors to be overwritten on A and
+ N right singular vectors to be computed in VT
+*/
+
+ ir = 1;
+
+/* WORK(IR) is LDWRKR by N */
+
+ if (*lwork >= *lda * *n + *n * *n + *n * 3 + bdspac) {
+ ldwrkr = *lda;
+ } else {
+ ldwrkr = (*lwork - *n * *n - *n * 3 - bdspac) / *n;
+ }
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ dlaset_("L", &i__1, &i__2, &c_b2879, &c_b2879, &work[ir + 1],
+ &ldwrkr);
+
+/*
+ Generate Q in A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+    Bidiagonalize R in WORK(IR)
+ (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* WORK(IU) is N by N */
+
+ iu = nwork;
+ nwork = iu + *n * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in WORK(IU) and computing right
+ singular vectors of bidiagonal matrix in VT
+ (Workspace: need N+N*N+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite WORK(IU) by left singular vectors of R
+ and VT by right singular vectors of R
+ (Workspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &work[iu], n, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/*
+ Multiply Q in A by left singular vectors of R in
+ WORK(IU), storing result in WORK(IR) and copying to A
+ (Workspace: need 2*N*N, prefer N*N+M*N)
+*/
+
+ i__1 = *m;
+ i__2 = ldwrkr;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ dgemm_("N", "N", &chunk, n, n, &c_b2865, &a[i__ + a_dim1],
+ lda, &work[iu], n, &c_b2879, &work[ir], &ldwrkr);
+ dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Path 3 (M much larger than N, JOBZ='S')
+ N left singular vectors to be computed in U and
+ N right singular vectors to be computed in VT
+*/
+
+ ir = 1;
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ dlacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ dlaset_("L", &i__2, &i__1, &c_b2879, &c_b2879, &work[ir + 1],
+ &ldwrkr);
+
+/*
+ Generate Q in A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in WORK(IR)
+ (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+    of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need N+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of R and VT
+ by right singular vectors of R
+ (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply Q in A by left singular vectors of R in
+ WORK(IR), storing result in U
+ (Workspace: need N*N)
+*/
+
+ dlacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr);
+ dgemm_("N", "N", m, n, n, &c_b2865, &a[a_offset], lda, &work[
+ ir], &ldwrkr, &c_b2879, &u[u_offset], ldu);
+
+ } else if (wntqa) {
+
+/*
+ Path 4 (M much larger than N, JOBZ='A')
+ M left singular vectors to be computed in U and
+ N right singular vectors to be computed in VT
+*/
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ itau = iu + ldwrku * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R, copying result to U
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ dlacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+/*
+ Generate Q in U
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+ i__2 = *lwork - nwork + 1;
+ dorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork],
+ &i__2, &ierr);
+
+/* Produce R in A, zeroing out other entries */
+
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ dlaset_("L", &i__2, &i__1, &c_b2879, &c_b2879, &a[a_dim1 + 2],
+ lda);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A
+ (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in WORK(IU) and computing right
+ singular vectors of bidiagonal matrix in VT
+ (Workspace: need N+N*N+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite WORK(IU) by left singular vectors of R and VT
+ by right singular vectors of R
+ (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply Q in U by left singular vectors of R in
+ WORK(IU), storing result in A
+ (Workspace: need N*N)
+*/
+
+ dgemm_("N", "N", m, n, n, &c_b2865, &u[u_offset], ldu, &work[
+ iu], &ldwrku, &c_b2879, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+ }
+
+ } else {
+
+/*
+ M .LT. MNTHR
+
+ Path 5 (M at least N, but not much larger)
+ Reduce to bidiagonal form without QR decomposition
+*/
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize A
+ (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/*
+ Perform bidiagonal SVD, only computing singular values
+ (Workspace: need N+BDSPAC)
+*/
+
+ dbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ if (*lwork >= *m * *n + *n * 3 + bdspac) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ nwork = iu + ldwrku * *n;
+ dlaset_("F", m, n, &c_b2879, &c_b2879, &work[iu], &ldwrku);
+ } else {
+
+/* WORK( IU ) is N by N */
+
+ ldwrku = *n;
+ nwork = iu + ldwrku * *n;
+
+/* WORK(IR) is LDWRKR by N */
+
+ ir = nwork;
+ ldwrkr = (*lwork - *n * *n - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in WORK(IU) and computing right
+ singular vectors of bidiagonal matrix in VT
+ (Workspace: need N+N*N+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], &ldwrku, &
+ vt[vt_offset], ldvt, dum, idum, &work[nwork], &iwork[
+ 1], info);
+
+/*
+ Overwrite VT by right singular vectors of A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+ if (*lwork >= *m * *n + *n * 3 + bdspac) {
+
+/*
+ Overwrite WORK(IU) by left singular vectors of A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+
+/* Copy left singular vectors of A from WORK(IU) to A */
+
+ dlacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda);
+ } else {
+
+/*
+ Generate Q in A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Multiply Q in A by left singular vectors of
+ bidiagonal matrix in WORK(IU), storing result in
+ WORK(IR) and copying to A
+ (Workspace: need 2*N*N, prefer N*N+M*N)
+*/
+
+ i__2 = *m;
+ i__1 = ldwrkr;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ dgemm_("N", "N", &chunk, n, n, &c_b2865, &a[i__ +
+ a_dim1], lda, &work[iu], &ldwrku, &c_b2879, &
+ work[ir], &ldwrkr);
+ dlacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+ }
+
+ } else if (wntqs) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need N+BDSPAC)
+*/
+
+ dlaset_("F", m, n, &c_b2879, &c_b2879, &u[u_offset], ldu);
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need 3*N, prefer 2*N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else if (wntqa) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need N+BDSPAC)
+*/
+
+ dlaset_("F", m, m, &c_b2879, &c_b2879, &u[u_offset], ldu);
+ dbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Set the right corner of U to identity matrix */
+
+ i__1 = *m - *n;
+ i__2 = *m - *n;
+ dlaset_("F", &i__1, &i__2, &c_b2879, &c_b2865, &u[*n + 1 + (*
+ n + 1) * u_dim1], ldu);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need N*N+2*N+M, prefer N*N+2*N+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ } else {
+
+/*
+ A has more columns than rows. If A has sufficiently more
+ columns than rows, first reduce using the LQ decomposition (if
+ sufficient workspace available)
+*/
+
+ if (*n >= mnthr) {
+
+ if (wntqn) {
+
+/*
+ Path 1t (N much larger than M, JOBZ='N')
+ No singular vectors to be computed
+*/
+
+ itau = 1;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (Workspace: need 2*M, prefer M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out above L */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ dlaset_("U", &i__1, &i__2, &c_b2879, &c_b2879, &a[((a_dim1) <<
+ (1)) + 1], lda);
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in A
+ (Workspace: need 4*M, prefer 3*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nwork = ie + *m;
+
+/*
+ Perform bidiagonal SVD, computing singular values only
+ (Workspace: need M+BDSPAC)
+*/
+
+ dbdsdc_("U", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/*
+ Path 2t (N much larger than M, JOBZ='O')
+ M right singular vectors to be overwritten on A and
+ M left singular vectors to be computed in U
+*/
+
+ ivt = 1;
+
+/* IVT is M by M */
+
+ il = ivt + *m * *m;
+ if (*lwork >= *m * *n + *m * *m + *m * 3 + bdspac) {
+
+/* WORK(IL) is M by N */
+
+ ldwrkl = *m;
+ chunk = *n;
+ } else {
+ ldwrkl = *m;
+ chunk = (*lwork - *m * *m) / *m;
+ }
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ dlaset_("U", &i__1, &i__2, &c_b2879, &c_b2879, &work[il +
+ ldwrkl], &ldwrkl);
+
+/*
+ Generate Q in A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IL)
+ (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U, and computing right singular
+ vectors of bidiagonal matrix in WORK(IVT)
+ (Workspace: need M+M*M+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], m, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of L and WORK(IVT)
+ by right singular vectors of L
+ (Workspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &work[ivt], m, &work[nwork], &i__1, &ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IVT) by Q
+ in A, storing result in WORK(IL) and copying to A
+ (Workspace: need 2*M*M, prefer M*M+M*N)
+*/
+
+ i__1 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ dgemm_("N", "N", m, &blk, m, &c_b2865, &work[ivt], m, &a[
+ i__ * a_dim1 + 1], lda, &c_b2879, &work[il], &
+ ldwrkl);
+ dlacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1
+ + 1], lda);
+/* L30: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Path 3t (N much larger than M, JOBZ='S')
+ M right singular vectors to be computed in VT and
+ M left singular vectors to be computed in U
+*/
+
+ il = 1;
+
+/* WORK(IL) is M by M */
+
+ ldwrkl = *m;
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ dlacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ dlaset_("U", &i__2, &i__1, &c_b2879, &c_b2879, &work[il +
+ ldwrkl], &ldwrkl);
+
+/*
+ Generate Q in A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+    Bidiagonalize L in WORK(IL)
+ (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need M+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of L and VT
+ by right singular vectors of L
+ (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IL) by
+ Q in A, storing result in VT
+ (Workspace: need M*M)
+*/
+
+ dlacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl);
+ dgemm_("N", "N", m, n, m, &c_b2865, &work[il], &ldwrkl, &a[
+ a_offset], lda, &c_b2879, &vt[vt_offset], ldvt);
+
+ } else if (wntqa) {
+
+/*
+ Path 4t (N much larger than M, JOBZ='A')
+ N right singular vectors to be computed in VT and
+ M left singular vectors to be computed in U
+*/
+
+ ivt = 1;
+
+/* WORK(IVT) is M by M */
+
+ ldwkvt = *m;
+ itau = ivt + ldwkvt * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q, copying result to VT
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ dlacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/*
+ Generate Q in VT
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[
+ nwork], &i__2, &ierr);
+
+/* Produce L in A, zeroing out other entries */
+
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ dlaset_("U", &i__2, &i__1, &c_b2879, &c_b2879, &a[((a_dim1) <<
+ (1)) + 1], lda);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in A
+ (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in WORK(IVT)
+ (Workspace: need M+M*M+BDSPAC)
+*/
+
+ dbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
+ , info);
+
+/*
+ Overwrite U by left singular vectors of L and WORK(IVT)
+ by right singular vectors of L
+ (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, m, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IVT) by
+ Q in VT, storing result in A
+ (Workspace: need M*M)
+*/
+
+ dgemm_("N", "N", m, n, m, &c_b2865, &work[ivt], &ldwkvt, &vt[
+ vt_offset], ldvt, &c_b2879, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ dlacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+ }
+
+ } else {
+
+/*
+ N .LT. MNTHR
+
+ Path 5t (N greater than M, but not much larger)
+ Reduce to bidiagonal form without LQ decomposition
+*/
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize A
+ (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/*
+ Perform bidiagonal SVD, only computing singular values
+ (Workspace: need M+BDSPAC)
+*/
+
+ dbdsdc_("L", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+ } else if (wntqo) {
+ ldwkvt = *m;
+ ivt = nwork;
+ if (*lwork >= *m * *n + *m * 3 + bdspac) {
+
+/* WORK( IVT ) is M by N */
+
+ dlaset_("F", m, n, &c_b2879, &c_b2879, &work[ivt], &
+ ldwkvt);
+ nwork = ivt + ldwkvt * *n;
+ } else {
+
+/* WORK( IVT ) is M by M */
+
+ nwork = ivt + ldwkvt * *m;
+ il = nwork;
+
+/* WORK(IL) is M by CHUNK */
+
+ chunk = (*lwork - *m * *m - *m * 3) / *m;
+ }
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in WORK(IVT)
+ (Workspace: need M*M+BDSPAC)
+*/
+
+ dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
+ , info);
+
+/*
+ Overwrite U by left singular vectors of A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *m * 3 + bdspac) {
+
+/*
+    Overwrite WORK(IVT) by right singular vectors of A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2,
+ &ierr);
+
+/* Copy right singular vectors of A from WORK(IVT) to A */
+
+ dlacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda);
+ } else {
+
+/*
+ Generate P**T in A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ dorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Multiply Q in A by right singular vectors of
+ bidiagonal matrix in WORK(IVT), storing result in
+ WORK(IL) and copying to A
+ (Workspace: need 2*M*M, prefer M*M+M*N)
+*/
+
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ dgemm_("N", "N", m, &blk, m, &c_b2865, &work[ivt], &
+ ldwkvt, &a[i__ * a_dim1 + 1], lda, &c_b2879, &
+ work[il], m);
+ dlacpy_("F", m, &blk, &work[il], m, &a[i__ * a_dim1 +
+ 1], lda);
+/* L40: */
+ }
+ }
+ } else if (wntqs) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need M+BDSPAC)
+*/
+
+ dlaset_("F", m, n, &c_b2879, &c_b2879, &vt[vt_offset], ldvt);
+ dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need 3*M, prefer 2*M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else if (wntqa) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need M+BDSPAC)
+*/
+
+ dlaset_("F", n, n, &c_b2879, &c_b2879, &vt[vt_offset], ldvt);
+ dbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Set the right corner of VT to identity matrix */
+
+ i__1 = *n - *m;
+ i__2 = *n - *m;
+ dlaset_("F", &i__1, &i__2, &c_b2879, &c_b2865, &vt[*m + 1 + (*
+ m + 1) * vt_dim1], ldvt);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need 2*M+N, prefer 2*M+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ dormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ dormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ dlascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ dlascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1] = (doublereal) maxwrk;
+
+ return 0;
+
+/* End of DGESDD */
+
+} /* dgesdd_ */
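+
+/*
+   Illustrative sketch (kept out of the build with #if 0) of a
+   reduced SVD through dgesdd_ above with JOBZ = 'S'; the 4x3 shape,
+   array contents, and helper name are hypothetical.
+*/
+#if 0
+#include <stdlib.h>
+static void example_dgesdd_usage(doublereal *a /* 4x3, column-major */)
+{
+    integer m = 4, n = 3, lda = 4, ldu = 4, ldvt = 3;
+    integer lwork = -1, info, iwork[8 * 3];     /* 8*min(M,N) */
+    doublereal s[3], u[4 * 3], vt[3 * 3], wkopt, *work;
+
+    /* Query pass (LWORK < 0): WORK(1) returns the optimal LWORK. */
+    dgesdd_("S", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,
+            &wkopt, &lwork, iwork, &info);
+    lwork = (integer) wkopt;
+    work = malloc(lwork * sizeof(doublereal));
+
+    /* A = U * diag(s) * VT; s is descending, and info > 0 flags a
+       DBDSDC convergence failure. */
+    dgesdd_("S", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,
+            work, &lwork, iwork, &info);
+    free(work);
+}
+#endif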
+
+/* Subroutine */ int dgesv_(integer *n, integer *nrhs, doublereal *a, integer
+ *lda, integer *ipiv, doublereal *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int dgetrf_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer *), dgetrs_(char *, integer *, integer *, doublereal *,
+ integer *, integer *, doublereal *, integer *, integer *);
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ DGESV computes the solution to a real system of linear equations
+ A * X = B,
+ where A is an N-by-N matrix and X and B are N-by-NRHS matrices.
+
+ The LU decomposition with partial pivoting and row interchanges is
+ used to factor A as
+ A = P * L * U,
+ where P is a permutation matrix, L is unit lower triangular, and U is
+ upper triangular. The factored form of A is then used to solve the
+ system of equations A * X = B.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of linear equations, i.e., the order of the
+ matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrix B. NRHS >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the N-by-N coefficient matrix A.
+ On exit, the factors L and U from the factorization
+ A = P*L*U; the unit diagonal elements of L are not stored.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ IPIV (output) INTEGER array, dimension (N)
+ The pivot indices that define the permutation matrix P;
+ row i of the matrix was interchanged with row IPIV(i).
+
+ B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
+ On entry, the N-by-NRHS right hand side matrix B.
+ On exit, if INFO = 0, the N-by-NRHS solution matrix X.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, U(i,i) is exactly zero. The factorization
+ has been completed, but the factor U is exactly
+ singular, so the solution could not be computed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGESV ", &i__1);
+ return 0;
+ }
+
+/* Compute the LU factorization of A. */
+
+ dgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ dgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[
+ b_offset], ldb, info);
+ }
+ return 0;
+
+/* End of DGESV */
+
+} /* dgesv_ */
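+
+/*
+   Editor's sketch (illustrative, not part of the generated source): a
+   minimal call of dgesv_, using the f2c typedefs (integer, doublereal)
+   from this file. Arrays are column-major, and A and B are overwritten
+   with the LU factors and the solution respectively.
+
+       integer n = 2, nrhs = 1, lda = 2, ldb = 2, info;
+       integer ipiv[2];
+       doublereal a[4] = {2., 1., 1., 3.};   column-major A = [2 1; 1 3]
+       doublereal b[2] = {3., 5.};
+       dgesv_(&n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
+       on return info == 0 and b holds the solution x = (0.8, 1.4)
+*/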
+
+/* Subroutine */ int dgetf2_(integer *m, integer *n, doublereal *a, integer *
+ lda, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ static integer j, jp;
+ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *), dscal_(integer *, doublereal *, doublereal *, integer
+ *), dswap_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1992
+
+
+ Purpose
+ =======
+
+ DGETF2 computes an LU factorization of a general m-by-n matrix A
+ using partial pivoting with row interchanges.
+
+ The factorization has the form
+ A = P * L * U
+ where P is a permutation matrix, L is lower triangular with unit
+ diagonal elements (lower trapezoidal if m > n), and U is upper
+ triangular (upper trapezoidal if m < n).
+
+ This is the right-looking Level 2 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the m by n matrix to be factored.
+ On exit, the factors L and U from the factorization
+ A = P*L*U; the unit diagonal elements of L are not stored.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ IPIV (output) INTEGER array, dimension (min(M,N))
+ The pivot indices; for 1 <= i <= min(M,N), row i of the
+ matrix was interchanged with row IPIV(i).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+ > 0: if INFO = k, U(k,k) is exactly zero. The factorization
+ has been completed, but the factor U is exactly
+ singular, and division by zero will occur if it is used
+ to solve a system of equations.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGETF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot and test for singularity. */
+
+ i__2 = *m - j + 1;
+ jp = j - 1 + idamax_(&i__2, &a[j + j * a_dim1], &c__1);
+ ipiv[j] = jp;
+ if (a[jp + j * a_dim1] != 0.) {
+
+/* Apply the interchange to columns 1:N. */
+
+ if (jp != j) {
+ dswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
+ }
+
+/* Compute elements J+1:M of J-th column. */
+
+ if (j < *m) {
+ i__2 = *m - j;
+ d__1 = 1. / a[j + j * a_dim1];
+ dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+
+ } else if (*info == 0) {
+
+ *info = j;
+ }
+
+ if (j < min(*m,*n)) {
+
+/* Update trailing submatrix. */
+
+ i__2 = *m - j;
+ i__3 = *n - j;
+ dger_(&i__2, &i__3, &c_b3001, &a[j + 1 + j * a_dim1], &c__1, &a[j
+ + (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1],
+ lda);
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of DGETF2 */
+
+} /* dgetf2_ */
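+
+/*
+   Editor's note (illustrative): IPIV encodes P as a sequence of row
+   swaps, not as a permutation vector. In the 1-based convention above,
+   applying P to an m-vector x means
+
+       for (i = 1; i <= min(m, n); ++i)
+           if (ipiv[i] != i)
+               swap x[i] with x[ipiv[i]];
+
+   dgetrs_ below runs these swaps forward (INCX = 1) before the
+   triangular solves for A*X = B, and backward (INCX = -1) after the
+   solves for A'*X = B.
+*/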
+
+/* Subroutine */ int dgetrf_(integer *m, integer *n, doublereal *a, integer *
+ lda, integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+ /* Local variables */
+ static integer i__, j, jb, nb;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ static integer iinfo;
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), dgetf2_(
+ integer *, integer *, doublereal *, integer *, integer *, integer
+ *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int dlaswp_(integer *, doublereal *, integer *,
+ integer *, integer *, integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ DGETRF computes an LU factorization of a general M-by-N matrix A
+ using partial pivoting with row interchanges.
+
+ The factorization has the form
+ A = P * L * U
+ where P is a permutation matrix, L is lower triangular with unit
+ diagonal elements (lower trapezoidal if m > n), and U is upper
+ triangular (upper trapezoidal if m < n).
+
+ This is the right-looking Level 3 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the M-by-N matrix to be factored.
+ On exit, the factors L and U from the factorization
+ A = P*L*U; the unit diagonal elements of L are not stored.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ IPIV (output) INTEGER array, dimension (min(M,N))
+ The pivot indices; for 1 <= i <= min(M,N), row i of the
+ matrix was interchanged with row IPIV(i).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, U(i,i) is exactly zero. The factorization
+ has been completed, but the factor U is exactly
+ singular, and division by zero will occur if it is used
+ to solve a system of equations.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGETRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "DGETRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ if ((nb <= 1) || (nb >= min(*m,*n))) {
+
+/* Use unblocked code. */
+
+ dgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info);
+ } else {
+
+/* Use blocked code. */
+
+ i__1 = min(*m,*n);
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = min(*m,*n) - j + 1;
+ jb = min(i__3,nb);
+
+/*
+ Factor diagonal and subdiagonal blocks and test for exact
+ singularity.
+*/
+
+ i__3 = *m - j + 1;
+ dgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo);
+
+/* Adjust INFO and the pivot indices. */
+
+ if (*info == 0 && iinfo > 0) {
+ *info = iinfo + j - 1;
+ }
+/* Computing MIN */
+ i__4 = *m, i__5 = j + jb - 1;
+ i__3 = min(i__4,i__5);
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = j - 1 + ipiv[i__];
+/* L10: */
+ }
+
+/* Apply interchanges to columns 1:J-1. */
+
+ i__3 = j - 1;
+ i__4 = j + jb - 1;
+ dlaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1);
+
+ if (j + jb <= *n) {
+
+/* Apply interchanges to columns J+JB:N. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j + jb - 1;
+ dlaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, &
+ ipiv[1], &c__1);
+
+/* Compute block row of U. */
+
+ i__3 = *n - j - jb + 1;
+ dtrsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, &
+ c_b2865, &a[j + j * a_dim1], lda, &a[j + (j + jb) *
+ a_dim1], lda);
+ if (j + jb <= *m) {
+
+/* Update trailing submatrix. */
+
+ i__3 = *m - j - jb + 1;
+ i__4 = *n - j - jb + 1;
+ dgemm_("No transpose", "No transpose", &i__3, &i__4, &jb,
+ &c_b3001, &a[j + jb + j * a_dim1], lda, &a[j + (j
+ + jb) * a_dim1], lda, &c_b2865, &a[j + jb + (j +
+ jb) * a_dim1], lda);
+ }
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of DGETRF */
+
+} /* dgetrf_ */
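+
+/*
+   Editor's sketch (illustrative): the reason to call dgetrf_ directly
+   rather than the dgesv_ driver is to factor once and solve repeatedly
+   against right hand sides that arrive separately. Types are the f2c
+   typedefs used throughout this file; A is column-major.
+
+       integer n = 3, nrhs = 1, lda = 3, ldb = 3, ipiv[3], info;
+       doublereal a[9] = {4., 1., 0., 1., 4., 1., 0., 1., 4.};
+       doublereal b1[3] = {1., 0., 0.}, b2[3] = {0., 0., 1.};
+       dgetrf_(&n, &n, a, &lda, ipiv, &info);
+       if (info == 0) {
+           dgetrs_("No transpose", &n, &nrhs, a, &lda, ipiv, b1, &ldb, &info);
+           dgetrs_("No transpose", &n, &nrhs, a, &lda, ipiv, b2, &ldb, &info);
+       }
+
+   dgesv_ above is exactly this factor/solve pair fused into one call.
+*/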
+
+/* Subroutine */ int dgetrs_(char *trans, integer *n, integer *nrhs,
+ doublereal *a, integer *lda, integer *ipiv, doublereal *b, integer *
+ ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), xerbla_(
+ char *, integer *), dlaswp_(integer *, doublereal *,
+ integer *, integer *, integer *, integer *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ DGETRS solves a system of linear equations
+ A * X = B or A' * X = B
+ with a general N-by-N matrix A using the LU factorization computed
+ by DGETRF.
+
+ Arguments
+ =========
+
+ TRANS (input) CHARACTER*1
+ Specifies the form of the system of equations:
+ = 'N': A * X = B (No transpose)
+ = 'T': A'* X = B (Transpose)
+ = 'C': A'* X = B (Conjugate transpose = Transpose)
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrix B. NRHS >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,N)
+ The factors L and U from the factorization A = P*L*U
+ as computed by DGETRF.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ IPIV (input) INTEGER array, dimension (N)
+ The pivot indices from DGETRF; for 1<=i<=N, row i of the
+ matrix was interchanged with row IPIV(i).
+
+ B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
+ On entry, the right hand side matrix B.
+ On exit, the solution matrix X.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DGETRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*n == 0) || (*nrhs == 0)) {
+ return 0;
+ }
+
+ if (notran) {
+
+/*
+ Solve A * X = B.
+
+ Apply row interchanges to the right hand sides.
+*/
+
+ dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1);
+
+/* Solve L*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b2865, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b2865,
+ &a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/*
+ Solve A' * X = B.
+
+ Solve U'*X = B, overwriting B with X.
+*/
+
+ dtrsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b2865, &
+ a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Lower", "Transpose", "Unit", n, nrhs, &c_b2865, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Apply row interchanges to the solution vectors. */
+
+ dlaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1);
+ }
+
+ return 0;
+
+/* End of DGETRS */
+
+} /* dgetrs_ */
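+
+/*
+   Editor's note (illustrative): the same factorization also solves the
+   transposed system; only the TRANS argument changes, and no
+   refactorization is needed. Continuing the sketch after dgetrf_ above:
+
+       dgetrs_("Transpose", &n, &nrhs, a, &lda, ipiv, b1, &ldb, &info);
+
+   For real matrices "C" behaves identically to "T", as documented.
+*/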
+
+/* Subroutine */ int dhseqr_(char *job, char *compz, integer *n, integer *ilo,
+ integer *ihi, doublereal *h__, integer *ldh, doublereal *wr,
+ doublereal *wi, doublereal *z__, integer *ldz, doublereal *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ doublereal d__1, d__2;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__, j, k, l;
+ static doublereal s[225] /* was [15][15] */, v[16];
+ static integer i1, i2, ii, nh, nr, ns, nv;
+ static doublereal vv[16];
+ static integer itn;
+ static doublereal tau;
+ static integer its;
+ static doublereal ulp, tst1;
+ static integer maxb;
+ static doublereal absw;
+ static integer ierr;
+ static doublereal unfl, temp, ovfl;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ static integer itemp;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static logical initz, wantt, wantz;
+ extern doublereal dlapy2_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlabad_(doublereal *, doublereal *);
+
+ extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern doublereal dlanhs_(char *, integer *, doublereal *, integer *,
+ doublereal *);
+ extern /* Subroutine */ int dlahqr_(logical *, logical *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, integer *, doublereal *, integer *,
+ integer *), dlacpy_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *, integer *), dlaset_(char *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int xerbla_(char *, integer *), dlarfx_(
+ char *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *);
+ static doublereal smlnum;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DHSEQR computes the eigenvalues of a real upper Hessenberg matrix H
+ and, optionally, the matrices T and Z from the Schur decomposition
+ H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur
+ form), and Z is the orthogonal matrix of Schur vectors.
+
+ Optionally Z may be postmultiplied into an input orthogonal matrix Q,
+ so that this routine can give the Schur factorization of a matrix A
+ which has been reduced to the Hessenberg form H by the orthogonal
+ matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ = 'E': compute eigenvalues only;
+ = 'S': compute eigenvalues and the Schur form T.
+
+ COMPZ (input) CHARACTER*1
+ = 'N': no Schur vectors are computed;
+ = 'I': Z is initialized to the unit matrix and the matrix Z
+ of Schur vectors of H is returned;
+ = 'V': Z must contain an orthogonal matrix Q on entry, and
+ the product Q*Z is returned.
+
+ N (input) INTEGER
+ The order of the matrix H. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that H is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to DGEBAL, and then passed to DGEHRD
+ when the matrix output by DGEBAL is reduced to Hessenberg
+ form. Otherwise ILO and IHI should be set to 1 and N
+ respectively.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ H (input/output) DOUBLE PRECISION array, dimension (LDH,N)
+ On entry, the upper Hessenberg matrix H.
+ On exit, if JOB = 'S', H contains the upper quasi-triangular
+ matrix T from the Schur decomposition (the Schur form);
+ 2-by-2 diagonal blocks (corresponding to complex conjugate
+ pairs of eigenvalues) are returned in standard form, with
+ H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. If JOB = 'E',
+ the contents of H are unspecified on exit.
+
+ LDH (input) INTEGER
+ The leading dimension of the array H. LDH >= max(1,N).
+
+ WR (output) DOUBLE PRECISION array, dimension (N)
+ WI (output) DOUBLE PRECISION array, dimension (N)
+ The real and imaginary parts, respectively, of the computed
+ eigenvalues. If two eigenvalues are computed as a complex
+ conjugate pair, they are stored in consecutive elements of
+ WR and WI, say the i-th and (i+1)th, with WI(i) > 0 and
+ WI(i+1) < 0. If JOB = 'S', the eigenvalues are stored in the
+ same order as on the diagonal of the Schur form returned in
+ H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2
+ diagonal block, WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and
+ WI(i+1) = -WI(i).
+
+ Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N)
+ If COMPZ = 'N': Z is not referenced.
+ If COMPZ = 'I': on entry, Z need not be set, and on exit, Z
+ contains the orthogonal matrix Z of the Schur vectors of H.
+ If COMPZ = 'V': on entry Z must contain an N-by-N matrix Q,
+ which is assumed to be equal to the unit matrix except for
+ the submatrix Z(ILO:IHI,ILO:IHI); on exit Z contains Q*Z.
+ Normally Q is the orthogonal matrix generated by DORGHR after
+ the call to DGEHRD which formed the Hessenberg matrix H.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z.
+ LDZ >= max(1,N) if COMPZ = 'I' or 'V'; LDZ >= 1 otherwise.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, DHSEQR failed to compute all of the
+ eigenvalues in a total of 30*(IHI-ILO+1) iterations;
+ elements 1:ILO-1 and i+1:N of WR and WI contain those
+ eigenvalues which have been successfully computed.
+
+ =====================================================================
+
+
+ Decode and test the input parameters
+*/
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ wantt = lsame_(job, "S");
+ initz = lsame_(compz, "I");
+ wantz = (initz) || (lsame_(compz, "V"));
+
+ *info = 0;
+ work[1] = (doublereal) max(1,*n);
+ lquery = *lwork == -1;
+ if (! lsame_(job, "E") && ! wantt) {
+ *info = -1;
+ } else if (! lsame_(compz, "N") && ! wantz) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -4;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -5;
+ } else if (*ldh < max(1,*n)) {
+ *info = -7;
+ } else if ((*ldz < 1) || (wantz && *ldz < max(1,*n))) {
+ *info = -11;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DHSEQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Initialize Z, if necessary */
+
+ if (initz) {
+ dlaset_("Full", n, n, &c_b2879, &c_b2865, &z__[z_offset], ldz);
+ }
+
+/* Store the eigenvalues isolated by DGEBAL. */
+
+ i__1 = *ilo - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.;
+/* L10: */
+ }
+ i__1 = *n;
+ for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.;
+/* L20: */
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ wr[*ilo] = h__[*ilo + *ilo * h_dim1];
+ wi[*ilo] = 0.;
+ return 0;
+ }
+
+/*
+ Set rows and columns ILO to IHI to zero below the first
+ subdiagonal.
+*/
+
+ i__1 = *ihi - 2;
+ for (j = *ilo; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j + 2; i__ <= i__2; ++i__) {
+ h__[i__ + j * h_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ nh = *ihi - *ilo + 1;
+
+/*
+ Determine the order of the multi-shift QR algorithm to be used.
+
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = job;
+ i__3[1] = 1, a__1[1] = compz;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ ns = ilaenv_(&c__4, "DHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = job;
+ i__3[1] = 1, a__1[1] = compz;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ maxb = ilaenv_(&c__8, "DHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+ if (((ns <= 2) || (ns > nh)) || (maxb >= nh)) {
+
+/* Use the standard double-shift algorithm */
+
+ dlahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[
+ 1], ilo, ihi, &z__[z_offset], ldz, info);
+ return 0;
+ }
+ maxb = max(3,maxb);
+/* Computing MIN */
+ i__1 = min(ns,maxb);
+ ns = min(i__1,15);
+
+/*
+ Now 2 < NS <= MAXB < NH.
+
+ Set machine-dependent constants for the stopping criterion.
+ If norm(H) <= sqrt(OVFL), overflow should not occur.
+*/
+
+ unfl = SAFEMINIMUM;
+ ovfl = 1. / unfl;
+ dlabad_(&unfl, &ovfl);
+ ulp = PRECISION;
+ smlnum = unfl * (nh / ulp);
+
+/*
+ I1 and I2 are the indices of the first row and last column of H
+ to which transformations must be applied. If eigenvalues only are
+ being computed, I1 and I2 are set inside the main loop.
+*/
+
+ if (wantt) {
+ i1 = 1;
+ i2 = *n;
+ }
+
+/* ITN is the total number of multiple-shift QR iterations allowed. */
+
+ itn = nh * 30;
+
+/*
+ The main loop begins here. I is the loop index and decreases from
+ IHI to ILO in steps of at most MAXB. Each iteration of the loop
+ works with the active submatrix in rows and columns L to I.
+ Eigenvalues I+1 to IHI have already converged. Either L = ILO or
+ H(L,L-1) is negligible so that the matrix splits.
+*/
+
+ i__ = *ihi;
+L50:
+ l = *ilo;
+ if (i__ < *ilo) {
+ goto L170;
+ }
+
+/*
+ Perform multiple-shift QR iterations on rows and columns ILO to I
+ until a submatrix of order at most MAXB splits off at the bottom
+ because a subdiagonal element has become negligible.
+*/
+
+ i__1 = itn;
+ for (its = 0; its <= i__1; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__2 = l + 1;
+ for (k = i__; k >= i__2; --k) {
+ tst1 = (d__1 = h__[k - 1 + (k - 1) * h_dim1], abs(d__1)) + (d__2 =
+ h__[k + k * h_dim1], abs(d__2));
+ if (tst1 == 0.) {
+ i__4 = i__ - l + 1;
+ tst1 = dlanhs_("1", &i__4, &h__[l + l * h_dim1], ldh, &work[1]
+ );
+ }
+/* Computing MAX */
+ d__2 = ulp * tst1;
+ if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= max(d__2,
+ smlnum)) {
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible. */
+
+ h__[l + (l - 1) * h_dim1] = 0.;
+ }
+
+/* Exit from loop if a submatrix of order <= MAXB has split off. */
+
+ if (l >= i__ - maxb + 1) {
+ goto L160;
+ }
+
+/*
+ Now the active submatrix is in rows and columns L to I. If
+ eigenvalues only are being computed, only the active submatrix
+ need be transformed.
+*/
+
+ if (! wantt) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if ((its == 20) || (its == 30)) {
+
+/* Exceptional shifts. */
+
+ i__2 = i__;
+ for (ii = i__ - ns + 1; ii <= i__2; ++ii) {
+ wr[ii] = ((d__1 = h__[ii + (ii - 1) * h_dim1], abs(d__1)) + (
+ d__2 = h__[ii + ii * h_dim1], abs(d__2))) * 1.5;
+ wi[ii] = 0.;
+/* L80: */
+ }
+ } else {
+
+/* Use eigenvalues of trailing submatrix of order NS as shifts. */
+
+ dlacpy_("Full", &ns, &ns, &h__[i__ - ns + 1 + (i__ - ns + 1) *
+ h_dim1], ldh, s, &c__15);
+ dlahqr_(&c_false, &c_false, &ns, &c__1, &ns, s, &c__15, &wr[i__ -
+ ns + 1], &wi[i__ - ns + 1], &c__1, &ns, &z__[z_offset],
+ ldz, &ierr);
+ if (ierr > 0) {
+
+/*
+ If DLAHQR failed to compute all NS eigenvalues, use the
+ unconverged diagonal elements as the remaining shifts.
+*/
+
+ i__2 = ierr;
+ for (ii = 1; ii <= i__2; ++ii) {
+ wr[i__ - ns + ii] = s[ii + ii * 15 - 16];
+ wi[i__ - ns + ii] = 0.;
+/* L90: */
+ }
+ }
+ }
+
+/*
+ Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns))
+ where G is the Hessenberg submatrix H(L:I,L:I) and w is
+ the vector of shifts (stored in WR and WI). The result is
+ stored in the local array V.
+*/
+
+ v[0] = 1.;
+ i__2 = ns + 1;
+ for (ii = 2; ii <= i__2; ++ii) {
+ v[ii - 1] = 0.;
+/* L100: */
+ }
+ nv = 1;
+ i__2 = i__;
+ for (j = i__ - ns + 1; j <= i__2; ++j) {
+ if (wi[j] >= 0.) {
+ if (wi[j] == 0.) {
+
+/* real shift */
+
+ i__4 = nv + 1;
+ dcopy_(&i__4, v, &c__1, vv, &c__1);
+ i__4 = nv + 1;
+ d__1 = -wr[j];
+ dgemv_("No transpose", &i__4, &nv, &c_b2865, &h__[l + l *
+ h_dim1], ldh, vv, &c__1, &d__1, v, &c__1);
+ ++nv;
+ } else if (wi[j] > 0.) {
+
+/* complex conjugate pair of shifts */
+
+ i__4 = nv + 1;
+ dcopy_(&i__4, v, &c__1, vv, &c__1);
+ i__4 = nv + 1;
+ d__1 = wr[j] * -2.;
+ dgemv_("No transpose", &i__4, &nv, &c_b2865, &h__[l + l *
+ h_dim1], ldh, v, &c__1, &d__1, vv, &c__1);
+ i__4 = nv + 1;
+ itemp = idamax_(&i__4, vv, &c__1);
+/* Computing MAX */
+ d__2 = (d__1 = vv[itemp - 1], abs(d__1));
+ temp = 1. / max(d__2,smlnum);
+ i__4 = nv + 1;
+ dscal_(&i__4, &temp, vv, &c__1);
+ absw = dlapy2_(&wr[j], &wi[j]);
+ temp = temp * absw * absw;
+ i__4 = nv + 2;
+ i__5 = nv + 1;
+ dgemv_("No transpose", &i__4, &i__5, &c_b2865, &h__[l + l
+ * h_dim1], ldh, vv, &c__1, &temp, v, &c__1);
+ nv += 2;
+ }
+
+/*
+ Scale V(1:NV) so that max(abs(V(i))) = 1. If V is zero,
+ reset it to the unit vector.
+*/
+
+ itemp = idamax_(&nv, v, &c__1);
+ temp = (d__1 = v[itemp - 1], abs(d__1));
+ if (temp == 0.) {
+ v[0] = 1.;
+ i__4 = nv;
+ for (ii = 2; ii <= i__4; ++ii) {
+ v[ii - 1] = 0.;
+/* L110: */
+ }
+ } else {
+ temp = max(temp,smlnum);
+ d__1 = 1. / temp;
+ dscal_(&nv, &d__1, v, &c__1);
+ }
+ }
+/* L120: */
+ }
+
+/* Multiple-shift QR step */
+
+ i__2 = i__ - 1;
+ for (k = l; k <= i__2; ++k) {
+
+/*
+ The first iteration of this loop determines a reflection G
+ from the vector V and applies it from left and right to H,
+ thus creating a nonzero bulge below the subdiagonal.
+
+ Each subsequent iteration determines a reflection G to
+ restore the Hessenberg form in the (K-1)th column, and thus
+ chases the bulge one step toward the bottom of the active
+ submatrix. NR is the order of G.
+
+ Computing MIN
+*/
+ i__4 = ns + 1, i__5 = i__ - k + 1;
+ nr = min(i__4,i__5);
+ if (k > l) {
+ dcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ dlarfg_(&nr, v, &v[1], &c__1, &tau);
+ if (k > l) {
+ h__[k + (k - 1) * h_dim1] = v[0];
+ i__4 = i__;
+ for (ii = k + 1; ii <= i__4; ++ii) {
+ h__[ii + (k - 1) * h_dim1] = 0.;
+/* L130: */
+ }
+ }
+ v[0] = 1.;
+
+/*
+ Apply G from the left to transform the rows of the matrix in
+ columns K to I2.
+*/
+
+ i__4 = i2 - k + 1;
+ dlarfx_("Left", &nr, &i__4, v, &tau, &h__[k + k * h_dim1], ldh, &
+ work[1]);
+
+/*
+ Apply G from the right to transform the columns of the
+ matrix in rows I1 to min(K+NR,I).
+
+ Computing MIN
+*/
+ i__5 = k + nr;
+ i__4 = min(i__5,i__) - i1 + 1;
+ dlarfx_("Right", &i__4, &nr, v, &tau, &h__[i1 + k * h_dim1], ldh,
+ &work[1]);
+
+ if (wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ dlarfx_("Right", &nh, &nr, v, &tau, &z__[*ilo + k * z_dim1],
+ ldz, &work[1]);
+ }
+/* L140: */
+ }
+
+/* L150: */
+ }
+
+/* Failure to converge in remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L160:
+
+/*
+ A submatrix of order <= MAXB in rows and columns L to I has split
+ off. Use the double-shift QR algorithm to handle it.
+*/
+
+ dlahqr_(&wantt, &wantz, n, &l, &i__, &h__[h_offset], ldh, &wr[1], &wi[1],
+ ilo, ihi, &z__[z_offset], ldz, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+/*
+ Decrement number of remaining iterations, and return to start of
+ the main loop with a new value of I.
+*/
+
+ itn -= its;
+ i__ = l - 1;
+ goto L50;
+
+L170:
+ work[1] = (doublereal) max(1,*n);
+ return 0;
+
+/* End of DHSEQR */
+
+} /* dhseqr_ */
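+
+/*
+   Editor's note (illustrative): the call chain this documentation
+   assumes, for the eigenvalues and Schur vectors of a general matrix A,
+   is schematically
+
+       dgebal_(...)               balance A, producing ILO and IHI
+       dgehrd_(...)               reduce the balanced A to Hessenberg H
+       dorghr_(...)               form the orthogonal Q from the reflectors
+       dhseqr_("S", "V", ...)     Schur form T and accumulated vectors Q*Z
+
+   Drivers such as dgeev_ in this file are built around this sequence.
+*/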
+
+/* Subroutine */ int dlabad_(doublereal *small, doublereal *large)
+{
+ /* Builtin functions */
+ double d_lg10(doublereal *), sqrt(doublereal);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLABAD takes as input the values computed by DLAMCH for underflow and
+ overflow, and returns the square root of each of these values if the
+ log of LARGE is sufficiently large. This subroutine is intended to
+ identify machines with a large exponent range, such as the Crays, and
+ redefine the underflow and overflow limits to be the square roots of
+ the values computed by DLAMCH. This subroutine is needed because
+ DLAMCH does not compensate for poor arithmetic in the upper half of
+ the exponent range, as is found on a Cray.
+
+ Arguments
+ =========
+
+ SMALL (input/output) DOUBLE PRECISION
+ On entry, the underflow threshold as computed by DLAMCH.
+ On exit, if LOG10(LARGE) is sufficiently large, the square
+ root of SMALL, otherwise unchanged.
+
+ LARGE (input/output) DOUBLE PRECISION
+ On entry, the overflow threshold as computed by DLAMCH.
+ On exit, if LOG10(LARGE) is sufficiently large, the square
+ root of LARGE, otherwise unchanged.
+
+ =====================================================================
+
+
+ If it looks like we're on a Cray, take the square root of
+ SMALL and LARGE to avoid overflow and underflow problems.
+*/
+
+ if (d_lg10(large) > 2e3) {
+ *small = sqrt(*small);
+ *large = sqrt(*large);
+ }
+
+ return 0;
+
+/* End of DLABAD */
+
+} /* dlabad_ */
+
+/* Subroutine */ int dlabrd_(integer *m, integer *n, integer *nb, doublereal *
+ a, integer *lda, doublereal *d__, doublereal *e, doublereal *tauq,
+ doublereal *taup, doublereal *x, integer *ldx, doublereal *y, integer
+ *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ static integer i__;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dgemv_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlarfg_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLABRD reduces the first NB rows and columns of a real general
+ m by n matrix A to upper or lower bidiagonal form by an orthogonal
+ transformation Q' * A * P, and returns the matrices X and Y which
+ are needed to apply the transformation to the unreduced part of A.
+
+ If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower
+ bidiagonal form.
+
+ This is an auxiliary routine called by DGEBRD.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A.
+
+ N (input) INTEGER
+ The number of columns in the matrix A.
+
+ NB (input) INTEGER
+ The number of leading rows and columns of A to be reduced.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the m by n general matrix to be reduced.
+ On exit, the first NB rows and columns of the matrix are
+ overwritten; the rest of the array is unchanged.
+ If m >= n, elements on and below the diagonal in the first NB
+ columns, with the array TAUQ, represent the orthogonal
+ matrix Q as a product of elementary reflectors; and
+ elements above the diagonal in the first NB rows, with the
+ array TAUP, represent the orthogonal matrix P as a product
+ of elementary reflectors.
+ If m < n, elements below the diagonal in the first NB
+ columns, with the array TAUQ, represent the orthogonal
+ matrix Q as a product of elementary reflectors, and
+ elements on and above the diagonal in the first NB rows,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) DOUBLE PRECISION array, dimension (NB)
+ The diagonal elements of the first NB rows and columns of
+ the reduced matrix. D(i) = A(i,i).
+
+ E (output) DOUBLE PRECISION array, dimension (NB)
+ The off-diagonal elements of the first NB rows and columns of
+ the reduced matrix.
+
+ TAUQ (output) DOUBLE PRECISION array, dimension (NB)
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix Q. See Further Details.
+
+ TAUP (output) DOUBLE PRECISION array, dimension (NB)
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix P. See Further Details.
+
+ X (output) DOUBLE PRECISION array, dimension (LDX,NB)
+ The m-by-nb matrix X required to update the unreduced part
+ of A.
+
+ LDX (input) INTEGER
+ The leading dimension of the array X. LDX >= M.
+
+ Y (output) DOUBLE PRECISION array, dimension (LDY,NB)
+ The n-by-nb matrix Y required to update the unreduced part
+ of A.
+
+ LDY (input) INTEGER
+ The leading dimension of the array Y. LDY >= N.
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . G(nb)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors.
+
+ If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in
+ A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in
+ A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in
+ A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in
+ A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The elements of the vectors v and u together form the m-by-nb matrix
+ V and the nb-by-n matrix U' which are needed, with X and Y, to apply
+ the transformation to the unreduced part of the matrix, using a block
+ update of the form: A := A - V*Y' - X*U'.
+
+ The contents of A on exit are illustrated by the following examples
+ with nb = 2:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 )
+ ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 )
+ ( v1 v2 a a a ) ( v1 1 a a a a )
+ ( v1 v2 a a a ) ( v1 v2 a a a a )
+ ( v1 v2 a a a ) ( v1 v2 a a a a )
+ ( v1 v2 a a a )
+
+ where a denotes an element of the original matrix which is unchanged,
+ vi denotes an element of the vector defining H(i), and ui an element
+ of the vector defining G(i).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if ((*m <= 0) || (*n <= 0)) {
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:m,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &a[i__ + a_dim1],
+ lda, &y[i__ + y_dim1], ldy, &c_b2865, &a[i__ + i__ *
+ a_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &x[i__ + x_dim1],
+ ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b2865, &a[i__ + i__ *
+ a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ *
+ a_dim1], &c__1, &tauq[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ if (i__ < *n) {
+ a[i__ + i__ * a_dim1] = 1.;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &a[i__ + (i__ + 1)
+ * a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &
+ c_b2879, &y[i__ + 1 + i__ * y_dim1], &c__1)
+ ;
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &a[i__ + a_dim1],
+ lda, &a[i__ + i__ * a_dim1], &c__1, &c_b2879, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b2865, &
+ y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &x[i__ + x_dim1],
+ ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b2879, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b3001, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+ c_b2865, &y[i__ + 1 + i__ * y_dim1], &c__1)
+ ;
+ i__2 = *n - i__;
+ dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+
+/* Update A(i,i+1:n) */
+
+ i__2 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__, &c_b3001, &y[i__ + 1 +
+ y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b2865, &a[i__
+ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b3001, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b2865, &a[
+ i__ + (i__ + 1) * a_dim1], lda);
+
+/* Generate reflection P(i) to annihilate A(i,i+2:n) */
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
+ i__3,*n) * a_dim1], lda, &taup[i__]);
+ e[i__] = a[i__ + (i__ + 1) * a_dim1];
+ a[i__ + (i__ + 1) * a_dim1] = 1.;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__3, &c_b2865, &a[i__ + 1 + (
+ i__ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &c_b2879, &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__, &c_b2865, &y[i__ + 1 +
+ y_dim1], ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b2879, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ dgemv_("No transpose", &i__2, &i__, &c_b3001, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b2865, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__3, &c_b2865, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b2879, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b2865, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i,i:n) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &y[i__ + y_dim1],
+ ldy, &a[i__ + a_dim1], lda, &c_b2865, &a[i__ + i__ *
+ a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b3001, &a[i__ * a_dim1 + 1],
+ lda, &x[i__ + x_dim1], ldx, &c_b2865, &a[i__ + i__ *
+ a_dim1], lda);
+
+/* Generate reflection P(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ dlarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) *
+ a_dim1], lda, &taup[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ if (i__ < *m) {
+ a[i__ + i__ * a_dim1] = 1.;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b2865, &a[i__ + 1 +
+ i__ * a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &
+ c_b2879, &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &y[i__ + y_dim1],
+ ldy, &a[i__ + i__ * a_dim1], lda, &c_b2879, &x[i__ *
+ x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b2865, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b2865, &a[i__ *
+ a_dim1 + 1], lda, &a[i__ + i__ * a_dim1], lda, &
+ c_b2879, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b2865, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ dscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+
+/* Update A(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &a[i__ + 1 +
+ a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b2865, &a[i__
+ + 1 + i__ * a_dim1], &c__1);
+ i__2 = *m - i__;
+ dgemv_("No transpose", &i__2, &i__, &c_b3001, &x[i__ + 1 +
+ x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b2865, &
+ a[i__ + 1 + i__ * a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+2:m,i) */
+
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) +
+ i__ * a_dim1], &c__1, &tauq[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &a[i__ + 1 + (i__
+ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &
+ c__1, &c_b2879, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &a[i__ + 1 +
+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b2879, &y[i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b2865, &
+ y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ dgemv_("Transpose", &i__2, &i__, &c_b2865, &x[i__ + 1 +
+ x_dim1], ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b2879, &y[i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ dgemv_("Transpose", &i__, &i__2, &c_b3001, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+ c_b2865, &y[i__ + 1 + i__ * y_dim1], &c__1)
+ ;
+ i__2 = *n - i__;
+ dscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of DLABRD */
+
+} /* dlabrd_ */
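+
+/*
+   Editor's note (illustrative): X and Y are returned so the caller can
+   update the trailing submatrix with two rank-NB GEMMs instead of 2*NB
+   rank-1 updates, realizing the block update A := A - V*Y' - X*U'
+   described above. In the blocked DGEBRD this is two dgemm_ calls on
+   A(i+nb:m, i+nb:n) per panel, which is where the Level 3 BLAS speedup
+   over the unblocked reduction comes from.
+*/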
+
+/* Subroutine */ int dlacpy_(char *uplo, integer *m, integer *n, doublereal *
+ a, integer *lda, doublereal *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLACPY copies all or part of a two-dimensional matrix A to another
+ matrix B.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies the part of the matrix A to be copied to B.
+ = 'U': Upper triangular part
+ = 'L': Lower triangular part
+ Otherwise: All of the matrix A
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,N)
+ The m by n matrix A. If UPLO = 'U', only the upper triangle
+ or trapezoid is accessed; if UPLO = 'L', only the lower
+ triangle or trapezoid is accessed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ B (output) DOUBLE PRECISION array, dimension (LDB,N)
+ On exit, B = A in the locations specified by UPLO.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,M).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+ return 0;
+
+/* End of DLACPY */
+
+} /* dlacpy_ */
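+
+/*
+   Editor's sketch (illustrative): copying only a triangle leaves the
+   rest of B untouched, which is why B is declared (output) but may be
+   only partially written.
+
+       integer m = 2, n = 2, ld = 2;
+       doublereal a[4] = {1., 2., 3., 4.};   column-major A = [1 3; 2 4]
+       doublereal b[4] = {0., 0., 0., 0.};
+       dlacpy_("U", &m, &n, a, &ld, b, &ld);
+       b is now {1., 0., 3., 4.}: only the upper triangle was copied
+*/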
+
+/* Subroutine */ int dladiv_(doublereal *a, doublereal *b, doublereal *c__,
+ doublereal *d__, doublereal *p, doublereal *q)
+{
+ static doublereal e, f;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLADIV performs complex division in real arithmetic
+
+ a + i*b
+ p + i*q = ---------
+ c + i*d
+
+ The algorithm is due to Robert L. Smith and can be found
+ in D. Knuth, The Art of Computer Programming, Vol. 2, p. 195.
+
+ Arguments
+ =========
+
+ A (input) DOUBLE PRECISION
+ B (input) DOUBLE PRECISION
+ C (input) DOUBLE PRECISION
+ D (input) DOUBLE PRECISION
+ The scalars a, b, c, and d in the above expression.
+
+ P (output) DOUBLE PRECISION
+ Q (output) DOUBLE PRECISION
+ The scalars p and q in the above expression.
+
+ =====================================================================
+*/
+
+
+ if (abs(*d__) < abs(*c__)) {
+ e = *d__ / *c__;
+ f = *c__ + *d__ * e;
+ *p = (*a + *b * e) / f;
+ *q = (*b - *a * e) / f;
+ } else {
+ e = *c__ / *d__;
+ f = *d__ + *c__ * e;
+ *p = (*b + *a * e) / f;
+ *q = (-(*a) + *b * e) / f;
+ }
+
+ return 0;
+
+/* End of DLADIV */
+
+} /* dladiv_ */
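+
+/*
+   Editor's sketch (illustrative): a worked call computing
+   (1 + 2i) / (3 + 4i) = (11 + 2i) / 25 = 0.44 + 0.08i:
+
+       doublereal a = 1., b = 2., c = 3., d = 4., p, q;
+       dladiv_(&a, &b, &c, &d, &p, &q);
+       on return p == 0.44 and q == 0.08
+
+   Smith's trick of scaling by the larger of |c| and |d| keeps the
+   intermediates from overflowing where the naive formulas
+   (a*c + b*d) / (c*c + d*d) and (b*c - a*d) / (c*c + d*d) would.
+*/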
+
+/* Subroutine */ int dlae2_(doublereal *a, doublereal *b, doublereal *c__,
+ doublereal *rt1, doublereal *rt2)
+{
+ /* System generated locals */
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal ab, df, tb, sm, rt, adf, acmn, acmx;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLAE2 computes the eigenvalues of a 2-by-2 symmetric matrix
+ [ A B ]
+ [ B C ].
+ On return, RT1 is the eigenvalue of larger absolute value, and RT2
+ is the eigenvalue of smaller absolute value.
+
+ Arguments
+ =========
+
+ A (input) DOUBLE PRECISION
+ The (1,1) element of the 2-by-2 matrix.
+
+ B (input) DOUBLE PRECISION
+ The (1,2) and (2,1) elements of the 2-by-2 matrix.
+
+ C (input) DOUBLE PRECISION
+ The (2,2) element of the 2-by-2 matrix.
+
+ RT1 (output) DOUBLE PRECISION
+ The eigenvalue of larger absolute value.
+
+ RT2 (output) DOUBLE PRECISION
+ The eigenvalue of smaller absolute value.
+
+ Further Details
+ ===============
+
+ RT1 is accurate to a few ulps barring over/underflow.
+
+ RT2 may be inaccurate if there is massive cancellation in the
+ determinant A*C-B*B; higher precision or correctly rounded or
+ correctly truncated arithmetic would be needed to compute RT2
+ accurately in all cases.
+
+ Overflow is possible only if RT1 is within a factor of 5 of overflow.
+ Underflow is harmless if the input data is 0 or exceeds
+ underflow_threshold / macheps.
+
+ =====================================================================
+
+
+ Compute the eigenvalues
+*/
+
+ sm = *a + *c__;
+ df = *a - *c__;
+ adf = abs(df);
+ tb = *b + *b;
+ ab = abs(tb);
+ if (abs(*a) > abs(*c__)) {
+ acmx = *a;
+ acmn = *c__;
+ } else {
+ acmx = *c__;
+ acmn = *a;
+ }
+ if (adf > ab) {
+/* Computing 2nd power */
+ d__1 = ab / adf;
+ rt = adf * sqrt(d__1 * d__1 + 1.);
+ } else if (adf < ab) {
+/* Computing 2nd power */
+ d__1 = adf / ab;
+ rt = ab * sqrt(d__1 * d__1 + 1.);
+ } else {
+
+/* Includes case AB=ADF=0 */
+
+ rt = ab * sqrt(2.);
+ }
+ if (sm < 0.) {
+ *rt1 = (sm - rt) * .5;
+
+/*
+ Order of execution important.
+ To get fully accurate smaller eigenvalue,
+ next line needs to be executed in higher precision.
+*/
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else if (sm > 0.) {
+ *rt1 = (sm + rt) * .5;
+
+/*
+ Order of execution important.
+ To get fully accurate smaller eigenvalue,
+ next line needs to be executed in higher precision.
+*/
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else {
+
+/* Includes case RT1 = RT2 = 0 */
+
+ *rt1 = rt * .5;
+ *rt2 = rt * -.5;
+ }
+ return 0;
+
+/* End of DLAE2 */
+
+} /* dlae2_ */
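+
+/*
+   Editor's sketch (illustrative): for the symmetric matrix [2 1; 1 2],
+   whose eigenvalues are 3 and 1:
+
+       doublereal a = 2., b = 1., c = 2., rt1, rt2;
+       dlae2_(&a, &b, &c, &rt1, &rt2);
+       on return rt1 == 3. (larger magnitude) and rt2 == 1.
+
+   The companion routine dlaev2_ additionally returns the rotation that
+   diagonalizes the matrix.
+*/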
+
+/* Subroutine */ int dlaed0_(integer *icompq, integer *qsiz, integer *n,
+ doublereal *d__, doublereal *e, doublereal *q, integer *ldq,
+ doublereal *qstore, integer *ldqs, doublereal *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, j, k, iq, lgn, msd2, smm1, spm1, spm2;
+ static doublereal temp;
+ static integer curr;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ static integer iperm;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static integer indxq, iwrem;
+ extern /* Subroutine */ int dlaed1_(integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *, integer *);
+ static integer iqptr;
+ extern /* Subroutine */ int dlaed7_(integer *, integer *, integer *,
+ integer *, integer *, integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *, integer *, integer *, integer *, integer *, doublereal
+ *, doublereal *, integer *, integer *);
+ static integer tlvls;
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *);
+ static integer igivcl;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer igivnm, submat, curprb, subpbs, igivpt;
+ extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *);
+ static integer curlvl, matsiz, iprmpt, smlsiz;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLAED0 computes all eigenvalues and corresponding eigenvectors of a
+ symmetric tridiagonal matrix using the divide and conquer method.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ = 0: Compute eigenvalues only.
+ = 1: Compute eigenvectors of original dense symmetric matrix
+ also. On entry, Q contains the orthogonal matrix used
+ to reduce the original matrix to tridiagonal form.
+ = 2: Compute eigenvalues and eigenvectors of tridiagonal
+ matrix.
+
+ QSIZ (input) INTEGER
+ The dimension of the orthogonal matrix used to reduce
+ the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the main diagonal of the tridiagonal matrix.
+ On exit, its eigenvalues.
+
+ E (input) DOUBLE PRECISION array, dimension (N-1)
+ The off-diagonal elements of the tridiagonal matrix.
+ On exit, E has been destroyed.
+
+ Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N)
+ On entry, Q must contain an N-by-N orthogonal matrix.
+ If ICOMPQ = 0 Q is not referenced.
+ If ICOMPQ = 1 On entry, Q is a subset of the columns of the
+ orthogonal matrix used to reduce the full
+ matrix to tridiagonal form corresponding to
+ the subset of the full matrix which is being
+ decomposed at this time.
+ If ICOMPQ = 2 On entry, Q will be the identity matrix.
+ On exit, Q contains the eigenvectors of the
+ tridiagonal matrix.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. If eigenvectors are
+ desired, then LDQ >= max(1,N). In any case, LDQ >= 1.
+
+ QSTORE (workspace) DOUBLE PRECISION array, dimension (LDQS, N)
+ Referenced only when ICOMPQ = 1. Used to store parts of
+ the eigenvector matrix when the updating matrix multiplies
+ take place.
+
+ LDQS (input) INTEGER
+ The leading dimension of the array QSTORE. If ICOMPQ = 1,
+ then LDQS >= max(1,N). In any case, LDQS >= 1.
+
+ WORK (workspace) DOUBLE PRECISION array,
+ If ICOMPQ = 0 or 1, the dimension of WORK must be at least
+ 1 + 3*N + 2*N*lg N + 2*N**2
+ ( lg( N ) = smallest integer k
+ such that 2^k >= N )
+ If ICOMPQ = 2, the dimension of WORK must be at least
+ 4*N + N**2.
+
+ IWORK (workspace) INTEGER array,
+ If ICOMPQ = 0 or 1, the dimension of IWORK must be at least
+ 6 + 6*N + 5*N*lg N.
+ ( lg( N ) = smallest integer k
+ such that 2^k >= N )
+ If ICOMPQ = 2, the dimension of IWORK must be at least
+ 3 + 5*N.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute an eigenvalue while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through mod(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
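+
+/*
+   Editorial note (not part of the translated LAPACK source): a worked
+   sizing example for the workspace rules quoted above.  For N = 1000
+   and ICOMPQ = 0 or 1, lg(N) = 10 because 2**10 = 1024 is the
+   smallest power of two >= 1000, so
+
+       WORK  needs 1 + 3*1000 + 2*1000*10 + 2*1000**2 = 2023001 doubles
+       IWORK needs 6 + 6*1000 + 5*1000*10             =   56006 integers
+
+   while ICOMPQ = 2 at the same N needs only 4*1000 + 1000**2 = 1004000
+   doubles and 3 + 5*1000 = 5003 integers.
+*/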
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ qstore_dim1 = *ldqs;
+ qstore_offset = 1 + qstore_dim1;
+ qstore -= qstore_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 2)) {
+ *info = -1;
+ } else if (*icompq == 1 && *qsiz < max(0,*n)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ldq < max(1,*n)) {
+ *info = -7;
+ } else if (*ldqs < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED0", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ smlsiz = ilaenv_(&c__9, "DLAED0", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+
+/*
+ Determine the size and placement of the submatrices, and save in
+ the leading elements of IWORK.
+*/
+
+ iwork[1] = *n;
+ subpbs = 1;
+ tlvls = 0;
+L10:
+ if (iwork[subpbs] > smlsiz) {
+ for (j = subpbs; j >= 1; --j) {
+ iwork[j * 2] = (iwork[j] + 1) / 2;
+ iwork[((j) << (1)) - 1] = iwork[j] / 2;
+/* L20: */
+ }
+ ++tlvls;
+ subpbs <<= 1;
+ goto L10;
+ }
+ i__1 = subpbs;
+ for (j = 2; j <= i__1; ++j) {
+ iwork[j] += iwork[j - 1];
+/* L30: */
+ }
+
+/*
+ Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1
+ using rank-1 modifications (cuts).
+*/
+
+ spm1 = subpbs - 1;
+ i__1 = spm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ submat = iwork[i__] + 1;
+ smm1 = submat - 1;
+ d__[smm1] -= (d__1 = e[smm1], abs(d__1));
+ d__[submat] -= (d__1 = e[smm1], abs(d__1));
+/* L40: */
+ }
+
+ indxq = ((*n) << (2)) + 3;
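+
+/*
+   Editorial note: f2c renders 4*N as ((*n) << (2)); INDXQ is the
+   offset within IWORK, past the leading entries used for submatrix
+   boundaries and merge workspace, at which the N sorting-permutation
+   slots begin (IWORK has 3 + 5*N entries in total when ICOMPQ = 2).
+*/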
+ if (*icompq != 2) {
+
+/*
+ Set up workspaces for eigenvalues only/accumulate new vectors
+ routine
+*/
+
+ temp = log((doublereal) (*n)) / log(2.);
+ lgn = (integer) temp;
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
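+
+/*
+   Editorial note: LGN starts as the truncation of LOG(N)/LOG(2), and
+   the POW_II test is applied twice so that, even when rounding in the
+   floating-point logarithm leaves the truncated value short, the
+   final LGN satisfies 2**LGN >= N.
+*/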
+ iprmpt = indxq + *n + 1;
+ iperm = iprmpt + *n * lgn;
+ iqptr = iperm + *n * lgn;
+ igivpt = iqptr + *n + 2;
+ igivcl = igivpt + *n * lgn;
+
+ igivnm = 1;
+ iq = igivnm + ((*n) << (1)) * lgn;
+/* Computing 2nd power */
+ i__1 = *n;
+ iwrem = iq + i__1 * i__1 + 1;
+
+/* Initialize pointers */
+
+ i__1 = subpbs;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ iwork[iprmpt + i__] = 1;
+ iwork[igivpt + i__] = 1;
+/* L50: */
+ }
+ iwork[iqptr] = 1;
+ }
+
+/*
+ Solve each submatrix eigenproblem at the bottom of the divide and
+ conquer tree.
+*/
+
+ curr = 0;
+ i__1 = spm1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[1];
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 1] - iwork[i__];
+ }
+ if (*icompq == 2) {
+ dsteqr_("I", &matsiz, &d__[submat], &e[submat], &q[submat +
+ submat * q_dim1], ldq, &work[1], info);
+ if (*info != 0) {
+ goto L130;
+ }
+ } else {
+ dsteqr_("I", &matsiz, &d__[submat], &e[submat], &work[iq - 1 +
+ iwork[iqptr + curr]], &matsiz, &work[1], info);
+ if (*info != 0) {
+ goto L130;
+ }
+ if (*icompq == 1) {
+ dgemm_("N", "N", qsiz, &matsiz, &matsiz, &c_b2865, &q[submat *
+ q_dim1 + 1], ldq, &work[iq - 1 + iwork[iqptr + curr]]
+ , &matsiz, &c_b2879, &qstore[submat * qstore_dim1 + 1]
+ , ldqs);
+ }
+/* Computing 2nd power */
+ i__2 = matsiz;
+ iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2;
+ ++curr;
+ }
+ k = 1;
+ i__2 = iwork[i__ + 1];
+ for (j = submat; j <= i__2; ++j) {
+ iwork[indxq + j] = k;
+ ++k;
+/* L60: */
+ }
+/* L70: */
+ }
+
+/*
+ Successively merge eigensystems of adjacent submatrices
+ into eigensystem for the corresponding larger matrix.
+
+ while ( SUBPBS > 1 )
+*/
+
+ curlvl = 1;
+L80:
+ if (subpbs > 1) {
+ spm2 = subpbs - 2;
+ i__1 = spm2;
+ for (i__ = 0; i__ <= i__1; i__ += 2) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[2];
+ msd2 = iwork[1];
+ curprb = 0;
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 2] - iwork[i__];
+ msd2 = matsiz / 2;
+ ++curprb;
+ }
+
+/*
+ Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2)
+ into an eigensystem of size MATSIZ.
+ DLAED1 is used only for the full eigensystem of a tridiagonal
+ matrix.
+ DLAED7 handles the cases in which eigenvalues only or eigenvalues
+ and eigenvectors of a full symmetric matrix (which was reduced to
+ tridiagonal form) are desired.
+*/
+
+ if (*icompq == 2) {
+ dlaed1_(&matsiz, &d__[submat], &q[submat + submat * q_dim1],
+ ldq, &iwork[indxq + submat], &e[submat + msd2 - 1], &
+ msd2, &work[1], &iwork[subpbs + 1], info);
+ } else {
+ dlaed7_(icompq, &matsiz, qsiz, &tlvls, &curlvl, &curprb, &d__[
+ submat], &qstore[submat * qstore_dim1 + 1], ldqs, &
+ iwork[indxq + submat], &e[submat + msd2 - 1], &msd2, &
+ work[iq], &iwork[iqptr], &iwork[iprmpt], &iwork[iperm]
+ , &iwork[igivpt], &iwork[igivcl], &work[igivnm], &
+ work[iwrem], &iwork[subpbs + 1], info);
+ }
+ if (*info != 0) {
+ goto L130;
+ }
+ iwork[i__ / 2 + 1] = iwork[i__ + 2];
+/* L90: */
+ }
+ subpbs /= 2;
+ ++curlvl;
+ goto L80;
+ }
+
+/*
+ end while
+
+ Re-merge the eigenvalues/vectors which were deflated at the final
+ merge step.
+*/
+
+ if (*icompq == 1) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ work[i__] = d__[j];
+ dcopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1
+ + 1], &c__1);
+/* L100: */
+ }
+ dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
+ } else if (*icompq == 2) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ work[i__] = d__[j];
+ dcopy_(n, &q[j * q_dim1 + 1], &c__1, &work[*n * i__ + 1], &c__1);
+/* L110: */
+ }
+ dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
+ dlacpy_("A", n, n, &work[*n + 1], n, &q[q_offset], ldq);
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ work[i__] = d__[j];
+/* L120: */
+ }
+ dcopy_(n, &work[1], &c__1, &d__[1], &c__1);
+ }
+ goto L140;
+
+L130:
+ *info = submat * (*n + 1) + submat + matsiz - 1;
+
+L140:
+ return 0;
+
+/* End of DLAED0 */
+
+} /* dlaed0_ */
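+
+/*
+   Editorial note: an illustrative caller for dlaed0_ with ICOMPQ = 2,
+   kept inside a comment so the translated source is unchanged; the
+   identifiers and values are hypothetical.  Workspace sizes follow
+   the DLAED0 docstring (4*N + N**2 doubles, 3 + 5*N integers), Q must
+   hold the identity on entry in this mode, and note that the argument
+   checks above require LDQS >= max(1,N) even though QSTORE itself is
+   not referenced when ICOMPQ = 2.
+
+       integer icompq = 2, qsiz = 4, n = 4, ldq = 4, ldqs = 4, info;
+       doublereal d[4] = {1., 2., 3., 4.};   (diagonal of T)
+       doublereal e[3] = {.5, .5, .5};       (off-diagonal of T)
+       doublereal q[16];                     (set to the identity first)
+       doublereal qstore[16];                (checked but not referenced)
+       doublereal work[4 * 4 + 4 * 4];       (4*N + N**2 = 32)
+       integer iwork[3 + 5 * 4];             (3 + 5*N = 23)
+
+       dlaed0_(&icompq, &qsiz, &n, d, e, q, &ldq, qstore, &ldqs,
+               work, iwork, &info);
+
+   On success (info == 0), d holds the eigenvalues in ascending order
+   and q the corresponding eigenvectors.
+*/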
+
+/* Subroutine */ int dlaed1_(integer *n, doublereal *d__, doublereal *q,
+ integer *ldq, integer *indxq, doublereal *rho, integer *cutpnt,
+ doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, k, n1, n2, is, iw, iz, iq2, zpp1, indx, indxc;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static integer indxp;
+ extern /* Subroutine */ int dlaed2_(integer *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *, integer *, integer *, integer *), dlaed3_(integer *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, doublereal *, integer *);
+ static integer idlmda;
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer *);
+ static integer coltyp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLAED1 computes the updated eigensystem of a diagonal
+ matrix after modification by a rank-one symmetric matrix. This
+ routine is used only for the eigenproblem which requires all
+ eigenvalues and eigenvectors of a tridiagonal matrix. DLAED7 handles
+ the case in which eigenvalues only or eigenvalues and eigenvectors
+ of a full symmetric matrix (which was reduced to tridiagonal form)
+ are desired.
+
+ T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out)
+
+ where Z = Q'u, u is a vector of length N with ones in the
+    CUTPNT-th and (CUTPNT + 1)-th elements and zeros elsewhere.
+
+ The eigenvectors of the original matrix are stored in Q, and the
+ eigenvalues are in D. The algorithm consists of three stages:
+
+ The first stage consists of deflating the size of the problem
+ when there are multiple eigenvalues or if there is a zero in
+    the Z vector.  For each such occurrence the dimension of the
+ secular equation problem is reduced by one. This stage is
+ performed by the routine DLAED2.
+
+ The second stage consists of calculating the updated
+ eigenvalues. This is done by finding the roots of the secular
+ equation via the routine DLAED4 (as called by DLAED3).
+ This routine also calculates the eigenvectors of the current
+ problem.
+
+ The final stage consists of computing the updated eigenvectors
+ directly using the updated eigenvalues. The eigenvectors for
+ the current problem are multiplied with the eigenvectors from
+ the overall problem.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the eigenvalues of the rank-1-perturbed matrix.
+ On exit, the eigenvalues of the repaired matrix.
+
+ Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N)
+ On entry, the eigenvectors of the rank-1-perturbed matrix.
+ On exit, the eigenvectors of the repaired tridiagonal matrix.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ INDXQ (input/output) INTEGER array, dimension (N)
+ On entry, the permutation which separately sorts the two
+ subproblems in D into ascending order.
+ On exit, the permutation which will reintegrate the
+ subproblems back into sorted order,
+ i.e. D( INDXQ( I = 1, N ) ) will be in ascending order.
+
+ RHO (input) DOUBLE PRECISION
+ The subdiagonal entry used to create the rank-1 modification.
+
+ CUTPNT (input) INTEGER
+ The location of the last eigenvalue in the leading sub-matrix.
+ min(1,N) <= CUTPNT <= N/2.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (4*N + N**2)
+
+ IWORK (workspace) INTEGER array, dimension (4*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, an eigenvalue did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+ Modified by Francoise Tisseur, University of Tennessee.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
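+
+/*
+   Editorial note: the Z and RHO fed to this routine come from the
+   rank-one tear performed in DLAED0.  Splitting the tridiagonal T at
+   CUTPNT,
+
+       T = [ T1  0  ] + RHO * u * u',    RHO = E(CUTPNT),
+           [ 0   T2 ]
+
+   where u has ones in positions CUTPNT and CUTPNT+1 (up to the sign
+   handling done in DLAED2) and T1, T2 have their corner diagonal
+   entries reduced accordingly.  With Q = diag(Q1, Q2) holding the
+   eigenvectors of the two halves, z = Q'u is the last row of Q1
+   stacked on the first row of Q2, which is exactly what the dcopy_
+   calls below gather.
+*/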
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -1;
+ } else if (*ldq < max(1,*n)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MIN */
+ i__1 = 1, i__2 = *n / 2;
+ if ((min(i__1,i__2) > *cutpnt) || (*n / 2 < *cutpnt)) {
+ *info = -7;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED1", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/*
+ The following values are integer pointers which indicate
+ the portion of the workspace
+ used by a particular array in DLAED2 and DLAED3.
+*/
+
+ iz = 1;
+ idlmda = iz + *n;
+ iw = idlmda + *n;
+ iq2 = iw + *n;
+
+ indx = 1;
+ indxc = indx + *n;
+ coltyp = indxc + *n;
+ indxp = coltyp + *n;
+
+
+/*
+ Form the z-vector which consists of the last row of Q_1 and the
+ first row of Q_2.
+*/
+
+ dcopy_(cutpnt, &q[*cutpnt + q_dim1], ldq, &work[iz], &c__1);
+ zpp1 = *cutpnt + 1;
+ i__1 = *n - *cutpnt;
+ dcopy_(&i__1, &q[zpp1 + zpp1 * q_dim1], ldq, &work[iz + *cutpnt], &c__1);
+
+/* Deflate eigenvalues. */
+
+ dlaed2_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, &indxq[1], rho, &work[
+ iz], &work[idlmda], &work[iw], &work[iq2], &iwork[indx], &iwork[
+ indxc], &iwork[indxp], &iwork[coltyp], info);
+
+ if (*info != 0) {
+ goto L20;
+ }
+
+/* Solve Secular Equation. */
+
+ if (k != 0) {
+ is = (iwork[coltyp] + iwork[coltyp + 1]) * *cutpnt + (iwork[coltyp +
+ 1] + iwork[coltyp + 2]) * (*n - *cutpnt) + iq2;
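+
+/*
+   Editorial note: IS points just past the non-deflated eigenvector
+   copies that DLAED2 packed into Q2: (CTOT(1)+CTOT(2))*CUTPNT doubles
+   for the upper blocks plus (CTOT(2)+CTOT(3))*(N-CUTPNT) for the
+   lower ones.  The S workspace handed to DLAED3 starts right there.
+*/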
+ dlaed3_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, rho, &work[idlmda],
+ &work[iq2], &iwork[indxc], &iwork[coltyp], &work[iw], &work[
+ is], info);
+ if (*info != 0) {
+ goto L20;
+ }
+
+/* Prepare the INDXQ sorting permutation. */
+
+ n1 = k;
+ n2 = *n - k;
+ dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indxq[i__] = i__;
+/* L10: */
+ }
+ }
+
+L20:
+ return 0;
+
+/* End of DLAED1 */
+
+} /* dlaed1_ */
+
+/* Subroutine */ int dlaed2_(integer *k, integer *n, integer *n1, doublereal *
+ d__, doublereal *q, integer *ldq, integer *indxq, doublereal *rho,
+ doublereal *z__, doublereal *dlamda, doublereal *w, doublereal *q2,
+ integer *indx, integer *indxc, integer *indxp, integer *coltyp,
+ integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal c__;
+ static integer i__, j;
+ static doublereal s, t;
+ static integer k2, n2, ct, nj, pj, js, iq1, iq2, n1p1;
+ static doublereal eps, tau, tol;
+ static integer psm[4], imax, jmax;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ static integer ctot[4];
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dcopy_(integer *, doublereal *, integer *, doublereal
+ *, integer *);
+
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), dlacpy_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DLAED2 merges the two sets of eigenvalues together into a single
+ sorted set. Then it tries to deflate the size of the problem.
+ There are two ways in which deflation can occur: when two or more
+ eigenvalues are close together or if there is a tiny entry in the
+ Z vector. For each such occurrence the order of the related secular
+ equation problem is reduced by one.
+
+ Arguments
+ =========
+
+ K (output) INTEGER
+ The number of non-deflated eigenvalues, and the order of the
+           related secular equation.  0 <= K <= N.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ N1 (input) INTEGER
+ The location of the last eigenvalue in the leading sub-matrix.
+ min(1,N) <= N1 <= N/2.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, D contains the eigenvalues of the two submatrices to
+ be combined.
+ On exit, D contains the trailing (N-K) updated eigenvalues
+ (those which were deflated) sorted into increasing order.
+
+ Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N)
+ On entry, Q contains the eigenvectors of two submatrices in
+ the two square blocks with corners at (1,1), (N1,N1)
+ and (N1+1, N1+1), (N,N).
+ On exit, Q contains the trailing (N-K) updated eigenvectors
+ (those which were deflated) in its last N-K columns.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ INDXQ (input/output) INTEGER array, dimension (N)
+ The permutation which separately sorts the two sub-problems
+ in D into ascending order. Note that elements in the second
+ half of this permutation must first have N1 added to their
+ values. Destroyed on exit.
+
+ RHO (input/output) DOUBLE PRECISION
+ On entry, the off-diagonal element associated with the rank-1
+ cut which originally split the two submatrices which are now
+ being recombined.
+ On exit, RHO has been modified to the value required by
+ DLAED3.
+
+ Z (input) DOUBLE PRECISION array, dimension (N)
+ On entry, Z contains the updating vector (the last
+ row of the first sub-eigenvector matrix and the first row of
+ the second sub-eigenvector matrix).
+ On exit, the contents of Z have been destroyed by the updating
+ process.
+
+ DLAMDA (output) DOUBLE PRECISION array, dimension (N)
+ A copy of the first K eigenvalues which will be used by
+ DLAED3 to form the secular equation.
+
+ W (output) DOUBLE PRECISION array, dimension (N)
+ The first k values of the final deflation-altered z-vector
+ which will be passed to DLAED3.
+
+ Q2 (output) DOUBLE PRECISION array, dimension (N1**2+(N-N1)**2)
+ A copy of the first K eigenvectors which will be used by
+ DLAED3 in a matrix multiply (DGEMM) to solve for the new
+ eigenvectors.
+
+ INDX (workspace) INTEGER array, dimension (N)
+ The permutation used to sort the contents of DLAMDA into
+ ascending order.
+
+ INDXC (output) INTEGER array, dimension (N)
+ The permutation used to arrange the columns of the deflated
+ Q matrix into three groups: the first group contains non-zero
+ elements only at and above N1, the second contains
+ non-zero elements only below N1, and the third is dense.
+
+ INDXP (workspace) INTEGER array, dimension (N)
+ The permutation used to place deflated values of D at the end
+ of the array. INDXP(1:K) points to the nondeflated D-values
+ and INDXP(K+1:N) points to the deflated eigenvalues.
+
+ COLTYP (workspace/output) INTEGER array, dimension (N)
+ During execution, a label which will indicate which of the
+ following types a column in the Q2 matrix is:
+ 1 : non-zero in the upper half only;
+ 2 : dense;
+ 3 : non-zero in the lower half only;
+ 4 : deflated.
+ On exit, COLTYP(i) is the number of columns of type i,
+ for i=1 to 4 only.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+ Modified by Francoise Tisseur, University of Tennessee.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --z__;
+ --dlamda;
+ --w;
+ --q2;
+ --indx;
+ --indxc;
+ --indxp;
+ --coltyp;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MIN */
+ i__1 = 1, i__2 = *n / 2;
+ if ((min(i__1,i__2) > *n1) || (*n / 2 < *n1)) {
+ *info = -3;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ n2 = *n - *n1;
+ n1p1 = *n1 + 1;
+
+ if (*rho < 0.) {
+ dscal_(&n2, &c_b3001, &z__[n1p1], &c__1);
+ }
+
+/*
+ Normalize z so that norm(z) = 1. Since z is the concatenation of
+ two normalized vectors, norm2(z) = sqrt(2).
+*/
+
+ t = 1. / sqrt(2.);
+ dscal_(n, &t, &z__[1], &c__1);
+
+/* RHO = ABS( norm(z)**2 * RHO ) */
+
+ *rho = (d__1 = *rho * 2., abs(d__1));
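+
+/*
+   Editorial note: Z was just scaled by 1/sqrt(2), so the update
+   RHO * z * z' written in terms of the normalized Z becomes
+   (2*RHO) * Z * Z'.  A negative RHO corresponds to a tear vector
+   (1, -1) instead of (1, 1), which was absorbed above by negating the
+   trailing half of Z, hence the absolute value here.
+*/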
+
+/* Sort the eigenvalues into increasing order */
+
+ i__1 = *n;
+ for (i__ = n1p1; i__ <= i__1; ++i__) {
+ indxq[i__] += *n1;
+/* L10: */
+ }
+
+/* re-integrate the deflated parts from the last pass */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = d__[indxq[i__]];
+/* L20: */
+ }
+ dlamrg_(n1, &n2, &dlamda[1], &c__1, &c__1, &indxc[1]);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indx[i__] = indxq[indxc[i__]];
+/* L30: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ imax = idamax_(n, &z__[1], &c__1);
+ jmax = idamax_(n, &d__[1], &c__1);
+ eps = EPSILON;
+/* Computing MAX */
+ d__3 = (d__1 = d__[jmax], abs(d__1)), d__4 = (d__2 = z__[imax], abs(d__2))
+ ;
+ tol = eps * 8. * max(d__3,d__4);
+
+/*
+ If the rank-1 modifier is small enough, no more needs to be done
+ except to reorganize Q so that its columns correspond with the
+ elements in D.
+*/
+
+ if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) {
+ *k = 0;
+ iq2 = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = indx[j];
+ dcopy_(n, &q[i__ * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
+ dlamda[j] = d__[i__];
+ iq2 += *n;
+/* L40: */
+ }
+ dlacpy_("A", n, n, &q2[1], n, &q[q_offset], ldq);
+ dcopy_(n, &dlamda[1], &c__1, &d__[1], &c__1);
+ goto L190;
+ }
+
+/*
+ If there are multiple eigenvalues then the problem deflates. Here
+    the number of equal eigenvalues is found.  As each equal
+ eigenvalue is found, an elementary reflector is computed to rotate
+ the corresponding eigensubspace so that the corresponding
+ components of Z are zero in this new basis.
+*/
+
+ i__1 = *n1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ coltyp[i__] = 1;
+/* L50: */
+ }
+ i__1 = *n;
+ for (i__ = n1p1; i__ <= i__1; ++i__) {
+ coltyp[i__] = 3;
+/* L60: */
+ }
+
+
+ *k = 0;
+ k2 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ nj = indx[j];
+ if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ coltyp[nj] = 4;
+ indxp[k2] = nj;
+ if (j == *n) {
+ goto L100;
+ }
+ } else {
+ pj = nj;
+ goto L80;
+ }
+/* L70: */
+ }
+L80:
+ ++j;
+ nj = indx[j];
+ if (j > *n) {
+ goto L100;
+ }
+ if (*rho * (d__1 = z__[nj], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ coltyp[nj] = 4;
+ indxp[k2] = nj;
+ } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+ s = z__[pj];
+ c__ = z__[nj];
+
+/*
+ Find sqrt(a**2+b**2) without overflow or
+ destructive underflow.
+*/
+
+ tau = dlapy2_(&c__, &s);
+ t = d__[nj] - d__[pj];
+ c__ /= tau;
+ s = -s / tau;
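+
+/*
+   Editorial note: (c, s) now define a Givens rotation G with
+   G applied to (z(pj), z(nj))' giving (0, tau)', so z(pj) is zeroed
+   exactly.  The same rotation applied to diag(d(pj), d(nj)) creates
+   the off-diagonal entry t*c*s with t = d(nj) - d(pj); deflation is
+   accepted only when that entry is below the tolerance TOL computed
+   above, and the rotated diagonal entries are the c**2/s**2 mixtures
+   assigned below.
+*/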
+ if ((d__1 = t * c__ * s, abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ z__[nj] = tau;
+ z__[pj] = 0.;
+ if (coltyp[nj] != coltyp[pj]) {
+ coltyp[nj] = 2;
+ }
+ coltyp[pj] = 4;
+ drot_(n, &q[pj * q_dim1 + 1], &c__1, &q[nj * q_dim1 + 1], &c__1, &
+ c__, &s);
+/* Computing 2nd power */
+ d__1 = c__;
+/* Computing 2nd power */
+ d__2 = s;
+ t = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2);
+/* Computing 2nd power */
+ d__1 = s;
+/* Computing 2nd power */
+ d__2 = c__;
+ d__[nj] = d__[pj] * (d__1 * d__1) + d__[nj] * (d__2 * d__2);
+ d__[pj] = t;
+ --k2;
+ i__ = 1;
+L90:
+ if (k2 + i__ <= *n) {
+ if (d__[pj] < d__[indxp[k2 + i__]]) {
+ indxp[k2 + i__ - 1] = indxp[k2 + i__];
+ indxp[k2 + i__] = pj;
+ ++i__;
+ goto L90;
+ } else {
+ indxp[k2 + i__ - 1] = pj;
+ }
+ } else {
+ indxp[k2 + i__ - 1] = pj;
+ }
+ pj = nj;
+ } else {
+ ++(*k);
+ dlamda[*k] = d__[pj];
+ w[*k] = z__[pj];
+ indxp[*k] = pj;
+ pj = nj;
+ }
+ }
+ goto L80;
+L100:
+
+/* Record the last eigenvalue. */
+
+ ++(*k);
+ dlamda[*k] = d__[pj];
+ w[*k] = z__[pj];
+ indxp[*k] = pj;
+
+/*
+ Count up the total number of the various types of columns, then
+ form a permutation which positions the four column types into
+ four uniform groups (although one or more of these groups may be
+ empty).
+*/
+
+ for (j = 1; j <= 4; ++j) {
+ ctot[j - 1] = 0;
+/* L110: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ ct = coltyp[j];
+ ++ctot[ct - 1];
+/* L120: */
+ }
+
+/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
+
+ psm[0] = 1;
+ psm[1] = ctot[0] + 1;
+ psm[2] = psm[1] + ctot[1];
+ psm[3] = psm[2] + ctot[2];
+ *k = *n - ctot[3];
+
+/*
+ Fill out the INDXC array so that the permutation which it induces
+ will place all type-1 columns first, all type-2 columns next,
+ then all type-3's, and finally all type-4's.
+*/
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ js = indxp[j];
+ ct = coltyp[js];
+ indx[psm[ct - 1]] = js;
+ indxc[psm[ct - 1]] = j;
+ ++psm[ct - 1];
+/* L130: */
+ }
+
+/*
+ Sort the eigenvalues and corresponding eigenvectors into DLAMDA
+ and Q2 respectively. The eigenvalues/vectors which were not
+ deflated go into the first K slots of DLAMDA and Q2 respectively,
+ while those which were deflated go into the last N - K slots.
+*/
+
+ i__ = 1;
+ iq1 = 1;
+ iq2 = (ctot[0] + ctot[1]) * *n1 + 1;
+ i__1 = ctot[0];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
+ z__[i__] = d__[js];
+ ++i__;
+ iq1 += *n1;
+/* L140: */
+ }
+
+ i__1 = ctot[1];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ dcopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
+ dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
+ z__[i__] = d__[js];
+ ++i__;
+ iq1 += *n1;
+ iq2 += n2;
+/* L150: */
+ }
+
+ i__1 = ctot[2];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ dcopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
+ z__[i__] = d__[js];
+ ++i__;
+ iq2 += n2;
+/* L160: */
+ }
+
+ iq1 = iq2;
+ i__1 = ctot[3];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ dcopy_(n, &q[js * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
+ iq2 += *n;
+ z__[i__] = d__[js];
+ ++i__;
+/* L170: */
+ }
+
+/*
+ The deflated eigenvalues and their corresponding vectors go back
+ into the last N - K slots of D and Q respectively.
+*/
+
+ dlacpy_("A", n, &ctot[3], &q2[iq1], n, &q[(*k + 1) * q_dim1 + 1], ldq);
+ i__1 = *n - *k;
+ dcopy_(&i__1, &z__[*k + 1], &c__1, &d__[*k + 1], &c__1);
+
+/* Copy CTOT into COLTYP for referencing in DLAED3. */
+
+ for (j = 1; j <= 4; ++j) {
+ coltyp[j] = ctot[j - 1];
+/* L180: */
+ }
+
+L190:
+ return 0;
+
+/* End of DLAED2 */
+
+} /* dlaed2_ */
+
+/* Subroutine */ int dlaed3_(integer *k, integer *n, integer *n1, doublereal *
+ d__, doublereal *q, integer *ldq, doublereal *rho, doublereal *dlamda,
+ doublereal *q2, integer *indx, integer *ctot, doublereal *w,
+ doublereal *s, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer i__, j, n2, n12, ii, n23, iq2;
+ static doublereal temp;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *),
+ dcopy_(integer *, doublereal *, integer *, doublereal *, integer
+ *), dlaed4_(integer *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *);
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *),
+ dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLAED3 finds the roots of the secular equation, as defined by the
+ values in D, W, and RHO, between 1 and K. It makes the
+ appropriate calls to DLAED4 and then updates the eigenvectors by
+ multiplying the matrix of eigenvectors of the pair of eigensystems
+ being combined by the matrix of eigenvectors of the K-by-K system
+ which is solved here.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ K (input) INTEGER
+ The number of terms in the rational function to be solved by
+ DLAED4. K >= 0.
+
+ N (input) INTEGER
+ The number of rows and columns in the Q matrix.
+           N >= K (deflation may result in N > K).
+
+ N1 (input) INTEGER
+ The location of the last eigenvalue in the leading submatrix.
+ min(1,N) <= N1 <= N/2.
+
+ D (output) DOUBLE PRECISION array, dimension (N)
+ D(I) contains the updated eigenvalues for
+ 1 <= I <= K.
+
+ Q (output) DOUBLE PRECISION array, dimension (LDQ,N)
+ Initially the first K columns are used as workspace.
+ On output the columns 1 to K contain
+ the updated eigenvectors.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ RHO (input) DOUBLE PRECISION
+ The value of the parameter in the rank one update equation.
+ RHO >= 0 required.
+
+ DLAMDA (input/output) DOUBLE PRECISION array, dimension (K)
+ The first K elements of this array contain the old roots
+ of the deflated updating problem. These are the poles
+ of the secular equation. May be changed on output by
+ having lowest order bit set to zero on Cray X-MP, Cray Y-MP,
+ Cray-2, or Cray C-90, as described above.
+
+ Q2 (input) DOUBLE PRECISION array, dimension (LDQ2, N)
+ The first K columns of this matrix contain the non-deflated
+ eigenvectors for the split problem.
+
+ INDX (input) INTEGER array, dimension (N)
+ The permutation used to arrange the columns of the deflated
+ Q matrix into three groups (see DLAED2).
+ The rows of the eigenvectors found by DLAED4 must be likewise
+ permuted before the matrix multiply can take place.
+
+ CTOT (input) INTEGER array, dimension (4)
+ A count of the total number of the various types of columns
+ in Q, as described in INDX. The fourth column type is any
+ column which has been deflated.
+
+ W (input/output) DOUBLE PRECISION array, dimension (K)
+ The first K elements of this array contain the components
+ of the deflation-adjusted updating vector. Destroyed on
+ output.
+
+ S (workspace) DOUBLE PRECISION array, dimension (N1 + 1)*K
+ Will contain the eigenvectors of the repaired matrix which
+ will be multiplied by the previously accumulated eigenvectors
+ to update the system.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, an eigenvalue did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+ Modified by Francoise Tisseur, University of Tennessee.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --dlamda;
+ --q2;
+ --indx;
+ --ctot;
+ --w;
+ --s;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*k < 0) {
+ *info = -1;
+ } else if (*n < *k) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 0) {
+ return 0;
+ }
+
+/*
+ Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can
+ be computed with high relative accuracy (barring over/underflow).
+ This is a problem on machines without a guard digit in
+ add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+ The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I),
+ which on any of these machines zeros out the bottommost
+ bit of DLAMDA(I) if it is 1; this makes the subsequent
+ subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation
+ occurs. On binary machines with a guard digit (almost all
+ machines) it does not change DLAMDA(I) at all. On hexadecimal
+ and decimal machines with a guard digit, it slightly
+ changes the bottommost bits of DLAMDA(I). It does not account
+ for hexadecimal or decimal machines without guard digits
+ (we know of none). We use a subroutine call to compute
+    2*DLAMDA(I) to prevent optimizing compilers from eliminating
+ this code.
+*/
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
+/* L10: */
+ }
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
+ info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ goto L120;
+ }
+/* L20: */
+ }
+
+ if (*k == 1) {
+ goto L110;
+ }
+ if (*k == 2) {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ w[1] = q[j * q_dim1 + 1];
+ w[2] = q[j * q_dim1 + 2];
+ ii = indx[1];
+ q[j * q_dim1 + 1] = w[ii];
+ ii = indx[2];
+ q[j * q_dim1 + 2] = w[ii];
+/* L30: */
+ }
+ goto L110;
+ }
+
+/* Compute updated W. */
+
+ dcopy_(k, &w[1], &c__1, &s[1], &c__1);
+
+/* Initialize W(I) = Q(I,I) */
+
+ i__1 = *ldq + 1;
+ dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L40: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L50: */
+ }
+/* L60: */
+ }
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__1 = sqrt(-w[i__]);
+ w[i__] = d_sign(&d__1, &s[i__]);
+/* L70: */
+ }
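+
+/*
+   Editorial note: the loops above implement the Gu/Eisenstat
+   stabilization.  Rather than trusting the input W, each component of
+   the updating vector is rebuilt from the computed roots,
+
+       z(i)**2 = prod_j (lambda(j) - dlamda(i))
+                 / prod_{j != i} (dlamda(j) - dlamda(i)),
+
+   using the differences dlamda(i) - lambda(j) that DLAED4 left in Q;
+   only the sign is taken from the original W (saved in S).  The
+   eigenvectors formed from this z below are numerically orthogonal.
+*/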
+
+/* Compute eigenvectors of the modified rank-1 modification. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ s[i__] = w[i__] / q[i__ + j * q_dim1];
+/* L80: */
+ }
+ temp = dnrm2_(k, &s[1], &c__1);
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ ii = indx[i__];
+ q[i__ + j * q_dim1] = s[ii] / temp;
+/* L90: */
+ }
+/* L100: */
+ }
+
+/* Compute the updated eigenvectors. */
+
+L110:
+
+ n2 = *n - *n1;
+ n12 = ctot[1] + ctot[2];
+ n23 = ctot[2] + ctot[3];
+
+ dlacpy_("A", &n23, k, &q[ctot[1] + 1 + q_dim1], ldq, &s[1], &n23);
+ iq2 = *n1 * n12 + 1;
+ if (n23 != 0) {
+ dgemm_("N", "N", &n2, k, &n23, &c_b2865, &q2[iq2], &n2, &s[1], &n23, &
+ c_b2879, &q[*n1 + 1 + q_dim1], ldq);
+ } else {
+ dlaset_("A", &n2, k, &c_b2879, &c_b2879, &q[*n1 + 1 + q_dim1], ldq);
+ }
+
+ dlacpy_("A", &n12, k, &q[q_offset], ldq, &s[1], &n12);
+ if (n12 != 0) {
+ dgemm_("N", "N", n1, k, &n12, &c_b2865, &q2[1], n1, &s[1], &n12, &
+ c_b2879, &q[q_offset], ldq);
+ } else {
+ dlaset_("A", n1, k, &c_b2879, &c_b2879, &q[q_dim1 + 1], ldq);
+ }
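+
+/*
+   Editorial note: the two DGEMM calls above exploit the column
+   grouping established by DLAED2.  Columns of type 1 and 2 (N12 of
+   them) are the only ones with nonzeros in the top N1 rows, and
+   columns of type 2 and 3 (N23 of them) the only ones with nonzeros
+   in the bottom N - N1 rows, so each half of the updated eigenvector
+   matrix is a product of a smaller dense block of Q2 with the
+   corresponding rows of S.
+*/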
+
+
+L120:
+ return 0;
+
+/* End of DLAED3 */
+
+} /* dlaed3_ */
+
+/* Subroutine */ int dlaed4_(integer *n, integer *i__, doublereal *d__,
+ doublereal *z__, doublereal *delta, doublereal *rho, doublereal *dlam,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal a, b, c__;
+ static integer j;
+ static doublereal w;
+ static integer ii;
+ static doublereal dw, zz[3];
+ static integer ip1;
+ static doublereal del, eta, phi, eps, tau, psi;
+ static integer iim1, iip1;
+ static doublereal dphi, dpsi;
+ static integer iter;
+ static doublereal temp, prew, temp1, dltlb, dltub, midpt;
+ static integer niter;
+ static logical swtch;
+ extern /* Subroutine */ int dlaed5_(integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *), dlaed6_(integer *,
+ logical *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ static logical swtch3;
+
+ static logical orgati;
+ static doublereal erretm, rhoinv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ December 23, 1999
+
+
+ Purpose
+ =======
+
+ This subroutine computes the I-th updated eigenvalue of a symmetric
+ rank-one modification to a diagonal matrix whose elements are
+    given in the array d.  It is assumed that
+
+ D(i) < D(j) for i < j
+
+ and that RHO > 0. This is arranged by the calling routine, and is
+ no loss in generality. The rank-one modified system is thus
+
+ diag( D ) + RHO * Z * Z_transpose.
+
+ where we assume the Euclidean norm of Z is 1.
+
+ The method consists of approximating the rational functions in the
+ secular equation by simpler interpolating rational functions.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The length of all arrays.
+
+ I (input) INTEGER
+ The index of the eigenvalue to be computed. 1 <= I <= N.
+
+ D (input) DOUBLE PRECISION array, dimension (N)
+ The original eigenvalues. It is assumed that they are in
+ order, D(I) < D(J) for I < J.
+
+ Z (input) DOUBLE PRECISION array, dimension (N)
+ The components of the updating vector.
+
+ DELTA (output) DOUBLE PRECISION array, dimension (N)
+ If N .ne. 1, DELTA contains (D(j) - lambda_I) in its j-th
+ component. If N = 1, then DELTA(1) = 1. The vector DELTA
+ contains the information necessary to construct the
+ eigenvectors.
+
+ RHO (input) DOUBLE PRECISION
+ The scalar in the symmetric updating formula.
+
+ DLAM (output) DOUBLE PRECISION
+ The computed lambda_I, the I-th updated eigenvalue.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ > 0: if INFO = 1, the updating process failed.
+
+ Internal Parameters
+ ===================
+
+ Logical variable ORGATI (origin-at-i?) is used for distinguishing
+ whether D(i) or D(i+1) is treated as the origin.
+
+ ORGATI = .true. origin at i
+ ORGATI = .false. origin at i+1
+
+ Logical variable SWTCH3 (switch-for-3-poles?) is for noting
+ if we are working with THREE poles!
+
+ MAXIT is the maximum number of iterations allowed for each
+ eigenvalue.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ren-Cang Li, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Since this routine is called in an inner loop, we do no argument
+ checking.
+
+ Quick return for N=1 and 2.
+*/
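+
+/*
+   Editorial note: in the shifted notation used below, the quantity W
+   maintained by this routine is the secular function
+
+       w(tau) = 1/RHO + sum_j Z(j)**2 / (D(j) - D(origin) - tau),
+
+   whose sign tells on which side of the root the current TAU lies.
+   For RHO > 0 and norm(Z) = 1 the roots interlace the poles:
+   D(1) < LAMBDA(1) < D(2) < ... < D(N) < LAMBDA(N) < D(N) + RHO.
+*/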
+
+ /* Parameter adjustments */
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n == 1) {
+
+/* Presumably, I=1 upon entry */
+
+ *dlam = d__[1] + *rho * z__[1] * z__[1];
+ delta[1] = 1.;
+ return 0;
+ }
+ if (*n == 2) {
+ dlaed5_(i__, &d__[1], &z__[1], &delta[1], rho, dlam);
+ return 0;
+ }
+
+/* Compute machine epsilon */
+
+ eps = EPSILON;
+ rhoinv = 1. / *rho;
+
+/* The case I = N */
+
+ if (*i__ == *n) {
+
+/* Initialize some basic variables */
+
+ ii = *n - 1;
+ niter = 1;
+
+/* Calculate initial guess */
+
+ midpt = *rho / 2.;
+
+/*
+ If ||Z||_2 is not one, then TEMP should be set to
+ RHO * ||Z||_2^2 / TWO
+*/
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - midpt;
+/* L10: */
+ }
+
+ psi = 0.;
+ i__1 = *n - 2;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / delta[j];
+/* L20: */
+ }
+
+ c__ = rhoinv + psi;
+ w = c__ + z__[ii] * z__[ii] / delta[ii] + z__[*n] * z__[*n] / delta[*
+ n];
+
+ if (w <= 0.) {
+ temp = z__[*n - 1] * z__[*n - 1] / (d__[*n] - d__[*n - 1] + *rho)
+ + z__[*n] * z__[*n] / *rho;
+ if (c__ <= temp) {
+ tau = *rho;
+ } else {
+ del = d__[*n] - d__[*n - 1];
+ a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]
+ ;
+ b = z__[*n] * z__[*n] * del;
+ if (a < 0.) {
+ tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+ }
+ }
+
+/*
+ It can be proved that
+ D(N)+RHO/2 <= LAMBDA(N) < D(N)+TAU <= D(N)+RHO
+*/
+
+ dltlb = midpt;
+ dltub = *rho;
+ } else {
+ del = d__[*n] - d__[*n - 1];
+ a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
+ b = z__[*n] * z__[*n] * del;
+ if (a < 0.) {
+ tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+ }
+
+/*
+ It can be proved that
+ D(N) < D(N)+TAU < LAMBDA(N) < D(N)+RHO/2
+*/
+
+ dltlb = 0.;
+ dltub = midpt;
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - tau;
+/* L30: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L40: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
+ + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ *dlam = d__[*i__] + tau;
+ goto L250;
+ }
+
+ if (w <= 0.) {
+ dltlb = max(dltlb,tau);
+ } else {
+ dltub = min(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
+ a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * (
+ dpsi + dphi);
+ b = delta[*n - 1] * delta[*n] * w;
+ if (c__ < 0.) {
+ c__ = abs(c__);
+ }
+ if (c__ == 0.) {
+/*
+ ETA = B/A
+ ETA = RHO - TAU
+*/
+ eta = dltub - tau;
+ } else if (a >= 0.) {
+ eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__
+ * 2.);
+ } else {
+ eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
+ );
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta > 0.) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = tau + eta;
+ if ((temp > dltub) || (temp < dltlb)) {
+ if (w < 0.) {
+ eta = (dltub - tau) / 2.;
+ } else {
+ eta = (dltlb - tau) / 2.;
+ }
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L50: */
+ }
+
+ tau += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L60: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
+ + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Main loop to update the values of the array DELTA */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 30; ++niter) {
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ *dlam = d__[*i__] + tau;
+ goto L250;
+ }
+
+ if (w <= 0.) {
+ dltlb = max(dltlb,tau);
+ } else {
+ dltub = min(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
+ a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] *
+ (dpsi + dphi);
+ b = delta[*n - 1] * delta[*n] * w;
+ if (a >= 0.) {
+ eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+ c__ * 2.);
+ } else {
+ eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(
+ d__1))));
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta > 0.) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = tau + eta;
+ if ((temp > dltub) || (temp < dltlb)) {
+ if (w < 0.) {
+ eta = (dltub - tau) / 2.;
+ } else {
+ eta = (dltlb - tau) / 2.;
+ }
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L70: */
+ }
+
+ tau += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L80: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (
+ dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+/* L90: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+ *dlam = d__[*i__] + tau;
+ goto L250;
+
+/* End for the case I = N */
+
+ } else {
+
+/* The case for I < N */
+
+ niter = 1;
+ ip1 = *i__ + 1;
+
+/* Calculate initial guess */
+
+ del = d__[ip1] - d__[*i__];
+ midpt = del / 2.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - midpt;
+/* L100: */
+ }
+
+ psi = 0.;
+ i__1 = *i__ - 1;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / delta[j];
+/* L110: */
+ }
+
+ phi = 0.;
+ i__1 = *i__ + 2;
+ for (j = *n; j >= i__1; --j) {
+ phi += z__[j] * z__[j] / delta[j];
+/* L120: */
+ }
+ c__ = rhoinv + psi + phi;
+ w = c__ + z__[*i__] * z__[*i__] / delta[*i__] + z__[ip1] * z__[ip1] /
+ delta[ip1];
+
+ if (w > 0.) {
+
+/*
+ d(i)< the ith eigenvalue < (d(i)+d(i+1))/2
+
+ We choose d(i) as origin.
+*/
+
+ orgati = TRUE_;
+ a = c__ * del + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
+ b = z__[*i__] * z__[*i__] * del;
+ if (a > 0.) {
+ tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+ d__1))));
+ } else {
+ tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+ c__ * 2.);
+ }
+ dltlb = 0.;
+ dltub = midpt;
+ } else {
+
+/*
+ (d(i)+d(i+1))/2 <= the ith eigenvalue < d(i+1)
+
+ We choose d(i+1) as origin.
+*/
+
+ orgati = FALSE_;
+ a = c__ * del - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
+ b = z__[ip1] * z__[ip1] * del;
+ if (a < 0.) {
+ tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs(
+ d__1))));
+ } else {
+ tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) /
+ (c__ * 2.);
+ }
+ dltlb = -midpt;
+ dltub = 0.;
+ }
+
+ if (orgati) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - tau;
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[ip1] - tau;
+/* L140: */
+ }
+ }
+ if (orgati) {
+ ii = *i__;
+ } else {
+ ii = *i__ + 1;
+ }
+ iim1 = ii - 1;
+ iip1 = ii + 1;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L150: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.;
+ phi = 0.;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L160: */
+ }
+
+ w = rhoinv + phi + psi;
+
+/*
+ W is the value of the secular function with
+ its ii-th element removed.
+*/
+
+ swtch3 = FALSE_;
+ if (orgati) {
+ if (w < 0.) {
+ swtch3 = TRUE_;
+ }
+ } else {
+ if (w > 0.) {
+ swtch3 = TRUE_;
+ }
+ }
+ if ((ii == 1) || (ii == *n)) {
+ swtch3 = FALSE_;
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w += temp;
+ erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. +
+ abs(tau) * dw;
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+ goto L250;
+ }
+
+ if (w <= 0.) {
+ dltlb = max(dltlb,tau);
+ } else {
+ dltub = min(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ if (! swtch3) {
+ if (orgati) {
+/* Computing 2nd power */
+ d__1 = z__[*i__] / delta[*i__];
+ c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (d__1 *
+ d__1);
+ } else {
+/* Computing 2nd power */
+ d__1 = z__[ip1] / delta[ip1];
+ c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * (d__1 *
+ d__1);
+ }
+ a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] *
+ dw;
+ b = delta[*i__] * delta[ip1] * w;
+ if (c__ == 0.) {
+ if (a == 0.) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + delta[ip1] * delta[ip1] *
+ (dpsi + dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + delta[*i__] * delta[*i__] *
+ (dpsi + dphi);
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.) {
+ eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+ c__ * 2.);
+ } else {
+ eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+ d__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ temp = rhoinv + psi + phi;
+ if (orgati) {
+ temp1 = z__[iim1] / delta[iim1];
+ temp1 *= temp1;
+ c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - d__[
+ iip1]) * temp1;
+ zz[0] = z__[iim1] * z__[iim1];
+ zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + dphi);
+ } else {
+ temp1 = z__[iip1] / delta[iip1];
+ temp1 *= temp1;
+ c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - d__[
+ iim1]) * temp1;
+ zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - temp1));
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ zz[1] = z__[ii] * z__[ii];
+ dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, info);
+ if (*info != 0) {
+ goto L250;
+ }
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta >= 0.) {
+ eta = -w / dw;
+ }
+ temp = tau + eta;
+ if ((temp > dltub) || (temp < dltlb)) {
+ if (w < 0.) {
+ eta = (dltub - tau) / 2.;
+ } else {
+ eta = (dltlb - tau) / 2.;
+ }
+ }
+
+ prew = w;
+
+/* L170: */
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L180: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L190: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.;
+ phi = 0.;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L200: */
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. + (
+ d__1 = tau + eta, abs(d__1)) * dw;
+
+ swtch = FALSE_;
+ if (orgati) {
+ if (-w > abs(prew) / 10.) {
+ swtch = TRUE_;
+ }
+ } else {
+ if (w > abs(prew) / 10.) {
+ swtch = TRUE_;
+ }
+ }
+
+ tau += eta;
+
+/* Main loop to update the values of the array DELTA */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 30; ++niter) {
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+ goto L250;
+ }
+
+ if (w <= 0.) {
+ dltlb = max(dltlb,tau);
+ } else {
+ dltub = min(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ if (! swtch3) {
+ if (! swtch) {
+ if (orgati) {
+/* Computing 2nd power */
+ d__1 = z__[*i__] / delta[*i__];
+ c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (
+ d__1 * d__1);
+ } else {
+/* Computing 2nd power */
+ d__1 = z__[ip1] / delta[ip1];
+ c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) *
+ (d__1 * d__1);
+ }
+ } else {
+ temp = z__[ii] / delta[ii];
+ if (orgati) {
+ dpsi += temp * temp;
+ } else {
+ dphi += temp * temp;
+ }
+ c__ = w - delta[*i__] * dpsi - delta[ip1] * dphi;
+ }
+ a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1]
+ * dw;
+ b = delta[*i__] * delta[ip1] * w;
+ if (c__ == 0.) {
+ if (a == 0.) {
+ if (! swtch) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + delta[ip1] *
+ delta[ip1] * (dpsi + dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + delta[*i__] * delta[
+ *i__] * (dpsi + dphi);
+ }
+ } else {
+ a = delta[*i__] * delta[*i__] * dpsi + delta[ip1]
+ * delta[ip1] * dphi;
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.) {
+ eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))))
+ / (c__ * 2.);
+ } else {
+ eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__,
+ abs(d__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ temp = rhoinv + psi + phi;
+ if (swtch) {
+ c__ = temp - delta[iim1] * dpsi - delta[iip1] * dphi;
+ zz[0] = delta[iim1] * delta[iim1] * dpsi;
+ zz[2] = delta[iip1] * delta[iip1] * dphi;
+ } else {
+ if (orgati) {
+ temp1 = z__[iim1] / delta[iim1];
+ temp1 *= temp1;
+ c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1]
+ - d__[iip1]) * temp1;
+ zz[0] = z__[iim1] * z__[iim1];
+ zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 +
+ dphi);
+ } else {
+ temp1 = z__[iip1] / delta[iip1];
+ temp1 *= temp1;
+ c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1]
+ - d__[iim1]) * temp1;
+ zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi -
+ temp1));
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ }
+ dlaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta,
+ info);
+ if (*info != 0) {
+ goto L250;
+ }
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta >= 0.) {
+ eta = -w / dw;
+ }
+ temp = tau + eta;
+ if ((temp > dltub) || (temp < dltlb)) {
+ if (w < 0.) {
+ eta = (dltub - tau) / 2.;
+ } else {
+ eta = (dltlb - tau) / 2.;
+ }
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L210: */
+ }
+
+ tau += eta;
+ prew = w;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L220: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.;
+ phi = 0.;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L230: */
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3.
+ + abs(tau) * dw;
+ if (w * prew > 0. && abs(w) > abs(prew) / 10.) {
+ swtch = ! swtch;
+ }
+
+/* L240: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+
+ }
+
+L250:
+
+ return 0;
+
+/* End of DLAED4 */
+
+} /* dlaed4_ */
+
+/* Subroutine */ int dlaed5_(integer *i__, doublereal *d__, doublereal *z__,
+ doublereal *delta, doublereal *rho, doublereal *dlam)
+{
+ /* System generated locals */
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal b, c__, w, del, tau, temp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ This subroutine computes the I-th eigenvalue of a symmetric rank-one
+ modification of a 2-by-2 diagonal matrix
+
+ diag( D ) + RHO * Z * transpose(Z) .
+
+ The diagonal elements in the array D are assumed to satisfy
+
+ D(i) < D(j) for i < j .
+
+ We also assume RHO > 0 and that the Euclidean norm of the vector
+ Z is one.
+
+ Arguments
+ =========
+
+ I (input) INTEGER
+ The index of the eigenvalue to be computed. I = 1 or I = 2.
+
+ D (input) DOUBLE PRECISION array, dimension (2)
+ The original eigenvalues. We assume D(1) < D(2).
+
+ Z (input) DOUBLE PRECISION array, dimension (2)
+ The components of the updating vector.
+
+ DELTA (output) DOUBLE PRECISION array, dimension (2)
+ The vector DELTA contains the information necessary
+ to construct the eigenvectors.
+
+ RHO (input) DOUBLE PRECISION
+ The scalar in the symmetric updating formula.
+
+ DLAM (output) DOUBLE PRECISION
+ The computed lambda_I, the I-th updated eigenvalue.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ren-Cang Li, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ del = d__[2] - d__[1];
+ if (*i__ == 1) {
+ w = *rho * 2. * (z__[2] * z__[2] - z__[1] * z__[1]) / del + 1.;
+ if (w > 0.) {
+ b = del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[1] * z__[1] * del;
+
+/* B > ZERO, always */
+
+ tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1))));
+ *dlam = d__[1] + tau;
+ delta[1] = -z__[1] / tau;
+ delta[2] = z__[2] / (del - tau);
+ } else {
+ b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[2] * z__[2] * del;
+ if (b > 0.) {
+ tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.));
+ } else {
+ tau = (b - sqrt(b * b + c__ * 4.)) / 2.;
+ }
+ *dlam = d__[2] + tau;
+ delta[1] = -z__[1] / (del + tau);
+ delta[2] = -z__[2] / tau;
+ }
+ temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
+ delta[1] /= temp;
+ delta[2] /= temp;
+ } else {
+
+/* Now I=2 */
+
+ b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[2] * z__[2] * del;
+ if (b > 0.) {
+ tau = (b + sqrt(b * b + c__ * 4.)) / 2.;
+ } else {
+ tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.));
+ }
+ *dlam = d__[2] + tau;
+ delta[1] = -z__[1] / (del + tau);
+ delta[2] = -z__[2] / tau;
+ temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
+ delta[1] /= temp;
+ delta[2] /= temp;
+ }
+ return 0;
+
+/* End of DLAED5 */
+
+} /* dlaed5_ */
+
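+/*
+   Editor's note (not part of the original LAPACK source): a minimal
+   sketch of driving DLAED5, assuming the f2c types used throughout
+   this file.  Both eigenvalues of diag(d) + rho*z*z' are requested;
+   the trace identity lam1 + lam2 = d(1) + d(2) + rho is a cheap
+   sanity check.  Excluded from compilation on purpose.
+*/
+#if 0
+static void dlaed5_example(void)
+{
+    doublereal d[2] = { 0., 1. };   /* d(1) < d(2), as required */
+    doublereal z[2] = { .6, .8 };   /* .36 + .64 = 1, so norm(z) = 1 */
+    doublereal rho = .5;            /* rho > 0, as required */
+    doublereal delta[2], lam1, lam2;
+    integer i;
+
+    i = 1;
+    dlaed5_(&i, d, z, delta, &rho, &lam1);
+    i = 2;
+    dlaed5_(&i, d, z, delta, &rho, &lam2);
+    /* Up to roundoff, lam1 + lam2 == d[0] + d[1] + rho == 1.5. */
+}
+#endif
+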
+/* Subroutine */ int dlaed6_(integer *kniter, logical *orgati, doublereal *
+ rho, doublereal *d__, doublereal *z__, doublereal *finit, doublereal *
+ tau, integer *info)
+{
+ /* Initialized data */
+
+ static logical first = TRUE_;
+
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal), log(doublereal), pow_di(doublereal *, integer *);
+
+ /* Local variables */
+ static doublereal a, b, c__, f;
+ static integer i__;
+ static doublereal fc, df, ddf, eta, eps, base;
+ static integer iter;
+ static doublereal temp, temp1, temp2, temp3, temp4;
+ static logical scale;
+ static integer niter;
+ static doublereal small1, small2, sminv1, sminv2;
+
+ static doublereal dscale[3], sclfac, zscale[3], erretm, sclinv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLAED6 computes the positive or negative root (closest to the origin)
+ of
+ z(1) z(2) z(3)
+ f(x) = rho + --------- + ---------- + ---------
+ d(1)-x d(2)-x d(3)-x
+
+ It is assumed that
+
+ if ORGATI = .true. the root is between d(2) and d(3);
+ otherwise it is between d(1) and d(2)
+
+ This routine will be called by DLAED4 when necessary. In most cases,
+ the root sought is the smallest in magnitude, though it might not be
+ in some extremely rare situations.
+
+ Arguments
+ =========
+
+ KNITER (input) INTEGER
+ Refer to DLAED4 for its significance.
+
+ ORGATI (input) LOGICAL
+ If ORGATI is true, the needed root is between d(2) and
+ d(3); otherwise it is between d(1) and d(2). See
+ DLAED4 for further details.
+
+ RHO (input) DOUBLE PRECISION
+ Refer to the equation f(x) above.
+
+ D (input) DOUBLE PRECISION array, dimension (3)
+ D satisfies d(1) < d(2) < d(3).
+
+ Z (input) DOUBLE PRECISION array, dimension (3)
+ Each of the elements in z must be positive.
+
+ FINIT (input) DOUBLE PRECISION
+           The value of f at 0.  It is more accurate than the value
+           that would be obtained by evaluating f inside this routine.
+
+ TAU (output) DOUBLE PRECISION
+ The root of the equation f(x).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ > 0: if INFO = 1, failure to converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ren-Cang Li, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+*/
+
+ /* Parameter adjustments */
+ --z__;
+ --d__;
+
+ /* Function Body */
+
+ *info = 0;
+
+ niter = 1;
+ *tau = 0.;
+ if (*kniter == 2) {
+ if (*orgati) {
+ temp = (d__[3] - d__[2]) / 2.;
+ c__ = *rho + z__[1] / (d__[1] - d__[2] - temp);
+ a = c__ * (d__[2] + d__[3]) + z__[2] + z__[3];
+ b = c__ * d__[2] * d__[3] + z__[2] * d__[3] + z__[3] * d__[2];
+ } else {
+ temp = (d__[1] - d__[2]) / 2.;
+ c__ = *rho + z__[3] / (d__[3] - d__[2] - temp);
+ a = c__ * (d__[1] + d__[2]) + z__[1] + z__[2];
+ b = c__ * d__[1] * d__[2] + z__[1] * d__[2] + z__[2] * d__[1];
+ }
+/* Computing MAX */
+ d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__);
+ temp = max(d__1,d__2);
+ a /= temp;
+ b /= temp;
+ c__ /= temp;
+ if (c__ == 0.) {
+ *tau = b / a;
+ } else if (a <= 0.) {
+ *tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+ c__ * 2.);
+ } else {
+ *tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))
+ ));
+ }
+ temp = *rho + z__[1] / (d__[1] - *tau) + z__[2] / (d__[2] - *tau) +
+ z__[3] / (d__[3] - *tau);
+ if (abs(*finit) <= abs(temp)) {
+ *tau = 0.;
+ }
+ }
+
+/*
+ On first call to routine, get machine parameters for
+ possible scaling to avoid overflow
+*/
+
+ if (first) {
+ eps = EPSILON;
+ base = BASE;
+ i__1 = (integer) (log(SAFEMINIMUM) / log(base) / 3.);
+ small1 = pow_di(&base, &i__1);
+ sminv1 = 1. / small1;
+ small2 = small1 * small1;
+ sminv2 = sminv1 * sminv1;
+ first = FALSE_;
+ }
+
+/*
+ Determine if scaling of inputs necessary to avoid overflow
+ when computing 1/TEMP**3
+*/
+
+ if (*orgati) {
+/* Computing MIN */
+ d__3 = (d__1 = d__[2] - *tau, abs(d__1)), d__4 = (d__2 = d__[3] - *
+ tau, abs(d__2));
+ temp = min(d__3,d__4);
+ } else {
+/* Computing MIN */
+ d__3 = (d__1 = d__[1] - *tau, abs(d__1)), d__4 = (d__2 = d__[2] - *
+ tau, abs(d__2));
+ temp = min(d__3,d__4);
+ }
+ scale = FALSE_;
+ if (temp <= small1) {
+ scale = TRUE_;
+ if (temp <= small2) {
+
+/* Scale up by power of radix nearest 1/SAFMIN**(2/3) */
+
+ sclfac = sminv2;
+ sclinv = small2;
+ } else {
+
+/* Scale up by power of radix nearest 1/SAFMIN**(1/3) */
+
+ sclfac = sminv1;
+ sclinv = small1;
+ }
+
+/* Scaling up safe because D, Z, TAU scaled elsewhere to be O(1) */
+
+ for (i__ = 1; i__ <= 3; ++i__) {
+ dscale[i__ - 1] = d__[i__] * sclfac;
+ zscale[i__ - 1] = z__[i__] * sclfac;
+/* L10: */
+ }
+ *tau *= sclfac;
+ } else {
+
+/* Copy D and Z to DSCALE and ZSCALE */
+
+ for (i__ = 1; i__ <= 3; ++i__) {
+ dscale[i__ - 1] = d__[i__];
+ zscale[i__ - 1] = z__[i__];
+/* L20: */
+ }
+ }
+
+ fc = 0.;
+ df = 0.;
+ ddf = 0.;
+ for (i__ = 1; i__ <= 3; ++i__) {
+ temp = 1. / (dscale[i__ - 1] - *tau);
+ temp1 = zscale[i__ - 1] * temp;
+ temp2 = temp1 * temp;
+ temp3 = temp2 * temp;
+ fc += temp1 / dscale[i__ - 1];
+ df += temp2;
+ ddf += temp3;
+/* L30: */
+ }
+ f = *finit + *tau * fc;
+
+ if (abs(f) <= 0.) {
+ goto L60;
+ }
+
+/*
+ Iteration begins
+
+ It is not hard to see that
+
+ 1) Iterations will go up monotonically
+ if FINIT < 0;
+
+ 2) Iterations will go down monotonically
+ if FINIT > 0.
+*/
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 20; ++niter) {
+
+ if (*orgati) {
+ temp1 = dscale[1] - *tau;
+ temp2 = dscale[2] - *tau;
+ } else {
+ temp1 = dscale[0] - *tau;
+ temp2 = dscale[1] - *tau;
+ }
+ a = (temp1 + temp2) * f - temp1 * temp2 * df;
+ b = temp1 * temp2 * f;
+ c__ = f - (temp1 + temp2) * df + temp1 * temp2 * ddf;
+/* Computing MAX */
+ d__1 = abs(a), d__2 = abs(b), d__1 = max(d__1,d__2), d__2 = abs(c__);
+ temp = max(d__1,d__2);
+ a /= temp;
+ b /= temp;
+ c__ /= temp;
+ if (c__ == 0.) {
+ eta = b / a;
+ } else if (a <= 0.) {
+ eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__
+ * 2.);
+ } else {
+ eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
+ );
+ }
+ if (f * eta >= 0.) {
+ eta = -f / df;
+ }
+
+ temp = eta + *tau;
+ if (*orgati) {
+ if (eta > 0. && temp >= dscale[2]) {
+ eta = (dscale[2] - *tau) / 2.;
+ }
+ if (eta < 0. && temp <= dscale[1]) {
+ eta = (dscale[1] - *tau) / 2.;
+ }
+ } else {
+ if (eta > 0. && temp >= dscale[1]) {
+ eta = (dscale[1] - *tau) / 2.;
+ }
+ if (eta < 0. && temp <= dscale[0]) {
+ eta = (dscale[0] - *tau) / 2.;
+ }
+ }
+ *tau += eta;
+
+ fc = 0.;
+ erretm = 0.;
+ df = 0.;
+ ddf = 0.;
+ for (i__ = 1; i__ <= 3; ++i__) {
+ temp = 1. / (dscale[i__ - 1] - *tau);
+ temp1 = zscale[i__ - 1] * temp;
+ temp2 = temp1 * temp;
+ temp3 = temp2 * temp;
+ temp4 = temp1 / dscale[i__ - 1];
+ fc += temp4;
+ erretm += abs(temp4);
+ df += temp2;
+ ddf += temp3;
+/* L40: */
+ }
+ f = *finit + *tau * fc;
+ erretm = (abs(*finit) + abs(*tau) * erretm) * 8. + abs(*tau) * df;
+ if (abs(f) <= eps * erretm) {
+ goto L60;
+ }
+/* L50: */
+ }
+ *info = 1;
+L60:
+
+/* Undo scaling */
+
+ if (scale) {
+ *tau *= sclinv;
+ }
+ return 0;
+
+/* End of DLAED6 */
+
+} /* dlaed6_ */
+
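+/*
+   Editor's note (not part of the original LAPACK source): the rational
+   function whose root DLAED6 returns, written out directly.  The TAU
+   produced above should make f3(rho, d, z, tau) vanish to within the
+   routine's tolerance, given d[0] < d[1] < d[2] and positive z as
+   documented.  A sketch, excluded from compilation on purpose.
+*/
+#if 0
+static doublereal f3(doublereal rho, const doublereal *d,
+                     const doublereal *z, doublereal x)
+{
+    return rho + z[0] / (d[0] - x)
+               + z[1] / (d[1] - x)
+               + z[2] / (d[2] - x);
+}
+#endif
+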
+/* Subroutine */ int dlaed7_(integer *icompq, integer *n, integer *qsiz,
+ integer *tlvls, integer *curlvl, integer *curpbm, doublereal *d__,
+ doublereal *q, integer *ldq, integer *indxq, doublereal *rho, integer
+ *cutpnt, doublereal *qstore, integer *qptr, integer *prmptr, integer *
+ perm, integer *givptr, integer *givcol, doublereal *givnum,
+ doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, k, n1, n2, is, iw, iz, iq2, ptr, ldq2, indx, curr;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ static integer indxc, indxp;
+ extern /* Subroutine */ int dlaed8_(integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *, integer *,
+ doublereal *, integer *, integer *, integer *), dlaed9_(integer *,
+ integer *, integer *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *), dlaeda_(integer *, integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *, doublereal
+ *, doublereal *, integer *, doublereal *, doublereal *, integer *)
+ ;
+ static integer idlmda;
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer *);
+ static integer coltyp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DLAED7 computes the updated eigensystem of a diagonal
+ matrix after modification by a rank-one symmetric matrix. This
+ routine is used only for the eigenproblem which requires all
+ eigenvalues and optionally eigenvectors of a dense symmetric matrix
+ that has been reduced to tridiagonal form. DLAED1 handles
+ the case in which all eigenvalues and eigenvectors of a symmetric
+ tridiagonal matrix are desired.
+
+ T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out)
+
+ where Z = Q'u, u is a vector of length N with ones in the
+    CUTPNT-th and (CUTPNT+1)-th elements and zeros elsewhere.
+
+ The eigenvectors of the original matrix are stored in Q, and the
+ eigenvalues are in D. The algorithm consists of three stages:
+
+ The first stage consists of deflating the size of the problem
+ when there are multiple eigenvalues or if there is a zero in
+    the Z vector. For each such occurrence the dimension of the
+ secular equation problem is reduced by one. This stage is
+ performed by the routine DLAED8.
+
+ The second stage consists of calculating the updated
+ eigenvalues. This is done by finding the roots of the secular
+ equation via the routine DLAED4 (as called by DLAED9).
+ This routine also calculates the eigenvectors of the current
+ problem.
+
+ The final stage consists of computing the updated eigenvectors
+ directly using the updated eigenvalues. The eigenvectors for
+ the current problem are multiplied with the eigenvectors from
+ the overall problem.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ = 0: Compute eigenvalues only.
+ = 1: Compute eigenvectors of original dense symmetric matrix
+ also. On entry, Q contains the orthogonal matrix used
+ to reduce the original matrix to tridiagonal form.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ QSIZ (input) INTEGER
+ The dimension of the orthogonal matrix used to reduce
+ the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1.
+
+ TLVLS (input) INTEGER
+ The total number of merging levels in the overall divide and
+ conquer tree.
+
+ CURLVL (input) INTEGER
+ The current level in the overall merge routine,
+ 0 <= CURLVL <= TLVLS.
+
+ CURPBM (input) INTEGER
+ The current problem in the current level in the overall
+ merge routine (counting from upper left to lower right).
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the eigenvalues of the rank-1-perturbed matrix.
+ On exit, the eigenvalues of the repaired matrix.
+
+ Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N)
+ On entry, the eigenvectors of the rank-1-perturbed matrix.
+ On exit, the eigenvectors of the repaired tridiagonal matrix.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ INDXQ (output) INTEGER array, dimension (N)
+ The permutation which will reintegrate the subproblem just
+ solved back into sorted order, i.e., D( INDXQ( I = 1, N ) )
+ will be in ascending order.
+
+ RHO (input) DOUBLE PRECISION
+ The subdiagonal element used to create the rank-1
+ modification.
+
+ CUTPNT (input) INTEGER
+ Contains the location of the last eigenvalue in the leading
+ sub-matrix. min(1,N) <= CUTPNT <= N.
+
+ QSTORE (input/output) DOUBLE PRECISION array, dimension (N**2+1)
+ Stores eigenvectors of submatrices encountered during
+ divide and conquer, packed together. QPTR points to
+ beginning of the submatrices.
+
+ QPTR (input/output) INTEGER array, dimension (N+2)
+ List of indices pointing to beginning of submatrices stored
+ in QSTORE. The submatrices are numbered starting at the
+ bottom left of the divide and conquer tree, from left to
+ right and bottom to top.
+
+ PRMPTR (input) INTEGER array, dimension (N lg N)
+ Contains a list of pointers which indicate where in PERM a
+ level's permutation is stored. PRMPTR(i+1) - PRMPTR(i)
+ indicates the size of the permutation and also the size of
+ the full, non-deflated problem.
+
+ PERM (input) INTEGER array, dimension (N lg N)
+ Contains the permutations (from deflation and sorting) to be
+ applied to each eigenblock.
+
+ GIVPTR (input) INTEGER array, dimension (N lg N)
+ Contains a list of pointers which indicate where in GIVCOL a
+ level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i)
+ indicates the number of Givens rotations.
+
+ GIVCOL (input) INTEGER array, dimension (2, N lg N)
+           Each pair of numbers indicates a pair of columns involved
+           in a Givens rotation.
+
+ GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N)
+ Each number indicates the S value to be used in the
+ corresponding Givens rotation.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (3*N+QSIZ*N)
+
+ IWORK (workspace) INTEGER array, dimension (4*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, an eigenvalue did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --qstore;
+ --qptr;
+ --prmptr;
+ --perm;
+ --givptr;
+ givcol -= 3;
+ givnum -= 3;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*icompq == 1 && *qsiz < *n) {
+ *info = -4;
+ } else if (*ldq < max(1,*n)) {
+ *info = -9;
+ } else if ((min(1,*n) > *cutpnt) || (*n < *cutpnt)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED7", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/*
+ The following values are for bookkeeping purposes only. They are
+ integer pointers which indicate the portion of the workspace
+ used by a particular array in DLAED8 and DLAED9.
+*/
+
+ if (*icompq == 1) {
+ ldq2 = *qsiz;
+ } else {
+ ldq2 = *n;
+ }
+
+ iz = 1;
+ idlmda = iz + *n;
+ iw = idlmda + *n;
+ iq2 = iw + *n;
+ is = iq2 + *n * ldq2;
+
+ indx = 1;
+ indxc = indx + *n;
+ coltyp = indxc + *n;
+ indxp = coltyp + *n;
+
+/*
+ Form the z-vector which consists of the last row of Q_1 and the
+ first row of Q_2.
+*/
+
+ ptr = pow_ii(&c__2, tlvls) + 1;
+ i__1 = *curlvl - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *tlvls - i__;
+ ptr += pow_ii(&c__2, &i__2);
+/* L10: */
+ }
+ curr = ptr + *curpbm;
+ dlaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], &
+ givcol[3], &givnum[3], &qstore[1], &qptr[1], &work[iz], &work[iz
+ + *n], info);
+
+/*
+ When solving the final problem, we no longer need the stored data,
+ so we will overwrite the data from this level onto the previously
+ used storage space.
+*/
+
+ if (*curlvl == *tlvls) {
+ qptr[curr] = 1;
+ prmptr[curr] = 1;
+ givptr[curr] = 1;
+ }
+
+/* Sort and Deflate eigenvalues. */
+
+ dlaed8_(icompq, &k, n, qsiz, &d__[1], &q[q_offset], ldq, &indxq[1], rho,
+ cutpnt, &work[iz], &work[idlmda], &work[iq2], &ldq2, &work[iw], &
+ perm[prmptr[curr]], &givptr[curr + 1], &givcol[((givptr[curr]) <<
+ (1)) + 1], &givnum[((givptr[curr]) << (1)) + 1], &iwork[indxp], &
+ iwork[indx], info);
+ prmptr[curr + 1] = prmptr[curr] + *n;
+ givptr[curr + 1] += givptr[curr];
+
+/* Solve Secular Equation. */
+
+ if (k != 0) {
+ dlaed9_(&k, &c__1, &k, n, &d__[1], &work[is], &k, rho, &work[idlmda],
+ &work[iw], &qstore[qptr[curr]], &k, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (*icompq == 1) {
+ dgemm_("N", "N", qsiz, &k, &k, &c_b2865, &work[iq2], &ldq2, &
+ qstore[qptr[curr]], &k, &c_b2879, &q[q_offset], ldq);
+ }
+/* Computing 2nd power */
+ i__1 = k;
+ qptr[curr + 1] = qptr[curr] + i__1 * i__1;
+
+/* Prepare the INDXQ sorting permutation. */
+
+ n1 = k;
+ n2 = *n - k;
+ dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+ } else {
+ qptr[curr + 1] = qptr[curr];
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indxq[i__] = i__;
+/* L20: */
+ }
+ }
+
+L30:
+ return 0;
+
+/* End of DLAED7 */
+
+} /* dlaed7_ */
+
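+/*
+   Editor's note (not part of the original LAPACK source): DLAED7 locates
+   the current subproblem in the packed divide and conquer storage by
+   walking the binary tree, as in the PTR/CURR computation above.  A
+   sketch of that arithmetic alone, with shifts standing in for pow_ii;
+   excluded from compilation on purpose.
+*/
+#if 0
+static integer tree_slot(integer tlvls, integer curlvl, integer curpbm)
+{
+    /* Start just past the first 2^TLVLS slots, skip 2^(TLVLS-i) slots
+       for each level i already merged, then take the CURPBM-th entry. */
+    integer i, ptr = (1 << tlvls) + 1;
+    for (i = 1; i <= curlvl - 1; ++i) {
+        ptr += 1 << (tlvls - i);
+    }
+    return ptr + curpbm;
+}
+#endif
+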
+/* Subroutine */ int dlaed8_(integer *icompq, integer *k, integer *n, integer
+ *qsiz, doublereal *d__, doublereal *q, integer *ldq, integer *indxq,
+ doublereal *rho, integer *cutpnt, doublereal *z__, doublereal *dlamda,
+ doublereal *q2, integer *ldq2, doublereal *w, integer *perm, integer
+ *givptr, integer *givcol, doublereal *givnum, integer *indxp, integer
+ *indx, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal c__;
+ static integer i__, j;
+ static doublereal s, t;
+ static integer k2, n1, n2, jp, n1p1;
+ static doublereal eps, tau, tol;
+ static integer jlam, imax, jmax;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *), dscal_(
+ integer *, doublereal *, doublereal *, integer *), dcopy_(integer
+ *, doublereal *, integer *, doublereal *, integer *);
+
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), dlacpy_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DLAED8 merges the two sets of eigenvalues together into a single
+ sorted set. Then it tries to deflate the size of the problem.
+ There are two ways in which deflation can occur: when two or more
+ eigenvalues are close together or if there is a tiny element in the
+ Z vector. For each such occurrence the order of the related secular
+ equation problem is reduced by one.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ = 0: Compute eigenvalues only.
+ = 1: Compute eigenvectors of original dense symmetric matrix
+ also. On entry, Q contains the orthogonal matrix used
+ to reduce the original matrix to tridiagonal form.
+
+ K (output) INTEGER
+ The number of non-deflated eigenvalues, and the order of the
+ related secular equation.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ QSIZ (input) INTEGER
+ The dimension of the orthogonal matrix used to reduce
+ the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the eigenvalues of the two submatrices to be
+ combined. On exit, the trailing (N-K) updated eigenvalues
+ (those which were deflated) sorted into increasing order.
+
+ Q (input/output) DOUBLE PRECISION array, dimension (LDQ,N)
+ If ICOMPQ = 0, Q is not referenced. Otherwise,
+ on entry, Q contains the eigenvectors of the partially solved
+ system which has been previously updated in matrix
+ multiplies with other partially solved eigensystems.
+ On exit, Q contains the trailing (N-K) updated eigenvectors
+ (those which were deflated) in its last N-K columns.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ INDXQ (input) INTEGER array, dimension (N)
+ The permutation which separately sorts the two sub-problems
+ in D into ascending order. Note that elements in the second
+ half of this permutation must first have CUTPNT added to
+ their values in order to be accurate.
+
+ RHO (input/output) DOUBLE PRECISION
+ On entry, the off-diagonal element associated with the rank-1
+ cut which originally split the two submatrices which are now
+ being recombined.
+ On exit, RHO has been modified to the value required by
+ DLAED3.
+
+ CUTPNT (input) INTEGER
+ The location of the last eigenvalue in the leading
+ sub-matrix. min(1,N) <= CUTPNT <= N.
+
+ Z (input) DOUBLE PRECISION array, dimension (N)
+ On entry, Z contains the updating vector (the last row of
+ the first sub-eigenvector matrix and the first row of the
+ second sub-eigenvector matrix).
+ On exit, the contents of Z are destroyed by the updating
+ process.
+
+ DLAMDA (output) DOUBLE PRECISION array, dimension (N)
+ A copy of the first K eigenvalues which will be used by
+ DLAED3 to form the secular equation.
+
+ Q2 (output) DOUBLE PRECISION array, dimension (LDQ2,N)
+ If ICOMPQ = 0, Q2 is not referenced. Otherwise,
+ a copy of the first K eigenvectors which will be used by
+ DLAED7 in a matrix multiply (DGEMM) to update the new
+ eigenvectors.
+
+ LDQ2 (input) INTEGER
+ The leading dimension of the array Q2. LDQ2 >= max(1,N).
+
+ W (output) DOUBLE PRECISION array, dimension (N)
+           The first K values of the final deflation-altered z-vector,
+           which will be passed to DLAED3.
+
+ PERM (output) INTEGER array, dimension (N)
+ The permutations (from deflation and sorting) to be applied
+ to each eigenblock.
+
+ GIVPTR (output) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem.
+
+ GIVCOL (output) INTEGER array, dimension (2, N)
+           Each pair of numbers indicates a pair of columns involved
+           in a Givens rotation.
+
+ GIVNUM (output) DOUBLE PRECISION array, dimension (2, N)
+ Each number indicates the S value to be used in the
+ corresponding Givens rotation.
+
+ INDXP (workspace) INTEGER array, dimension (N)
+ The permutation used to place deflated values of D at the end
+ of the array. INDXP(1:K) points to the nondeflated D-values
+ and INDXP(K+1:N) points to the deflated eigenvalues.
+
+ INDX (workspace) INTEGER array, dimension (N)
+ The permutation used to sort the contents of D into ascending
+ order.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --z__;
+ --dlamda;
+ q2_dim1 = *ldq2;
+ q2_offset = 1 + q2_dim1;
+ q2 -= q2_offset;
+ --w;
+ --perm;
+ givcol -= 3;
+ givnum -= 3;
+ --indxp;
+ --indx;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*icompq == 1 && *qsiz < *n) {
+ *info = -4;
+ } else if (*ldq < max(1,*n)) {
+ *info = -7;
+ } else if ((*cutpnt < min(1,*n)) || (*cutpnt > *n)) {
+ *info = -10;
+ } else if (*ldq2 < max(1,*n)) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED8", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ n1 = *cutpnt;
+ n2 = *n - n1;
+ n1p1 = n1 + 1;
+
+ if (*rho < 0.) {
+ dscal_(&n2, &c_b3001, &z__[n1p1], &c__1);
+ }
+
+/* Normalize z so that norm(z) = 1 */
+
+ t = 1. / sqrt(2.);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ indx[j] = j;
+/* L10: */
+ }
+ dscal_(n, &t, &z__[1], &c__1);
+ *rho = (d__1 = *rho * 2., abs(d__1));
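+
+/*
+   Editor's note: Z is the concatenation of two unit vectors, so its
+   2-norm is sqrt(2); the scaling above restores norm(z) = 1, and the
+   factor of 2 (together with the earlier sign flip when RHO < 0) is
+   absorbed into RHO.
+*/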
+
+/* Sort the eigenvalues into increasing order */
+
+ i__1 = *n;
+ for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) {
+ indxq[i__] += *cutpnt;
+/* L20: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = d__[indxq[i__]];
+ w[i__] = z__[indxq[i__]];
+/* L30: */
+ }
+ i__ = 1;
+ j = *cutpnt + 1;
+ dlamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = dlamda[indx[i__]];
+ z__[i__] = w[indx[i__]];
+/* L40: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ imax = idamax_(n, &z__[1], &c__1);
+ jmax = idamax_(n, &d__[1], &c__1);
+ eps = EPSILON;
+ tol = eps * 8. * (d__1 = d__[jmax], abs(d__1));
+
+/*
+ If the rank-1 modifier is small enough, no more needs to be done
+ except to reorganize Q so that its columns correspond with the
+ elements in D.
+*/
+
+ if (*rho * (d__1 = z__[imax], abs(d__1)) <= tol) {
+ *k = 0;
+ if (*icompq == 0) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ perm[j] = indxq[indx[j]];
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ perm[j] = indxq[indx[j]];
+ dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1
+ + 1], &c__1);
+/* L60: */
+ }
+ dlacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq);
+ }
+ return 0;
+ }
+
+/*
+ If there are multiple eigenvalues then the problem deflates. Here
+    the number of equal eigenvalues is found. As each equal
+ eigenvalue is found, an elementary reflector is computed to rotate
+ the corresponding eigensubspace so that the corresponding
+ components of Z are zero in this new basis.
+*/
+
+ *k = 0;
+ *givptr = 0;
+ k2 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ if (j == *n) {
+ goto L110;
+ }
+ } else {
+ jlam = j;
+ goto L80;
+ }
+/* L70: */
+ }
+L80:
+ ++j;
+ if (j > *n) {
+ goto L100;
+ }
+ if (*rho * (d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+ s = z__[jlam];
+ c__ = z__[j];
+
+/*
+ Find sqrt(a**2+b**2) without overflow or
+ destructive underflow.
+*/
+
+ tau = dlapy2_(&c__, &s);
+ t = d__[j] - d__[jlam];
+ c__ /= tau;
+ s = -s / tau;
+ if ((d__1 = t * c__ * s, abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ z__[j] = tau;
+ z__[jlam] = 0.;
+
+/* Record the appropriate Givens rotation */
+
+ ++(*givptr);
+ givcol[((*givptr) << (1)) + 1] = indxq[indx[jlam]];
+ givcol[((*givptr) << (1)) + 2] = indxq[indx[j]];
+ givnum[((*givptr) << (1)) + 1] = c__;
+ givnum[((*givptr) << (1)) + 2] = s;
+ if (*icompq == 1) {
+ drot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[
+ indxq[indx[j]] * q_dim1 + 1], &c__1, &c__, &s);
+ }
+ t = d__[jlam] * c__ * c__ + d__[j] * s * s;
+ d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__;
+ d__[jlam] = t;
+ --k2;
+ i__ = 1;
+L90:
+ if (k2 + i__ <= *n) {
+ if (d__[jlam] < d__[indxp[k2 + i__]]) {
+ indxp[k2 + i__ - 1] = indxp[k2 + i__];
+ indxp[k2 + i__] = jlam;
+ ++i__;
+ goto L90;
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ jlam = j;
+ } else {
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+ jlam = j;
+ }
+ }
+ goto L80;
+L100:
+
+/* Record the last eigenvalue. */
+
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+
+L110:
+
+/*
+ Sort the eigenvalues and corresponding eigenvectors into DLAMDA
+ and Q2 respectively. The eigenvalues/vectors which were not
+ deflated go into the first K slots of DLAMDA and Q2 respectively,
+ while those which were deflated go into the last N - K slots.
+*/
+
+ if (*icompq == 0) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jp = indxp[j];
+ dlamda[j] = d__[jp];
+ perm[j] = indxq[indx[jp]];
+/* L120: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jp = indxp[j];
+ dlamda[j] = d__[jp];
+ perm[j] = indxq[indx[jp]];
+ dcopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1]
+ , &c__1);
+/* L130: */
+ }
+ }
+
+/*
+ The deflated eigenvalues and their corresponding vectors go back
+ into the last N - K slots of D and Q respectively.
+*/
+
+ if (*k < *n) {
+ if (*icompq == 0) {
+ i__1 = *n - *k;
+ dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ } else {
+ i__1 = *n - *k;
+ dcopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ i__1 = *n - *k;
+ dlacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*
+ k + 1) * q_dim1 + 1], ldq);
+ }
+ }
+
+ return 0;
+
+/* End of DLAED8 */
+
+} /* dlaed8_ */
+
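+/*
+   Editor's note (not part of the original LAPACK source): the pairwise
+   deflation test used above, in isolation.  A Givens rotation is chosen
+   to zero one of two z-components; deflation is accepted when the
+   off-diagonal element |T*C*S| that rotation would create stays below
+   TOL.  abs is the generic macro from f2c.h.  A sketch, excluded from
+   compilation on purpose.
+*/
+#if 0
+static logical can_deflate_pair(doublereal zj, doublereal zjlam,
+        doublereal dj, doublereal djlam, doublereal tol)
+{
+    extern doublereal dlapy2_(doublereal *, doublereal *);
+    doublereal c = zj, s = zjlam, off;
+    doublereal tau = dlapy2_(&c, &s);   /* sqrt(c*c+s*s), overflow-safe */
+    doublereal t = dj - djlam;
+
+    c /= tau;
+    s = -s / tau;
+    off = t * c * s;
+    return abs(off) <= tol;
+}
+#endif
+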
+/* Subroutine */ int dlaed9_(integer *k, integer *kstart, integer *kstop,
+ integer *n, doublereal *d__, doublereal *q, integer *ldq, doublereal *
+ rho, doublereal *dlamda, doublereal *w, doublereal *s, integer *lds,
+ integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, s_dim1, s_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer i__, j;
+ static doublereal temp;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlaed4_(integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DLAED9 finds the roots of the secular equation, as defined by the
+ values in D, Z, and RHO, between KSTART and KSTOP. It makes the
+ appropriate calls to DLAED4 and then stores the new matrix of
+ eigenvectors for use in calculating the next level of Z vectors.
+
+ Arguments
+ =========
+
+ K (input) INTEGER
+ The number of terms in the rational function to be solved by
+ DLAED4. K >= 0.
+
+ KSTART (input) INTEGER
+ KSTOP (input) INTEGER
+ The updated eigenvalues Lambda(I), KSTART <= I <= KSTOP
+ are to be computed. 1 <= KSTART <= KSTOP <= K.
+
+ N (input) INTEGER
+ The number of rows and columns in the Q matrix.
+           N >= K (deflation may result in N > K).
+
+ D (output) DOUBLE PRECISION array, dimension (N)
+ D(I) contains the updated eigenvalues
+ for KSTART <= I <= KSTOP.
+
+ Q (workspace) DOUBLE PRECISION array, dimension (LDQ,N)
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max( 1, N ).
+
+ RHO (input) DOUBLE PRECISION
+ The value of the parameter in the rank one update equation.
+ RHO >= 0 required.
+
+ DLAMDA (input) DOUBLE PRECISION array, dimension (K)
+ The first K elements of this array contain the old roots
+ of the deflated updating problem. These are the poles
+ of the secular equation.
+
+ W (input) DOUBLE PRECISION array, dimension (K)
+ The first K elements of this array contain the components
+ of the deflation-adjusted updating vector.
+
+ S (output) DOUBLE PRECISION array, dimension (LDS, K)
+ Will contain the eigenvectors of the repaired matrix which
+ will be stored for subsequent Z vector calculation and
+ multiplied by the previously accumulated eigenvectors
+ to update the system.
+
+ LDS (input) INTEGER
+ The leading dimension of S. LDS >= max( 1, K ).
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, an eigenvalue did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --dlamda;
+ --w;
+ s_dim1 = *lds;
+ s_offset = 1 + s_dim1;
+ s -= s_offset;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*k < 0) {
+ *info = -1;
+ } else if ((*kstart < 1) || (*kstart > max(1,*k))) {
+ *info = -2;
+ } else if ((max(1,*kstop) < *kstart) || (*kstop > max(1,*k))) {
+ *info = -3;
+ } else if (*n < *k) {
+ *info = -4;
+ } else if (*ldq < max(1,*k)) {
+ *info = -7;
+ } else if (*lds < max(1,*k)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAED9", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 0) {
+ return 0;
+ }
+
+/*
+ Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can
+ be computed with high relative accuracy (barring over/underflow).
+ This is a problem on machines without a guard digit in
+ add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+ The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I),
+ which on any of these machines zeros out the bottommost
+ bit of DLAMDA(I) if it is 1; this makes the subsequent
+ subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation
+ occurs. On binary machines with a guard digit (almost all
+ machines) it does not change DLAMDA(I) at all. On hexadecimal
+ and decimal machines with a guard digit, it slightly
+ changes the bottommost bits of DLAMDA(I). It does not account
+ for hexadecimal or decimal machines without guard digits
+ (we know of none). We use a subroutine call to compute
+    2*DLAMDA(I) to prevent optimizing compilers from eliminating
+ this code.
+*/
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = dlamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
+/* L10: */
+ }
+
+ i__1 = *kstop;
+ for (j = *kstart; j <= i__1; ++j) {
+ dlaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
+ info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ goto L120;
+ }
+/* L20: */
+ }
+
+ if ((*k == 1) || (*k == 2)) {
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *k;
+ for (j = 1; j <= i__2; ++j) {
+ s[j + i__ * s_dim1] = q[j + i__ * q_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ goto L120;
+ }
+
+/* Compute updated W. */
+
+ dcopy_(k, &w[1], &c__1, &s[s_offset], &c__1);
+
+/* Initialize W(I) = Q(I,I) */
+
+ i__1 = *ldq + 1;
+ dcopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L50: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L60: */
+ }
+/* L70: */
+ }
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__1 = sqrt(-w[i__]);
+ w[i__] = d_sign(&d__1, &s[i__ + s_dim1]);
+/* L80: */
+ }
+
+/* Compute eigenvectors of the modified rank-1 modification. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ q[i__ + j * q_dim1] = w[i__] / q[i__ + j * q_dim1];
+/* L90: */
+ }
+ temp = dnrm2_(k, &q[j * q_dim1 + 1], &c__1);
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ s[i__ + j * s_dim1] = q[i__ + j * q_dim1] / temp;
+/* L100: */
+ }
+/* L110: */
+ }
+
+L120:
+ return 0;
+
+/* End of DLAED9 */
+
+} /* dlaed9_ */
+
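+/*
+   Editor's note (not part of the original LAPACK source): the guard
+   digit trick above in its smallest form.  Routing 2*X through DLAMC3,
+   which merely adds its operands, keeps an optimizing compiler from
+   folding (X + X) - X back to X, so the addition and subtraction are
+   really performed and the bottom bit of X is flushed where that
+   matters.  Excluded from compilation on purpose.
+*/
+#if 0
+static doublereal force_round(doublereal x)
+{
+    extern doublereal dlamc3_(doublereal *, doublereal *);
+    return dlamc3_(&x, &x) - x;   /* equals x on guard-digit machines */
+}
+#endif
+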
+/* Subroutine */ int dlaeda_(integer *n, integer *tlvls, integer *curlvl,
+ integer *curpbm, integer *prmptr, integer *perm, integer *givptr,
+ integer *givcol, doublereal *givnum, doublereal *q, integer *qptr,
+ doublereal *z__, doublereal *ztemp, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, k, mid, ptr;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ static integer curr, bsiz1, bsiz2, psiz1, psiz2, zptr1;
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dcopy_(integer *,
+ doublereal *, integer *, doublereal *, integer *), xerbla_(char *,
+ integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DLAEDA computes the Z vector corresponding to the merge step in the
+ CURLVLth step of the merge process with TLVLS steps for the CURPBMth
+ problem.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ TLVLS (input) INTEGER
+ The total number of merging levels in the overall divide and
+ conquer tree.
+
+ CURLVL (input) INTEGER
+ The current level in the overall merge routine,
+ 0 <= curlvl <= tlvls.
+
+ CURPBM (input) INTEGER
+ The current problem in the current level in the overall
+ merge routine (counting from upper left to lower right).
+
+ PRMPTR (input) INTEGER array, dimension (N lg N)
+ Contains a list of pointers which indicate where in PERM a
+ level's permutation is stored. PRMPTR(i+1) - PRMPTR(i)
+ indicates the size of the permutation and incidentally the
+ size of the full, non-deflated problem.
+
+ PERM (input) INTEGER array, dimension (N lg N)
+ Contains the permutations (from deflation and sorting) to be
+ applied to each eigenblock.
+
+ GIVPTR (input) INTEGER array, dimension (N lg N)
+ Contains a list of pointers which indicate where in GIVCOL a
+ level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i)
+ indicates the number of Givens rotations.
+
+ GIVCOL (input) INTEGER array, dimension (2, N lg N)
+           Each pair of numbers indicates a pair of columns involved
+           in a Givens rotation.
+
+ GIVNUM (input) DOUBLE PRECISION array, dimension (2, N lg N)
+ Each number indicates the S value to be used in the
+ corresponding Givens rotation.
+
+ Q (input) DOUBLE PRECISION array, dimension (N**2)
+ Contains the square eigenblocks from previous levels, the
+ starting positions for blocks are given by QPTR.
+
+ QPTR (input) INTEGER array, dimension (N+2)
+ Contains a list of pointers which indicate where in Q an
+ eigenblock is stored. SQRT( QPTR(i+1) - QPTR(i) ) indicates
+ the size of the block.
+
+ Z (output) DOUBLE PRECISION array, dimension (N)
+ On output this vector contains the updating vector (the last
+ row of the first sub-eigenvector matrix and the first row of
+ the second sub-eigenvector matrix).
+
+ ZTEMP (workspace) DOUBLE PRECISION array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --ztemp;
+ --z__;
+ --qptr;
+ --q;
+ givnum -= 3;
+ givcol -= 3;
+ --givptr;
+ --perm;
+ --prmptr;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -1;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAEDA", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine location of first number in second half. */
+
+ mid = *n / 2 + 1;
+
+/* Gather last/first rows of appropriate eigenblocks into center of Z */
+
+ ptr = 1;
+
+/*
+ Determine location of lowest level subproblem in the full storage
+ scheme
+*/
+
+ i__1 = *curlvl - 1;
+ curr = ptr + *curpbm * pow_ii(&c__2, curlvl) + pow_ii(&c__2, &i__1) - 1;
+
+/*
+ Determine size of these matrices. We add HALF to the value of
+ the SQRT in case the machine underestimates one of these square
+ roots.
+*/
+
+ bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) + .5);
+ bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1])) +
+ .5);
+ i__1 = mid - bsiz1 - 1;
+ for (k = 1; k <= i__1; ++k) {
+ z__[k] = 0.;
+/* L10: */
+ }
+ dcopy_(&bsiz1, &q[qptr[curr] + bsiz1 - 1], &bsiz1, &z__[mid - bsiz1], &
+ c__1);
+ dcopy_(&bsiz2, &q[qptr[curr + 1]], &bsiz2, &z__[mid], &c__1);
+ i__1 = *n;
+ for (k = mid + bsiz2; k <= i__1; ++k) {
+ z__[k] = 0.;
+/* L20: */
+ }
+
+/*
+   Loop through remaining levels 1 -> CURLVL applying the Givens
+ rotations and permutation and then multiplying the center matrices
+ against the current Z.
+*/
+
+ ptr = pow_ii(&c__2, tlvls) + 1;
+ i__1 = *curlvl - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = *curlvl - k;
+ i__3 = *curlvl - k - 1;
+ curr = ptr + *curpbm * pow_ii(&c__2, &i__2) + pow_ii(&c__2, &i__3) -
+ 1;
+ psiz1 = prmptr[curr + 1] - prmptr[curr];
+ psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
+ zptr1 = mid - psiz1;
+
+/* Apply Givens at CURR and CURR+1 */
+
+ i__2 = givptr[curr + 1] - 1;
+ for (i__ = givptr[curr]; i__ <= i__2; ++i__) {
+ drot_(&c__1, &z__[zptr1 + givcol[((i__) << (1)) + 1] - 1], &c__1,
+ &z__[zptr1 + givcol[((i__) << (1)) + 2] - 1], &c__1, &
+ givnum[((i__) << (1)) + 1], &givnum[((i__) << (1)) + 2]);
+/* L30: */
+ }
+ i__2 = givptr[curr + 2] - 1;
+ for (i__ = givptr[curr + 1]; i__ <= i__2; ++i__) {
+ drot_(&c__1, &z__[mid - 1 + givcol[((i__) << (1)) + 1]], &c__1, &
+ z__[mid - 1 + givcol[((i__) << (1)) + 2]], &c__1, &givnum[
+ ((i__) << (1)) + 1], &givnum[((i__) << (1)) + 2]);
+/* L40: */
+ }
+ psiz1 = prmptr[curr + 1] - prmptr[curr];
+ psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
+ i__2 = psiz1 - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ ztemp[i__ + 1] = z__[zptr1 + perm[prmptr[curr] + i__] - 1];
+/* L50: */
+ }
+ i__2 = psiz2 - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ ztemp[psiz1 + i__ + 1] = z__[mid + perm[prmptr[curr + 1] + i__] -
+ 1];
+/* L60: */
+ }
+
+/*
+ Multiply Blocks at CURR and CURR+1
+
+ Determine size of these matrices. We add HALF to the value of
+ the SQRT in case the machine underestimates one of these
+ square roots.
+*/
+
+ bsiz1 = (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) +
+ .5);
+ bsiz2 = (integer) (sqrt((doublereal) (qptr[curr + 2] - qptr[curr + 1])
+ ) + .5);
+ if (bsiz1 > 0) {
+ dgemv_("T", &bsiz1, &bsiz1, &c_b2865, &q[qptr[curr]], &bsiz1, &
+ ztemp[1], &c__1, &c_b2879, &z__[zptr1], &c__1);
+ }
+ i__2 = psiz1 - bsiz1;
+ dcopy_(&i__2, &ztemp[bsiz1 + 1], &c__1, &z__[zptr1 + bsiz1], &c__1);
+ if (bsiz2 > 0) {
+ dgemv_("T", &bsiz2, &bsiz2, &c_b2865, &q[qptr[curr + 1]], &bsiz2,
+ &ztemp[psiz1 + 1], &c__1, &c_b2879, &z__[mid], &c__1);
+ }
+ i__2 = psiz2 - bsiz2;
+ dcopy_(&i__2, &ztemp[psiz1 + bsiz2 + 1], &c__1, &z__[mid + bsiz2], &
+ c__1);
+
+ i__2 = *tlvls - k;
+ ptr += pow_ii(&c__2, &i__2);
+/* L70: */
+ }
+
+ return 0;
+
+/* End of DLAEDA */
+
+} /* dlaeda_ */
+
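+/*
+   Editor's note (not part of the original LAPACK source): eigenblocks in
+   Q are square and stored contiguously, so DLAEDA recovers a block's
+   order as the square root of its entry count, adding one half before
+   truncating in case the machine sqrt comes out slightly low.  A sketch
+   of that computation, excluded from compilation on purpose.
+*/
+#if 0
+static integer block_order(const integer *qptr, integer curr)
+{
+    double sqrt(doublereal);
+    return (integer) (sqrt((doublereal) (qptr[curr + 1] - qptr[curr])) + .5);
+}
+#endif
+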
+/* Subroutine */ int dlaev2_(doublereal *a, doublereal *b, doublereal *c__,
+ doublereal *rt1, doublereal *rt2, doublereal *cs1, doublereal *sn1)
+{
+ /* System generated locals */
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal ab, df, cs, ct, tb, sm, tn, rt, adf, acs;
+ static integer sgn1, sgn2;
+ static doublereal acmn, acmx;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLAEV2 computes the eigendecomposition of a 2-by-2 symmetric matrix
+ [ A B ]
+ [ B C ].
+ On return, RT1 is the eigenvalue of larger absolute value, RT2 is the
+ eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right
+ eigenvector for RT1, giving the decomposition
+
+ [ CS1 SN1 ] [ A B ] [ CS1 -SN1 ] = [ RT1 0 ]
+ [-SN1 CS1 ] [ B C ] [ SN1 CS1 ] [ 0 RT2 ].
+
+ Arguments
+ =========
+
+ A (input) DOUBLE PRECISION
+ The (1,1) element of the 2-by-2 matrix.
+
+ B (input) DOUBLE PRECISION
+ The (1,2) element and the conjugate of the (2,1) element of
+ the 2-by-2 matrix.
+
+ C (input) DOUBLE PRECISION
+ The (2,2) element of the 2-by-2 matrix.
+
+ RT1 (output) DOUBLE PRECISION
+ The eigenvalue of larger absolute value.
+
+ RT2 (output) DOUBLE PRECISION
+ The eigenvalue of smaller absolute value.
+
+ CS1 (output) DOUBLE PRECISION
+ SN1 (output) DOUBLE PRECISION
+ The vector (CS1, SN1) is a unit right eigenvector for RT1.
+
+ Further Details
+ ===============
+
+ RT1 is accurate to a few ulps barring over/underflow.
+
+ RT2 may be inaccurate if there is massive cancellation in the
+ determinant A*C-B*B; higher precision or correctly rounded or
+ correctly truncated arithmetic would be needed to compute RT2
+ accurately in all cases.
+
+ CS1 and SN1 are accurate to a few ulps barring over/underflow.
+
+ Overflow is possible only if RT1 is within a factor of 5 of overflow.
+ Underflow is harmless if the input data is 0 or exceeds
+ underflow_threshold / macheps.
+
+ =====================================================================
+
+
+ Compute the eigenvalues
+*/
+
+ sm = *a + *c__;
+ df = *a - *c__;
+ adf = abs(df);
+ tb = *b + *b;
+ ab = abs(tb);
+ if (abs(*a) > abs(*c__)) {
+ acmx = *a;
+ acmn = *c__;
+ } else {
+ acmx = *c__;
+ acmn = *a;
+ }
+ if (adf > ab) {
+/* Computing 2nd power */
+ d__1 = ab / adf;
+ rt = adf * sqrt(d__1 * d__1 + 1.);
+ } else if (adf < ab) {
+/* Computing 2nd power */
+ d__1 = adf / ab;
+ rt = ab * sqrt(d__1 * d__1 + 1.);
+ } else {
+
+/* Includes case AB=ADF=0 */
+
+ rt = ab * sqrt(2.);
+ }
+ if (sm < 0.) {
+ *rt1 = (sm - rt) * .5;
+ sgn1 = -1;
+
+/*
+ Order of execution important.
+ To get fully accurate smaller eigenvalue,
+ next line needs to be executed in higher precision.
+*/
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else if (sm > 0.) {
+ *rt1 = (sm + rt) * .5;
+ sgn1 = 1;
+
+/*
+ Order of execution important.
+ To get fully accurate smaller eigenvalue,
+ next line needs to be executed in higher precision.
+*/
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else {
+
+/* Includes case RT1 = RT2 = 0 */
+
+ *rt1 = rt * .5;
+ *rt2 = rt * -.5;
+ sgn1 = 1;
+ }
+
+/* Compute the eigenvector */
+
+ if (df >= 0.) {
+ cs = df + rt;
+ sgn2 = 1;
+ } else {
+ cs = df - rt;
+ sgn2 = -1;
+ }
+ acs = abs(cs);
+ if (acs > ab) {
+ ct = -tb / cs;
+ *sn1 = 1. / sqrt(ct * ct + 1.);
+ *cs1 = ct * *sn1;
+ } else {
+ if (ab == 0.) {
+ *cs1 = 1.;
+ *sn1 = 0.;
+ } else {
+ tn = -cs / tb;
+ *cs1 = 1. / sqrt(tn * tn + 1.);
+ *sn1 = tn * *cs1;
+ }
+ }
+ if (sgn1 == sgn2) {
+ tn = *cs1;
+ *cs1 = -(*sn1);
+ *sn1 = tn;
+ }
+ return 0;
+
+/* End of DLAEV2 */
+
+} /* dlaev2_ */
+
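+/*
+   Editor's note (not part of the original LAPACK source): a minimal
+   sketch of calling DLAEV2 on the matrix [ 2 1 ; 1 3 ].  The results
+   satisfy rt1 + rt2 = a + c and rt1*rt2 = a*c - b*b up to roundoff,
+   with (cs1, sn1) the unit eigenvector for rt1.  Excluded from
+   compilation on purpose.
+*/
+#if 0
+static void dlaev2_example(void)
+{
+    doublereal a = 2., b = 1., c = 3.;
+    doublereal rt1, rt2, cs1, sn1;
+
+    dlaev2_(&a, &b, &c, &rt1, &rt2, &cs1, &sn1);
+    /* Here rt1 = (5 + sqrt(5))/2 and rt2 = (5 - sqrt(5))/2. */
+}
+#endif
+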
+/* Subroutine */ int dlahqr_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, doublereal *h__, integer *ldh, doublereal
+ *wr, doublereal *wi, integer *iloz, integer *ihiz, doublereal *z__,
+ integer *ldz, integer *info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer i__, j, k, l, m;
+ static doublereal s, v[3];
+ static integer i1, i2;
+ static doublereal t1, t2, t3, v1, v2, v3, h00, h10, h11, h12, h21, h22,
+ h33, h44;
+ static integer nh;
+ static doublereal cs;
+ static integer nr;
+ static doublereal sn;
+ static integer nz;
+ static doublereal ave, h33s, h44s;
+ static integer itn, its;
+ static doublereal ulp, sum, tst1, h43h34, disc, unfl, ovfl;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ static doublereal work[1];
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlanv2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *), dlabad_(
+ doublereal *, doublereal *);
+
+ extern /* Subroutine */ int dlarfg_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *);
+ extern doublereal dlanhs_(char *, integer *, doublereal *, integer *,
+ doublereal *);
+ static doublereal smlnum;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLAHQR is an auxiliary routine called by DHSEQR to update the
+ eigenvalues and Schur decomposition already computed by DHSEQR, by
+ dealing with the Hessenberg submatrix in rows and columns ILO to IHI.
+
+ Arguments
+ =========
+
+ WANTT (input) LOGICAL
+ = .TRUE. : the full Schur form T is required;
+ = .FALSE.: only eigenvalues are required.
+
+ WANTZ (input) LOGICAL
+ = .TRUE. : the matrix of Schur vectors Z is required;
+ = .FALSE.: Schur vectors are not required.
+
+ N (input) INTEGER
+ The order of the matrix H. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that H is already upper quasi-triangular in
+ rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless
+ ILO = 1). DLAHQR works primarily with the Hessenberg
+ submatrix in rows and columns ILO to IHI, but applies
+ transformations to all of H if WANTT is .TRUE..
+ 1 <= ILO <= max(1,IHI); IHI <= N.
+
+ H (input/output) DOUBLE PRECISION array, dimension (LDH,N)
+ On entry, the upper Hessenberg matrix H.
+ On exit, if WANTT is .TRUE., H is upper quasi-triangular in
+ rows and columns ILO:IHI, with any 2-by-2 diagonal blocks in
+ standard form. If WANTT is .FALSE., the contents of H are
+ unspecified on exit.
+
+ LDH (input) INTEGER
+ The leading dimension of the array H. LDH >= max(1,N).
+
+ WR (output) DOUBLE PRECISION array, dimension (N)
+ WI (output) DOUBLE PRECISION array, dimension (N)
+ The real and imaginary parts, respectively, of the computed
+ eigenvalues ILO to IHI are stored in the corresponding
+ elements of WR and WI. If two eigenvalues are computed as a
+ complex conjugate pair, they are stored in consecutive
+ elements of WR and WI, say the i-th and (i+1)th, with
+ WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the
+ eigenvalues are stored in the same order as on the diagonal
+ of the Schur form returned in H, with WR(i) = H(i,i), and, if
+ H(i:i+1,i:i+1) is a 2-by-2 diagonal block,
+ WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i).
+
+ ILOZ (input) INTEGER
+ IHIZ (input) INTEGER
+ Specify the rows of Z to which transformations must be
+ applied if WANTZ is .TRUE..
+ 1 <= ILOZ <= ILO; IHI <= IHIZ <= N.
+
+ Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N)
+ If WANTZ is .TRUE., on entry Z must contain the current
+ matrix Z of transformations accumulated by DHSEQR, and on
+ exit Z has been updated; transformations are applied only to
+ the submatrix Z(ILOZ:IHIZ,ILO:IHI).
+ If WANTZ is .FALSE., Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ > 0: DLAHQR failed to compute all the eigenvalues ILO to IHI
+ in a total of 30*(IHI-ILO+1) iterations; if INFO = i,
+ elements i+1:ihi of WR and WI contain those eigenvalues
+ which have been successfully computed.
+
+ Further Details
+ ===============
+
+ 2-96 Based on modifications by
+ David Day, Sandia National Laboratory, USA
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ wr[*ilo] = h__[*ilo + *ilo * h_dim1];
+ wi[*ilo] = 0.;
+ return 0;
+ }
+
+ nh = *ihi - *ilo + 1;
+ nz = *ihiz - *iloz + 1;
+
+/*
+ Set machine-dependent constants for the stopping criterion.
+ If norm(H) <= sqrt(OVFL), overflow should not occur.
+*/
+
+ unfl = SAFEMINIMUM;
+ ovfl = 1. / unfl;
+ dlabad_(&unfl, &ovfl);
+ ulp = PRECISION;
+ smlnum = unfl * (nh / ulp);
+
+/*
+ I1 and I2 are the indices of the first row and last column of H
+ to which transformations must be applied. If eigenvalues only are
+ being computed, I1 and I2 are set inside the main loop.
+*/
+
+ if (*wantt) {
+ i1 = 1;
+ i2 = *n;
+ }
+
+/* ITN is the total number of QR iterations allowed. */
+
+ itn = nh * 30;
+
+/*
+ The main loop begins here. I is the loop index and decreases from
+ IHI to ILO in steps of 1 or 2. Each iteration of the loop works
+ with the active submatrix in rows and columns L to I.
+ Eigenvalues I+1 to IHI have already converged. Either L = ILO or
+ H(L,L-1) is negligible so that the matrix splits.
+*/
+
+ i__ = *ihi;
+L10:
+ l = *ilo;
+ if (i__ < *ilo) {
+ goto L150;
+ }
+
+/*
+ Perform QR iterations on rows and columns ILO to I until a
+ submatrix of order 1 or 2 splits off at the bottom because a
+ subdiagonal element has become negligible.
+*/
+
+ i__1 = itn;
+ for (its = 0; its <= i__1; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__2 = l + 1;
+ for (k = i__; k >= i__2; --k) {
+ tst1 = (d__1 = h__[k - 1 + (k - 1) * h_dim1], abs(d__1)) + (d__2 =
+ h__[k + k * h_dim1], abs(d__2));
+ if (tst1 == 0.) {
+ i__3 = i__ - l + 1;
+ tst1 = dlanhs_("1", &i__3, &h__[l + l * h_dim1], ldh, work);
+ }
+/* Computing MAX */
+ d__2 = ulp * tst1;
+ if ((d__1 = h__[k + (k - 1) * h_dim1], abs(d__1)) <= max(d__2,
+ smlnum)) {
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible */
+
+ h__[l + (l - 1) * h_dim1] = 0.;
+ }
+
+/* Exit from loop if a submatrix of order 1 or 2 has split off. */
+
+ if (l >= i__ - 1) {
+ goto L140;
+ }
+
+/*
+ Now the active submatrix is in rows and columns L to I. If
+ eigenvalues only are being computed, only the active submatrix
+ need be transformed.
+*/
+
+ if (! (*wantt)) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if ((its == 10) || (its == 20)) {
+
+/* Exceptional shift. */
+
+ s = (d__1 = h__[i__ + (i__ - 1) * h_dim1], abs(d__1)) + (d__2 =
+ h__[i__ - 1 + (i__ - 2) * h_dim1], abs(d__2));
+ h44 = s * .75 + h__[i__ + i__ * h_dim1];
+ h33 = h44;
+ h43h34 = s * -.4375 * s;
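+
+/*
+   Editor's note: .75 = 3/4 and -.4375 = -7/16 are the traditional
+   ad hoc exceptional-shift constants, used to perturb the iteration
+   after 10 and 20 sweeps without convergence in order to break
+   possible cycles.
+*/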
+ } else {
+
+/*
+ Prepare to use Francis' double shift
+ (i.e. 2nd degree generalized Rayleigh quotient)
+*/
+
+ h44 = h__[i__ + i__ * h_dim1];
+ h33 = h__[i__ - 1 + (i__ - 1) * h_dim1];
+ h43h34 = h__[i__ + (i__ - 1) * h_dim1] * h__[i__ - 1 + i__ *
+ h_dim1];
+ s = h__[i__ - 1 + (i__ - 2) * h_dim1] * h__[i__ - 1 + (i__ - 2) *
+ h_dim1];
+ disc = (h33 - h44) * .5;
+ disc = disc * disc + h43h34;
+ if (disc > 0.) {
+
+/* Real roots: use Wilkinson's shift twice */
+
+ disc = sqrt(disc);
+ ave = (h33 + h44) * .5;
+ if (abs(h33) - abs(h44) > 0.) {
+ h33 = h33 * h44 - h43h34;
+ h44 = h33 / (d_sign(&disc, &ave) + ave);
+ } else {
+ h44 = d_sign(&disc, &ave) + ave;
+ }
+ h33 = h44;
+ h43h34 = 0.;
+ }
+ }
+
+/* Look for two consecutive small subdiagonal elements. */
+
+ i__2 = l;
+ for (m = i__ - 2; m >= i__2; --m) {
+/*
+ Determine the effect of starting the double-shift QR
+ iteration at row M, and see if this would make H(M,M-1)
+ negligible.
+*/
+
+ h11 = h__[m + m * h_dim1];
+ h22 = h__[m + 1 + (m + 1) * h_dim1];
+ h21 = h__[m + 1 + m * h_dim1];
+ h12 = h__[m + (m + 1) * h_dim1];
+ h44s = h44 - h11;
+ h33s = h33 - h11;
+ v1 = (h33s * h44s - h43h34) / h21 + h12;
+ v2 = h22 - h11 - h33s - h44s;
+ v3 = h__[m + 2 + (m + 1) * h_dim1];
+ s = abs(v1) + abs(v2) + abs(v3);
+ v1 /= s;
+ v2 /= s;
+ v3 /= s;
+ v[0] = v1;
+ v[1] = v2;
+ v[2] = v3;
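+
+/*
+          v is now proportional to the leading three entries of the
+          first column of (H - s1*I)*(H - s2*I) for the trial submatrix
+          H(M:I,M:I), where the shifts s1 and s2 satisfy
+          s1 + s2 = h33 + h44 and s1*s2 = h33*h44 - h43h34.
+*/
+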
+ if (m == l) {
+ goto L50;
+ }
+ h00 = h__[m - 1 + (m - 1) * h_dim1];
+ h10 = h__[m + (m - 1) * h_dim1];
+ tst1 = abs(v1) * (abs(h00) + abs(h11) + abs(h22));
+ if (abs(h10) * (abs(v2) + abs(v3)) <= ulp * tst1) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+
+/* Double-shift QR step */
+
+ i__2 = i__ - 1;
+ for (k = m; k <= i__2; ++k) {
+
+/*
+ The first iteration of this loop determines a reflection G
+ from the vector V and applies it from left and right to H,
+ thus creating a nonzero bulge below the subdiagonal.
+
+ Each subsequent iteration determines a reflection G to
+ restore the Hessenberg form in the (K-1)th column, and thus
+ chases the bulge one step toward the bottom of the active
+ submatrix. NR is the order of G.
+
+ Computing MIN
+*/
+ i__3 = 3, i__4 = i__ - k + 1;
+ nr = min(i__3,i__4);
+ if (k > m) {
+ dcopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ dlarfg_(&nr, v, &v[1], &c__1, &t1);
+ if (k > m) {
+ h__[k + (k - 1) * h_dim1] = v[0];
+ h__[k + 1 + (k - 1) * h_dim1] = 0.;
+ if (k < i__ - 1) {
+ h__[k + 2 + (k - 1) * h_dim1] = 0.;
+ }
+ } else if (m > l) {
+ h__[k + (k - 1) * h_dim1] = -h__[k + (k - 1) * h_dim1];
+ }
+ v2 = v[1];
+ t2 = t1 * v2;
+ if (nr == 3) {
+ v3 = v[2];
+ t3 = t1 * v3;
+
+/*
+ Apply G from the left to transform the rows of the matrix
+ in columns K to I2.
+*/
+
+ i__3 = i2;
+ for (j = k; j <= i__3; ++j) {
+ sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1]
+ + v3 * h__[k + 2 + j * h_dim1];
+ h__[k + j * h_dim1] -= sum * t1;
+ h__[k + 1 + j * h_dim1] -= sum * t2;
+ h__[k + 2 + j * h_dim1] -= sum * t3;
+/* L60: */
+ }
+
+/*
+ Apply G from the right to transform the columns of the
+ matrix in rows I1 to min(K+3,I).
+
+ Computing MIN
+*/
+ i__4 = k + 3;
+ i__3 = min(i__4,i__);
+ for (j = i1; j <= i__3; ++j) {
+ sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
+ + v3 * h__[j + (k + 2) * h_dim1];
+ h__[j + k * h_dim1] -= sum * t1;
+ h__[j + (k + 1) * h_dim1] -= sum * t2;
+ h__[j + (k + 2) * h_dim1] -= sum * t3;
+/* L70: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__3 = *ihiz;
+ for (j = *iloz; j <= i__3; ++j) {
+ sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
+ z_dim1] + v3 * z__[j + (k + 2) * z_dim1];
+ z__[j + k * z_dim1] -= sum * t1;
+ z__[j + (k + 1) * z_dim1] -= sum * t2;
+ z__[j + (k + 2) * z_dim1] -= sum * t3;
+/* L80: */
+ }
+ }
+ } else if (nr == 2) {
+
+/*
+ Apply G from the left to transform the rows of the matrix
+ in columns K to I2.
+*/
+
+ i__3 = i2;
+ for (j = k; j <= i__3; ++j) {
+ sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1];
+ h__[k + j * h_dim1] -= sum * t1;
+ h__[k + 1 + j * h_dim1] -= sum * t2;
+/* L90: */
+ }
+
+/*
+ Apply G from the right to transform the columns of the
+ matrix in rows I1 to min(K+3,I).
+*/
+
+ i__3 = i__;
+ for (j = i1; j <= i__3; ++j) {
+ sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
+ ;
+ h__[j + k * h_dim1] -= sum * t1;
+ h__[j + (k + 1) * h_dim1] -= sum * t2;
+/* L100: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__3 = *ihiz;
+ for (j = *iloz; j <= i__3; ++j) {
+ sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
+ z_dim1];
+ z__[j + k * z_dim1] -= sum * t1;
+ z__[j + (k + 1) * z_dim1] -= sum * t2;
+/* L110: */
+ }
+ }
+ }
+/* L120: */
+ }
+
+/* L130: */
+ }
+
+/* Failure to converge in the remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L140:
+
+ if (l == i__) {
+
+/* H(I,I-1) is negligible: one eigenvalue has converged. */
+
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.;
+ } else if (l == i__ - 1) {
+
+/*
+   H(I-1,I-2) is negligible: a pair of eigenvalues has converged.
+
+ Transform the 2-by-2 submatrix to standard Schur form,
+ and compute and store the eigenvalues.
+*/
+
+ dlanv2_(&h__[i__ - 1 + (i__ - 1) * h_dim1], &h__[i__ - 1 + i__ *
+ h_dim1], &h__[i__ + (i__ - 1) * h_dim1], &h__[i__ + i__ *
+ h_dim1], &wr[i__ - 1], &wi[i__ - 1], &wr[i__], &wi[i__], &cs,
+ &sn);
+
+ if (*wantt) {
+
+/* Apply the transformation to the rest of H. */
+
+ if (i2 > i__) {
+ i__1 = i2 - i__;
+ drot_(&i__1, &h__[i__ - 1 + (i__ + 1) * h_dim1], ldh, &h__[
+ i__ + (i__ + 1) * h_dim1], ldh, &cs, &sn);
+ }
+ i__1 = i__ - i1 - 1;
+ drot_(&i__1, &h__[i1 + (i__ - 1) * h_dim1], &c__1, &h__[i1 + i__ *
+ h_dim1], &c__1, &cs, &sn);
+ }
+ if (*wantz) {
+
+/* Apply the transformation to Z. */
+
+ drot_(&nz, &z__[*iloz + (i__ - 1) * z_dim1], &c__1, &z__[*iloz +
+ i__ * z_dim1], &c__1, &cs, &sn);
+ }
+ }
+
+/*
+ Decrement number of remaining iterations, and return to start of
+ the main loop with new value of I.
+*/
+
+ itn -= its;
+ i__ = l - 1;
+ goto L10;
+
+L150:
+ return 0;
+
+/* End of DLAHQR */
+
+} /* dlahqr_ */
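+
+/*
+   A minimal, disabled sketch of calling dlahqr_ to compute the
+   eigenvalues of a small upper Hessenberg matrix, assuming the f2c
+   conventions used throughout this file; the 3x3 matrix and the helper
+   name below are illustrative only and not part of LAPACK.
+*/
+#if 0
+static void example_dlahqr(void)
+{
+    /* H = [4 1 2; 3 4 1; 0 2 5], stored column by column (Fortran order) */
+    doublereal h[9] = { 4., 3., 0., 1., 4., 2., 2., 1., 5. };
+    doublereal wr[3], wi[3], z[1];
+    integer n = 3, ilo = 1, ihi = 3, ldh = 3, iloz = 1, ihiz = 1, ldz = 1;
+    integer info;
+    logical wantt = FALSE_, wantz = FALSE_;
+
+    /* Eigenvalues only: neither the Schur form nor Z is accumulated. */
+    dlahqr_(&wantt, &wantz, &n, &ilo, &ihi, h, &ldh, wr, wi,
+            &iloz, &ihiz, z, &ldz, &info);
+    /* On success info is 0 and (wr[i], wi[i]) hold the eigenvalues. */
+}
+#endif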
+
+/* Subroutine */ int dlahrd_(integer *n, integer *k, integer *nb, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *t, integer *ldt,
+ doublereal *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ static integer i__;
+ static doublereal ei;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dgemv_(char *, integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dcopy_(integer *, doublereal *,
+ integer *, doublereal *, integer *), daxpy_(integer *, doublereal
+ *, doublereal *, integer *, doublereal *, integer *), dtrmv_(char
+ *, char *, char *, integer *, doublereal *, integer *, doublereal
+ *, integer *), dlarfg_(integer *,
+ doublereal *, doublereal *, integer *, doublereal *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLAHRD reduces the first NB columns of a real general n-by-(n-k+1)
+ matrix A so that elements below the k-th subdiagonal are zero. The
+ reduction is performed by an orthogonal similarity transformation
+ Q' * A * Q. The routine returns the matrices V and T which determine
+ Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T.
+
+ This is an auxiliary routine called by DGEHRD.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix A.
+
+ K (input) INTEGER
+ The offset for the reduction. Elements below the k-th
+ subdiagonal in the first NB columns are reduced to zero.
+
+ NB (input) INTEGER
+ The number of columns to be reduced.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N-K+1)
+ On entry, the n-by-(n-k+1) general matrix A.
+ On exit, the elements on and above the k-th subdiagonal in
+ the first NB columns are overwritten with the corresponding
+ elements of the reduced matrix; the elements below the k-th
+ subdiagonal, with the array TAU, represent the matrix Q as a
+ product of elementary reflectors. The other columns of A are
+ unchanged. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (output) DOUBLE PRECISION array, dimension (NB)
+ The scalar factors of the elementary reflectors. See Further
+ Details.
+
+ T (output) DOUBLE PRECISION array, dimension (LDT,NB)
+ The upper triangular matrix T.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= NB.
+
+ Y (output) DOUBLE PRECISION array, dimension (LDY,NB)
+ The n-by-nb matrix Y.
+
+ LDY (input) INTEGER
+ The leading dimension of the array Y. LDY >= N.
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of nb elementary reflectors
+
+ Q = H(1) H(2) . . . H(nb).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in
+ A(i+k+1:n,i), and tau in TAU(i).
+
+ The elements of the vectors v together form the (n-k+1)-by-nb matrix
+ V which is needed, with T and Y, to apply the transformation to the
+ unreduced part of the matrix, using an update of the form:
+ A := (I - V*T*V') * (A - Y*V').
+
+ The contents of A on exit are illustrated by the following example
+ with n = 7, k = 3 and nb = 2:
+
+ ( a h a a a )
+ ( a h a a a )
+ ( a h a a a )
+ ( h h a a a )
+ ( v1 h a a a )
+ ( v1 v2 a a a )
+ ( v1 v2 a a a )
+
+ where a denotes an element of the original matrix A, h denotes a
+ modified element of the upper Hessenberg matrix H, and vi denotes an
+ element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ --tau;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ > 1) {
+
+/*
+ Update A(1:n,i)
+
+ Compute i-th column of A - Y * V'
+*/
+
+ i__2 = i__ - 1;
+ dgemv_("No transpose", n, &i__2, &c_b3001, &y[y_offset], ldy, &a[*
+ k + i__ - 1 + a_dim1], lda, &c_b2865, &a[i__ * a_dim1 + 1]
+ , &c__1);
+
+/*
+ Apply I - V * T' * V' to this column (call it b) from the
+ left, using the last column of T as workspace
+
+ Let V = ( V1 ) and b = ( b1 ) (first I-1 rows)
+ ( V2 ) ( b2 )
+
+ where V1 is unit lower triangular
+
+ w := V1' * b1
+*/
+
+ i__2 = i__ - 1;
+ dcopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ dtrmv_("Lower", "Transpose", "Unit", &i__2, &a[*k + 1 + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &a[*k + i__ + a_dim1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b2865, &t[*
+ nb * t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+ i__2 = i__ - 1;
+ dtrmv_("Upper", "Transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+ &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &a[*k + i__ +
+ a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1, &c_b2865, &a[*
+ k + i__ + i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+ i__2 = i__ - 1;
+ dtrmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1]
+ , lda, &t[*nb * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ daxpy_(&i__2, &c_b3001, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 +
+ i__ * a_dim1], &c__1);
+
+ a[*k + i__ - 1 + (i__ - 1) * a_dim1] = ei;
+ }
+
+/*
+ Generate the elementary reflector H(i) to annihilate
+ A(k+i+1:n,i)
+*/
+
+ i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+ i__3 = *k + i__ + 1;
+ dlarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3,*n) + i__ *
+ a_dim1], &c__1, &tau[i__]);
+ ei = a[*k + i__ + i__ * a_dim1];
+ a[*k + i__ + i__ * a_dim1] = 1.;
+
+/* Compute Y(1:n,i) */
+
+ i__2 = *n - *k - i__ + 1;
+ dgemv_("No transpose", n, &i__2, &c_b2865, &a[(i__ + 1) * a_dim1 + 1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b2879, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &a[*k + i__ + a_dim1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b2879, &t[i__ *
+ t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ dgemv_("No transpose", n, &i__2, &c_b3001, &y[y_offset], ldy, &t[i__ *
+ t_dim1 + 1], &c__1, &c_b2865, &y[i__ * y_dim1 + 1], &c__1);
+ dscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1);
+
+/* Compute T(1:i,i) */
+
+ i__2 = i__ - 1;
+ d__1 = -tau[i__];
+ dscal_(&i__2, &d__1, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+ &t[i__ * t_dim1 + 1], &c__1)
+ ;
+ t[i__ + i__ * t_dim1] = tau[i__];
+
+/* L10: */
+ }
+ a[*k + *nb + *nb * a_dim1] = ei;
+
+ return 0;
+
+/* End of DLAHRD */
+
+} /* dlahrd_ */
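+
+/*
+   A minimal, disabled sketch of calling dlahrd_ with the n = 7, k = 3,
+   nb = 2 configuration pictured in the comments above; the data and the
+   helper name are illustrative only and not part of LAPACK.
+*/
+#if 0
+static void example_dlahrd(void)
+{
+    /* A is n-by-(n-k+1) = 7x5, column-major; the contents are arbitrary. */
+    doublereal a[7 * 5], tau[2], t[2 * 2], y[7 * 2];
+    integer n = 7, k = 3, nb = 2, lda = 7, ldt = 2, ldy = 7, i;
+
+    for (i = 0; i < 7 * 5; ++i) {
+        a[i] = (doublereal) (i % 4 + 1);   /* hypothetical test data */
+    }
+    dlahrd_(&n, &k, &nb, a, &lda, tau, t, &ldt, y, &ldy);
+    /* a now holds the reduced columns plus the reflector vectors v(i);
+       tau, t and y determine Q = I - V*T*V' and Y = A*V*T. */
+}
+#endif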
+
+/* Subroutine */ int dlaln2_(logical *ltrans, integer *na, integer *nw,
+ doublereal *smin, doublereal *ca, doublereal *a, integer *lda,
+ doublereal *d1, doublereal *d2, doublereal *b, integer *ldb,
+ doublereal *wr, doublereal *wi, doublereal *x, integer *ldx,
+ doublereal *scale, doublereal *xnorm, integer *info)
+{
+ /* Initialized data */
+
+ static logical zswap[4] = { FALSE_,FALSE_,TRUE_,TRUE_ };
+ static logical rswap[4] = { FALSE_,TRUE_,FALSE_,TRUE_ };
+ static integer ipivot[16] /* was [4][4] */ = { 1,2,3,4,2,1,4,3,3,4,1,2,
+ 4,3,2,1 };
+
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset;
+ doublereal d__1, d__2, d__3, d__4, d__5, d__6;
+ static doublereal equiv_0[4], equiv_1[4];
+
+ /* Local variables */
+ static integer j;
+#define ci (equiv_0)
+#define cr (equiv_1)
+ static doublereal bi1, bi2, br1, br2, xi1, xi2, xr1, xr2, ci21, ci22,
+ cr21, cr22, li21, csi, ui11, lr21, ui12, ui22;
+#define civ (equiv_0)
+ static doublereal csr, ur11, ur12, ur22;
+#define crv (equiv_1)
+ static doublereal bbnd, cmax, ui11r, ui12s, temp, ur11r, ur12s, u22abs;
+ static integer icmax;
+ static doublereal bnorm, cnorm, smini;
+
+ extern /* Subroutine */ int dladiv_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *);
+ static doublereal bignum, smlnum;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+    DLALN2 solves a system of the form  (ca A - w D) X = s B
+ or (ca A' - w D) X = s B with possible scaling ("s") and
+ perturbation of A. (A' means A-transpose.)
+
+ A is an NA x NA real matrix, ca is a real scalar, D is an NA x NA
+ real diagonal matrix, w is a real or complex value, and X and B are
+ NA x 1 matrices -- real if w is real, complex if w is complex. NA
+ may be 1 or 2.
+
+ If w is complex, X and B are represented as NA x 2 matrices,
+ the first column of each being the real part and the second
+ being the imaginary part.
+
+ "s" is a scaling factor (.LE. 1), computed by DLALN2, which is
+ so chosen that X can be computed without overflow. X is further
+ scaled if necessary to assure that norm(ca A - w D)*norm(X) is less
+ than overflow.
+
+ If both singular values of (ca A - w D) are less than SMIN,
+ SMIN*identity will be used instead of (ca A - w D). If only one
+ singular value is less than SMIN, one element of (ca A - w D) will be
+ perturbed enough to make the smallest singular value roughly SMIN.
+ If both singular values are at least SMIN, (ca A - w D) will not be
+ perturbed. In any case, the perturbation will be at most some small
+ multiple of max( SMIN, ulp*norm(ca A - w D) ). The singular values
+ are computed by infinity-norm approximations, and thus will only be
+ correct to a factor of 2 or so.
+
+ Note: all input quantities are assumed to be smaller than overflow
+ by a reasonable factor. (See BIGNUM.)
+
+ Arguments
+    =========
+
+ LTRANS (input) LOGICAL
+ =.TRUE.: A-transpose will be used.
+ =.FALSE.: A will be used (not transposed.)
+
+ NA (input) INTEGER
+ The size of the matrix A. It may (only) be 1 or 2.
+
+ NW (input) INTEGER
+ 1 if "w" is real, 2 if "w" is complex. It may only be 1
+ or 2.
+
+ SMIN (input) DOUBLE PRECISION
+ The desired lower bound on the singular values of A. This
+ should be a safe distance away from underflow or overflow,
+ say, between (underflow/machine precision) and (machine
+ precision * overflow ). (See BIGNUM and ULP.)
+
+ CA (input) DOUBLE PRECISION
+ The coefficient c, which A is multiplied by.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,NA)
+ The NA x NA matrix A.
+
+ LDA (input) INTEGER
+ The leading dimension of A. It must be at least NA.
+
+ D1 (input) DOUBLE PRECISION
+ The 1,1 element in the diagonal matrix D.
+
+ D2 (input) DOUBLE PRECISION
+ The 2,2 element in the diagonal matrix D. Not used if NW=1.
+
+ B (input) DOUBLE PRECISION array, dimension (LDB,NW)
+ The NA x NW matrix B (right-hand side). If NW=2 ("w" is
+ complex), column 1 contains the real part of B and column 2
+ contains the imaginary part.
+
+ LDB (input) INTEGER
+ The leading dimension of B. It must be at least NA.
+
+ WR (input) DOUBLE PRECISION
+ The real part of the scalar "w".
+
+ WI (input) DOUBLE PRECISION
+ The imaginary part of the scalar "w". Not used if NW=1.
+
+ X (output) DOUBLE PRECISION array, dimension (LDX,NW)
+ The NA x NW matrix X (unknowns), as computed by DLALN2.
+ If NW=2 ("w" is complex), on exit, column 1 will contain
+ the real part of X and column 2 will contain the imaginary
+ part.
+
+ LDX (input) INTEGER
+ The leading dimension of X. It must be at least NA.
+
+ SCALE (output) DOUBLE PRECISION
+            The scale factor that B must be multiplied by to ensure
+ that overflow does not occur when computing X. Thus,
+ (ca A - w D) X will be SCALE*B, not B (ignoring
+ perturbations of A.) It will be at most 1.
+
+ XNORM (output) DOUBLE PRECISION
+ The infinity-norm of X, when X is regarded as an NA x NW
+ real matrix.
+
+ INFO (output) INTEGER
+ An error flag. It will be set to zero if no error occurs,
+ a negative number if an argument is in error, or a positive
+ number if ca A - w D had to be perturbed.
+ The possible values are:
+ = 0: No error occurred, and (ca A - w D) did not have to be
+ perturbed.
+ = 1: (ca A - w D) had to be perturbed to make its smallest
+ (or only) singular value greater than SMIN.
+ NOTE: In the interests of speed, this routine does not
+ check the inputs for errors.
+
+ =====================================================================
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+
+ /* Function Body */
+
+/* Compute BIGNUM */
+
+ smlnum = 2. * SAFEMINIMUM;
+ bignum = 1. / smlnum;
+ smini = max(*smin,smlnum);
+
+/* Don't check for input errors */
+
+ *info = 0;
+
+/* Standard Initializations */
+
+ *scale = 1.;
+
+ if (*na == 1) {
+
+/* 1 x 1 (i.e., scalar) system C X = B */
+
+ if (*nw == 1) {
+
+/*
+ Real 1x1 system.
+
+ C = ca A - w D
+*/
+
+ csr = *ca * a[a_dim1 + 1] - *wr * *d1;
+ cnorm = abs(csr);
+
+/* If | C | < SMINI, use C = SMINI */
+
+ if (cnorm < smini) {
+ csr = smini;
+ cnorm = smini;
+ *info = 1;
+ }
+
+/* Check scaling for X = B / C */
+
+ bnorm = (d__1 = b[b_dim1 + 1], abs(d__1));
+ if (cnorm < 1. && bnorm > 1.) {
+ if (bnorm > bignum * cnorm) {
+ *scale = 1. / bnorm;
+ }
+ }
+
+/* Compute X */
+
+ x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / csr;
+ *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1));
+ } else {
+
+/*
+ Complex 1x1 system (w is complex)
+
+ C = ca A - w D
+*/
+
+ csr = *ca * a[a_dim1 + 1] - *wr * *d1;
+ csi = -(*wi) * *d1;
+ cnorm = abs(csr) + abs(csi);
+
+/* If | C | < SMINI, use C = SMINI */
+
+ if (cnorm < smini) {
+ csr = smini;
+ csi = 0.;
+ cnorm = smini;
+ *info = 1;
+ }
+
+/* Check scaling for X = B / C */
+
+ bnorm = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[((b_dim1) <<
+ (1)) + 1], abs(d__2));
+ if (cnorm < 1. && bnorm > 1.) {
+ if (bnorm > bignum * cnorm) {
+ *scale = 1. / bnorm;
+ }
+ }
+
+/* Compute X */
+
+ d__1 = *scale * b[b_dim1 + 1];
+ d__2 = *scale * b[((b_dim1) << (1)) + 1];
+ dladiv_(&d__1, &d__2, &csr, &csi, &x[x_dim1 + 1], &x[((x_dim1) <<
+ (1)) + 1]);
+ *xnorm = (d__1 = x[x_dim1 + 1], abs(d__1)) + (d__2 = x[((x_dim1)
+ << (1)) + 1], abs(d__2));
+ }
+
+ } else {
+
+/*
+ 2x2 System
+
+ Compute the real part of C = ca A - w D (or ca A' - w D )
+*/
+
+ cr[0] = *ca * a[a_dim1 + 1] - *wr * *d1;
+ cr[3] = *ca * a[((a_dim1) << (1)) + 2] - *wr * *d2;
+ if (*ltrans) {
+ cr[2] = *ca * a[a_dim1 + 2];
+ cr[1] = *ca * a[((a_dim1) << (1)) + 1];
+ } else {
+ cr[1] = *ca * a[a_dim1 + 2];
+ cr[2] = *ca * a[((a_dim1) << (1)) + 1];
+ }
+
+ if (*nw == 1) {
+
+/*
+ Real 2x2 system (w is real)
+
+ Find the largest element in C
+*/
+
+ cmax = 0.;
+ icmax = 0;
+
+ for (j = 1; j <= 4; ++j) {
+ if ((d__1 = crv[j - 1], abs(d__1)) > cmax) {
+ cmax = (d__1 = crv[j - 1], abs(d__1));
+ icmax = j;
+ }
+/* L10: */
+ }
+
+/* If norm(C) < SMINI, use SMINI*identity. */
+
+ if (cmax < smini) {
+/* Computing MAX */
+ d__3 = (d__1 = b[b_dim1 + 1], abs(d__1)), d__4 = (d__2 = b[
+ b_dim1 + 2], abs(d__2));
+ bnorm = max(d__3,d__4);
+ if (smini < 1. && bnorm > 1.) {
+ if (bnorm > bignum * smini) {
+ *scale = 1. / bnorm;
+ }
+ }
+ temp = *scale / smini;
+ x[x_dim1 + 1] = temp * b[b_dim1 + 1];
+ x[x_dim1 + 2] = temp * b[b_dim1 + 2];
+ *xnorm = temp * bnorm;
+ *info = 1;
+ return 0;
+ }
+
+/* Gaussian elimination with complete pivoting. */
+
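+/*
+       Column ICMAX of IPIVOT lists the positions in the column-major
+       array CRV = (c11, c21, c12, c22) that take on the roles of the
+       pivot u11, the eliminated entry c21, and the remaining entries
+       u12 and c22 once the largest element is swapped into the pivot
+       position.
+*/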
+ ur11 = crv[icmax - 1];
+ cr21 = crv[ipivot[((icmax) << (2)) - 3] - 1];
+ ur12 = crv[ipivot[((icmax) << (2)) - 2] - 1];
+ cr22 = crv[ipivot[((icmax) << (2)) - 1] - 1];
+ ur11r = 1. / ur11;
+ lr21 = ur11r * cr21;
+ ur22 = cr22 - ur12 * lr21;
+
+/* If smaller pivot < SMINI, use SMINI */
+
+ if (abs(ur22) < smini) {
+ ur22 = smini;
+ *info = 1;
+ }
+ if (rswap[icmax - 1]) {
+ br1 = b[b_dim1 + 2];
+ br2 = b[b_dim1 + 1];
+ } else {
+ br1 = b[b_dim1 + 1];
+ br2 = b[b_dim1 + 2];
+ }
+ br2 -= lr21 * br1;
+/* Computing MAX */
+ d__2 = (d__1 = br1 * (ur22 * ur11r), abs(d__1)), d__3 = abs(br2);
+ bbnd = max(d__2,d__3);
+ if (bbnd > 1. && abs(ur22) < 1.) {
+ if (bbnd >= bignum * abs(ur22)) {
+ *scale = 1. / bbnd;
+ }
+ }
+
+ xr2 = br2 * *scale / ur22;
+ xr1 = *scale * br1 * ur11r - xr2 * (ur11r * ur12);
+ if (zswap[icmax - 1]) {
+ x[x_dim1 + 1] = xr2;
+ x[x_dim1 + 2] = xr1;
+ } else {
+ x[x_dim1 + 1] = xr1;
+ x[x_dim1 + 2] = xr2;
+ }
+/* Computing MAX */
+ d__1 = abs(xr1), d__2 = abs(xr2);
+ *xnorm = max(d__1,d__2);
+
+/* Further scaling if norm(A) norm(X) > overflow */
+
+ if (*xnorm > 1. && cmax > 1.) {
+ if (*xnorm > bignum / cmax) {
+ temp = cmax / bignum;
+ x[x_dim1 + 1] = temp * x[x_dim1 + 1];
+ x[x_dim1 + 2] = temp * x[x_dim1 + 2];
+ *xnorm = temp * *xnorm;
+ *scale = temp * *scale;
+ }
+ }
+ } else {
+
+/*
+ Complex 2x2 system (w is complex)
+
+ Find the largest element in C
+*/
+
+ ci[0] = -(*wi) * *d1;
+ ci[1] = 0.;
+ ci[2] = 0.;
+ ci[3] = -(*wi) * *d2;
+ cmax = 0.;
+ icmax = 0;
+
+ for (j = 1; j <= 4; ++j) {
+ if ((d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1], abs(
+ d__2)) > cmax) {
+ cmax = (d__1 = crv[j - 1], abs(d__1)) + (d__2 = civ[j - 1]
+ , abs(d__2));
+ icmax = j;
+ }
+/* L20: */
+ }
+
+/* If norm(C) < SMINI, use SMINI*identity. */
+
+ if (cmax < smini) {
+/* Computing MAX */
+ d__5 = (d__1 = b[b_dim1 + 1], abs(d__1)) + (d__2 = b[((b_dim1)
+ << (1)) + 1], abs(d__2)), d__6 = (d__3 = b[b_dim1 +
+ 2], abs(d__3)) + (d__4 = b[((b_dim1) << (1)) + 2],
+ abs(d__4));
+ bnorm = max(d__5,d__6);
+ if (smini < 1. && bnorm > 1.) {
+ if (bnorm > bignum * smini) {
+ *scale = 1. / bnorm;
+ }
+ }
+ temp = *scale / smini;
+ x[x_dim1 + 1] = temp * b[b_dim1 + 1];
+ x[x_dim1 + 2] = temp * b[b_dim1 + 2];
+ x[((x_dim1) << (1)) + 1] = temp * b[((b_dim1) << (1)) + 1];
+ x[((x_dim1) << (1)) + 2] = temp * b[((b_dim1) << (1)) + 2];
+ *xnorm = temp * bnorm;
+ *info = 1;
+ return 0;
+ }
+
+/* Gaussian elimination with complete pivoting. */
+
+ ur11 = crv[icmax - 1];
+ ui11 = civ[icmax - 1];
+ cr21 = crv[ipivot[((icmax) << (2)) - 3] - 1];
+ ci21 = civ[ipivot[((icmax) << (2)) - 3] - 1];
+ ur12 = crv[ipivot[((icmax) << (2)) - 2] - 1];
+ ui12 = civ[ipivot[((icmax) << (2)) - 2] - 1];
+ cr22 = crv[ipivot[((icmax) << (2)) - 1] - 1];
+ ci22 = civ[ipivot[((icmax) << (2)) - 1] - 1];
+ if ((icmax == 1) || (icmax == 4)) {
+
+/* Code when off-diagonals of pivoted C are real */
+
+ if (abs(ur11) > abs(ui11)) {
+ temp = ui11 / ur11;
+/* Computing 2nd power */
+ d__1 = temp;
+ ur11r = 1. / (ur11 * (d__1 * d__1 + 1.));
+ ui11r = -temp * ur11r;
+ } else {
+ temp = ur11 / ui11;
+/* Computing 2nd power */
+ d__1 = temp;
+ ui11r = -1. / (ui11 * (d__1 * d__1 + 1.));
+ ur11r = -temp * ui11r;
+ }
+ lr21 = cr21 * ur11r;
+ li21 = cr21 * ui11r;
+ ur12s = ur12 * ur11r;
+ ui12s = ur12 * ui11r;
+ ur22 = cr22 - ur12 * lr21;
+ ui22 = ci22 - ur12 * li21;
+ } else {
+
+/* Code when diagonals of pivoted C are real */
+
+ ur11r = 1. / ur11;
+ ui11r = 0.;
+ lr21 = cr21 * ur11r;
+ li21 = ci21 * ur11r;
+ ur12s = ur12 * ur11r;
+ ui12s = ui12 * ur11r;
+ ur22 = cr22 - ur12 * lr21 + ui12 * li21;
+ ui22 = -ur12 * li21 - ui12 * lr21;
+ }
+ u22abs = abs(ur22) + abs(ui22);
+
+/* If smaller pivot < SMINI, use SMINI */
+
+ if (u22abs < smini) {
+ ur22 = smini;
+ ui22 = 0.;
+ *info = 1;
+ }
+ if (rswap[icmax - 1]) {
+ br2 = b[b_dim1 + 1];
+ br1 = b[b_dim1 + 2];
+ bi2 = b[((b_dim1) << (1)) + 1];
+ bi1 = b[((b_dim1) << (1)) + 2];
+ } else {
+ br1 = b[b_dim1 + 1];
+ br2 = b[b_dim1 + 2];
+ bi1 = b[((b_dim1) << (1)) + 1];
+ bi2 = b[((b_dim1) << (1)) + 2];
+ }
+ br2 = br2 - lr21 * br1 + li21 * bi1;
+ bi2 = bi2 - li21 * br1 - lr21 * bi1;
+/* Computing MAX */
+ d__1 = (abs(br1) + abs(bi1)) * (u22abs * (abs(ur11r) + abs(ui11r))
+ ), d__2 = abs(br2) + abs(bi2);
+ bbnd = max(d__1,d__2);
+ if (bbnd > 1. && u22abs < 1.) {
+ if (bbnd >= bignum * u22abs) {
+ *scale = 1. / bbnd;
+ br1 = *scale * br1;
+ bi1 = *scale * bi1;
+ br2 = *scale * br2;
+ bi2 = *scale * bi2;
+ }
+ }
+
+ dladiv_(&br2, &bi2, &ur22, &ui22, &xr2, &xi2);
+ xr1 = ur11r * br1 - ui11r * bi1 - ur12s * xr2 + ui12s * xi2;
+ xi1 = ui11r * br1 + ur11r * bi1 - ui12s * xr2 - ur12s * xi2;
+ if (zswap[icmax - 1]) {
+ x[x_dim1 + 1] = xr2;
+ x[x_dim1 + 2] = xr1;
+ x[((x_dim1) << (1)) + 1] = xi2;
+ x[((x_dim1) << (1)) + 2] = xi1;
+ } else {
+ x[x_dim1 + 1] = xr1;
+ x[x_dim1 + 2] = xr2;
+ x[((x_dim1) << (1)) + 1] = xi1;
+ x[((x_dim1) << (1)) + 2] = xi2;
+ }
+/* Computing MAX */
+ d__1 = abs(xr1) + abs(xi1), d__2 = abs(xr2) + abs(xi2);
+ *xnorm = max(d__1,d__2);
+
+/* Further scaling if norm(A) norm(X) > overflow */
+
+ if (*xnorm > 1. && cmax > 1.) {
+ if (*xnorm > bignum / cmax) {
+ temp = cmax / bignum;
+ x[x_dim1 + 1] = temp * x[x_dim1 + 1];
+ x[x_dim1 + 2] = temp * x[x_dim1 + 2];
+ x[((x_dim1) << (1)) + 1] = temp * x[((x_dim1) << (1)) + 1]
+ ;
+ x[((x_dim1) << (1)) + 2] = temp * x[((x_dim1) << (1)) + 2]
+ ;
+ *xnorm = temp * *xnorm;
+ *scale = temp * *scale;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DLALN2 */
+
+} /* dlaln2_ */
+
+#undef crv
+#undef civ
+#undef cr
+#undef ci
+
+
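+/*
+   A minimal, disabled sketch of solving a real 2x2 shifted system
+   (ca*A - wr*D) x = s*b with dlaln2_; the data and the helper name are
+   illustrative only and not part of LAPACK.
+*/
+#if 0
+static void example_dlaln2(void)
+{
+    doublereal a[4] = { 2., 1., 1., 3. };   /* A = [2 1; 1 3], column-major */
+    doublereal b[2] = { 1., 1. };
+    doublereal x[2], scale, xnorm;
+    doublereal smin = 1e-12, ca = 1., d1 = 1., d2 = 1., wr = .5, wi = 0.;
+    integer na = 2, nw = 1, lda = 2, ldb = 2, ldx = 2, info;
+    logical ltrans = FALSE_;
+
+    dlaln2_(&ltrans, &na, &nw, &smin, &ca, a, &lda, &d1, &d2,
+            b, &ldb, &wr, &wi, x, &ldx, &scale, &xnorm, &info);
+    /* x solves the (possibly scaled) system; scale <= 1, and info == 1
+       would indicate that the matrix had to be perturbed. */
+}
+#endif
+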
+/* Subroutine */ int dlals0_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *nrhs, doublereal *b, integer *ldb, doublereal
+ *bx, integer *ldbx, integer *perm, integer *givptr, integer *givcol,
+ integer *ldgcol, doublereal *givnum, integer *ldgnum, doublereal *
+ poles, doublereal *difl, doublereal *difr, doublereal *z__, integer *
+ k, doublereal *c__, doublereal *s, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, b_dim1, b_offset, bx_dim1, bx_offset,
+ difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1,
+ poles_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Local variables */
+ static integer i__, j, m, n;
+ static doublereal dj;
+ static integer nlp1;
+ static doublereal temp;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ static doublereal diflj, difrj, dsigj;
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dcopy_(integer *,
+ doublereal *, integer *, doublereal *, integer *);
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlacpy_(char *, integer *, integer
+ *, doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ static doublereal dsigjp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ December 1, 1999
+
+
+ Purpose
+ =======
+
+ DLALS0 applies back the multiplying factors of either the left or the
+    right singular vector matrix of a diagonal matrix with an appended row
+ to the right hand side matrix B in solving the least squares problem
+ using the divide-and-conquer SVD approach.
+
+ For the left singular vector matrix, three types of orthogonal
+ matrices are involved:
+
+ (1L) Givens rotations: the number of such rotations is GIVPTR; the
+ pairs of columns/rows they were applied to are stored in GIVCOL;
+ and the C- and S-values of these rotations are stored in GIVNUM.
+
+ (2L) Permutation. The (NL+1)-st row of B is to be moved to the first
+         row, and for J=2:N, the PERM(J)-th row of B is to be moved to the
+ J-th row.
+
+ (3L) The left singular vector matrix of the remaining matrix.
+
+ For the right singular vector matrix, four types of orthogonal
+ matrices are involved:
+
+ (1R) The right singular vector matrix of the remaining matrix.
+
+ (2R) If SQRE = 1, one extra Givens rotation to generate the right
+ null space.
+
+ (3R) The inverse transformation of (2L).
+
+ (4R) The inverse transformation of (1L).
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed in
+ factored form:
+ = 0: Left singular vector matrix.
+ = 1: Right singular vector matrix.
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has row dimension N = NL + NR + 1,
+ and column dimension M = N + SQRE.
+
+ NRHS (input) INTEGER
+ The number of columns of B and BX. NRHS must be at least 1.
+
+ B (input/output) DOUBLE PRECISION array, dimension ( LDB, NRHS )
+ On input, B contains the right hand sides of the least
+ squares problem in rows 1 through M. On output, B contains
+ the solution X in rows 1 through N.
+
+ LDB (input) INTEGER
+ The leading dimension of B. LDB must be at least
+ max(1,MAX( M, N ) ).
+
+ BX (workspace) DOUBLE PRECISION array, dimension ( LDBX, NRHS )
+
+ LDBX (input) INTEGER
+ The leading dimension of BX.
+
+ PERM (input) INTEGER array, dimension ( N )
+ The permutations (from deflation and sorting) applied
+ to the two blocks.
+
+ GIVPTR (input) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem.
+
+ GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 )
+ Each pair of numbers indicates a pair of rows/columns
+ involved in a Givens rotation.
+
+ LDGCOL (input) INTEGER
+           The leading dimension of GIVCOL; it must be at least N.
+
+ GIVNUM (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+ Each number indicates the C or S value used in the
+ corresponding Givens rotation.
+
+ LDGNUM (input) INTEGER
+           The leading dimension of the arrays DIFR, POLES and
+           GIVNUM; it must be at least K.
+
+ POLES (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+ On entry, POLES(1:K, 1) contains the new singular
+ values obtained from solving the secular equation, and
+ POLES(1:K, 2) is an array containing the poles in the secular
+ equation.
+
+ DIFL (input) DOUBLE PRECISION array, dimension ( K ).
+         On entry, DIFL(I) is the distance between the I-th updated
+ (undeflated) singular value and the I-th (undeflated) old
+ singular value.
+
+ DIFR (input) DOUBLE PRECISION array, dimension ( LDGNUM, 2 ).
+         On entry, DIFR(I, 1) contains the distance between the I-th
+         updated (undeflated) singular value and the (I+1)-th
+         (undeflated) old singular value, and DIFR(I, 2) is the
+ normalizing factor for the I-th right singular vector.
+
+ Z (input) DOUBLE PRECISION array, dimension ( K )
+         Contains the components of the deflation-adjusted updating row
+ vector.
+
+ K (input) INTEGER
+         Contains the dimension of the non-deflated matrix.
+         This is the order of the related secular equation. 1 <= K <= N.
+
+ C (input) DOUBLE PRECISION
+         C contains garbage if SQRE = 0, and the C-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ S (input) DOUBLE PRECISION
+         S contains garbage if SQRE = 0, and the S-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension ( K )
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ difr_dim1 = *ldgnum;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ --difl;
+ --z__;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -4;
+ }
+
+ n = *nl + *nr + 1;
+
+ if (*nrhs < 1) {
+ *info = -5;
+ } else if (*ldb < n) {
+ *info = -7;
+ } else if (*ldbx < n) {
+ *info = -9;
+ } else if (*givptr < 0) {
+ *info = -11;
+ } else if (*ldgcol < n) {
+ *info = -13;
+ } else if (*ldgnum < n) {
+ *info = -15;
+ } else if (*k < 1) {
+ *info = -20;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLALS0", &i__1);
+ return 0;
+ }
+
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+
+ if (*icompq == 0) {
+
+/*
+ Apply back orthogonal transformations from the left.
+
+ Step (1L): apply back the Givens rotations performed.
+*/
+
+ i__1 = *givptr;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ drot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
+ ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
+ i__ + ((givnum_dim1) << (1))], &givnum[i__ + givnum_dim1])
+ ;
+/* L10: */
+ }
+
+/* Step (2L): permute rows of B. */
+
+ dcopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dcopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1],
+ ldbx);
+/* L20: */
+ }
+
+/*
+ Step (3L): apply the inverse of the left singular vector
+ matrix to BX.
+*/
+
+ if (*k == 1) {
+ dcopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
+ if (z__[1] < 0.) {
+ dscal_(nrhs, &c_b3001, &b[b_offset], ldb);
+ }
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = poles[j + poles_dim1];
+ dsigj = -poles[j + ((poles_dim1) << (1))];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -poles[j + 1 + ((poles_dim1) << (1))];
+ }
+ if ((z__[j] == 0.) || (poles[j + ((poles_dim1) << (1))] == 0.)
+ ) {
+ work[j] = 0.;
+ } else {
+ work[j] = -poles[j + ((poles_dim1) << (1))] * z__[j] /
+ diflj / (poles[j + ((poles_dim1) << (1))] + dj);
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if ((z__[i__] == 0.) || (poles[i__ + ((poles_dim1) << (1))
+ ] == 0.)) {
+ work[i__] = 0.;
+ } else {
+ work[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
+ i__] / (dlamc3_(&poles[i__ + ((poles_dim1) <<
+ (1))], &dsigj) - diflj) / (poles[i__ + ((
+ poles_dim1) << (1))] + dj);
+ }
+/* L30: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if ((z__[i__] == 0.) || (poles[i__ + ((poles_dim1) << (1))
+ ] == 0.)) {
+ work[i__] = 0.;
+ } else {
+ work[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
+ i__] / (dlamc3_(&poles[i__ + ((poles_dim1) <<
+ (1))], &dsigjp) + difrj) / (poles[i__ + ((
+ poles_dim1) << (1))] + dj);
+ }
+/* L40: */
+ }
+ work[1] = -1.;
+ temp = dnrm2_(k, &work[1], &c__1);
+ dgemv_("T", k, nrhs, &c_b2865, &bx[bx_offset], ldbx, &work[1],
+ &c__1, &c_b2879, &b[j + b_dim1], ldb);
+ dlascl_("G", &c__0, &c__0, &temp, &c_b2865, &c__1, nrhs, &b[j
+ + b_dim1], ldb, info);
+/* L50: */
+ }
+ }
+
+/* Move the deflated rows of BX to B also. */
+
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ dlacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1
+ + b_dim1], ldb);
+ }
+ } else {
+
+/*
+ Apply back the right orthogonal transformations.
+
+ Step (1R): apply back the new right singular vector matrix
+ to B.
+*/
+
+ if (*k == 1) {
+ dcopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dsigj = poles[j + ((poles_dim1) << (1))];
+ if (z__[j] == 0.) {
+ work[j] = 0.;
+ } else {
+ work[j] = -z__[j] / difl[j] / (dsigj + poles[j +
+ poles_dim1]) / difr[j + ((difr_dim1) << (1))];
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.) {
+ work[i__] = 0.;
+ } else {
+ d__1 = -poles[i__ + 1 + ((poles_dim1) << (1))];
+ work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difr[
+ i__ + difr_dim1]) / (dsigj + poles[i__ +
+ poles_dim1]) / difr[i__ + ((difr_dim1) << (1))
+ ];
+ }
+/* L60: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.) {
+ work[i__] = 0.;
+ } else {
+ d__1 = -poles[i__ + ((poles_dim1) << (1))];
+ work[i__] = z__[j] / (dlamc3_(&dsigj, &d__1) - difl[
+ i__]) / (dsigj + poles[i__ + poles_dim1]) /
+ difr[i__ + ((difr_dim1) << (1))];
+ }
+/* L70: */
+ }
+ dgemv_("T", k, nrhs, &c_b2865, &b[b_offset], ldb, &work[1], &
+ c__1, &c_b2879, &bx[j + bx_dim1], ldbx);
+/* L80: */
+ }
+ }
+
+/*
+ Step (2R): if SQRE = 1, apply back the rotation that is
+ related to the right null space of the subproblem.
+*/
+
+ if (*sqre == 1) {
+ dcopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
+ drot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__,
+ s);
+ }
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ dlacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 +
+ bx_dim1], ldbx);
+ }
+
+/* Step (3R): permute rows of B. */
+
+ dcopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
+ if (*sqre == 1) {
+ dcopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
+ }
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dcopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1],
+ ldb);
+/* L90: */
+ }
+
+/* Step (4R): apply back the Givens rotations performed. */
+
+ for (i__ = *givptr; i__ >= 1; --i__) {
+ d__1 = -givnum[i__ + givnum_dim1];
+ drot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
+ ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
+ i__ + ((givnum_dim1) << (1))], &d__1);
+/* L100: */
+ }
+ }
+
+ return 0;
+
+/* End of DLALS0 */
+
+} /* dlals0_ */
+
+/* Subroutine */ int dlalsa_(integer *icompq, integer *smlsiz, integer *n,
+ integer *nrhs, doublereal *b, integer *ldb, doublereal *bx, integer *
+ ldbx, doublereal *u, integer *ldu, doublereal *vt, integer *k,
+ doublereal *difl, doublereal *difr, doublereal *z__, doublereal *
+ poles, integer *givptr, integer *givcol, integer *ldgcol, integer *
+ perm, doublereal *givnum, doublereal *c__, doublereal *s, doublereal *
+ work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, b_dim1,
+ b_offset, bx_dim1, bx_offset, difl_dim1, difl_offset, difr_dim1,
+ difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset,
+ u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1,
+ i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl,
+ ndb1, nlp1, lvl2, nrp1, nlvl, sqre;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ static integer inode, ndiml, ndimr;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlals0_(integer *, integer *, integer *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ integer *, integer *, integer *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *), dlasdt_(integer *, integer *, integer *, integer *,
+ integer *, integer *, integer *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+    DLALSA is an intermediate step in solving the least squares problem
+    by computing the SVD of the coefficient matrix in compact form (the
+    singular vectors are computed as products of simple orthogonal
+    matrices).
+
+ If ICOMPQ = 0, DLALSA applies the inverse of the left singular vector
+ matrix of an upper bidiagonal matrix to the right hand side; and if
+ ICOMPQ = 1, DLALSA applies the right singular vector matrix to the
+ right hand side. The singular vector matrices were generated in
+    compact form by DLASDA.
+
+ Arguments
+ =========
+
+
+ ICOMPQ (input) INTEGER
+ Specifies whether the left or the right singular vector
+ matrix is involved.
+ = 0: Left singular vector matrix
+ = 1: Right singular vector matrix
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The row and column dimensions of the upper bidiagonal matrix.
+
+ NRHS (input) INTEGER
+ The number of columns of B and BX. NRHS must be at least 1.
+
+    B      (input/output) DOUBLE PRECISION array, dimension ( LDB, NRHS )
+ On input, B contains the right hand sides of the least
+ squares problem in rows 1 through M. On output, B contains
+ the solution X in rows 1 through N.
+
+ LDB (input) INTEGER
+ The leading dimension of B in the calling subprogram.
+ LDB must be at least max(1,MAX( M, N ) ).
+
+ BX (output) DOUBLE PRECISION array, dimension ( LDBX, NRHS )
+ On exit, the result of applying the left or right singular
+ vector matrix to B.
+
+ LDBX (input) INTEGER
+ The leading dimension of BX.
+
+ U (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ ).
+ On entry, U contains the left singular vector matrices of all
+ subproblems at the bottom level.
+
+    LDU    (input) INTEGER, LDU >= N.
+ The leading dimension of arrays U, VT, DIFL, DIFR,
+ POLES, GIVNUM, and Z.
+
+ VT (input) DOUBLE PRECISION array, dimension ( LDU, SMLSIZ+1 ).
+ On entry, VT' contains the right singular vector matrices of
+ all subproblems at the bottom level.
+
+ K (input) INTEGER array, dimension ( N ).
+
+ DIFL (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ).
+ where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1.
+
+ DIFR (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
+ On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record
+ distances between singular values on the I-th level and
+           singular values on the (I-1)-th level, and DIFR(*, 2 * I)
+           records the normalizing factors of the right singular vector
+           matrices of subproblems on the I-th level.
+
+ Z (input) DOUBLE PRECISION array, dimension ( LDU, NLVL ).
+ On entry, Z(1, I) contains the components of the deflation-
+ adjusted updating row vector for subproblems on the I-th
+ level.
+
+ POLES (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
+ On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old
+ singular values involved in the secular equations on the I-th
+ level.
+
+ GIVPTR (input) INTEGER array, dimension ( N ).
+ On entry, GIVPTR( I ) records the number of Givens
+ rotations performed on the I-th problem on the computation
+ tree.
+
+ GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ).
+ On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the
+ locations of Givens rotations performed on the I-th level on
+ the computation tree.
+
+    LDGCOL (input) INTEGER, LDGCOL >= N.
+ The leading dimension of arrays GIVCOL and PERM.
+
+ PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ).
+ On entry, PERM(*, I) records permutations done on the I-th
+ level of the computation tree.
+
+ GIVNUM (input) DOUBLE PRECISION array, dimension ( LDU, 2 * NLVL ).
+ On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S-
+ values of Givens rotations performed on the I-th level on the
+ computation tree.
+
+ C (input) DOUBLE PRECISION array, dimension ( N ).
+ On entry, if the I-th subproblem is not square,
+ C( I ) contains the C-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ S (input) DOUBLE PRECISION array, dimension ( N ).
+ On entry, if the I-th subproblem is not square,
+ S( I ) contains the S-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ WORK (workspace) DOUBLE PRECISION array.
+ The dimension must be at least N.
+
+ IWORK (workspace) INTEGER array.
+           The dimension must be at least 3 * N.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < *smlsiz) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < *n) {
+ *info = -6;
+ } else if (*ldbx < *n) {
+ *info = -8;
+ } else if (*ldu < *n) {
+ *info = -10;
+ } else if (*ldgcol < *n) {
+ *info = -19;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLALSA", &i__1);
+ return 0;
+ }
+
+/* Book-keeping and setting up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+
+ dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/*
+ The following code applies back the left singular vector factors.
+ For applying back the right singular vector factors, go to 50.
+*/
+
+ if (*icompq == 1) {
+ goto L50;
+ }
+
+/*
+ The nodes on the bottom level of the tree were solved
+ by DLASDQ. The corresponding left and right singular vector
+ matrices are in explicit form. First apply back the left
+ singular vector matrices.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+ IC : center row of each node
+ NL : number of rows of left subproblem
+ NR : number of rows of right subproblem
+ NLF: starting row of the left subproblem
+ NRF: starting row of the right subproblem
+*/
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ dgemm_("T", "N", &nl, nrhs, &nl, &c_b2865, &u[nlf + u_dim1], ldu, &b[
+ nlf + b_dim1], ldb, &c_b2879, &bx[nlf + bx_dim1], ldbx);
+ dgemm_("T", "N", &nr, nrhs, &nr, &c_b2865, &u[nrf + u_dim1], ldu, &b[
+ nrf + b_dim1], ldb, &c_b2879, &bx[nrf + bx_dim1], ldbx);
+/* L10: */
+ }
+
+/*
+ Next copy the rows of B that correspond to unchanged rows
+ in the bidiagonal matrix to BX.
+*/
+
+ i__1 = nd;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ic = iwork[inode + i__ - 1];
+ dcopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
+/* L20: */
+ }
+
+/*
+ Finally go through the left singular vector matrices of all
+ the other subproblems bottom-up on the tree.
+*/
+
+ j = pow_ii(&c__2, &nlvl);
+ sqre = 0;
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = ((lvl) << (1)) - 1;
+
+/*
+ find the first node LF and last node LL on
+ the current level LVL
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = ((lf) << (1)) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ --j;
+ dlals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
+ b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &work[1], info);
+/* L30: */
+ }
+/* L40: */
+ }
+ goto L90;
+
+/* ICOMPQ = 1: applying back the right singular vector factors. */
+
+L50:
+
+/*
+    Now go through the right singular vector matrices of all
+ the tree nodes top-down.
+*/
+
+ j = 0;
+ i__1 = nlvl;
+ for (lvl = 1; lvl <= i__1; ++lvl) {
+ lvl2 = ((lvl) << (1)) - 1;
+
+/*
+ Find the first node LF and last node LL on
+ the current level LVL.
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__2 = lvl - 1;
+ lf = pow_ii(&c__2, &i__2);
+ ll = ((lf) << (1)) - 1;
+ }
+ i__2 = lf;
+ for (i__ = ll; i__ >= i__2; --i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqre = 0;
+ } else {
+ sqre = 1;
+ }
+ ++j;
+ dlals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
+ nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &work[1], info);
+/* L60: */
+ }
+/* L70: */
+ }
+
+/*
+ The nodes on the bottom level of the tree were solved
+ by DLASDQ. The corresponding right singular vector
+ matrices are in explicit form. Apply them back.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlp1 = nl + 1;
+ if (i__ == nd) {
+ nrp1 = nr;
+ } else {
+ nrp1 = nr + 1;
+ }
+ nlf = ic - nl;
+ nrf = ic + 1;
+ dgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b2865, &vt[nlf + vt_dim1],
+ ldu, &b[nlf + b_dim1], ldb, &c_b2879, &bx[nlf + bx_dim1],
+ ldbx);
+ dgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b2865, &vt[nrf + vt_dim1],
+ ldu, &b[nrf + b_dim1], ldb, &c_b2879, &bx[nrf + bx_dim1],
+ ldbx);
+/* L80: */
+ }
+
+L90:
+
+ return 0;
+
+/* End of DLALSA */
+
+} /* dlalsa_ */
+
+/* Subroutine */ int dlalsd_(char *uplo, integer *smlsiz, integer *n, integer
+ *nrhs, doublereal *d__, doublereal *e, doublereal *b, integer *ldb,
+ doublereal *rcond, integer *rank, doublereal *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double log(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer c__, i__, j, k;
+ static doublereal r__;
+ static integer s, u, z__;
+ static doublereal cs;
+ static integer bx;
+ static doublereal sn;
+ static integer st, vt, nm1, st1;
+ static doublereal eps;
+ static integer iwk;
+ static doublereal tol;
+ static integer difl, difr, perm, nsub;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ static integer nlvl, sqre, bxst;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *),
+ dcopy_(integer *, doublereal *, integer *, doublereal *, integer
+ *);
+ static integer poles, sizei, nsize, nwork, icmpq1, icmpq2;
+
+ extern /* Subroutine */ int dlasda_(integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *), dlalsa_(integer *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *), dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer
+ *, integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *), dlacpy_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), dlartg_(doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *), dlaset_(char *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ static integer givcol;
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+ integer *);
+ static doublereal orgnrm;
+ static integer givnum, givptr, smlszp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DLALSD uses the singular value decomposition of A to solve the least
+ squares problem of finding X to minimize the Euclidean norm of each
+ column of A*X-B, where A is N-by-N upper bidiagonal, and X and B
+ are N-by-NRHS. The solution X overwrites B.
+
+ The singular values of A smaller than RCOND times the largest
+ singular value are treated as zero in solving the least squares
+ problem; in this case a minimum norm solution is returned.
+ The actual singular values are returned in D in ascending order.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': D and E define an upper bidiagonal matrix.
+ = 'L': D and E define a lower bidiagonal matrix.
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The dimension of the bidiagonal matrix. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of columns of B. NRHS must be at least 1.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry D contains the main diagonal of the bidiagonal
+ matrix. On exit, if INFO = 0, D contains its singular values.
+
+    E      (input/output) DOUBLE PRECISION array, dimension (N-1)
+ Contains the super-diagonal entries of the bidiagonal matrix.
+ On exit, E has been destroyed.
+
+ B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
+ On input, B contains the right hand sides of the least
+ squares problem. On output, B contains the solution X.
+
+ LDB (input) INTEGER
+ The leading dimension of B in the calling subprogram.
+ LDB must be at least max(1,N).
+
+ RCOND (input) DOUBLE PRECISION
+ The singular values of A less than or equal to RCOND times
+ the largest singular value are treated as zero in solving
+ the least squares problem. If RCOND is negative,
+ machine precision is used instead.
+ For example, if diag(S)*X=B were the least squares problem,
+ where diag(S) is a diagonal matrix of singular values, the
+ solution would be X(i) = B(i) / S(i) if S(i) is greater than
+ RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to
+ RCOND*max(S).
+
+ RANK (output) INTEGER
+ The number of singular values of A greater than RCOND times
+ the largest singular value.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension at least
+ (9*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2),
+ where NLVL = max(0, INT(log_2 (N/(SMLSIZ+1))) + 1).
+
+ IWORK (workspace) INTEGER array, dimension at least
+ (3*N*NLVL + 11*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+             > 0:  The algorithm failed to compute a singular value while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through MOD(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if ((*ldb < 1) || (*ldb < *n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLALSD", &i__1);
+ return 0;
+ }
+
+ eps = EPSILON;
+
+/* Set up the tolerance. */
+
+ if ((*rcond <= 0.) || (*rcond >= 1.)) {
+ *rcond = eps;
+ }
+
+ *rank = 0;
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+ if (d__[1] == 0.) {
+ dlaset_("A", &c__1, nrhs, &c_b2879, &c_b2879, &b[b_offset], ldb);
+ } else {
+ *rank = 1;
+ dlascl_("G", &c__0, &c__0, &d__[1], &c_b2865, &c__1, nrhs, &b[
+ b_offset], ldb, info);
+ d__[1] = abs(d__[1]);
+ }
+ return 0;
+ }
+
+/* Rotate the matrix if it is lower bidiagonal. */
+
+ if (*(unsigned char *)uplo == 'L') {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (*nrhs == 1) {
+ drot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
+ c__1, &cs, &sn);
+ } else {
+ work[((i__) << (1)) - 1] = cs;
+ work[i__ * 2] = sn;
+ }
+/* L10: */
+ }
+ if (*nrhs > 1) {
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - 1;
+ for (j = 1; j <= i__2; ++j) {
+ cs = work[((j) << (1)) - 1];
+ sn = work[j * 2];
+ drot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ *
+ b_dim1], &c__1, &cs, &sn);
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Scale. */
+
+ nm1 = *n - 1;
+ orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.) {
+ dlaset_("A", n, nrhs, &c_b2879, &c_b2879, &b[b_offset], ldb);
+ return 0;
+ }
+
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b2865, n, &c__1, &d__[1], n, info);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b2865, &nm1, &c__1, &e[1], &nm1,
+ info);
+
+/*
+ If N is smaller than the minimum divide size SMLSIZ, then solve
+ the problem with another solver.
+*/
+
+ if (*n <= *smlsiz) {
+ nwork = *n * *n + 1;
+ dlaset_("A", n, n, &c_b2879, &c_b2865, &work[1], n);
+ dlasdq_("U", &c__0, n, n, &c__0, nrhs, &d__[1], &e[1], &work[1], n, &
+ work[1], n, &b[b_offset], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= tol) {
+ dlaset_("A", &c__1, nrhs, &c_b2879, &c_b2879, &b[i__ + b_dim1]
+ , ldb);
+ } else {
+ dlascl_("G", &c__0, &c__0, &d__[i__], &c_b2865, &c__1, nrhs, &
+ b[i__ + b_dim1], ldb, info);
+ ++(*rank);
+ }
+/* L40: */
+ }
+ dgemm_("T", "N", n, nrhs, n, &c_b2865, &work[1], n, &b[b_offset], ldb,
+ &c_b2879, &work[nwork], n);
+ dlacpy_("A", n, nrhs, &work[nwork], n, &b[b_offset], ldb);
+
+/* Unscale. */
+
+ dlascl_("G", &c__0, &c__0, &c_b2865, &orgnrm, n, &c__1, &d__[1], n,
+ info);
+ dlasrt_("D", n, &d__[1], info);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b2865, n, nrhs, &b[b_offset],
+ ldb, info);
+
+ return 0;
+ }
+
+/* Book-keeping and setting up some constants. */
+
+ nlvl = (integer) (log((doublereal) (*n) / (doublereal) (*smlsiz + 1)) /
+ log(2.)) + 1;
+
+ smlszp = *smlsiz + 1;
+
+ u = 1;
+ vt = *smlsiz * *n + 1;
+ difl = vt + smlszp * *n;
+ difr = difl + nlvl * *n;
+ z__ = difr + ((nlvl * *n) << (1));
+ c__ = z__ + nlvl * *n;
+ s = c__ + *n;
+ poles = s + *n;
+ givnum = poles + ((nlvl) << (1)) * *n;
+ bx = givnum + ((nlvl) << (1)) * *n;
+ nwork = bx + *n * *nrhs;
+
+ sizei = *n + 1;
+ k = sizei + *n;
+ givptr = k + *n;
+ perm = givptr + *n;
+ givcol = perm + nlvl * *n;
+ iwk = givcol + ((nlvl * *n) << (1));
+
+ st = 1;
+ sqre = 0;
+ icmpq1 = 1;
+ icmpq2 = 0;
+ nsub = 0;
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = d__[i__], abs(d__1)) < eps) {
+ d__[i__] = d_sign(&eps, &d__[i__]);
+ }
+/* L50: */
+ }
+
+ i__1 = nm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (((d__1 = e[i__], abs(d__1)) < eps) || (i__ == nm1)) {
+ ++nsub;
+ iwork[nsub] = st;
+
+/*
+ Subproblem found. First determine its size and then
+ apply divide and conquer on it.
+*/
+
+ if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else if ((d__1 = e[i__], abs(d__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+ nsize = *n - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else {
+
+/*
+                    A subproblem with E(NM1) small. This implies a
+                    1-by-1 subproblem at D(N), which is not solved
+ explicitly.
+*/
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ ++nsub;
+ iwork[nsub] = *n;
+ iwork[sizei + nsub - 1] = 1;
+ dcopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
+ }
+ st1 = st - 1;
+ if (nsize == 1) {
+
+/*
+ This is a 1-by-1 subproblem and is not solved
+ explicitly.
+*/
+
+ dcopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
+ } else if (nsize <= *smlsiz) {
+
+/* This is a small subproblem and is solved by DLASDQ. */
+
+ dlaset_("A", &nsize, &nsize, &c_b2879, &c_b2865, &work[vt +
+ st1], n);
+ dlasdq_("U", &c__0, &nsize, &nsize, &c__0, nrhs, &d__[st], &e[
+ st], &work[vt + st1], n, &work[nwork], n, &b[st +
+ b_dim1], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ dlacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
+ st1], n);
+ } else {
+
+/* A large problem. Solve it using divide and conquer. */
+
+ dlasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
+ work[u + st1], n, &work[vt + st1], &iwork[k + st1], &
+ work[difl + st1], &work[difr + st1], &work[z__ + st1],
+ &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum +
+ st1], &work[c__ + st1], &work[s + st1], &work[nwork],
+ &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ bxst = bx + st1;
+ dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
+ work[bxst], n, &work[u + st1], n, &work[vt + st1], &
+ iwork[k + st1], &work[difl + st1], &work[difr + st1],
+ &work[z__ + st1], &work[poles + st1], &iwork[givptr +
+ st1], &iwork[givcol + st1], n, &iwork[perm + st1], &
+ work[givnum + st1], &work[c__ + st1], &work[s + st1],
+ &work[nwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+ st = i__ + 1;
+ }
+/* L60: */
+ }
+
+/* Apply the singular values and treat the tiny ones as zero. */
+
+ tol = *rcond * (d__1 = d__[idamax_(n, &d__[1], &c__1)], abs(d__1));
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+ Some of the elements in D can be negative because 1-by-1
+ subproblems were not solved explicitly.
+*/
+
+ if ((d__1 = d__[i__], abs(d__1)) <= tol) {
+ dlaset_("A", &c__1, nrhs, &c_b2879, &c_b2879, &work[bx + i__ - 1],
+ n);
+ } else {
+ ++(*rank);
+ dlascl_("G", &c__0, &c__0, &d__[i__], &c_b2865, &c__1, nrhs, &
+ work[bx + i__ - 1], n, info);
+ }
+ d__[i__] = (d__1 = d__[i__], abs(d__1));
+/* L70: */
+ }
+
+/* Now apply back the right singular vectors. */
+
+ icmpq2 = 1;
+ i__1 = nsub;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ st = iwork[i__];
+ st1 = st - 1;
+ nsize = iwork[sizei + i__ - 1];
+ bxst = bx + st1;
+ if (nsize == 1) {
+ dcopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
+ } else if (nsize <= *smlsiz) {
+ dgemm_("T", "N", &nsize, nrhs, &nsize, &c_b2865, &work[vt + st1],
+ n, &work[bxst], n, &c_b2879, &b[st + b_dim1], ldb);
+ } else {
+ dlalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
+ b_dim1], ldb, &work[u + st1], n, &work[vt + st1], &iwork[
+ k + st1], &work[difl + st1], &work[difr + st1], &work[z__
+ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum + st1],
+ &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[
+ iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+/* L80: */
+ }
+
+/* Unscale and sort the singular values. */
+
+ dlascl_("G", &c__0, &c__0, &c_b2865, &orgnrm, n, &c__1, &d__[1], n, info);
+ dlasrt_("D", n, &d__[1], info);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b2865, n, nrhs, &b[b_offset], ldb,
+ info);
+
+ return 0;
+
+/* End of DLALSD */
+
+} /* dlalsd_ */
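+
+/*
+   Illustrative sketch, not part of the f2c-generated translation: the
+   RCOND thresholding described above, applied to the diagonal problem
+   diag(S)*X = B from DLALSD's own documentation.  Singular values at or
+   below rcond times the largest one are treated as zero, which yields
+   the minimum norm solution; the return value plays the role of RANK.
+   The helper name diag_lstsq is hypothetical.
+*/
+#include <math.h>
+
+static int diag_lstsq(int n, const double *s, const double *b,
+                      double rcond, double *x)
+{
+    int i, rank = 0;
+    double smax = 0.;
+
+    for (i = 0; i < n; ++i) {          /* largest singular value */
+        if (fabs(s[i]) > smax) {
+            smax = fabs(s[i]);
+        }
+    }
+    for (i = 0; i < n; ++i) {
+        if (fabs(s[i]) > rcond * smax) {
+            x[i] = b[i] / s[i];        /* kept singular value */
+            ++rank;
+        } else {
+            x[i] = 0.;                 /* treated as exactly zero */
+        }
+    }
+    return rank;
+}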
+
+/* Subroutine */ int dlamrg_(integer *n1, integer *n2, doublereal *a, integer
+ *dtrd1, integer *dtrd2, integer *index)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, ind1, ind2, n1sv, n2sv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DLAMRG will create a permutation list which will merge the elements
+ of A (which is composed of two independently sorted sets) into a
+ single set which is sorted in ascending order.
+
+ Arguments
+ =========
+
+ N1 (input) INTEGER
+ N2 (input) INTEGER
+            These arguments contain the respective lengths of the two
+ sorted lists to be merged.
+
+ A (input) DOUBLE PRECISION array, dimension (N1+N2)
+ The first N1 elements of A contain a list of numbers which
+ are sorted in either ascending or descending order. Likewise
+ for the final N2 elements.
+
+ DTRD1 (input) INTEGER
+ DTRD2 (input) INTEGER
+ These are the strides to be taken through the array A.
+ Allowable strides are 1 and -1. They indicate whether a
+ subset of A is sorted in ascending (DTRDx = 1) or descending
+ (DTRDx = -1) order.
+
+ INDEX (output) INTEGER array, dimension (N1+N2)
+ On exit this array will contain a permutation such that
+ if B( I ) = A( INDEX( I ) ) for I=1,N1+N2, then B will be
+ sorted in ascending order.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --index;
+ --a;
+
+ /* Function Body */
+ n1sv = *n1;
+ n2sv = *n2;
+ if (*dtrd1 > 0) {
+ ind1 = 1;
+ } else {
+ ind1 = *n1;
+ }
+ if (*dtrd2 > 0) {
+ ind2 = *n1 + 1;
+ } else {
+ ind2 = *n1 + *n2;
+ }
+ i__ = 1;
+/* while ( (N1SV > 0) & (N2SV > 0) ) */
+L10:
+ if (n1sv > 0 && n2sv > 0) {
+ if (a[ind1] <= a[ind2]) {
+ index[i__] = ind1;
+ ++i__;
+ ind1 += *dtrd1;
+ --n1sv;
+ } else {
+ index[i__] = ind2;
+ ++i__;
+ ind2 += *dtrd2;
+ --n2sv;
+ }
+ goto L10;
+ }
+/* end while */
+ if (n1sv == 0) {
+ i__1 = n2sv;
+ for (n1sv = 1; n1sv <= i__1; ++n1sv) {
+ index[i__] = ind2;
+ ++i__;
+ ind2 += *dtrd2;
+/* L20: */
+ }
+ } else {
+/* N2SV .EQ. 0 */
+ i__1 = n1sv;
+ for (n2sv = 1; n2sv <= i__1; ++n2sv) {
+ index[i__] = ind1;
+ ++i__;
+ ind1 += *dtrd1;
+/* L30: */
+ }
+ }
+
+ return 0;
+
+/* End of DLAMRG */
+
+} /* dlamrg_ */
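+
+/*
+   Illustrative sketch, not part of the f2c-generated translation: the
+   merge permutation DLAMRG computes, simplified to the common case
+   where both sublists are already ascending (DTRD1 = DTRD2 = 1) and
+   indices are 0-based.  After the call, a[index[i]] visits the n1 + n2
+   elements in ascending order.  The helper name merge_perm is
+   hypothetical.
+*/
+static void merge_perm(int n1, int n2, const double *a, int *index)
+{
+    int i = 0;
+    int i1 = 0;                        /* cursor into the first sublist */
+    int i2 = n1;                       /* cursor into the second sublist */
+
+    while (i1 < n1 && i2 < n1 + n2) {
+        index[i++] = (a[i1] <= a[i2]) ? i1++ : i2++;
+    }
+    while (i1 < n1) {                  /* drain the first sublist */
+        index[i++] = i1++;
+    }
+    while (i2 < n1 + n2) {             /* drain the second sublist */
+        index[i++] = i2++;
+    }
+}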
+
+doublereal dlange_(char *norm, integer *m, integer *n, doublereal *a, integer
+ *lda, doublereal *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j;
+ static doublereal sum, scale;
+ extern logical lsame_(char *, char *);
+ static doublereal value;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLANGE returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ real matrix A.
+
+ Description
+ ===========
+
+ DLANGE returns the value
+
+ DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in DLANGE as described
+ above.
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0. When M = 0,
+ DLANGE is set to zero.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0. When N = 0,
+ DLANGE is set to zero.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,N)
+ The m by n matrix A.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(M,1).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (LWORK),
+ where LWORK >= M when NORM = 'I'; otherwise, WORK is not
+ referenced.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (min(*m,*n) == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+ value = max(d__2,d__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if ((lsame_(norm, "O")) || (*(unsigned char *
+ )norm == '1')) {
+
+/* Find norm1(A). */
+
+ value = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L30: */
+ }
+ value = max(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L80: */
+ }
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ dlassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of DLANGE */
+
+} /* dlange_ */
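+
+/*
+   Illustrative sketch, not part of the f2c-generated translation: the
+   one norm (NORM = 'O') and infinity norm (NORM = 'I') cases of DLANGE,
+   for a 0-based column-major matrix with leading dimension lda.  The
+   one norm is the maximum column sum; the infinity norm is the maximum
+   row sum, which DLANGE accumulates row-by-row in WORK.  The helper
+   names are hypothetical.
+*/
+#include <math.h>
+
+static double mat_norm1(int m, int n, const double *a, int lda)
+{
+    int i, j;
+    double value = 0.;
+
+    for (j = 0; j < n; ++j) {
+        double sum = 0.;
+        for (i = 0; i < m; ++i) {
+            sum += fabs(a[i + j * lda]);   /* sum of column j */
+        }
+        if (sum > value) {
+            value = sum;
+        }
+    }
+    return value;
+}
+
+/* The infinity norm only swaps the roles of rows and columns: */
+static double mat_normi(int m, int n, const double *a, int lda)
+{
+    int i, j;
+    double value = 0.;
+
+    for (i = 0; i < m; ++i) {
+        double sum = 0.;
+        for (j = 0; j < n; ++j) {
+            sum += fabs(a[i + j * lda]);   /* sum of row i */
+        }
+        if (sum > value) {
+            value = sum;
+        }
+    }
+    return value;
+}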
+
+doublereal dlanhs_(char *norm, integer *n, doublereal *a, integer *lda,
+ doublereal *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j;
+ static doublereal sum, scale;
+ extern logical lsame_(char *, char *);
+ static doublereal value;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLANHS returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ Hessenberg matrix A.
+
+ Description
+ ===========
+
+ DLANHS returns the value
+
+ DLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in DLANHS as described
+ above.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0. When N = 0, DLANHS is
+ set to zero.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,N)
+ The n by n upper Hessenberg matrix A; the part of A below the
+ first sub-diagonal is not referenced.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(N,1).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (LWORK),
+ where LWORK >= N when NORM = 'I'; otherwise, WORK is not
+ referenced.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+ value = max(d__2,d__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if ((lsame_(norm, "O")) || (*(unsigned char *
+ )norm == '1')) {
+
+/* Find norm1(A). */
+
+ value = 0.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L30: */
+ }
+ value = max(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += (d__1 = a[i__ + j * a_dim1], abs(d__1));
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L80: */
+ }
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of DLANHS */
+
+} /* dlanhs_ */
+
+doublereal dlanst_(char *norm, integer *n, doublereal *d__, doublereal *e)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal ret_val, d__1, d__2, d__3, d__4, d__5;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__;
+ static doublereal sum, scale;
+ extern logical lsame_(char *, char *);
+ static doublereal anorm;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLANST returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ real symmetric tridiagonal matrix A.
+
+ Description
+ ===========
+
+ DLANST returns the value
+
+ DLANST = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in DLANST as described
+ above.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0. When N = 0, DLANST is
+ set to zero.
+
+ D (input) DOUBLE PRECISION array, dimension (N)
+ The diagonal elements of A.
+
+ E (input) DOUBLE PRECISION array, dimension (N-1)
+ The (n-1) sub-diagonal or super-diagonal elements of A.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --e;
+ --d__;
+
+ /* Function Body */
+ if (*n <= 0) {
+ anorm = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ anorm = (d__1 = d__[*n], abs(d__1));
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__2 = anorm, d__3 = (d__1 = d__[i__], abs(d__1));
+ anorm = max(d__2,d__3);
+/* Computing MAX */
+ d__2 = anorm, d__3 = (d__1 = e[i__], abs(d__1));
+ anorm = max(d__2,d__3);
+/* L10: */
+ }
+ } else if (((lsame_(norm, "O")) || (*(unsigned char
+ *)norm == '1')) || (lsame_(norm, "I"))) {
+
+/* Find norm1(A). */
+
+ if (*n == 1) {
+ anorm = abs(d__[1]);
+ } else {
+/* Computing MAX */
+ d__3 = abs(d__[1]) + abs(e[1]), d__4 = (d__1 = e[*n - 1], abs(
+ d__1)) + (d__2 = d__[*n], abs(d__2));
+ anorm = max(d__3,d__4);
+ i__1 = *n - 1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__4 = anorm, d__5 = (d__1 = d__[i__], abs(d__1)) + (d__2 = e[
+ i__], abs(d__2)) + (d__3 = e[i__ - 1], abs(d__3));
+ anorm = max(d__4,d__5);
+/* L20: */
+ }
+ }
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ if (*n > 1) {
+ i__1 = *n - 1;
+ dlassq_(&i__1, &e[1], &c__1, &scale, &sum);
+ sum *= 2;
+ }
+ dlassq_(n, &d__[1], &c__1, &scale, &sum);
+ anorm = scale * sqrt(sum);
+ }
+
+ ret_val = anorm;
+ return ret_val;
+
+/* End of DLANST */
+
+} /* dlanst_ */
+
+doublereal dlansy_(char *norm, char *uplo, integer *n, doublereal *a, integer
+ *lda, doublereal *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j;
+ static doublereal sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ static doublereal value;
+ extern /* Subroutine */ int dlassq_(integer *, doublereal *, integer *,
+ doublereal *, doublereal *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLANSY returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ real symmetric matrix A.
+
+ Description
+ ===========
+
+ DLANSY returns the value
+
+ DLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in DLANSY as described
+ above.
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the upper or lower triangular part of the
+ symmetric matrix A is to be referenced.
+ = 'U': Upper triangular part of A is referenced
+ = 'L': Lower triangular part of A is referenced
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0. When N = 0, DLANSY is
+ set to zero.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,N)
+ The symmetric matrix A. If UPLO = 'U', the leading n by n
+ upper triangular part of A contains the upper triangular part
+ of the matrix A, and the strictly lower triangular part of A
+ is not referenced. If UPLO = 'L', the leading n by n lower
+ triangular part of A contains the lower triangular part of
+ the matrix A, and the strictly upper triangular part of A is
+ not referenced.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(N,1).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (LWORK),
+ where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise,
+ WORK is not referenced.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
+ d__1));
+ value = max(d__2,d__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ d__2 = value, d__3 = (d__1 = a[i__ + j * a_dim1], abs(
+ d__1));
+ value = max(d__2,d__3);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (((lsame_(norm, "I")) || (lsame_(norm,
+ "O"))) || (*(unsigned char *)norm == '1')) {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ value = 0.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ absa = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+ sum += absa;
+ work[i__] += absa;
+/* L50: */
+ }
+ work[j] = sum + (d__1 = a[j + j * a_dim1], abs(d__1));
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = value, d__2 = work[i__];
+ value = max(d__1,d__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = work[j] + (d__1 = a[j + j * a_dim1], abs(d__1));
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ absa = (d__1 = a[i__ + j * a_dim1], abs(d__1));
+ sum += absa;
+ work[i__] += absa;
+/* L90: */
+ }
+ value = max(value,sum);
+/* L100: */
+ }
+ }
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
+
+/* Find normF(A). */
+
+ scale = 0.;
+ sum = 1.;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ dlassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ dlassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
+/* L120: */
+ }
+ }
+ sum *= 2;
+ i__1 = *lda + 1;
+ dlassq_(n, &a[a_offset], &i__1, &scale, &sum);
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of DLANSY */
+
+} /* dlansy_ */
+
+/* Subroutine */ int dlanv2_(doublereal *a, doublereal *b, doublereal *c__,
+ doublereal *d__, doublereal *rt1r, doublereal *rt1i, doublereal *rt2r,
+ doublereal *rt2i, doublereal *cs, doublereal *sn)
+{
+ /* System generated locals */
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double d_sign(doublereal *, doublereal *), sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal p, z__, aa, bb, cc, dd, cs1, sn1, sab, sac, eps, tau,
+ temp, scale, bcmax, bcmis, sigma;
+
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLANV2 computes the Schur factorization of a real 2-by-2 nonsymmetric
+ matrix in standard form:
+
+ [ A B ] = [ CS -SN ] [ AA BB ] [ CS SN ]
+ [ C D ] [ SN CS ] [ CC DD ] [-SN CS ]
+
+ where either
+ 1) CC = 0 so that AA and DD are real eigenvalues of the matrix, or
+ 2) AA = DD and BB*CC < 0, so that AA + or - sqrt(BB*CC) are complex
+ conjugate eigenvalues.
+
+ Arguments
+ =========
+
+ A (input/output) DOUBLE PRECISION
+ B (input/output) DOUBLE PRECISION
+ C (input/output) DOUBLE PRECISION
+ D (input/output) DOUBLE PRECISION
+ On entry, the elements of the input matrix.
+ On exit, they are overwritten by the elements of the
+ standardised Schur form.
+
+ RT1R (output) DOUBLE PRECISION
+ RT1I (output) DOUBLE PRECISION
+ RT2R (output) DOUBLE PRECISION
+ RT2I (output) DOUBLE PRECISION
+ The real and imaginary parts of the eigenvalues. If the
+ eigenvalues are a complex conjugate pair, RT1I > 0.
+
+ CS (output) DOUBLE PRECISION
+ SN (output) DOUBLE PRECISION
+ Parameters of the rotation matrix.
+
+ Further Details
+ ===============
+
+ Modified by V. Sima, Research Institute for Informatics, Bucharest,
+    Romania, to reduce the risk of cancellation errors
+    when computing real eigenvalues, and to ensure, if possible, that
+ abs(RT1R) >= abs(RT2R).
+
+ =====================================================================
+*/
+
+
+ eps = PRECISION;
+ if (*c__ == 0.) {
+ *cs = 1.;
+ *sn = 0.;
+ goto L10;
+
+ } else if (*b == 0.) {
+
+/* Swap rows and columns */
+
+ *cs = 0.;
+ *sn = 1.;
+ temp = *d__;
+ *d__ = *a;
+ *a = temp;
+ *b = -(*c__);
+ *c__ = 0.;
+ goto L10;
+ } else if (*a - *d__ == 0. && d_sign(&c_b2865, b) != d_sign(&c_b2865, c__)
+ ) {
+ *cs = 1.;
+ *sn = 0.;
+ goto L10;
+ } else {
+
+ temp = *a - *d__;
+ p = temp * .5;
+/* Computing MAX */
+ d__1 = abs(*b), d__2 = abs(*c__);
+ bcmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = abs(*b), d__2 = abs(*c__);
+ bcmis = min(d__1,d__2) * d_sign(&c_b2865, b) * d_sign(&c_b2865, c__);
+/* Computing MAX */
+ d__1 = abs(p);
+ scale = max(d__1,bcmax);
+ z__ = p / scale * p + bcmax / scale * bcmis;
+
+/*
+ If Z is of the order of the machine accuracy, postpone the
+ decision on the nature of eigenvalues
+*/
+
+ if (z__ >= eps * 4.) {
+
+/* Real eigenvalues. Compute A and D. */
+
+ d__1 = sqrt(scale) * sqrt(z__);
+ z__ = p + d_sign(&d__1, &p);
+ *a = *d__ + z__;
+ *d__ -= bcmax / z__ * bcmis;
+
+/* Compute B and the rotation matrix */
+
+ tau = dlapy2_(c__, &z__);
+ *cs = z__ / tau;
+ *sn = *c__ / tau;
+ *b -= *c__;
+ *c__ = 0.;
+ } else {
+
+/*
+ Complex eigenvalues, or real (almost) equal eigenvalues.
+ Make diagonal elements equal.
+*/
+
+ sigma = *b + *c__;
+ tau = dlapy2_(&sigma, &temp);
+ *cs = sqrt((abs(sigma) / tau + 1.) * .5);
+ *sn = -(p / (tau * *cs)) * d_sign(&c_b2865, &sigma);
+
+/*
+ Compute [ AA BB ] = [ A B ] [ CS -SN ]
+ [ CC DD ] [ C D ] [ SN CS ]
+*/
+
+ aa = *a * *cs + *b * *sn;
+ bb = -(*a) * *sn + *b * *cs;
+ cc = *c__ * *cs + *d__ * *sn;
+ dd = -(*c__) * *sn + *d__ * *cs;
+
+/*
+ Compute [ A B ] = [ CS SN ] [ AA BB ]
+ [ C D ] [-SN CS ] [ CC DD ]
+*/
+
+ *a = aa * *cs + cc * *sn;
+ *b = bb * *cs + dd * *sn;
+ *c__ = -aa * *sn + cc * *cs;
+ *d__ = -bb * *sn + dd * *cs;
+
+ temp = (*a + *d__) * .5;
+ *a = temp;
+ *d__ = temp;
+
+ if (*c__ != 0.) {
+ if (*b != 0.) {
+ if (d_sign(&c_b2865, b) == d_sign(&c_b2865, c__)) {
+
+/* Real eigenvalues: reduce to upper triangular form */
+
+ sab = sqrt((abs(*b)));
+ sac = sqrt((abs(*c__)));
+ d__1 = sab * sac;
+ p = d_sign(&d__1, c__);
+ tau = 1. / sqrt((d__1 = *b + *c__, abs(d__1)));
+ *a = temp + p;
+ *d__ = temp - p;
+ *b -= *c__;
+ *c__ = 0.;
+ cs1 = sab * tau;
+ sn1 = sac * tau;
+ temp = *cs * cs1 - *sn * sn1;
+ *sn = *cs * sn1 + *sn * cs1;
+ *cs = temp;
+ }
+ } else {
+ *b = -(*c__);
+ *c__ = 0.;
+ temp = *cs;
+ *cs = -(*sn);
+ *sn = temp;
+ }
+ }
+ }
+
+ }
+
+L10:
+
+/* Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). */
+
+ *rt1r = *a;
+ *rt2r = *d__;
+ if (*c__ == 0.) {
+ *rt1i = 0.;
+ *rt2i = 0.;
+ } else {
+ *rt1i = sqrt((abs(*b))) * sqrt((abs(*c__)));
+ *rt2i = -(*rt1i);
+ }
+ return 0;
+
+/* End of DLANV2 */
+
+} /* dlanv2_ */
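+
+/*
+   Illustrative sketch, not part of the f2c-generated translation: the
+   eigenvalue read-off at label L10 above, assuming the 2-by-2 matrix
+   has already been standardised by the rotation.  When c = 0 the
+   eigenvalues a and d are real; otherwise a = d and b*c < 0, giving the
+   conjugate pair a +/- i*sqrt(|b|*|c|).  The helper name is
+   hypothetical.
+*/
+#include <math.h>
+
+static void schur2x2_eigs(double a, double b, double c, double d,
+                          double *rt1r, double *rt1i,
+                          double *rt2r, double *rt2i)
+{
+    *rt1r = a;
+    *rt2r = d;
+    if (c == 0.) {
+        *rt1i = 0.;                    /* two real eigenvalues */
+        *rt2i = 0.;
+    } else {
+        *rt1i = sqrt(fabs(b)) * sqrt(fabs(c));
+        *rt2i = -(*rt1i);              /* complex conjugate pair */
+    }
+}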
+
+doublereal dlapy2_(doublereal *x, doublereal *y)
+{
+ /* System generated locals */
+ doublereal ret_val, d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal w, z__, xabs, yabs;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLAPY2 returns sqrt(x**2+y**2), taking care not to cause unnecessary
+ overflow.
+
+ Arguments
+ =========
+
+ X (input) DOUBLE PRECISION
+ Y (input) DOUBLE PRECISION
+ X and Y specify the values x and y.
+
+ =====================================================================
+*/
+
+
+ xabs = abs(*x);
+ yabs = abs(*y);
+ w = max(xabs,yabs);
+ z__ = min(xabs,yabs);
+ if (z__ == 0.) {
+ ret_val = w;
+ } else {
+/* Computing 2nd power */
+ d__1 = z__ / w;
+ ret_val = w * sqrt(d__1 * d__1 + 1.);
+ }
+ return ret_val;
+
+/* End of DLAPY2 */
+
+} /* dlapy2_ */
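+
+/*
+   Illustrative sketch, not part of the f2c-generated translation: the
+   scaling trick DLAPY2 uses.  Factoring out w = max(|x|,|y|) keeps the
+   squared ratio (z/w)**2 <= 1, so the intermediate never overflows even
+   when x*x or y*y would.  The helper name is hypothetical.
+*/
+#include <math.h>
+
+static double hypot_scaled(double x, double y)
+{
+    double xabs = fabs(x), yabs = fabs(y);
+    double w = xabs > yabs ? xabs : yabs;  /* larger magnitude */
+    double z = xabs > yabs ? yabs : xabs;  /* smaller magnitude */
+    double r;
+
+    if (z == 0.) {
+        return w;                          /* also avoids 0/0 when both are 0 */
+    }
+    r = z / w;                             /* r <= 1 by construction */
+    return w * sqrt(1. + r * r);
+}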
+
+doublereal dlapy3_(doublereal *x, doublereal *y, doublereal *z__)
+{
+ /* System generated locals */
+ doublereal ret_val, d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal w, xabs, yabs, zabs;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLAPY3 returns sqrt(x**2+y**2+z**2), taking care not to cause
+ unnecessary overflow.
+
+ Arguments
+ =========
+
+ X (input) DOUBLE PRECISION
+ Y (input) DOUBLE PRECISION
+ Z (input) DOUBLE PRECISION
+ X, Y and Z specify the values x, y and z.
+
+ =====================================================================
+*/
+
+
+ xabs = abs(*x);
+ yabs = abs(*y);
+ zabs = abs(*z__);
+/* Computing MAX */
+ d__1 = max(xabs,yabs);
+ w = max(d__1,zabs);
+ if (w == 0.) {
+ ret_val = 0.;
+ } else {
+/* Computing 2nd power */
+ d__1 = xabs / w;
+/* Computing 2nd power */
+ d__2 = yabs / w;
+/* Computing 2nd power */
+ d__3 = zabs / w;
+ ret_val = w * sqrt(d__1 * d__1 + d__2 * d__2 + d__3 * d__3);
+ }
+ return ret_val;
+
+/* End of DLAPY3 */
+
+} /* dlapy3_ */
+
+/* Subroutine */ int dlarf_(char *side, integer *m, integer *n, doublereal *v,
+ integer *incv, doublereal *tau, doublereal *c__, integer *ldc,
+ doublereal *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset;
+ doublereal d__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLARF applies a real elementary reflector H to a real m by n matrix
+ C, from either the left or the right. H is represented in the form
+
+ H = I - tau * v * v'
+
+ where tau is a real scalar and v is a real vector.
+
+ If tau = 0, then H is taken to be the unit matrix.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': form H * C
+ = 'R': form C * H
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ V (input) DOUBLE PRECISION array, dimension
+ (1 + (M-1)*abs(INCV)) if SIDE = 'L'
+ or (1 + (N-1)*abs(INCV)) if SIDE = 'R'
+ The vector v in the representation of H. V is not used if
+ TAU = 0.
+
+ INCV (input) INTEGER
+ The increment between elements of v. INCV <> 0.
+
+ TAU (input) DOUBLE PRECISION
+ The value tau in the representation of H.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by the matrix H * C if SIDE = 'L',
+ or C * H if SIDE = 'R'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension
+ (N) if SIDE = 'L'
+ or (M) if SIDE = 'R'
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (lsame_(side, "L")) {
+
+/* Form H * C */
+
+ if (*tau != 0.) {
+
+/* w := C' * v */
+
+ dgemv_("Transpose", m, n, &c_b2865, &c__[c_offset], ldc, &v[1],
+ incv, &c_b2879, &work[1], &c__1);
+
+/* C := C - v * w' */
+
+ d__1 = -(*tau);
+ dger_(m, n, &d__1, &v[1], incv, &work[1], &c__1, &c__[c_offset],
+ ldc);
+ }
+ } else {
+
+/* Form C * H */
+
+ if (*tau != 0.) {
+
+/* w := C * v */
+
+ dgemv_("No transpose", m, n, &c_b2865, &c__[c_offset], ldc, &v[1],
+ incv, &c_b2879, &work[1], &c__1);
+
+/* C := C - w * v' */
+
+ d__1 = -(*tau);
+ dger_(m, n, &d__1, &work[1], &c__1, &v[1], incv, &c__[c_offset],
+ ldc);
+ }
+ }
+ return 0;
+
+/* End of DLARF */
+
+} /* dlarf_ */
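+
+/*
+   Illustrative sketch, not part of the f2c-generated translation:
+   applying the elementary reflector H = I - tau*v*v' to a single vector
+   c, i.e. the N = 1 column special case of what DLARF does via DGEMV
+   and DGER: c := c - tau * (v'c) * v.  Assumes INCV = 1; the helper
+   name is hypothetical.
+*/
+static void apply_reflector(int n, const double *v, double tau, double *c)
+{
+    int i;
+    double dot = 0.;
+
+    if (tau == 0.) {
+        return;                        /* H is the identity */
+    }
+    for (i = 0; i < n; ++i) {
+        dot += v[i] * c[i];            /* w := v' * c */
+    }
+    for (i = 0; i < n; ++i) {
+        c[i] -= tau * dot * v[i];      /* c := c - tau * v * w */
+    }
+}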
+
+/* Subroutine */ int dlarfb_(char *side, char *trans, char *direct, char *
+ storev, integer *m, integer *n, integer *k, doublereal *v, integer *
+ ldv, doublereal *t, integer *ldt, doublereal *c__, integer *ldc,
+ doublereal *work, integer *ldwork)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
+ work_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dtrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *);
+ static char transt[1];
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLARFB applies a real block reflector H or its transpose H' to a
+ real m by n matrix C, from either the left or the right.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply H or H' from the Left
+ = 'R': apply H or H' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply H (No transpose)
+ = 'T': apply H' (Transpose)
+
+ DIRECT (input) CHARACTER*1
+ Indicates how H is formed from a product of elementary
+ reflectors
+ = 'F': H = H(1) H(2) . . . H(k) (Forward)
+ = 'B': H = H(k) . . . H(2) H(1) (Backward)
+
+ STOREV (input) CHARACTER*1
+ Indicates how the vectors which define the elementary
+ reflectors are stored:
+ = 'C': Columnwise
+ = 'R': Rowwise
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ K (input) INTEGER
+ The order of the matrix T (= the number of elementary
+ reflectors whose product defines the block reflector).
+
+ V (input) DOUBLE PRECISION array, dimension
+ (LDV,K) if STOREV = 'C'
+ (LDV,M) if STOREV = 'R' and SIDE = 'L'
+ (LDV,N) if STOREV = 'R' and SIDE = 'R'
+ The matrix V. See further details.
+
+ LDV (input) INTEGER
+ The leading dimension of the array V.
+ If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M);
+ if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N);
+ if STOREV = 'R', LDV >= K.
+
+ T (input) DOUBLE PRECISION array, dimension (LDT,K)
+ The triangular k by k matrix T in the representation of the
+ block reflector.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= K.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by H*C or H'*C or C*H or C*H'.
+
+ LDC (input) INTEGER
+            The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (LDWORK,K)
+
+ LDWORK (input) INTEGER
+ The leading dimension of the array WORK.
+ If SIDE = 'L', LDWORK >= max(1,N);
+ if SIDE = 'R', LDWORK >= max(1,M).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+
+ /* Function Body */
+ if ((*m <= 0) || (*n <= 0)) {
+ return 0;
+ }
+
+ if (lsame_(trans, "N")) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ if (lsame_(storev, "C")) {
+
+ if (lsame_(direct, "F")) {
+
+/*
+ Let V = ( V1 ) (first K rows)
+ ( V2 )
+ where V1 is unit lower triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
+
+ W := C1'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
+ &c__1);
+/* L10: */
+ }
+
+/* W := W * V1 */
+
+ dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &
+ c_b2865, &v[v_offset], ldv, &work[work_offset],
+ ldwork);
+ if (*m > *k) {
+
+/* W := W + C2'*V2 */
+
+ i__1 = *m - *k;
+ dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b2865,
+ &c__[*k + 1 + c_dim1], ldc, &v[*k + 1 + v_dim1],
+ ldv, &c_b2865, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b2865, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (*m > *k) {
+
+/* C2 := C2 - V2 * W' */
+
+ i__1 = *m - *k;
+ dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b3001,
+ &v[*k + 1 + v_dim1], ldv, &work[work_offset],
+ ldwork, &c_b2865, &c__[*k + 1 + c_dim1], ldc);
+ }
+
+/* W := W * V1' */
+
+ dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b2865,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V = (C1*V1 + C2*V2) (stored in WORK)
+
+ W := C1
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L40: */
+ }
+
+/* W := W * V1 */
+
+ dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &
+ c_b2865, &v[v_offset], ldv, &work[work_offset],
+ ldwork);
+ if (*n > *k) {
+
+/* W := W + C2 * V2 */
+
+ i__1 = *n - *k;
+ dgemm_("No transpose", "No transpose", m, k, &i__1, &
+ c_b2865, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k
+ + 1 + v_dim1], ldv, &c_b2865, &work[work_offset],
+ ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b2865, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (*n > *k) {
+
+/* C2 := C2 - W * V2' */
+
+ i__1 = *n - *k;
+ dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b3001,
+ &work[work_offset], ldwork, &v[*k + 1 + v_dim1],
+ ldv, &c_b2865, &c__[(*k + 1) * c_dim1 + 1], ldc);
+ }
+
+/* W := W * V1' */
+
+ dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b2865,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ } else {
+
+/*
+ Let V = ( V1 )
+ ( V2 ) (last K rows)
+ where V2 is unit upper triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
+
+ W := C2'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
+ work_dim1 + 1], &c__1);
+/* L70: */
+ }
+
+/* W := W * V2 */
+
+ dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &
+ c_b2865, &v[*m - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C1'*V1 */
+
+ i__1 = *m - *k;
+ dgemm_("Transpose", "No transpose", n, k, &i__1, &c_b2865,
+ &c__[c_offset], ldc, &v[v_offset], ldv, &c_b2865,
+ &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b2865, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (*m > *k) {
+
+/* C1 := C1 - V1 * W' */
+
+ i__1 = *m - *k;
+ dgemm_("No transpose", "Transpose", &i__1, n, k, &c_b3001,
+ &v[v_offset], ldv, &work[work_offset], ldwork, &
+ c_b2865, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b2865,
+ &v[*m - *k + 1 + v_dim1], ldv, &work[work_offset],
+ ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V = (C1*V1 + C2*V2) (stored in WORK)
+
+ W := C2
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
+ j * work_dim1 + 1], &c__1);
+/* L100: */
+ }
+
+/* W := W * V2 */
+
+ dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &
+ c_b2865, &v[*n - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C1 * V1 */
+
+ i__1 = *n - *k;
+ dgemm_("No transpose", "No transpose", m, k, &i__1, &
+ c_b2865, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b2865, &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b2865, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (*n > *k) {
+
+/* C1 := C1 - W * V1' */
+
+ i__1 = *n - *k;
+ dgemm_("No transpose", "Transpose", m, &i__1, k, &c_b3001,
+ &work[work_offset], ldwork, &v[v_offset], ldv, &
+ c_b2865, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b2865,
+ &v[*n - *k + 1 + v_dim1], ldv, &work[work_offset],
+ ldwork);
+
+/* C2 := C2 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L110: */
+ }
+/* L120: */
+ }
+ }
+ }
+
+ } else if (lsame_(storev, "R")) {
+
+ if (lsame_(direct, "F")) {
+
+/*
+ Let V = ( V1 V2 ) (V1: first K columns)
+ where V1 is unit upper triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
+
+ W := C1'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
+ &c__1);
+/* L130: */
+ }
+
+/* W := W * V1' */
+
+ dtrmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b2865,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C2'*V2' */
+
+ i__1 = *m - *k;
+ dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b2865, &
+ c__[*k + 1 + c_dim1], ldc, &v[(*k + 1) * v_dim1 +
+ 1], ldv, &c_b2865, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ dtrmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b2865, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (*m > *k) {
+
+/* C2 := C2 - V2' * W' */
+
+ i__1 = *m - *k;
+ dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b3001, &
+ v[(*k + 1) * v_dim1 + 1], ldv, &work[work_offset],
+ ldwork, &c_b2865, &c__[*k + 1 + c_dim1], ldc);
+ }
+
+/* W := W * V1 */
+
+ dtrmm_("Right", "Upper", "No transpose", "Unit", n, k, &
+ c_b2865, &v[v_offset], ldv, &work[work_offset],
+ ldwork);
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
+/* L140: */
+ }
+/* L150: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
+
+ W := C1
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L160: */
+ }
+
+/* W := W * V1' */
+
+ dtrmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b2865,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C2 * V2' */
+
+ i__1 = *n - *k;
+ dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b2865,
+ &c__[(*k + 1) * c_dim1 + 1], ldc, &v[(*k + 1) *
+ v_dim1 + 1], ldv, &c_b2865, &work[work_offset],
+ ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ dtrmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b2865, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (*n > *k) {
+
+/* C2 := C2 - W * V2 */
+
+ i__1 = *n - *k;
+ dgemm_("No transpose", "No transpose", m, &i__1, k, &
+ c_b3001, &work[work_offset], ldwork, &v[(*k + 1) *
+ v_dim1 + 1], ldv, &c_b2865, &c__[(*k + 1) *
+ c_dim1 + 1], ldc);
+ }
+
+/* W := W * V1 */
+
+ dtrmm_("Right", "Upper", "No transpose", "Unit", m, k, &
+ c_b2865, &v[v_offset], ldv, &work[work_offset],
+ ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L170: */
+ }
+/* L180: */
+ }
+
+ }
+
+ } else {
+
+/*
+ Let V = ( V1 V2 ) (V2: last K columns)
+ where V2 is unit lower triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
+
+ W := C2'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
+ work_dim1 + 1], &c__1);
+/* L190: */
+ }
+
+/* W := W * V2' */
+
+ dtrmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b2865,
+ &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C1'*V1' */
+
+ i__1 = *m - *k;
+ dgemm_("Transpose", "Transpose", n, k, &i__1, &c_b2865, &
+ c__[c_offset], ldc, &v[v_offset], ldv, &c_b2865, &
+ work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ dtrmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b2865, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (*m > *k) {
+
+/* C1 := C1 - V1' * W' */
+
+ i__1 = *m - *k;
+ dgemm_("Transpose", "Transpose", &i__1, n, k, &c_b3001, &
+ v[v_offset], ldv, &work[work_offset], ldwork, &
+ c_b2865, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ dtrmm_("Right", "Lower", "No transpose", "Unit", n, k, &
+ c_b2865, &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L200: */
+ }
+/* L210: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
+
+ W := C2
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dcopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
+ j * work_dim1 + 1], &c__1);
+/* L220: */
+ }
+
+/* W := W * V2' */
+
+ dtrmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b2865,
+ &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C1 * V1' */
+
+ i__1 = *n - *k;
+ dgemm_("No transpose", "Transpose", m, k, &i__1, &c_b2865,
+ &c__[c_offset], ldc, &v[v_offset], ldv, &c_b2865,
+ &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ dtrmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b2865, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (*n > *k) {
+
+/* C1 := C1 - W * V1 */
+
+ i__1 = *n - *k;
+ dgemm_("No transpose", "No transpose", m, &i__1, k, &
+ c_b3001, &work[work_offset], ldwork, &v[v_offset],
+ ldv, &c_b2865, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ dtrmm_("Right", "Lower", "No transpose", "Unit", m, k, &
+ c_b2865, &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L230: */
+ }
+/* L240: */
+ }
+
+ }
+
+ }
+ }
+
+ return 0;
+
+/* End of DLARFB */
+
+} /* dlarfb_ */
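+
+/*
+   Illustrative sketch, not part of the f2c-generated translation: the
+   mathematical update H*C = C - V*T*(V'*C) that DLARFB performs for
+   STOREV = 'C', DIRECT = 'F', SIDE = 'L', TRANS = 'N', written as naive
+   loops instead of the DTRMM/DGEMM calls above.  The routine itself
+   stages the same product as W := C'*V, W := W*T', C := C - V*W'.
+   Everything here is 0-based column-major with ldv = ldc = m and
+   ldt = k; the helper name is hypothetical.
+*/
+#include <stdlib.h>
+
+static int apply_block_reflector(int m, int n, int k, const double *v,
+                                 const double *t, double *c)
+{
+    int i, j, p;
+    double *w = (double *) malloc((size_t) k * n * sizeof(double));
+
+    if (w == NULL) {
+        return -1;                     /* workspace allocation failed */
+    }
+    for (j = 0; j < n; ++j) {          /* W := V' * C   (k by n) */
+        for (i = 0; i < k; ++i) {
+            double s = 0.;
+            for (p = 0; p < m; ++p) {
+                s += v[p + i * m] * c[p + j * m];
+            }
+            w[i + j * k] = s;
+        }
+    }
+    for (j = 0; j < n; ++j) {          /* W := T * W, T upper triangular */
+        for (i = 0; i < k; ++i) {      /* top-down keeps the update in place */
+            double s = 0.;
+            for (p = i; p < k; ++p) {
+                s += t[i + p * k] * w[p + j * k];
+            }
+            w[i + j * k] = s;
+        }
+    }
+    for (j = 0; j < n; ++j) {          /* C := C - V * W */
+        for (i = 0; i < m; ++i) {
+            double s = 0.;
+            for (p = 0; p < k; ++p) {
+                s += v[i + p * m] * w[p + j * k];
+            }
+            c[i + j * m] -= s;
+        }
+    }
+    free(w);
+    return 0;
+}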
+
+/* Subroutine */ int dlarfg_(integer *n, doublereal *alpha, doublereal *x,
+ integer *incx, doublereal *tau)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer j, knt;
+ static doublereal beta;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ static doublereal xnorm;
+
+ static doublereal safmin, rsafmn;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DLARFG generates a real elementary reflector H of order n, such
+ that
+
+ H * ( alpha ) = ( beta ), H' * H = I.
+ ( x ) ( 0 )
+
+ where alpha and beta are scalars, and x is an (n-1)-element real
+ vector. H is represented in the form
+
+ H = I - tau * ( 1 ) * ( 1 v' ) ,
+ ( v )
+
+ where tau is a real scalar and v is a real (n-1)-element
+ vector.
+
+ If the elements of x are all zero, then tau = 0 and H is taken to be
+ the unit matrix.
+
+ Otherwise 1 <= tau <= 2.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the elementary reflector.
+
+ ALPHA (input/output) DOUBLE PRECISION
+ On entry, the value alpha.
+ On exit, it is overwritten with the value beta.
+
+ X (input/output) DOUBLE PRECISION array, dimension
+ (1+(N-2)*abs(INCX))
+ On entry, the vector x.
+ On exit, it is overwritten with the vector v.
+
+ INCX (input) INTEGER
+ The increment between elements of X. INCX > 0.
+
+ TAU (output) DOUBLE PRECISION
+ The value tau.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n <= 1) {
+ *tau = 0.;
+ return 0;
+ }
+
+ i__1 = *n - 1;
+ xnorm = dnrm2_(&i__1, &x[1], incx);
+
+ if (xnorm == 0.) {
+
+/* H = I */
+
+ *tau = 0.;
+ } else {
+
+/* general case */
+
+ d__1 = dlapy2_(alpha, &xnorm);
+ beta = -d_sign(&d__1, alpha);
+ safmin = SAFEMINIMUM / EPSILON;
+ if (abs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+ rsafmn = 1. / safmin;
+ knt = 0;
+L10:
+ ++knt;
+ i__1 = *n - 1;
+ dscal_(&i__1, &rsafmn, &x[1], incx);
+ beta *= rsafmn;
+ *alpha *= rsafmn;
+ if (abs(beta) < safmin) {
+ goto L10;
+ }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+ i__1 = *n - 1;
+ xnorm = dnrm2_(&i__1, &x[1], incx);
+ d__1 = dlapy2_(alpha, &xnorm);
+ beta = -d_sign(&d__1, alpha);
+ *tau = (beta - *alpha) / beta;
+ i__1 = *n - 1;
+ d__1 = 1. / (*alpha - beta);
+ dscal_(&i__1, &d__1, &x[1], incx);
+
+/* If ALPHA is subnormal, it may lose relative accuracy */
+
+ *alpha = beta;
+ i__1 = knt;
+ for (j = 1; j <= i__1; ++j) {
+ *alpha *= safmin;
+/* L20: */
+ }
+ } else {
+ *tau = (beta - *alpha) / beta;
+ i__1 = *n - 1;
+ d__1 = 1. / (*alpha - beta);
+ dscal_(&i__1, &d__1, &x[1], incx);
+ *alpha = beta;
+ }
+ }
+
+ return 0;
+
+/* End of DLARFG */
+
+} /* dlarfg_ */
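+/*
+   Illustrative sketch, added for clarity and not part of the original
+   f2c translation: using dlarfg_ to annihilate the tail of the vector
+   (3, 4)'.  The typedefs integer and doublereal are assumed to come
+   from the f2c header already included by this file.
+*/
+static void dlarfg_example(void)
+{
+    integer n = 2, incx = 1;
+    doublereal alpha = 3., tau;
+    doublereal x[1] = {4.};     /* the (n-1)-element tail of the vector */
+
+    dlarfg_(&n, &alpha, x, &incx, &tau);
+    /* On exit alpha = beta = -5 (minus the 2-norm of (3,4)'), tau = 1.6,
+       and x[0] = v = 0.5, so H = I - tau*(1 v)'*(1 v) sends (3,4)' to
+       (-5,0)'.  Note tau lies in [1,2] as the header comment promises. */
+}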
+
+/* Subroutine */ int dlarft_(char *direct, char *storev, integer *n, integer *
+ k, doublereal *v, integer *ldv, doublereal *tau, doublereal *t,
+ integer *ldt)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Local variables */
+ static integer i__, j;
+ static doublereal vii;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), dtrmv_(char *,
+ char *, char *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLARFT forms the triangular factor T of a real block reflector H
+ of order n, which is defined as a product of k elementary reflectors.
+
+ If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular;
+
+ If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular.
+
+ If STOREV = 'C', the vector which defines the elementary reflector
+ H(i) is stored in the i-th column of the array V, and
+
+ H = I - V * T * V'
+
+ If STOREV = 'R', the vector which defines the elementary reflector
+ H(i) is stored in the i-th row of the array V, and
+
+ H = I - V' * T * V
+
+ Arguments
+ =========
+
+ DIRECT (input) CHARACTER*1
+ Specifies the order in which the elementary reflectors are
+ multiplied to form the block reflector:
+ = 'F': H = H(1) H(2) . . . H(k) (Forward)
+ = 'B': H = H(k) . . . H(2) H(1) (Backward)
+
+ STOREV (input) CHARACTER*1
+ Specifies how the vectors which define the elementary
+ reflectors are stored (see also Further Details):
+ = 'C': columnwise
+ = 'R': rowwise
+
+ N (input) INTEGER
+ The order of the block reflector H. N >= 0.
+
+ K (input) INTEGER
+ The order of the triangular factor T (= the number of
+ elementary reflectors). K >= 1.
+
+ V (input/output) DOUBLE PRECISION array, dimension
+ (LDV,K) if STOREV = 'C'
+ (LDV,N) if STOREV = 'R'
+ The matrix V. See further details.
+
+ LDV (input) INTEGER
+ The leading dimension of the array V.
+ If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K.
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i).
+
+ T (output) DOUBLE PRECISION array, dimension (LDT,K)
+ The k by k triangular factor T of the block reflector.
+ If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is
+ lower triangular. The rest of the array is not used.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= K.
+
+ Further Details
+ ===============
+
+ The shape of the matrix V and the storage of the vectors which define
+ the H(i) is best illustrated by the following example with n = 5 and
+ k = 3. The elements equal to 1 are not stored; the corresponding
+ array elements are modified but restored on exit. The rest of the
+ array is not used.
+
+ DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R':
+
+ V = ( 1 ) V = ( 1 v1 v1 v1 v1 )
+ ( v1 1 ) ( 1 v2 v2 v2 )
+ ( v1 v2 1 ) ( 1 v3 v3 )
+ ( v1 v2 v3 )
+ ( v1 v2 v3 )
+
+ DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R':
+
+ V = ( v1 v2 v3 ) V = ( v1 v1 1 )
+ ( v1 v2 v3 ) ( v2 v2 v2 1 )
+ ( 1 v2 v3 ) ( v3 v3 v3 v3 1 )
+ ( 1 v3 )
+ ( 1 )
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --tau;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+
+ /* Function Body */
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (lsame_(direct, "F")) {
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (tau[i__] == 0.) {
+
+/* H(i) = I */
+
+ i__2 = i__;
+ for (j = 1; j <= i__2; ++j) {
+ t[j + i__ * t_dim1] = 0.;
+/* L10: */
+ }
+ } else {
+
+/* general case */
+
+ vii = v[i__ + i__ * v_dim1];
+ v[i__ + i__ * v_dim1] = 1.;
+ if (lsame_(storev, "C")) {
+
+/* T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ d__1 = -tau[i__];
+ dgemv_("Transpose", &i__2, &i__3, &d__1, &v[i__ + v_dim1],
+ ldv, &v[i__ + i__ * v_dim1], &c__1, &c_b2879, &t[
+ i__ * t_dim1 + 1], &c__1);
+ } else {
+
+/* T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */
+
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ d__1 = -tau[i__];
+ dgemv_("No transpose", &i__2, &i__3, &d__1, &v[i__ *
+ v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, &
+ c_b2879, &t[i__ * t_dim1 + 1], &c__1);
+ }
+ v[i__ + i__ * v_dim1] = vii;
+
+/* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */
+
+ i__2 = i__ - 1;
+ dtrmv_("Upper", "No transpose", "Non-unit", &i__2, &t[
+ t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1);
+ t[i__ + i__ * t_dim1] = tau[i__];
+ }
+/* L20: */
+ }
+ } else {
+ for (i__ = *k; i__ >= 1; --i__) {
+ if (tau[i__] == 0.) {
+
+/* H(i) = I */
+
+ i__1 = *k;
+ for (j = i__; j <= i__1; ++j) {
+ t[j + i__ * t_dim1] = 0.;
+/* L30: */
+ }
+ } else {
+
+/* general case */
+
+ if (i__ < *k) {
+ if (lsame_(storev, "C")) {
+ vii = v[*n - *k + i__ + i__ * v_dim1];
+ v[*n - *k + i__ + i__ * v_dim1] = 1.;
+
+/*
+ T(i+1:k,i) :=
+ - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i)
+*/
+
+ i__1 = *n - *k + i__;
+ i__2 = *k - i__;
+ d__1 = -tau[i__];
+ dgemv_("Transpose", &i__1, &i__2, &d__1, &v[(i__ + 1)
+ * v_dim1 + 1], ldv, &v[i__ * v_dim1 + 1], &
+ c__1, &c_b2879, &t[i__ + 1 + i__ * t_dim1], &
+ c__1);
+ v[*n - *k + i__ + i__ * v_dim1] = vii;
+ } else {
+ vii = v[i__ + (*n - *k + i__) * v_dim1];
+ v[i__ + (*n - *k + i__) * v_dim1] = 1.;
+
+/*
+ T(i+1:k,i) :=
+ - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)'
+*/
+
+ i__1 = *k - i__;
+ i__2 = *n - *k + i__;
+ d__1 = -tau[i__];
+ dgemv_("No transpose", &i__1, &i__2, &d__1, &v[i__ +
+ 1 + v_dim1], ldv, &v[i__ + v_dim1], ldv, &
+ c_b2879, &t[i__ + 1 + i__ * t_dim1], &c__1);
+ v[i__ + (*n - *k + i__) * v_dim1] = vii;
+ }
+
+/* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+ i__1 = *k - i__;
+ dtrmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__
+ + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ *
+ t_dim1], &c__1)
+ ;
+ }
+ t[i__ + i__ * t_dim1] = tau[i__];
+ }
+/* L40: */
+ }
+ }
+ return 0;
+
+/* End of DLARFT */
+
+} /* dlarft_ */
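+/*
+   Worked example, added for clarity and not part of the original f2c
+   translation: for DIRECT = 'F', STOREV = 'C' and k = 2, expanding
+
+       H(1)*H(2) = (I - tau1*v1*v1')*(I - tau2*v2*v2') = I - V*T*V'
+
+   gives the upper triangular factor
+
+       T = ( tau1  -tau1*tau2*(v1'*v2) )
+           (  0     tau2               )
+
+   which is exactly what the dgemv_/dtrmv_ pair above builds one column
+   at a time: T(1:i-1,i) := T(1:i-1,1:i-1) * (-tau(i) * V' * V(:,i)).
+*/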
+
+/* Subroutine */ int dlarfx_(char *side, integer *m, integer *n, doublereal *
+ v, doublereal *tau, doublereal *c__, integer *ldc, doublereal *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1;
+ doublereal d__1;
+
+ /* Local variables */
+ static integer j;
+ static doublereal t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5,
+ v6, v7, v8, v9, t10, v10, sum;
+ extern /* Subroutine */ int dger_(integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLARFX applies a real elementary reflector H to a real m by n
+ matrix C, from either the left or the right. H is represented in the
+ form
+
+ H = I - tau * v * v'
+
+ where tau is a real scalar and v is a real vector.
+
+ If tau = 0, then H is taken to be the unit matrix
+
+ This version uses inline code if H has order < 11.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': form H * C
+ = 'R': form C * H
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ V (input) DOUBLE PRECISION array, dimension (M) if SIDE = 'L'
+ or (N) if SIDE = 'R'
+ The vector v in the representation of H.
+
+ TAU (input) DOUBLE PRECISION
+ The value tau in the representation of H.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by the matrix H * C if SIDE = 'L',
+ or C * H if SIDE = 'R'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension
+ (N) if SIDE = 'L'
+ or (M) if SIDE = 'R'
+ WORK is not referenced if H has order < 11.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (*tau == 0.) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form H * C, where H has order m. */
+
+ switch (*m) {
+ case 1: goto L10;
+ case 2: goto L30;
+ case 3: goto L50;
+ case 4: goto L70;
+ case 5: goto L90;
+ case 6: goto L110;
+ case 7: goto L130;
+ case 8: goto L150;
+ case 9: goto L170;
+ case 10: goto L190;
+ }
+
+/*
+ Code for general M
+
+ w := C'*v
+*/
+
+ dgemv_("Transpose", m, n, &c_b2865, &c__[c_offset], ldc, &v[1], &c__1,
+ &c_b2879, &work[1], &c__1);
+
+/* C := C - tau * v * w' */
+
+ d__1 = -(*tau);
+ dger_(m, n, &d__1, &v[1], &c__1, &work[1], &c__1, &c__[c_offset], ldc)
+ ;
+ goto L410;
+L10:
+
+/* Special code for 1 x 1 Householder */
+
+ t1 = 1. - *tau * v[1] * v[1];
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j * c_dim1 + 1] = t1 * c__[j * c_dim1 + 1];
+/* L20: */
+ }
+ goto L410;
+L30:
+
+/* Special code for 2 x 2 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+/* L40: */
+ }
+ goto L410;
+L50:
+
+/* Special code for 3 x 3 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+/* L60: */
+ }
+ goto L410;
+L70:
+
+/* Special code for 4 x 4 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+/* L80: */
+ }
+ goto L410;
+L90:
+
+/* Special code for 5 x 5 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+/* L100: */
+ }
+ goto L410;
+L110:
+
+/* Special code for 6 x 6 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+/* L120: */
+ }
+ goto L410;
+L130:
+
+/* Special code for 7 x 7 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+/* L140: */
+ }
+ goto L410;
+L150:
+
+/* Special code for 8 x 8 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+/* L160: */
+ }
+ goto L410;
+L170:
+
+/* Special code for 9 x 9 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
+ c_dim1 + 9];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+ c__[j * c_dim1 + 9] -= sum * t9;
+/* L180: */
+ }
+ goto L410;
+L190:
+
+/* Special code for 10 x 10 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ v10 = v[10];
+ t10 = *tau * v10;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
+ c_dim1 + 9] + v10 * c__[j * c_dim1 + 10];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+ c__[j * c_dim1 + 9] -= sum * t9;
+ c__[j * c_dim1 + 10] -= sum * t10;
+/* L200: */
+ }
+ goto L410;
+ } else {
+
+/* Form C * H, where H has order n. */
+
+ switch (*n) {
+ case 1: goto L210;
+ case 2: goto L230;
+ case 3: goto L250;
+ case 4: goto L270;
+ case 5: goto L290;
+ case 6: goto L310;
+ case 7: goto L330;
+ case 8: goto L350;
+ case 9: goto L370;
+ case 10: goto L390;
+ }
+
+/*
+ Code for general N
+
+ w := C * v
+*/
+
+ dgemv_("No transpose", m, n, &c_b2865, &c__[c_offset], ldc, &v[1], &
+ c__1, &c_b2879, &work[1], &c__1);
+
+/* C := C - tau * w * v' */
+
+ d__1 = -(*tau);
+ dger_(m, n, &d__1, &work[1], &c__1, &v[1], &c__1, &c__[c_offset], ldc)
+ ;
+ goto L410;
+L210:
+
+/* Special code for 1 x 1 Householder */
+
+ t1 = 1. - *tau * v[1] * v[1];
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j + c_dim1] = t1 * c__[j + c_dim1];
+/* L220: */
+ }
+ goto L410;
+L230:
+
+/* Special code for 2 x 2 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+/* L240: */
+ }
+ goto L410;
+L250:
+
+/* Special code for 3 x 3 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+/* L260: */
+ }
+ goto L410;
+L270:
+
+/* Special code for 4 x 4 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+/* L280: */
+ }
+ goto L410;
+L290:
+
+/* Special code for 5 x 5 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+/* L300: */
+ }
+ goto L410;
+L310:
+
+/* Special code for 6 x 6 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+/* L320: */
+ }
+ goto L410;
+L330:
+
+/* Special code for 7 x 7 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+/* L340: */
+ }
+ goto L410;
+L350:
+
+/* Special code for 8 x 8 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + ((c_dim1) << (3))] -= sum * t8;
+/* L360: */
+ }
+ goto L410;
+L370:
+
+/* Special code for 9 x 9 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))] +
+ v9 * c__[j + c_dim1 * 9];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + ((c_dim1) << (3))] -= sum * t8;
+ c__[j + c_dim1 * 9] -= sum * t9;
+/* L380: */
+ }
+ goto L410;
+L390:
+
+/* Special code for 10 x 10 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ v10 = v[10];
+ t10 = *tau * v10;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))] +
+ v9 * c__[j + c_dim1 * 9] + v10 * c__[j + c_dim1 * 10];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + ((c_dim1) << (3))] -= sum * t8;
+ c__[j + c_dim1 * 9] -= sum * t9;
+ c__[j + c_dim1 * 10] -= sum * t10;
+/* L400: */
+ }
+ goto L410;
+ }
+L410:
+ return 0;
+
+/* End of DLARFX */
+
+} /* dlarfx_ */
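+/*
+   Illustrative sketch, added for clarity and not part of the original
+   f2c translation: applying the order-2 reflector from the dlarfg_
+   example above (v = (1, 0.5)', tau = 1.6) to the column (3, 4)'.
+   Since m = 2 < 11 the inlined 2 x 2 branch runs and work is never
+   referenced.
+*/
+static void dlarfx_example(void)
+{
+    integer m = 2, n = 1, ldc = 2;
+    doublereal tau = 1.6, work[1];
+    doublereal v[2] = {1., .5};
+    doublereal c[2] = {3., 4.};
+
+    dlarfx_("L", &m, &n, v, &tau, c, &ldc, work);
+    /* c is now (-5, 0)': H * (3,4)' reproduces the dlarfg_ result. */
+}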
+
+/* Subroutine */ int dlartg_(doublereal *f, doublereal *g, doublereal *cs,
+ doublereal *sn, doublereal *r__)
+{
+ /* Initialized data */
+
+ static logical first = TRUE_;
+
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double log(doublereal), pow_di(doublereal *, integer *), sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__;
+ static doublereal f1, g1, eps, scale;
+ static integer count;
+ static doublereal safmn2, safmx2;
+
+ static doublereal safmin;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DLARTG generates a plane rotation so that
+
+ [ CS SN ] . [ F ] = [ R ] where CS**2 + SN**2 = 1.
+ [ -SN CS ] [ G ] [ 0 ]
+
+ This is a slower, more accurate version of the BLAS1 routine DROTG,
+ with the following other differences:
+ F and G are unchanged on return.
+ If G=0, then CS=1 and SN=0.
+ If F=0 and (G .ne. 0), then CS=0 and SN=1 without doing any
+ floating point operations (saves work in DBDSQR when
+ there are zeros on the diagonal).
+
+ If F exceeds G in magnitude, CS will be positive.
+
+ Arguments
+ =========
+
+ F (input) DOUBLE PRECISION
+ The first component of vector to be rotated.
+
+ G (input) DOUBLE PRECISION
+ The second component of vector to be rotated.
+
+ CS (output) DOUBLE PRECISION
+ The cosine of the rotation.
+
+ SN (output) DOUBLE PRECISION
+ The sine of the rotation.
+
+ R (output) DOUBLE PRECISION
+ The nonzero component of the rotated vector.
+
+ =====================================================================
+*/
+
+
+ if (first) {
+ first = FALSE_;
+ safmin = SAFEMINIMUM;
+ eps = EPSILON;
+ d__1 = BASE;
+ i__1 = (integer) (log(safmin / eps) / log(BASE) /
+ 2.);
+ safmn2 = pow_di(&d__1, &i__1);
+ safmx2 = 1. / safmn2;
+ }
+ if (*g == 0.) {
+ *cs = 1.;
+ *sn = 0.;
+ *r__ = *f;
+ } else if (*f == 0.) {
+ *cs = 0.;
+ *sn = 1.;
+ *r__ = *g;
+ } else {
+ f1 = *f;
+ g1 = *g;
+/* Computing MAX */
+ d__1 = abs(f1), d__2 = abs(g1);
+ scale = max(d__1,d__2);
+ if (scale >= safmx2) {
+ count = 0;
+L10:
+ ++count;
+ f1 *= safmn2;
+ g1 *= safmn2;
+/* Computing MAX */
+ d__1 = abs(f1), d__2 = abs(g1);
+ scale = max(d__1,d__2);
+ if (scale >= safmx2) {
+ goto L10;
+ }
+/* Computing 2nd power */
+ d__1 = f1;
+/* Computing 2nd power */
+ d__2 = g1;
+ *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ i__1 = count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ *r__ *= safmx2;
+/* L20: */
+ }
+ } else if (scale <= safmn2) {
+ count = 0;
+L30:
+ ++count;
+ f1 *= safmx2;
+ g1 *= safmx2;
+/* Computing MAX */
+ d__1 = abs(f1), d__2 = abs(g1);
+ scale = max(d__1,d__2);
+ if (scale <= safmn2) {
+ goto L30;
+ }
+/* Computing 2nd power */
+ d__1 = f1;
+/* Computing 2nd power */
+ d__2 = g1;
+ *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ i__1 = count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ *r__ *= safmn2;
+/* L40: */
+ }
+ } else {
+/* Computing 2nd power */
+ d__1 = f1;
+/* Computing 2nd power */
+ d__2 = g1;
+ *r__ = sqrt(d__1 * d__1 + d__2 * d__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ }
+ if (abs(*f) > abs(*g) && *cs < 0.) {
+ *cs = -(*cs);
+ *sn = -(*sn);
+ *r__ = -(*r__);
+ }
+ }
+ return 0;
+
+/* End of DLARTG */
+
+} /* dlartg_ */
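+/*
+   Illustrative sketch, added for clarity and not part of the original
+   f2c translation: the classic 3-4-5 rotation.  Neither scaling branch
+   is taken because max(|f|,|g|) = 4 is far from overflow and underflow.
+*/
+static void dlartg_example(void)
+{
+    doublereal f = 3., g = 4., cs, sn, r;
+
+    dlartg_(&f, &g, &cs, &sn, &r);
+    /* cs = 0.6, sn = 0.8, r = 5, so that
+         (  0.6  0.8 ) ( 3 )   ( 5 )
+         ( -0.8  0.6 ) ( 4 ) = ( 0 )   with cs**2 + sn**2 = 1. */
+}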
+
+/* Subroutine */ int dlas2_(doublereal *f, doublereal *g, doublereal *h__,
+ doublereal *ssmin, doublereal *ssmax)
+{
+ /* System generated locals */
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal c__, fa, ga, ha, as, at, au, fhmn, fhmx;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DLAS2 computes the singular values of the 2-by-2 matrix
+ [ F G ]
+ [ 0 H ].
+ On return, SSMIN is the smaller singular value and SSMAX is the
+ larger singular value.
+
+ Arguments
+ =========
+
+ F (input) DOUBLE PRECISION
+ The (1,1) element of the 2-by-2 matrix.
+
+ G (input) DOUBLE PRECISION
+ The (1,2) element of the 2-by-2 matrix.
+
+ H (input) DOUBLE PRECISION
+ The (2,2) element of the 2-by-2 matrix.
+
+ SSMIN (output) DOUBLE PRECISION
+ The smaller singular value.
+
+ SSMAX (output) DOUBLE PRECISION
+ The larger singular value.
+
+ Further Details
+ ===============
+
+ Barring over/underflow, all output quantities are correct to within
+ a few units in the last place (ulps), even in the absence of a guard
+ digit in addition/subtraction.
+
+ In IEEE arithmetic, the code works correctly if one matrix element is
+ infinite.
+
+ Overflow will not occur unless the largest singular value itself
+ overflows, or is within a few ulps of overflow. (On machines with
+ partial overflow, like the Cray, overflow may occur if the largest
+ singular value is within a factor of 2 of overflow.)
+
+ Underflow is harmless if underflow is gradual. Otherwise, results
+ may correspond to a matrix modified by perturbations of size near
+ the underflow threshold.
+
+ ====================================================================
+*/
+
+
+ fa = abs(*f);
+ ga = abs(*g);
+ ha = abs(*h__);
+ fhmn = min(fa,ha);
+ fhmx = max(fa,ha);
+ if (fhmn == 0.) {
+ *ssmin = 0.;
+ if (fhmx == 0.) {
+ *ssmax = ga;
+ } else {
+/* Computing 2nd power */
+ d__1 = min(fhmx,ga) / max(fhmx,ga);
+ *ssmax = max(fhmx,ga) * sqrt(d__1 * d__1 + 1.);
+ }
+ } else {
+ if (ga < fhmx) {
+ as = fhmn / fhmx + 1.;
+ at = (fhmx - fhmn) / fhmx;
+/* Computing 2nd power */
+ d__1 = ga / fhmx;
+ au = d__1 * d__1;
+ c__ = 2. / (sqrt(as * as + au) + sqrt(at * at + au));
+ *ssmin = fhmn * c__;
+ *ssmax = fhmx / c__;
+ } else {
+ au = fhmx / ga;
+ if (au == 0.) {
+
+/*
+ Avoid possible harmful underflow if exponent range
+ asymmetric (true SSMIN may not underflow even if
+ AU underflows)
+*/
+
+ *ssmin = fhmn * fhmx / ga;
+ *ssmax = ga;
+ } else {
+ as = fhmn / fhmx + 1.;
+ at = (fhmx - fhmn) / fhmx;
+/* Computing 2nd power */
+ d__1 = as * au;
+/* Computing 2nd power */
+ d__2 = at * au;
+ c__ = 1. / (sqrt(d__1 * d__1 + 1.) + sqrt(d__2 * d__2 + 1.));
+ *ssmin = fhmn * c__ * au;
+ *ssmin += *ssmin;
+ *ssmax = ga / (c__ + c__);
+ }
+ }
+ }
+ return 0;
+
+/* End of DLAS2 */
+
+} /* dlas2_ */
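+/*
+   Illustrative sketch, added for clarity and not part of the original
+   f2c translation: for F = G = H = 1 the matrix ( 1 1; 0 1 ) has
+   ssmin*ssmax = |det| = 1 and ssmin**2 + ssmax**2 = 3 (the squared
+   Frobenius norm), so the singular values are the golden ratio and its
+   reciprocal.
+*/
+static void dlas2_example(void)
+{
+    doublereal f = 1., g = 1., h = 1., ssmin, ssmax;
+
+    dlas2_(&f, &g, &h, &ssmin, &ssmax);
+    /* ssmax = (1 + sqrt(5))/2 ~ 1.618, ssmin = (sqrt(5) - 1)/2 ~ 0.618 */
+}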
+
+/* Subroutine */ int dlascl_(char *type__, integer *kl, integer *ku,
+ doublereal *cfrom, doublereal *cto, integer *m, integer *n,
+ doublereal *a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+ /* Local variables */
+ static integer i__, j, k1, k2, k3, k4;
+ static doublereal mul, cto1;
+ static logical done;
+ static doublereal ctoc;
+ extern logical lsame_(char *, char *);
+ static integer itype;
+ static doublereal cfrom1;
+
+ static doublereal cfromc;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static doublereal bignum, smlnum;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLASCL multiplies the M by N real matrix A by the real scalar
+ CTO/CFROM. This is done without over/underflow as long as the final
+ result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
+ A may be full, upper triangular, lower triangular, upper Hessenberg,
+ or banded.
+
+ Arguments
+ =========
+
+ TYPE (input) CHARACTER*1
+ TYPE indicates the storage type of the input matrix.
+ = 'G': A is a full matrix.
+ = 'L': A is a lower triangular matrix.
+ = 'U': A is an upper triangular matrix.
+ = 'H': A is an upper Hessenberg matrix.
+ = 'B': A is a symmetric band matrix with lower bandwidth KL
+ and upper bandwidth KU and with only the lower
+ half stored.
+ = 'Q': A is a symmetric band matrix with lower bandwidth KL
+ and upper bandwidth KU and with only the upper
+ half stored.
+ = 'Z': A is a band matrix with lower bandwidth KL and upper
+ bandwidth KU.
+
+ KL (input) INTEGER
+ The lower bandwidth of A. Referenced only if TYPE = 'B',
+ 'Q' or 'Z'.
+
+ KU (input) INTEGER
+ The upper bandwidth of A. Referenced only if TYPE = 'B',
+ 'Q' or 'Z'.
+
+ CFROM (input) DOUBLE PRECISION
+ CTO (input) DOUBLE PRECISION
+ The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
+ without over/underflow if the final result CTO*A(I,J)/CFROM
+ can be represented without over/underflow. CFROM must be
+ nonzero.
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ The matrix to be multiplied by CTO/CFROM. See TYPE for the
+ storage type.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ INFO (output) INTEGER
+ 0 - successful exit
+ <0 - if INFO = -i, the i-th argument had an illegal value.
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(type__, "G")) {
+ itype = 0;
+ } else if (lsame_(type__, "L")) {
+ itype = 1;
+ } else if (lsame_(type__, "U")) {
+ itype = 2;
+ } else if (lsame_(type__, "H")) {
+ itype = 3;
+ } else if (lsame_(type__, "B")) {
+ itype = 4;
+ } else if (lsame_(type__, "Q")) {
+ itype = 5;
+ } else if (lsame_(type__, "Z")) {
+ itype = 6;
+ } else {
+ itype = -1;
+ }
+
+ if (itype == -1) {
+ *info = -1;
+ } else if (*cfrom == 0.) {
+ *info = -4;
+ } else if (*m < 0) {
+ *info = -6;
+ } else if (((*n < 0) || (itype == 4 && *n != *m)) || (itype == 5 && *n !=
+ *m)) {
+ *info = -7;
+ } else if (itype <= 3 && *lda < max(1,*m)) {
+ *info = -9;
+ } else if (itype >= 4) {
+/* Computing MAX */
+ i__1 = *m - 1;
+ if ((*kl < 0) || (*kl > max(i__1,0))) {
+ *info = -2;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *n - 1;
+ if (((*ku < 0) || (*ku > max(i__1,0))) || (((itype == 4) || (
+ itype == 5)) && *kl != *ku)) {
+ *info = -3;
+ } else if (((itype == 4 && *lda < *kl + 1) || (itype == 5 && *lda
+ < *ku + 1)) || (itype == 6 && *lda < ((*kl) << (1)) + *ku
+ + 1)) {
+ *info = -9;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASCL", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*n == 0) || (*m == 0)) {
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = SAFEMINIMUM;
+ bignum = 1. / smlnum;
+
+ cfromc = *cfrom;
+ ctoc = *cto;
+
+L10:
+ cfrom1 = cfromc * smlnum;
+ cto1 = ctoc / bignum;
+ if (abs(cfrom1) > abs(ctoc) && ctoc != 0.) {
+ mul = smlnum;
+ done = FALSE_;
+ cfromc = cfrom1;
+ } else if (abs(cto1) > abs(cfromc)) {
+ mul = bignum;
+ done = FALSE_;
+ ctoc = cto1;
+ } else {
+ mul = ctoc / cfromc;
+ done = TRUE_;
+ }
+
+ if (itype == 0) {
+
+/* Full matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (itype == 1) {
+
+/* Lower triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L40: */
+ }
+/* L50: */
+ }
+
+ } else if (itype == 2) {
+
+/* Upper triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L60: */
+ }
+/* L70: */
+ }
+
+ } else if (itype == 3) {
+
+/* Upper Hessenberg matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j + 1;
+ i__2 = min(i__3,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (itype == 4) {
+
+/* Lower half of a symmetric band matrix */
+
+ k3 = *kl + 1;
+ k4 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = k3, i__4 = k4 - j;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L100: */
+ }
+/* L110: */
+ }
+
+ } else if (itype == 5) {
+
+/* Upper half of a symmetric band matrix */
+
+ k1 = *ku + 2;
+ k3 = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = k1 - j;
+ i__3 = k3;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L120: */
+ }
+/* L130: */
+ }
+
+ } else if (itype == 6) {
+
+/* Band matrix */
+
+ k1 = *kl + *ku + 2;
+ k2 = *kl + 1;
+ k3 = ((*kl) << (1)) + *ku + 1;
+ k4 = *kl + *ku + 1 + *m;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = k1 - j;
+/* Computing MIN */
+ i__4 = k3, i__5 = k4 - j;
+ i__2 = min(i__4,i__5);
+ for (i__ = max(i__3,k2); i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L140: */
+ }
+/* L150: */
+ }
+
+ }
+
+ if (! done) {
+ goto L10;
+ }
+
+ return 0;
+
+/* End of DLASCL */
+
+} /* dlascl_ */
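+/*
+   Illustrative sketch, added for clarity and not part of the original
+   f2c translation: scaling a full 2 x 2 matrix by CTO/CFROM = 0.5 in
+   place.  For extreme CFROM/CTO values the L10 loop above reaches the
+   same result through bounded intermediate factors instead of forming
+   the quotient naively.
+*/
+static void dlascl_example(void)
+{
+    integer kl = 0, ku = 0, m = 2, n = 2, lda = 2, info;
+    doublereal cfrom = 4., cto = 2.;
+    doublereal a[4] = {1., 2., 3., 4.};   /* column-major 2 x 2 */
+
+    dlascl_("G", &kl, &ku, &cfrom, &cto, &m, &n, a, &lda, &info);
+    /* info = 0 and a = {0.5, 1., 1.5, 2.} */
+}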
+
+/* Subroutine */ int dlasd0_(integer *n, integer *sqre, doublereal *d__,
+ doublereal *e, doublereal *u, integer *ldu, doublereal *vt, integer *
+ ldvt, integer *smlsiz, integer *iwork, doublereal *work, integer *
+ info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, j, m, i1, ic, lf, nd, ll, nl, nr, im1, ncc, nlf, nrf,
+ iwk, lvl, ndb1, nlp1, nrp1;
+ static doublereal beta;
+ static integer idxq, nlvl;
+ static doublereal alpha;
+ static integer inode, ndiml, idxqc, ndimr, itemp, sqrei;
+ extern /* Subroutine */ int dlasd1_(integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *, integer *, doublereal *,
+ integer *), dlasdq_(char *, integer *, integer *, integer *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *), dlasdt_(integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *), xerbla_(
+ char *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ Using a divide and conquer approach, DLASD0 computes the singular
+ value decomposition (SVD) of a real upper bidiagonal N-by-M
+ matrix B with diagonal D and offdiagonal E, where M = N + SQRE.
+ The algorithm computes orthogonal matrices U and VT such that
+ B = U * S * VT. The singular values S are overwritten on D.
+
+ A related subroutine, DLASDA, computes only the singular values,
+ and optionally, the singular vectors in compact form.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ On entry, the row dimension of the upper bidiagonal matrix.
+ This is also the dimension of the main diagonal array D.
+
+ SQRE (input) INTEGER
+ Specifies the column dimension of the bidiagonal matrix.
+ = 0: The bidiagonal matrix has column dimension M = N;
+ = 1: The bidiagonal matrix has column dimension M = N+1;
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry D contains the main diagonal of the bidiagonal
+ matrix.
+ On exit D, if INFO = 0, contains its singular values.
+
+ E (input) DOUBLE PRECISION array, dimension (M-1)
+ Contains the subdiagonal entries of the bidiagonal matrix.
+ On exit, E has been destroyed.
+
+ U (output) DOUBLE PRECISION array, dimension at least (LDU, N)
+ On exit, U contains the left singular vectors.
+
+ LDU (input) INTEGER
+ On entry, leading dimension of U.
+
+ VT (output) DOUBLE PRECISION array, dimension at least (LDVT, M)
+ On exit, VT' contains the right singular vectors.
+
+ LDVT (input) INTEGER
+ On entry, leading dimension of VT.
+
+ SMLSIZ (input) INTEGER
+ On entry, maximum size of the subproblems at the
+ bottom of the computation tree.
+
+ IWORK INTEGER work array.
+ Dimension must be at least (8 * N)
+
+ WORK DOUBLE PRECISION work array.
+ Dimension must be at least (3 * M**2 + 2 * M)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --iwork;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -1;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -2;
+ }
+
+ m = *n + *sqre;
+
+ if (*ldu < *n) {
+ *info = -6;
+ } else if (*ldvt < m) {
+ *info = -8;
+ } else if (*smlsiz < 3) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD0", &i__1);
+ return 0;
+ }
+
+/* If the input matrix is too small, call DLASDQ to find the SVD. */
+
+ if (*n <= *smlsiz) {
+ dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset],
+ ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], info);
+ return 0;
+ }
+
+/* Set up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+ idxq = ndimr + *n;
+ iwk = idxq + *n;
+ dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/*
+ For the nodes on bottom level of the tree, solve
+ their subproblems by DLASDQ.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ ncc = 0;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+ IC : center row of each node
+ NL : number of rows of left subproblem
+ NR : number of rows of right subproblem
+ NLF: starting row of the left subproblem
+ NRF: starting row of the right subproblem
+*/
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nlp1 = nl + 1;
+ nr = iwork[ndimr + i1];
+ nrp1 = nr + 1;
+ nlf = ic - nl;
+ nrf = ic + 1;
+ sqrei = 1;
+ dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &vt[
+ nlf + nlf * vt_dim1], ldvt, &u[nlf + nlf * u_dim1], ldu, &u[
+ nlf + nlf * u_dim1], ldu, &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ itemp = idxq + nlf - 2;
+ i__2 = nl;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[itemp + j] = j;
+/* L10: */
+ }
+ if (i__ == nd) {
+ sqrei = *sqre;
+ } else {
+ sqrei = 1;
+ }
+ nrp1 = nr + sqrei;
+ dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &vt[
+ nrf + nrf * vt_dim1], ldvt, &u[nrf + nrf * u_dim1], ldu, &u[
+ nrf + nrf * u_dim1], ldu, &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ itemp = idxq + ic;
+ i__2 = nr;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[itemp + j - 1] = j;
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Now conquer each subproblem bottom-up. */
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+
+/*
+ Find the first node LF and last node LL on the
+ current level LVL.
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = ((lf) << (1)) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ if (*sqre == 0 && i__ == ll) {
+ sqrei = *sqre;
+ } else {
+ sqrei = 1;
+ }
+ idxqc = idxq + nlf - 1;
+ alpha = d__[ic];
+ beta = e[ic];
+ dlasd1_(&nl, &nr, &sqrei, &d__[nlf], &alpha, &beta, &u[nlf + nlf *
+ u_dim1], ldu, &vt[nlf + nlf * vt_dim1], ldvt, &iwork[
+ idxqc], &iwork[iwk], &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+
+ return 0;
+
+/* End of DLASD0 */
+
+} /* dlasd0_ */
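+/*
+   Note added for clarity (not in the original source): dlasdt_ builds a
+   binary tree whose leaves are bidiagonal subproblems of size at most
+   SMLSIZ.  The first loop above solves those leaves directly with
+   dlasdq_; the conquer loop then walks the levels bottom-up, merging
+   pairs of solved children with dlasd1_, until the root delivers the
+   SVD of the full matrix.
+*/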
+
+/* Subroutine */ int dlasd1_(integer *nl, integer *nr, integer *sqre,
+ doublereal *d__, doublereal *alpha, doublereal *beta, doublereal *u,
+ integer *ldu, doublereal *vt, integer *ldvt, integer *idxq, integer *
+ iwork, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ static integer i__, k, m, n, n1, n2, iq, iz, iu2, ldq, idx, ldu2, ivt2,
+ idxc, idxp, ldvt2;
+ extern /* Subroutine */ int dlasd2_(integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *), dlasd3_(
+ integer *, integer *, integer *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *, integer *, integer *, doublereal *, integer *),
+ dlascl_(char *, integer *, integer *, doublereal *, doublereal *,
+ integer *, integer *, doublereal *, integer *, integer *),
+ dlamrg_(integer *, integer *, doublereal *, integer *, integer *,
+ integer *);
+ static integer isigma;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static doublereal orgnrm;
+ static integer coltyp;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLASD1 computes the SVD of an upper bidiagonal N-by-M matrix B,
+ where N = NL + NR + 1 and M = N + SQRE. DLASD1 is called from DLASD0.
+
+ A related subroutine DLASD7 handles the case in which the singular
+ values (and the singular vectors in factored form) are desired.
+
+ DLASD1 computes the SVD as follows:
+
+ ( D1(in) 0 0 0 )
+ B = U(in) * ( Z1' a Z2' b ) * VT(in)
+ ( 0 0 D2(in) 0 )
+
+ = U(out) * ( D(out) 0) * VT(out)
+
+ where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M
+ with ALPHA and BETA in the NL+1 and NL+2 th entries and zeros
+ elsewhere; and the entry b is empty if SQRE = 0.
+
+ The left singular vectors of the original matrix are stored in U, and
+ the transpose of the right singular vectors are stored in VT, and the
+ singular values are in D. The algorithm consists of three stages:
+
+ The first stage consists of deflating the size of the problem
+ when there are multiple singular values or when there are zeros in
+ the Z vector. For each such occurrence the dimension of the
+ secular equation problem is reduced by one. This stage is
+ performed by the routine DLASD2.
+
+ The second stage consists of calculating the updated
+ singular values. This is done by finding the square roots of the
+ roots of the secular equation via the routine DLASD4 (as called
+ by DLASD3). This routine also calculates the singular vectors of
+ the current problem.
+
+ The final stage consists of computing the updated singular vectors
+ directly using the updated singular values. The singular vectors
+ for the current problem are multiplied with the singular vectors
+ from the overall problem.
+
+ Arguments
+ =========
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has row dimension N = NL + NR + 1,
+ and column dimension M = N + SQRE.
+
+ D (input/output) DOUBLE PRECISION array,
+ dimension (N = NL+NR+1).
+ On entry D(1:NL) contains the singular values of the
+ upper block; and D(NL+2:N) contains the singular values of
+ the lower block. On exit D(1:N) contains the singular values
+ of the modified matrix.
+
+ ALPHA (input) DOUBLE PRECISION
+ Contains the diagonal element associated with the added row.
+
+ BETA (input) DOUBLE PRECISION
+ Contains the off-diagonal element associated with the added
+ row.
+
+ U (input/output) DOUBLE PRECISION array, dimension(LDU,N)
+ On entry U(1:NL, 1:NL) contains the left singular vectors of
+ the upper block; U(NL+2:N, NL+2:N) contains the left singular
+ vectors of the lower block. On exit U contains the left
+ singular vectors of the bidiagonal matrix.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= max( 1, N ).
+
+ VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M)
+ where M = N + SQRE.
+ On entry VT(1:NL+1, 1:NL+1)' contains the right singular
+ vectors of the upper block; VT(NL+2:M, NL+2:M)' contains
+ the right singular vectors of the lower block. On exit
+ VT' contains the right singular vectors of the
+ bidiagonal matrix.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= max( 1, M ).
+
+ IDXQ (output) INTEGER array, dimension(N)
+ This contains the permutation which will reintegrate the
+ subproblem just solved back into sorted order, i.e.
+ D( IDXQ( I = 1, N ) ) will be in ascending order.
+
+ IWORK (workspace) INTEGER array, dimension( 4 * N )
+
+ WORK (workspace) DOUBLE PRECISION array, dimension( 3*M**2 + 2*M )
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --idxq;
+ --iwork;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD1", &i__1);
+ return 0;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+/*
+ The following values are for bookkeeping purposes only. They are
+ integer pointers which indicate the portion of the workspace
+ used by a particular array in DLASD2 and DLASD3.
+*/
+
+ ldu2 = n;
+ ldvt2 = m;
+
+ iz = 1;
+ isigma = iz + m;
+ iu2 = isigma + n;
+ ivt2 = iu2 + ldu2 * n;
+ iq = ivt2 + ldvt2 * m;
+
+ idx = 1;
+ idxc = idx + n;
+ coltyp = idxc + n;
+ idxp = coltyp + n;
+
+/*
+ Scale.
+
+ Computing MAX
+*/
+ d__1 = abs(*alpha), d__2 = abs(*beta);
+ orgnrm = max(d__1,d__2);
+ d__[*nl + 1] = 0.;
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = d__[i__], abs(d__1)) > orgnrm) {
+ orgnrm = (d__1 = d__[i__], abs(d__1));
+ }
+/* L10: */
+ }
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b2865, &n, &c__1, &d__[1], &n,
+ info);
+ *alpha /= orgnrm;
+ *beta /= orgnrm;
+
+/* Deflate singular values. */
+
+ dlasd2_(nl, nr, sqre, &k, &d__[1], &work[iz], alpha, beta, &u[u_offset],
+ ldu, &vt[vt_offset], ldvt, &work[isigma], &work[iu2], &ldu2, &
+ work[ivt2], &ldvt2, &iwork[idxp], &iwork[idx], &iwork[idxc], &
+ idxq[1], &iwork[coltyp], info);
+
+/* Solve Secular Equation and update singular vectors. */
+
+ ldq = k;
+ dlasd3_(nl, nr, sqre, &k, &d__[1], &work[iq], &ldq, &work[isigma], &u[
+ u_offset], ldu, &work[iu2], &ldu2, &vt[vt_offset], ldvt, &work[
+ ivt2], &ldvt2, &iwork[idxc], &iwork[coltyp], &work[iz], info);
+ if (*info != 0) {
+ return 0;
+ }
+
+/* Unscale. */
+
+ dlascl_("G", &c__0, &c__0, &c_b2865, &orgnrm, &n, &c__1, &d__[1], &n,
+ info);
+
+/* Prepare the IDXQ sorting permutation. */
+
+ n1 = k;
+ n2 = n - k;
+ dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
+
+ return 0;
+
+/* End of DLASD1 */
+
+} /* dlasd1_ */
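+/*
+   Note added for clarity (not in the original source): the body above
+   is a deflate / solve / undo sequence.  After scaling by ORGNRM,
+   dlasd2_ deflates the rank-one modified problem down to order K,
+   dlasd3_ solves the secular equation and rebuilds the singular
+   vectors, and dlascl_ plus dlamrg_ undo the scaling and record the
+   sorting permutation in IDXQ.
+*/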
+
+/* Subroutine */ int dlasd2_(integer *nl, integer *nr, integer *sqre, integer
+ *k, doublereal *d__, doublereal *z__, doublereal *alpha, doublereal *
+ beta, doublereal *u, integer *ldu, doublereal *vt, integer *ldvt,
+ doublereal *dsigma, doublereal *u2, integer *ldu2, doublereal *vt2,
+ integer *ldvt2, integer *idxp, integer *idx, integer *idxc, integer *
+ idxq, integer *coltyp, integer *info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset,
+ vt2_dim1, vt2_offset, i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ static doublereal c__;
+ static integer i__, j, m, n;
+ static doublereal s;
+ static integer k2;
+ static doublereal z1;
+ static integer ct, jp;
+ static doublereal eps, tau, tol;
+ static integer psm[4], nlp1, nlp2, idxi, idxj;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ static integer ctot[4], idxjp;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static integer jprev;
+
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), dlacpy_(char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *), dlaset_(char *, integer *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *), xerbla_(char *,
+ integer *);
+ static doublereal hlftol;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DLASD2 merges the two sets of singular values together into a single
+ sorted set. Then it tries to deflate the size of the problem.
+ There are two ways in which deflation can occur: when two or more
+ singular values are close together or if there is a tiny entry in the
+ Z vector. For each such occurrence the order of the related secular
+ equation problem is reduced by one.
+
+ DLASD2 is called from DLASD1.
+
+ Arguments
+ =========
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has N = NL + NR + 1 rows and
+ M = N + SQRE >= N columns.
+
+ K (output) INTEGER
+ Contains the dimension of the non-deflated matrix,
+ This is the order of the related secular equation. 1 <= K <= N.
+
+ D (input/output) DOUBLE PRECISION array, dimension(N)
+ On entry D contains the singular values of the two submatrices
+ to be combined. On exit D contains the trailing (N-K) updated
+ singular values (those which were deflated) sorted into
+ increasing order.
+
+ ALPHA (input) DOUBLE PRECISION
+ Contains the diagonal element associated with the added row.
+
+ BETA (input) DOUBLE PRECISION
+ Contains the off-diagonal element associated with the added
+ row.
+
+ U (input/output) DOUBLE PRECISION array, dimension(LDU,N)
+ On entry U contains the left singular vectors of two
+ submatrices in the two square blocks with corners at (1,1),
+ (NL, NL), and (NL+2, NL+2), (N,N).
+ On exit U contains the trailing (N-K) updated left singular
+ vectors (those which were deflated) in its last N-K columns.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= N.
+
+ Z (output) DOUBLE PRECISION array, dimension(N)
+ On exit Z contains the updating row vector in the secular
+ equation.
+
+ DSIGMA (output) DOUBLE PRECISION array, dimension (N)
+ Contains a copy of the diagonal elements (K-1 singular values
+ and one zero) in the secular equation.
+
+ U2 (output) DOUBLE PRECISION array, dimension(LDU2,N)
+ Contains a copy of the first K-1 left singular vectors which
+ will be used by DLASD3 in a matrix multiply (DGEMM) to solve
+ for the new left singular vectors. U2 is arranged into four
+ blocks. The first block contains a column with 1 at NL+1 and
+ zero everywhere else; the second block contains non-zero
+ entries only at and above NL; the third contains non-zero
+ entries only below NL+1; and the fourth is dense.
+
+ LDU2 (input) INTEGER
+ The leading dimension of the array U2. LDU2 >= N.
+
+ VT (input/output) DOUBLE PRECISION array, dimension(LDVT,M)
+ On entry VT' contains the right singular vectors of two
+ submatrices in the two square blocks with corners at (1,1),
+ (NL+1, NL+1), and (NL+2, NL+2), (M,M).
+ On exit VT' contains the trailing (N-K) updated right singular
+ vectors (those which were deflated) in its last N-K columns.
+ In case SQRE =1, the last row of VT spans the right null
+ space.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= M.
+
+ VT2 (output) DOUBLE PRECISION array, dimension(LDVT2,N)
+ VT2' contains a copy of the first K right singular vectors
+ which will be used by DLASD3 in a matrix multiply (DGEMM) to
+ solve for the new right singular vectors. VT2 is arranged into
+ three blocks. The first block contains a row that corresponds
+ to the special 0 diagonal element in SIGMA; the second block
+          contains non-zeros only at and before NL+1; the third block
+          contains non-zeros only at and after NL+2.
+
+ LDVT2 (input) INTEGER
+ The leading dimension of the array VT2. LDVT2 >= M.
+
+ IDXP (workspace) INTEGER array, dimension(N)
+ This will contain the permutation used to place deflated
+ values of D at the end of the array. On output IDXP(2:K)
+ points to the nondeflated D-values and IDXP(K+1:N)
+ points to the deflated singular values.
+
+ IDX (workspace) INTEGER array, dimension(N)
+ This will contain the permutation used to sort the contents of
+ D into ascending order.
+
+ IDXC (output) INTEGER array, dimension(N)
+ This will contain the permutation used to arrange the columns
+ of the deflated U matrix into three groups: the first group
+ contains non-zero entries only at and above NL, the second
+ contains non-zero entries only below NL+2, and the third is
+ dense.
+
+ COLTYP (workspace/output) INTEGER array, dimension(N)
+ As workspace, this will contain a label which will indicate
+ which of the following types a column in the U2 matrix or a
+ row in the VT2 matrix is:
+ 1 : non-zero in the upper half only
+ 2 : non-zero in the lower half only
+ 3 : dense
+ 4 : deflated
+
+ On exit, it is an array of dimension 4, with COLTYP(I) being
+          the number of columns of type I.
+
+ IDXQ (input) INTEGER array, dimension(N)
+ This contains the permutation which separately sorts the two
+ sub-problems in D into ascending order. Note that entries in
+          the first half of this permutation must first be moved one
+ position backward; and entries in the second half
+ must first have NL+1 added to their values.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --dsigma;
+ u2_dim1 = *ldu2;
+ u2_offset = 1 + u2_dim1;
+ u2 -= u2_offset;
+ vt2_dim1 = *ldvt2;
+ vt2_offset = 1 + vt2_dim1;
+ vt2 -= vt2_offset;
+ --idxp;
+ --idx;
+ --idxc;
+ --idxq;
+ --coltyp;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if (*sqre != 1 && *sqre != 0) {
+ *info = -3;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+ if (*ldu < n) {
+ *info = -10;
+ } else if (*ldvt < m) {
+ *info = -12;
+ } else if (*ldu2 < n) {
+ *info = -15;
+ } else if (*ldvt2 < m) {
+ *info = -17;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD2", &i__1);
+ return 0;
+ }
+
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+
+/*
+ Generate the first part of the vector Z; and move the singular
+ values in the first part of D one position backward.
+*/
+
+ z1 = *alpha * vt[nlp1 + nlp1 * vt_dim1];
+ z__[1] = z1;
+ for (i__ = *nl; i__ >= 1; --i__) {
+ z__[i__ + 1] = *alpha * vt[i__ + nlp1 * vt_dim1];
+ d__[i__ + 1] = d__[i__];
+ idxq[i__ + 1] = idxq[i__] + 1;
+/* L10: */
+ }
+
+/* Generate the second part of the vector Z. */
+
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ z__[i__] = *beta * vt[i__ + nlp2 * vt_dim1];
+/* L20: */
+ }
+
+/* Initialize some reference arrays. */
+
+ i__1 = nlp1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ coltyp[i__] = 1;
+/* L30: */
+ }
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ coltyp[i__] = 2;
+/* L40: */
+ }
+
+/* Sort the singular values into increasing order */
+
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ idxq[i__] += nlp1;
+/* L50: */
+ }
+
+/*
+    DSIGMA, IDXC, and the first column of U2
+ are used as storage space.
+*/
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dsigma[i__] = d__[idxq[i__]];
+ u2[i__ + u2_dim1] = z__[idxq[i__]];
+ idxc[i__] = coltyp[idxq[i__]];
+/* L60: */
+ }
+
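+/*
+   DLAMRG merges the two sorted sublists of DSIGMA (of lengths NL and
+   NR) and returns in IDX(2:N) the permutation that sorts the combined
+   list into ascending order.
+*/
+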
+ dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ idxi = idx[i__] + 1;
+ d__[i__] = dsigma[idxi];
+ z__[i__] = u2[idxi + u2_dim1];
+ coltyp[i__] = idxc[idxi];
+/* L70: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
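+/*
+   The tolerance below works out to TOL = 8 * EPS * MAX( |ALPHA|,
+   |BETA|, |D(N)| ): Z-components of magnitude at most TOL, and gaps
+   between singular values of at most TOL, are treated as exact zeros
+   for deflation purposes.
+*/
+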
+ eps = EPSILON;
+/* Computing MAX */
+ d__1 = abs(*alpha), d__2 = abs(*beta);
+ tol = max(d__1,d__2);
+/* Computing MAX */
+ d__2 = (d__1 = d__[n], abs(d__1));
+ tol = eps * 8. * max(d__2,tol);
+
+/*
+ There are 2 kinds of deflation -- first a value in the z-vector
+ is small, second two (or more) singular values are very close
+ together (their difference is small).
+
+ If the value in the z-vector is small, we simply permute the
+ array so that the corresponding singular value is moved to the
+ end.
+
+ If two values in the D-vector are close, we perform a two-sided
+ rotation designed to make one of the corresponding z-vector
+ entries zero, and then permute the array so that the deflated
+ singular value is moved to the end.
+
+ If there are multiple singular values then the problem deflates.
+    Here the number of equal singular values is found. As each equal
+ singular value is found, an elementary reflector is computed to
+ rotate the corresponding singular subspace so that the
+ corresponding components of Z are zero in this new basis.
+*/
+
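+/*
+   K counts the non-deflated values, which fill DSIGMA and the first
+   column of U2 from the front, while K2 walks backward from N+1 so
+   that IDXP(K2:N) collects the deflated entries in reverse order of
+   detection.
+*/
+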
+ *k = 1;
+ k2 = n + 1;
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ coltyp[j] = 4;
+ if (j == n) {
+ goto L120;
+ }
+ } else {
+ jprev = j;
+ goto L90;
+ }
+/* L80: */
+ }
+L90:
+ j = jprev;
+L100:
+ ++j;
+ if (j > n) {
+ goto L110;
+ }
+ if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ coltyp[j] = 4;
+ } else {
+
+/* Check if singular values are close enough to allow deflation. */
+
+ if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ s = z__[jprev];
+ c__ = z__[j];
+
+/*
+ Find sqrt(a**2+b**2) without overflow or
+ destructive underflow.
+*/
+
+ tau = dlapy2_(&c__, &s);
+ c__ /= tau;
+ s = -s / tau;
+ z__[j] = tau;
+ z__[jprev] = 0.;
+
+/*
+ Apply back the Givens rotation to the left and right
+ singular vector matrices.
+*/
+
+ idxjp = idxq[idx[jprev] + 1];
+ idxj = idxq[idx[j] + 1];
+ if (idxjp <= nlp1) {
+ --idxjp;
+ }
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ drot_(&n, &u[idxjp * u_dim1 + 1], &c__1, &u[idxj * u_dim1 + 1], &
+ c__1, &c__, &s);
+ drot_(&m, &vt[idxjp + vt_dim1], ldvt, &vt[idxj + vt_dim1], ldvt, &
+ c__, &s);
+ if (coltyp[j] != coltyp[jprev]) {
+ coltyp[j] = 3;
+ }
+ coltyp[jprev] = 4;
+ --k2;
+ idxp[k2] = jprev;
+ jprev = j;
+ } else {
+ ++(*k);
+ u2[*k + u2_dim1] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+ jprev = j;
+ }
+ }
+ goto L100;
+L110:
+
+/* Record the last singular value. */
+
+ ++(*k);
+ u2[*k + u2_dim1] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+
+L120:
+
+/*
+ Count up the total number of the various types of columns, then
+ form a permutation which positions the four column types into
+ four groups of uniform structure (although one or more of these
+ groups may be empty).
+*/
+
+ for (j = 1; j <= 4; ++j) {
+ ctot[j - 1] = 0;
+/* L130: */
+ }
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ ct = coltyp[j];
+ ++ctot[ct - 1];
+/* L140: */
+ }
+
+/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
+
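+/*
+   PSM( T ) is the position at which the next column of type T will be
+   placed; the four types are laid out contiguously starting at column
+   2, in the order counted in CTOT.
+*/
+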
+ psm[0] = 2;
+ psm[1] = ctot[0] + 2;
+ psm[2] = psm[1] + ctot[1];
+ psm[3] = psm[2] + ctot[2];
+
+/*
+ Fill out the IDXC array so that the permutation which it induces
+ will place all type-1 columns first, all type-2 columns next,
+ then all type-3's, and finally all type-4's, starting from the
+ second column. This applies similarly to the rows of VT.
+*/
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ ct = coltyp[jp];
+ idxc[psm[ct - 1]] = j;
+ ++psm[ct - 1];
+/* L150: */
+ }
+
+/*
+ Sort the singular values and corresponding singular vectors into
+ DSIGMA, U2, and VT2 respectively. The singular values/vectors
+ which were not deflated go into the first K slots of DSIGMA, U2,
+ and VT2 respectively, while those which were deflated go into the
+ last N - K slots, except that the first column/row will be treated
+ separately.
+*/
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ dsigma[j] = d__[jp];
+ idxj = idxq[idx[idxp[idxc[j]]] + 1];
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ dcopy_(&n, &u[idxj * u_dim1 + 1], &c__1, &u2[j * u2_dim1 + 1], &c__1);
+ dcopy_(&m, &vt[idxj + vt_dim1], ldvt, &vt2[j + vt2_dim1], ldvt2);
+/* L160: */
+ }
+
+/* Determine DSIGMA(1), DSIGMA(2) and Z(1) */
+
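+/*
+   DSIGMA(1) is the exact zero pole of the secular equation; DSIGMA(2)
+   is lifted to TOL/2 if it is tiny, which keeps the first two poles
+   separated.
+*/
+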
+ dsigma[1] = 0.;
+ hlftol = tol / 2.;
+ if (abs(dsigma[2]) <= hlftol) {
+ dsigma[2] = hlftol;
+ }
+ if (m > n) {
+ z__[1] = dlapy2_(&z1, &z__[m]);
+ if (z__[1] <= tol) {
+ c__ = 1.;
+ s = 0.;
+ z__[1] = tol;
+ } else {
+ c__ = z1 / z__[1];
+ s = z__[m] / z__[1];
+ }
+ } else {
+ if (abs(z1) <= tol) {
+ z__[1] = tol;
+ } else {
+ z__[1] = z1;
+ }
+ }
+
+/* Move the rest of the updating row to Z. */
+
+ i__1 = *k - 1;
+ dcopy_(&i__1, &u2[u2_dim1 + 2], &c__1, &z__[2], &c__1);
+
+/*
+ Determine the first column of U2, the first row of VT2 and the
+ last row of VT.
+*/
+
+ dlaset_("A", &n, &c__1, &c_b2879, &c_b2879, &u2[u2_offset], ldu2);
+ u2[nlp1 + u2_dim1] = 1.;
+ if (m > n) {
+ i__1 = nlp1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vt[m + i__ * vt_dim1] = -s * vt[nlp1 + i__ * vt_dim1];
+ vt2[i__ * vt2_dim1 + 1] = c__ * vt[nlp1 + i__ * vt_dim1];
+/* L170: */
+ }
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ vt2[i__ * vt2_dim1 + 1] = s * vt[m + i__ * vt_dim1];
+ vt[m + i__ * vt_dim1] = c__ * vt[m + i__ * vt_dim1];
+/* L180: */
+ }
+ } else {
+ dcopy_(&m, &vt[nlp1 + vt_dim1], ldvt, &vt2[vt2_dim1 + 1], ldvt2);
+ }
+ if (m > n) {
+ dcopy_(&m, &vt[m + vt_dim1], ldvt, &vt2[m + vt2_dim1], ldvt2);
+ }
+
+/*
+ The deflated singular values and their corresponding vectors go
+ into the back of D, U, and V respectively.
+*/
+
+ if (n > *k) {
+ i__1 = n - *k;
+ dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ i__1 = n - *k;
+ dlacpy_("A", &n, &i__1, &u2[(*k + 1) * u2_dim1 + 1], ldu2, &u[(*k + 1)
+ * u_dim1 + 1], ldu);
+ i__1 = n - *k;
+ dlacpy_("A", &i__1, &m, &vt2[*k + 1 + vt2_dim1], ldvt2, &vt[*k + 1 +
+ vt_dim1], ldvt);
+ }
+
+/* Copy CTOT into COLTYP for referencing in DLASD3. */
+
+ for (j = 1; j <= 4; ++j) {
+ coltyp[j] = ctot[j - 1];
+/* L190: */
+ }
+
+ return 0;
+
+/* End of DLASD2 */
+
+} /* dlasd2_ */
+
+/* Subroutine */ int dlasd3_(integer *nl, integer *nr, integer *sqre, integer
+ *k, doublereal *d__, doublereal *q, integer *ldq, doublereal *dsigma,
+ doublereal *u, integer *ldu, doublereal *u2, integer *ldu2,
+ doublereal *vt, integer *ldvt, doublereal *vt2, integer *ldvt2,
+ integer *idxc, integer *ctot, doublereal *z__, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1,
+ vt_offset, vt2_dim1, vt2_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer i__, j, m, n, jc;
+ static doublereal rho;
+ static integer nlp1, nlp2, nrp1;
+ static doublereal temp;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ static integer ctemp;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static integer ktemp;
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *), dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlacpy_(char *, integer *, integer
+ *, doublereal *, integer *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DLASD3 finds all the square roots of the roots of the secular
+ equation, as defined by the values in D and Z. It makes the
+ appropriate calls to DLASD4 and then updates the singular
+ vectors by matrix multiplication.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ DLASD3 is called from DLASD1.
+
+ Arguments
+ =========
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has N = NL + NR + 1 rows and
+ M = N + SQRE >= N columns.
+
+ K (input) INTEGER
+          The size of the secular equation, 1 <= K <= N.
+
+ D (output) DOUBLE PRECISION array, dimension(K)
+ On exit the square roots of the roots of the secular equation,
+ in ascending order.
+
+ Q (workspace) DOUBLE PRECISION array,
+ dimension at least (LDQ,K).
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= K.
+
+ DSIGMA (input) DOUBLE PRECISION array, dimension(K)
+ The first K elements of this array contain the old roots
+ of the deflated updating problem. These are the poles
+ of the secular equation.
+
+ U (input) DOUBLE PRECISION array, dimension (LDU, N)
+ The last N - K columns of this matrix contain the deflated
+ left singular vectors.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= N.
+
+ U2 (input) DOUBLE PRECISION array, dimension (LDU2, N)
+ The first K columns of this matrix contain the non-deflated
+ left singular vectors for the split problem.
+
+ LDU2 (input) INTEGER
+ The leading dimension of the array U2. LDU2 >= N.
+
+ VT (input) DOUBLE PRECISION array, dimension (LDVT, M)
+ The last M - K columns of VT' contain the deflated
+ right singular vectors.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= N.
+
+ VT2 (input) DOUBLE PRECISION array, dimension (LDVT2, N)
+ The first K columns of VT2' contain the non-deflated
+ right singular vectors for the split problem.
+
+ LDVT2 (input) INTEGER
+ The leading dimension of the array VT2. LDVT2 >= N.
+
+ IDXC (input) INTEGER array, dimension ( N )
+ The permutation used to arrange the columns of U (and rows of
+ VT) into three groups: the first group contains non-zero
+          entries only at and above (or before) NL+1; the second
+ contains non-zero entries only at and below (or after) NL+2;
+ and the third is dense. The first column of U and the row of
+ VT are treated separately, however.
+
+ The rows of the singular vectors found by DLASD4
+ must be likewise permuted before the matrix multiplies can
+ take place.
+
+ CTOT (input) INTEGER array, dimension ( 4 )
+ A count of the total number of the various types of columns
+ in U (or rows in VT), as described in IDXC. The fourth column
+ type is any column which has been deflated.
+
+ Z (input) DOUBLE PRECISION array, dimension (K)
+ The first K elements of this array contain the components
+ of the deflation-adjusted updating row vector.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+          > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --dsigma;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ u2_dim1 = *ldu2;
+ u2_offset = 1 + u2_dim1;
+ u2 -= u2_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ vt2_dim1 = *ldvt2;
+ vt2_offset = 1 + vt2_dim1;
+ vt2 -= vt2_offset;
+ --idxc;
+ --ctot;
+ --z__;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if (*sqre != 1 && *sqre != 0) {
+ *info = -3;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+
+ if ((*k < 1) || (*k > n)) {
+ *info = -4;
+ } else if (*ldq < *k) {
+ *info = -7;
+ } else if (*ldu < n) {
+ *info = -10;
+ } else if (*ldu2 < n) {
+ *info = -12;
+ } else if (*ldvt < m) {
+ *info = -14;
+ } else if (*ldvt2 < m) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
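+/*
+   For K = 1 the lone non-deflated singular value is |Z(1)|, and the
+   singular vectors are copied through unchanged, up to a sign flip of
+   U when Z(1) is negative.
+*/
+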
+ if (*k == 1) {
+ d__[1] = abs(z__[1]);
+ dcopy_(&m, &vt2[vt2_dim1 + 1], ldvt2, &vt[vt_dim1 + 1], ldvt);
+ if (z__[1] > 0.) {
+ dcopy_(&n, &u2[u2_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
+ } else {
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ u[i__ + u_dim1] = -u2[i__ + u2_dim1];
+/* L10: */
+ }
+ }
+ return 0;
+ }
+
+/*
+ Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can
+ be computed with high relative accuracy (barring over/underflow).
+ This is a problem on machines without a guard digit in
+ add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+ The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I),
+ which on any of these machines zeros out the bottommost
+ bit of DSIGMA(I) if it is 1; this makes the subsequent
+ subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation
+ occurs. On binary machines with a guard digit (almost all
+ machines) it does not change DSIGMA(I) at all. On hexadecimal
+ and decimal machines with a guard digit, it slightly
+ changes the bottommost bits of DSIGMA(I). It does not account
+ for hexadecimal or decimal machines without guard digits
+ (we know of none). We use a subroutine call to compute
+    2*DSIGMA(I) to prevent optimizing compilers from eliminating
+ this code.
+*/
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
+/* L20: */
+ }
+
+/* Keep a copy of Z. */
+
+ dcopy_(k, &z__[1], &c__1, &q[q_offset], &c__1);
+
+/* Normalize Z. */
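+/*
+   Z is scaled to unit norm while RHO is replaced by the squared norm
+   of the original Z, which is the form of the secular equation that
+   DLASD4 expects.
+*/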
+
+ rho = dnrm2_(k, &z__[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &rho, &c_b2865, k, &c__1, &z__[1], k, info);
+ rho *= rho;
+
+/* Find the new singular values. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dlasd4_(k, &j, &dsigma[1], &z__[1], &u[j * u_dim1 + 1], &rho, &d__[j],
+ &vt[j * vt_dim1 + 1], info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ return 0;
+ }
+/* L30: */
+ }
+
+/* Compute updated Z. */
+
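+/*
+   Each DLASD4 call above left DSIGMA(.) - SIGMA_j in U(:,j) and
+   DSIGMA(.) + SIGMA_j in VT(:,j), where SIGMA_j = D(j) is the j-th
+   new singular value.  The loops below therefore accumulate, factor
+   by factor,
+
+      Z(I)^2 = prod( j=1..K ) ( DSIGMA(I)^2 - SIGMA_j^2 ) /
+               prod( j != I ) ( DSIGMA(I)^2 - DSIGMA(j)^2 ),
+
+   which gives |Z(I)| to high relative accuracy; the sign is taken
+   from the copy of the original Z saved in Q.
+*/
+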
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ z__[i__] = u[i__ + *k * u_dim1] * vt[i__ + *k * vt_dim1];
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
+ i__] - dsigma[j]) / (dsigma[i__] + dsigma[j]);
+/* L40: */
+ }
+ i__2 = *k - 1;
+ for (j = i__; j <= i__2; ++j) {
+ z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
+ i__] - dsigma[j + 1]) / (dsigma[i__] + dsigma[j + 1]);
+/* L50: */
+ }
+ d__2 = sqrt((d__1 = z__[i__], abs(d__1)));
+ z__[i__] = d_sign(&d__2, &q[i__ + q_dim1]);
+/* L60: */
+ }
+
+/*
+ Compute left singular vectors of the modified diagonal matrix,
+ and store related information for the right singular vectors.
+*/
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vt[i__ * vt_dim1 + 1] = z__[1] / u[i__ * u_dim1 + 1] / vt[i__ *
+ vt_dim1 + 1];
+ u[i__ * u_dim1 + 1] = -1.;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ vt[j + i__ * vt_dim1] = z__[j] / u[j + i__ * u_dim1] / vt[j + i__
+ * vt_dim1];
+ u[j + i__ * u_dim1] = dsigma[j] * vt[j + i__ * vt_dim1];
+/* L70: */
+ }
+ temp = dnrm2_(k, &u[i__ * u_dim1 + 1], &c__1);
+ q[i__ * q_dim1 + 1] = u[i__ * u_dim1 + 1] / temp;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ jc = idxc[j];
+ q[j + i__ * q_dim1] = u[jc + i__ * u_dim1] / temp;
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Update the left singular vector matrix. */
+
+ if (*k == 2) {
+ dgemm_("N", "N", &n, k, k, &c_b2865, &u2[u2_offset], ldu2, &q[
+ q_offset], ldq, &c_b2879, &u[u_offset], ldu);
+ goto L100;
+ }
+ if (ctot[1] > 0) {
+ dgemm_("N", "N", nl, k, &ctot[1], &c_b2865, &u2[((u2_dim1) << (1)) +
+ 1], ldu2, &q[q_dim1 + 2], ldq, &c_b2879, &u[u_dim1 + 1], ldu);
+ if (ctot[3] > 0) {
+ ktemp = ctot[1] + 2 + ctot[2];
+ dgemm_("N", "N", nl, k, &ctot[3], &c_b2865, &u2[ktemp * u2_dim1 +
+ 1], ldu2, &q[ktemp + q_dim1], ldq, &c_b2865, &u[u_dim1 +
+ 1], ldu);
+ }
+ } else if (ctot[3] > 0) {
+ ktemp = ctot[1] + 2 + ctot[2];
+ dgemm_("N", "N", nl, k, &ctot[3], &c_b2865, &u2[ktemp * u2_dim1 + 1],
+ ldu2, &q[ktemp + q_dim1], ldq, &c_b2879, &u[u_dim1 + 1], ldu);
+ } else {
+ dlacpy_("F", nl, k, &u2[u2_offset], ldu2, &u[u_offset], ldu);
+ }
+ dcopy_(k, &q[q_dim1 + 1], ldq, &u[nlp1 + u_dim1], ldu);
+ ktemp = ctot[1] + 2;
+ ctemp = ctot[2] + ctot[3];
+ dgemm_("N", "N", nr, k, &ctemp, &c_b2865, &u2[nlp2 + ktemp * u2_dim1],
+ ldu2, &q[ktemp + q_dim1], ldq, &c_b2879, &u[nlp2 + u_dim1], ldu);
+
+/* Generate the right singular vectors. */
+
+L100:
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = dnrm2_(k, &vt[i__ * vt_dim1 + 1], &c__1);
+ q[i__ + q_dim1] = vt[i__ * vt_dim1 + 1] / temp;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ jc = idxc[j];
+ q[i__ + j * q_dim1] = vt[jc + i__ * vt_dim1] / temp;
+/* L110: */
+ }
+/* L120: */
+ }
+
+/* Update the right singular vector matrix. */
+
+ if (*k == 2) {
+ dgemm_("N", "N", k, &m, k, &c_b2865, &q[q_offset], ldq, &vt2[
+ vt2_offset], ldvt2, &c_b2879, &vt[vt_offset], ldvt);
+ return 0;
+ }
+ ktemp = ctot[1] + 1;
+ dgemm_("N", "N", k, &nlp1, &ktemp, &c_b2865, &q[q_dim1 + 1], ldq, &vt2[
+ vt2_dim1 + 1], ldvt2, &c_b2879, &vt[vt_dim1 + 1], ldvt);
+ ktemp = ctot[1] + 2 + ctot[2];
+ if (ktemp <= *ldvt2) {
+ dgemm_("N", "N", k, &nlp1, &ctot[3], &c_b2865, &q[ktemp * q_dim1 + 1],
+ ldq, &vt2[ktemp + vt2_dim1], ldvt2, &c_b2865, &vt[vt_dim1 +
+ 1], ldvt);
+ }
+
+ ktemp = ctot[1] + 1;
+ nrp1 = *nr + *sqre;
+ if (ktemp > 1) {
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ q[i__ + ktemp * q_dim1] = q[i__ + q_dim1];
+/* L130: */
+ }
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ vt2[ktemp + i__ * vt2_dim1] = vt2[i__ * vt2_dim1 + 1];
+/* L140: */
+ }
+ }
+ ctemp = ctot[2] + 1 + ctot[3];
+ dgemm_("N", "N", k, &nrp1, &ctemp, &c_b2865, &q[ktemp * q_dim1 + 1], ldq,
+ &vt2[ktemp + nlp2 * vt2_dim1], ldvt2, &c_b2879, &vt[nlp2 *
+ vt_dim1 + 1], ldvt);
+
+ return 0;
+
+/* End of DLASD3 */
+
+} /* dlasd3_ */
+
+/* Subroutine */ int dlasd4_(integer *n, integer *i__, doublereal *d__,
+ doublereal *z__, doublereal *delta, doublereal *rho, doublereal *
+ sigma, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal a, b, c__;
+ static integer j;
+ static doublereal w, dd[3];
+ static integer ii;
+ static doublereal dw, zz[3];
+ static integer ip1;
+ static doublereal eta, phi, eps, tau, psi;
+ static integer iim1, iip1;
+ static doublereal dphi, dpsi;
+ static integer iter;
+ static doublereal temp, prew, sg2lb, sg2ub, temp1, temp2, dtiim, delsq,
+ dtiip;
+ static integer niter;
+ static doublereal dtisq;
+ static logical swtch;
+ static doublereal dtnsq;
+ extern /* Subroutine */ int dlaed6_(integer *, logical *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *)
+ , dlasd5_(integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+ static doublereal delsq2, dtnsq1;
+ static logical swtch3;
+
+ static logical orgati;
+ static doublereal erretm, dtipsq, rhoinv;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ This subroutine computes the square root of the I-th updated
+ eigenvalue of a positive symmetric rank-one modification to
+ a positive diagonal matrix whose entries are given as the squares
+    of the corresponding entries in the array d. It is assumed that
+
+ 0 <= D(i) < D(j) for i < j
+
+ and that RHO > 0. This is arranged by the calling routine, and is
+ no loss in generality. The rank-one modified system is thus
+
+          diag( D ) * diag( D ) + RHO * Z * Z_transpose,
+
+ where we assume the Euclidean norm of Z is 1.
+
+ The method consists of approximating the rational functions in the
+ secular equation by simpler interpolating rational functions.
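+
+    In the notation of the code below, the secular function evaluated
+    at a trial SIGMA is
+
+       F( SIGMA ) = RHOINV + SUM( j=1..N ) Z( j )^2 /
+                    ( ( D( j ) - SIGMA ) * ( D( j ) + SIGMA ) ),
+
+    where RHOINV = 1 / RHO.  PSI accumulates the terms whose poles lie
+    below the chosen origin index, PHI those above it, and the origin
+    term itself is added separately.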
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The length of all arrays.
+
+ I (input) INTEGER
+ The index of the eigenvalue to be computed. 1 <= I <= N.
+
+ D (input) DOUBLE PRECISION array, dimension ( N )
+ The original eigenvalues. It is assumed that they are in
+ order, 0 <= D(I) < D(J) for I < J.
+
+ Z (input) DOUBLE PRECISION array, dimension ( N )
+ The components of the updating vector.
+
+ DELTA (output) DOUBLE PRECISION array, dimension ( N )
+ If N .ne. 1, DELTA contains (D(j) - sigma_I) in its j-th
+ component. If N = 1, then DELTA(1) = 1. The vector DELTA
+ contains the information necessary to construct the
+ (singular) eigenvectors.
+
+ RHO (input) DOUBLE PRECISION
+ The scalar in the symmetric updating formula.
+
+ SIGMA (output) DOUBLE PRECISION
+          The computed sigma_I, the square root of the I-th updated
+          eigenvalue.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension ( N )
+ If N .ne. 1, WORK contains (D(j) + sigma_I) in its j-th
+ component. If N = 1, then WORK( 1 ) = 1.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ > 0: if INFO = 1, the updating process failed.
+
+ Internal Parameters
+ ===================
+
+ Logical variable ORGATI (origin-at-i?) is used for distinguishing
+ whether D(i) or D(i+1) is treated as the origin.
+
+ ORGATI = .true. origin at i
+ ORGATI = .false. origin at i+1
+
+ Logical variable SWTCH3 (switch-for-3-poles?) is for noting
+ if we are working with THREE poles!
+
+ MAXIT is the maximum number of iterations allowed for each
+ eigenvalue.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ren-Cang Li, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Since this routine is called in an inner loop, we do no argument
+ checking.
+
+ Quick return for N=1 and 2.
+*/
+
+ /* Parameter adjustments */
+ --work;
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n == 1) {
+
+/* Presumably, I=1 upon entry */
+
+ *sigma = sqrt(d__[1] * d__[1] + *rho * z__[1] * z__[1]);
+ delta[1] = 1.;
+ work[1] = 1.;
+ return 0;
+ }
+ if (*n == 2) {
+ dlasd5_(i__, &d__[1], &z__[1], &delta[1], rho, sigma, &work[1]);
+ return 0;
+ }
+
+/* Compute machine epsilon */
+
+ eps = EPSILON;
+ rhoinv = 1. / *rho;
+
+/* The case I = N */
+
+ if (*i__ == *n) {
+
+/* Initialize some basic variables */
+
+ ii = *n - 1;
+ niter = 1;
+
+/* Calculate initial guess */
+
+ temp = *rho / 2.;
+
+/*
+ If ||Z||_2 is not one, then TEMP should be set to
+ RHO * ||Z||_2^2 / TWO
+*/
+
+ temp1 = temp / (d__[*n] + sqrt(d__[*n] * d__[*n] + temp));
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[*n] + temp1;
+ delta[j] = d__[j] - d__[*n] - temp1;
+/* L10: */
+ }
+
+ psi = 0.;
+ i__1 = *n - 2;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / (delta[j] * work[j]);
+/* L20: */
+ }
+
+ c__ = rhoinv + psi;
+ w = c__ + z__[ii] * z__[ii] / (delta[ii] * work[ii]) + z__[*n] * z__[*
+ n] / (delta[*n] * work[*n]);
+
+ if (w <= 0.) {
+ temp1 = sqrt(d__[*n] * d__[*n] + *rho);
+ temp = z__[*n - 1] * z__[*n - 1] / ((d__[*n - 1] + temp1) * (d__[*
+ n] - d__[*n - 1] + *rho / (d__[*n] + temp1))) + z__[*n] *
+ z__[*n] / *rho;
+
+/*
+ The following TAU is to approximate
+ SIGMA_n^2 - D( N )*D( N )
+*/
+
+ if (c__ <= temp) {
+ tau = *rho;
+ } else {
+ delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
+ a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*
+ n];
+ b = z__[*n] * z__[*n] * delsq;
+ if (a < 0.) {
+ tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+ }
+ }
+
+/*
+ It can be proved that
+ D(N)^2+RHO/2 <= SIGMA_n^2 < D(N)^2+TAU <= D(N)^2+RHO
+*/
+
+ } else {
+ delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
+ a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
+ b = z__[*n] * z__[*n] * delsq;
+
+/*
+ The following TAU is to approximate
+ SIGMA_n^2 - D( N )*D( N )
+*/
+
+ if (a < 0.) {
+ tau = b * 2. / (sqrt(a * a + b * 4. * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4. * c__)) / (c__ * 2.);
+ }
+
+/*
+ It can be proved that
+ D(N)^2 < D(N)^2+TAU < SIGMA(N)^2 < D(N)^2+RHO/2
+*/
+
+ }
+
+/* The following ETA is to approximate SIGMA_n - D( N ) */
+
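+/*
+   using the identity
+      sqrt( D(N)^2 + TAU ) - D(N) = TAU / ( D(N) + sqrt( D(N)^2 + TAU ) ),
+   which avoids cancellation when TAU is small compared with D(N)^2.
+*/
+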
+ eta = tau / (d__[*n] + sqrt(d__[*n] * d__[*n] + tau));
+
+ *sigma = d__[*n] + eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - eta;
+ work[j] = d__[j] + d__[*i__] + eta;
+/* L30: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (delta[j] * work[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L40: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / (delta[*n] * work[*n]);
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
+ + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ dtnsq1 = work[*n - 1] * delta[*n - 1];
+ dtnsq = work[*n] * delta[*n];
+ c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
+ a = (dtnsq + dtnsq1) * w - dtnsq * dtnsq1 * (dpsi + dphi);
+ b = dtnsq * dtnsq1 * w;
+ if (c__ < 0.) {
+ c__ = abs(c__);
+ }
+ if (c__ == 0.) {
+ eta = *rho - *sigma * *sigma;
+ } else if (a >= 0.) {
+ eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (c__
+ * 2.);
+ } else {
+ eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))
+ );
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta > 0.) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = eta - dtnsq;
+ if (temp > *rho) {
+ eta = *rho + dtnsq;
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(eta + *sigma * *sigma);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+ work[j] += eta;
+/* L50: */
+ }
+
+ *sigma += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L60: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / (work[*n] * delta[*n]);
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (dpsi
+ + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Main loop to update the values of the array DELTA */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 20; ++niter) {
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+/* Calculate the new step */
+
+ dtnsq1 = work[*n - 1] * delta[*n - 1];
+ dtnsq = work[*n] * delta[*n];
+ c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
+ a = (dtnsq + dtnsq1) * w - dtnsq1 * dtnsq * (dpsi + dphi);
+ b = dtnsq1 * dtnsq * w;
+ if (a >= 0.) {
+ eta = (a + sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+ c__ * 2.);
+ } else {
+ eta = b * 2. / (a - sqrt((d__1 = a * a - b * 4. * c__, abs(
+ d__1))));
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta > 0.) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = eta - dtnsq;
+ if (temp <= 0.) {
+ eta /= 2.;
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(eta + *sigma * *sigma);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+ work[j] += eta;
+/* L70: */
+ }
+
+ *sigma += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L80: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / (work[*n] * delta[*n]);
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8. + erretm - phi + rhoinv + abs(tau) * (
+ dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+/* L90: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+ goto L240;
+
+/* End for the case I = N */
+
+ } else {
+
+/* The case for I < N */
+
+ niter = 1;
+ ip1 = *i__ + 1;
+
+/* Calculate initial guess */
+
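+/*
+   The first trial point is the midpoint of the bracketing interval in
+   squared coordinates: DELSQ2 = ( D(I+1)^2 - D(I)^2 ) / 2, so that
+   D(I)^2 + DELSQ2 = ( D(I)^2 + D(I+1)^2 ) / 2 and
+      TEMP = sqrt( ( D(I)^2 + D(I+1)^2 ) / 2 ) - D(I).
+*/
+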
+ delsq = (d__[ip1] - d__[*i__]) * (d__[ip1] + d__[*i__]);
+ delsq2 = delsq / 2.;
+ temp = delsq2 / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + delsq2));
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[*i__] + temp;
+ delta[j] = d__[j] - d__[*i__] - temp;
+/* L100: */
+ }
+
+ psi = 0.;
+ i__1 = *i__ - 1;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / (work[j] * delta[j]);
+/* L110: */
+ }
+
+ phi = 0.;
+ i__1 = *i__ + 2;
+ for (j = *n; j >= i__1; --j) {
+ phi += z__[j] * z__[j] / (work[j] * delta[j]);
+/* L120: */
+ }
+ c__ = rhoinv + psi + phi;
+ w = c__ + z__[*i__] * z__[*i__] / (work[*i__] * delta[*i__]) + z__[
+ ip1] * z__[ip1] / (work[ip1] * delta[ip1]);
+
+ if (w > 0.) {
+
+/*
+ d(i)^2 < the ith sigma^2 < (d(i)^2+d(i+1)^2)/2
+
+ We choose d(i) as origin.
+*/
+
+ orgati = TRUE_;
+ sg2lb = 0.;
+ sg2ub = delsq2;
+ a = c__ * delsq + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
+ b = z__[*i__] * z__[*i__] * delsq;
+ if (a > 0.) {
+ tau = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+ d__1))));
+ } else {
+ tau = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+ c__ * 2.);
+ }
+
+/*
+ TAU now is an estimation of SIGMA^2 - D( I )^2. The
+ following, however, is the corresponding estimation of
+ SIGMA - D( I ).
+*/
+
+ eta = tau / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + tau));
+ } else {
+
+/*
+              (d(i)^2+d(i+1)^2)/2 <= the ith sigma^2 < d(i+1)^2
+
+ We choose d(i+1) as origin.
+*/
+
+ orgati = FALSE_;
+ sg2lb = -delsq2;
+ sg2ub = 0.;
+ a = c__ * delsq - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
+ b = z__[ip1] * z__[ip1] * delsq;
+ if (a < 0.) {
+ tau = b * 2. / (a - sqrt((d__1 = a * a + b * 4. * c__, abs(
+ d__1))));
+ } else {
+ tau = -(a + sqrt((d__1 = a * a + b * 4. * c__, abs(d__1)))) /
+ (c__ * 2.);
+ }
+
+/*
+ TAU now is an estimation of SIGMA^2 - D( IP1 )^2. The
+ following, however, is the corresponding estimation of
+ SIGMA - D( IP1 ).
+*/
+
+ eta = tau / (d__[ip1] + sqrt((d__1 = d__[ip1] * d__[ip1] + tau,
+ abs(d__1))));
+ }
+
+ if (orgati) {
+ ii = *i__;
+ *sigma = d__[*i__] + eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[*i__] + eta;
+ delta[j] = d__[j] - d__[*i__] - eta;
+/* L130: */
+ }
+ } else {
+ ii = *i__ + 1;
+ *sigma = d__[ip1] + eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[ip1] + eta;
+ delta[j] = d__[j] - d__[ip1] - eta;
+/* L140: */
+ }
+ }
+ iim1 = ii - 1;
+ iip1 = ii + 1;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L150: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.;
+ phi = 0.;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L160: */
+ }
+
+ w = rhoinv + phi + psi;
+
+/*
+ W is the value of the secular function with
+ its ii-th element removed.
+*/
+
+ swtch3 = FALSE_;
+ if (orgati) {
+ if (w < 0.) {
+ swtch3 = TRUE_;
+ }
+ } else {
+ if (w > 0.) {
+ swtch3 = TRUE_;
+ }
+ }
+ if ((ii == 1) || (ii == *n)) {
+ swtch3 = FALSE_;
+ }
+
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w += temp;
+ erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. +
+ abs(tau) * dw;
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+ if (w <= 0.) {
+ sg2lb = max(sg2lb,tau);
+ } else {
+ sg2ub = min(sg2ub,tau);
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ if (! swtch3) {
+ dtipsq = work[ip1] * delta[ip1];
+ dtisq = work[*i__] * delta[*i__];
+ if (orgati) {
+/* Computing 2nd power */
+ d__1 = z__[*i__] / dtisq;
+ c__ = w - dtipsq * dw + delsq * (d__1 * d__1);
+ } else {
+/* Computing 2nd power */
+ d__1 = z__[ip1] / dtipsq;
+ c__ = w - dtisq * dw - delsq * (d__1 * d__1);
+ }
+ a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
+ b = dtipsq * dtisq * w;
+ if (c__ == 0.) {
+ if (a == 0.) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi +
+ dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + dtisq * dtisq * (dpsi +
+ dphi);
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.) {
+ eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1)))) / (
+ c__ * 2.);
+ } else {
+ eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__, abs(
+ d__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ dtiim = work[iim1] * delta[iim1];
+ dtiip = work[iip1] * delta[iip1];
+ temp = rhoinv + psi + phi;
+ if (orgati) {
+ temp1 = z__[iim1] / dtiim;
+ temp1 *= temp1;
+ c__ = temp - dtiip * (dpsi + dphi) - (d__[iim1] - d__[iip1]) *
+ (d__[iim1] + d__[iip1]) * temp1;
+ zz[0] = z__[iim1] * z__[iim1];
+ if (dpsi < temp1) {
+ zz[2] = dtiip * dtiip * dphi;
+ } else {
+ zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
+ }
+ } else {
+ temp1 = z__[iip1] / dtiip;
+ temp1 *= temp1;
+ c__ = temp - dtiim * (dpsi + dphi) - (d__[iip1] - d__[iim1]) *
+ (d__[iim1] + d__[iip1]) * temp1;
+ if (dphi < temp1) {
+ zz[0] = dtiim * dtiim * dpsi;
+ } else {
+ zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
+ }
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ zz[1] = z__[ii] * z__[ii];
+ dd[0] = dtiim;
+ dd[1] = delta[ii] * work[ii];
+ dd[2] = dtiip;
+ dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
+ if (*info != 0) {
+ goto L240;
+ }
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta >= 0.) {
+ eta = -w / dw;
+ }
+ if (orgati) {
+ temp1 = work[*i__] * delta[*i__];
+ temp = eta - temp1;
+ } else {
+ temp1 = work[ip1] * delta[ip1];
+ temp = eta - temp1;
+ }
+ if ((temp > sg2ub) || (temp < sg2lb)) {
+ if (w < 0.) {
+ eta = (sg2ub - tau) / 2.;
+ } else {
+ eta = (sg2lb - tau) / 2.;
+ }
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(*sigma * *sigma + eta);
+
+ prew = w;
+
+ *sigma += eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] += eta;
+ delta[j] -= eta;
+/* L170: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L180: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.;
+ phi = 0.;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L190: */
+ }
+
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3. +
+ abs(tau) * dw;
+
+ if (w <= 0.) {
+ sg2lb = max(sg2lb,tau);
+ } else {
+ sg2ub = min(sg2ub,tau);
+ }
+
+ swtch = FALSE_;
+ if (orgati) {
+ if (-w > abs(prew) / 10.) {
+ swtch = TRUE_;
+ }
+ } else {
+ if (w > abs(prew) / 10.) {
+ swtch = TRUE_;
+ }
+ }
+
+/* Main loop to update the values of the array DELTA and WORK */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 20; ++niter) {
+
+/* Test for convergence */
+
+ if (abs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+/* Calculate the new step */
+
+ if (! swtch3) {
+ dtipsq = work[ip1] * delta[ip1];
+ dtisq = work[*i__] * delta[*i__];
+ if (! swtch) {
+ if (orgati) {
+/* Computing 2nd power */
+ d__1 = z__[*i__] / dtisq;
+ c__ = w - dtipsq * dw + delsq * (d__1 * d__1);
+ } else {
+/* Computing 2nd power */
+ d__1 = z__[ip1] / dtipsq;
+ c__ = w - dtisq * dw - delsq * (d__1 * d__1);
+ }
+ } else {
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ if (orgati) {
+ dpsi += temp * temp;
+ } else {
+ dphi += temp * temp;
+ }
+ c__ = w - dtisq * dpsi - dtipsq * dphi;
+ }
+ a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
+ b = dtipsq * dtisq * w;
+ if (c__ == 0.) {
+ if (a == 0.) {
+ if (! swtch) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + dtipsq * dtipsq *
+ (dpsi + dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + dtisq * dtisq * (
+ dpsi + dphi);
+ }
+ } else {
+ a = dtisq * dtisq * dpsi + dtipsq * dtipsq * dphi;
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.) {
+ eta = (a - sqrt((d__1 = a * a - b * 4. * c__, abs(d__1))))
+ / (c__ * 2.);
+ } else {
+ eta = b * 2. / (a + sqrt((d__1 = a * a - b * 4. * c__,
+ abs(d__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ dtiim = work[iim1] * delta[iim1];
+ dtiip = work[iip1] * delta[iip1];
+ temp = rhoinv + psi + phi;
+ if (swtch) {
+ c__ = temp - dtiim * dpsi - dtiip * dphi;
+ zz[0] = dtiim * dtiim * dpsi;
+ zz[2] = dtiip * dtiip * dphi;
+ } else {
+ if (orgati) {
+ temp1 = z__[iim1] / dtiim;
+ temp1 *= temp1;
+ temp2 = (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[
+ iip1]) * temp1;
+ c__ = temp - dtiip * (dpsi + dphi) - temp2;
+ zz[0] = z__[iim1] * z__[iim1];
+ if (dpsi < temp1) {
+ zz[2] = dtiip * dtiip * dphi;
+ } else {
+ zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
+ }
+ } else {
+ temp1 = z__[iip1] / dtiip;
+ temp1 *= temp1;
+ temp2 = (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[
+ iip1]) * temp1;
+ c__ = temp - dtiim * (dpsi + dphi) - temp2;
+ if (dphi < temp1) {
+ zz[0] = dtiim * dtiim * dpsi;
+ } else {
+ zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
+ }
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ }
+ dd[0] = dtiim;
+ dd[1] = delta[ii] * work[ii];
+ dd[2] = dtiip;
+ dlaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
+ if (*info != 0) {
+ goto L240;
+ }
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta >= 0.) {
+ eta = -w / dw;
+ }
+ if (orgati) {
+ temp1 = work[*i__] * delta[*i__];
+ temp = eta - temp1;
+ } else {
+ temp1 = work[ip1] * delta[ip1];
+ temp = eta - temp1;
+ }
+ if ((temp > sg2ub) || (temp < sg2lb)) {
+ if (w < 0.) {
+ eta = (sg2ub - tau) / 2.;
+ } else {
+ eta = (sg2lb - tau) / 2.;
+ }
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(*sigma * *sigma + eta);
+
+ *sigma += eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] += eta;
+ delta[j] -= eta;
+/* L200: */
+ }
+
+ prew = w;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.;
+ psi = 0.;
+ erretm = 0.;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L210: */
+ }
+ erretm = abs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.;
+ phi = 0.;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L220: */
+ }
+
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8. + erretm + rhoinv * 2. + abs(temp) * 3.
+ + abs(tau) * dw;
+ if (w * prew > 0. && abs(w) > abs(prew) / 10.) {
+ swtch = ! swtch;
+ }
+
+ if (w <= 0.) {
+ sg2lb = max(sg2lb,tau);
+ } else {
+ sg2ub = min(sg2ub,tau);
+ }
+
+/* L230: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+
+ }
+
+L240:
+ return 0;
+
+/* End of DLASD4 */
+
+} /* dlasd4_ */
+
+/* Subroutine */ int dlasd5_(integer *i__, doublereal *d__, doublereal *z__,
+ doublereal *delta, doublereal *rho, doublereal *dsigma, doublereal *
+ work)
+{
+ /* System generated locals */
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal b, c__, w, del, tau, delsq;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ This subroutine computes the square root of the I-th eigenvalue
+ of a positive symmetric rank-one modification of a 2-by-2 diagonal
+ matrix
+
+ diag( D ) * diag( D ) + RHO * Z * transpose(Z) .
+
+ The diagonal entries in the array D are assumed to satisfy
+
+ 0 <= D(i) < D(j) for i < j .
+
+ We also assume RHO > 0 and that the Euclidean norm of the vector
+ Z is one.
+
+ Arguments
+ =========
+
+ I (input) INTEGER
+ The index of the eigenvalue to be computed. I = 1 or I = 2.
+
+ D (input) DOUBLE PRECISION array, dimension ( 2 )
+ The original eigenvalues. We assume 0 <= D(1) < D(2).
+
+ Z (input) DOUBLE PRECISION array, dimension ( 2 )
+ The components of the updating vector.
+
+ DELTA (output) DOUBLE PRECISION array, dimension ( 2 )
+          Contains (D(j) - sigma_I) in its j-th component.
+ The vector DELTA contains the information necessary
+ to construct the eigenvectors.
+
+ RHO (input) DOUBLE PRECISION
+ The scalar in the symmetric updating formula.
+
+ DSIGMA (output) DOUBLE PRECISION
+          The computed sigma_I, the square root of the I-th updated
+          eigenvalue.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension ( 2 )
+ WORK contains (D(j) + sigma_I) in its j-th component.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ren-Cang Li, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --work;
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ del = d__[2] - d__[1];
+ delsq = del * (d__[2] + d__[1]);
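+
+/*
+   For the 2-by-2 problem the secular equation reduces to a quadratic
+   in TAU.  With the origin at D(1) (the W > 0 branch below),
+   TAU = DSIGMA^2 - D(1)^2 satisfies
+      TAU^2 - B*TAU + C = 0,
+   with B = DELSQ + RHO*( Z(1)^2 + Z(2)^2 ) and C = RHO*Z(1)^2*DELSQ;
+   the origin-at-D(2) branches solve the analogous quadratic with
+   their own B and C.  Each branch picks the root formula that avoids
+   cancellation.
+*/
+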
+ if (*i__ == 1) {
+ w = *rho * 4. * (z__[2] * z__[2] / (d__[1] + d__[2] * 3.) - z__[1] *
+ z__[1] / (d__[1] * 3. + d__[2])) / del + 1.;
+ if (w > 0.) {
+ b = delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[1] * z__[1] * delsq;
+
+/*
+ B > ZERO, always
+
+ The following TAU is DSIGMA * DSIGMA - D( 1 ) * D( 1 )
+*/
+
+ tau = c__ * 2. / (b + sqrt((d__1 = b * b - c__ * 4., abs(d__1))));
+
+/* The following TAU is DSIGMA - D( 1 ) */
+
+ tau /= d__[1] + sqrt(d__[1] * d__[1] + tau);
+ *dsigma = d__[1] + tau;
+ delta[1] = -tau;
+ delta[2] = del - tau;
+ work[1] = d__[1] * 2. + tau;
+ work[2] = d__[1] + tau + d__[2];
+/*
+ DELTA( 1 ) = -Z( 1 ) / TAU
+ DELTA( 2 ) = Z( 2 ) / ( DEL-TAU )
+*/
+ } else {
+ b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[2] * z__[2] * delsq;
+
+/* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
+
+ if (b > 0.) {
+ tau = c__ * -2. / (b + sqrt(b * b + c__ * 4.));
+ } else {
+ tau = (b - sqrt(b * b + c__ * 4.)) / 2.;
+ }
+
+/* The following TAU is DSIGMA - D( 2 ) */
+
+ tau /= d__[2] + sqrt((d__1 = d__[2] * d__[2] + tau, abs(d__1)));
+ *dsigma = d__[2] + tau;
+ delta[1] = -(del + tau);
+ delta[2] = -tau;
+ work[1] = d__[1] + tau + d__[2];
+ work[2] = d__[2] * 2. + tau;
+/*
+ DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU )
+ DELTA( 2 ) = -Z( 2 ) / TAU
+*/
+ }
+/*
+ TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) )
+ DELTA( 1 ) = DELTA( 1 ) / TEMP
+ DELTA( 2 ) = DELTA( 2 ) / TEMP
+*/
+ } else {
+
+/* Now I=2 */
+
+ b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[2] * z__[2] * delsq;
+
+/* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
+
+ if (b > 0.) {
+ tau = (b + sqrt(b * b + c__ * 4.)) / 2.;
+ } else {
+ tau = c__ * 2. / (-b + sqrt(b * b + c__ * 4.));
+ }
+
+/* The following TAU is DSIGMA - D( 2 ) */
+
+ tau /= d__[2] + sqrt(d__[2] * d__[2] + tau);
+ *dsigma = d__[2] + tau;
+ delta[1] = -(del + tau);
+ delta[2] = -tau;
+ work[1] = d__[1] + tau + d__[2];
+ work[2] = d__[2] * 2. + tau;
+/*
+ DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU )
+ DELTA( 2 ) = -Z( 2 ) / TAU
+ TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) )
+ DELTA( 1 ) = DELTA( 1 ) / TEMP
+ DELTA( 2 ) = DELTA( 2 ) / TEMP
+*/
+ }
+ return 0;
+
+/* End of DLASD5 */
+
+} /* dlasd5_ */
+
+/* Subroutine */ int dlasd6_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, doublereal *d__, doublereal *vf, doublereal *vl,
+ doublereal *alpha, doublereal *beta, integer *idxq, integer *perm,
+ integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum,
+ integer *ldgnum, doublereal *poles, doublereal *difl, doublereal *
+ difr, doublereal *z__, integer *k, doublereal *c__, doublereal *s,
+ doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset,
+ poles_dim1, poles_offset, i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ static integer i__, m, n, n1, n2, iw, idx, idxc, idxp, ivfw, ivlw;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlasd7_(integer *, integer *, integer *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *, doublereal
+ *, integer *, doublereal *, doublereal *, integer *), dlasd8_(
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlamrg_(integer *, integer *,
+ doublereal *, integer *, integer *, integer *);
+ static integer isigma;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static doublereal orgnrm;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLASD6 computes the SVD of an updated upper bidiagonal matrix B
+ obtained by merging two smaller ones by appending a row. This
+ routine is used only for the problem which requires all singular
+ values and optionally singular vector matrices in factored form.
+ B is an N-by-M matrix with N = NL + NR + 1 and M = N + SQRE.
+ A related subroutine, DLASD1, handles the case in which all singular
+ values and singular vectors of the bidiagonal matrix are desired.
+
+ DLASD6 computes the SVD as follows:
+
+ ( D1(in) 0 0 0 )
+ B = U(in) * ( Z1' a Z2' b ) * VT(in)
+ ( 0 0 D2(in) 0 )
+
+ = U(out) * ( D(out) 0) * VT(out)
+
+ where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M
+    with ALPHA and BETA in the (NL+1)-th and (NL+2)-th entries and zeros
+ elsewhere; and the entry b is empty if SQRE = 0.
+
+ The singular values of B can be computed using D1, D2, the first
+ components of all the right singular vectors of the lower block, and
+ the last components of all the right singular vectors of the upper
+ block. These components are stored and updated in VF and VL,
+ respectively, in DLASD6. Hence U and VT are not explicitly
+ referenced.
+
+ The singular values are stored in D. The algorithm consists of two
+ stages:
+
+ The first stage consists of deflating the size of the problem
+    when there are multiple singular values or when there is a zero
+    in the Z vector. For each such occurrence the dimension of the
+ secular equation problem is reduced by one. This stage is
+ performed by the routine DLASD7.
+
+ The second stage consists of calculating the updated
+ singular values. This is done by finding the roots of the
+ secular equation via the routine DLASD4 (as called by DLASD8).
+ This routine also updates VF and VL and computes the distances
+ between the updated singular values and the old singular
+ values.
+
+ DLASD6 is called from DLASDA.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed in
+ factored form:
+ = 0: Compute singular values only.
+ = 1: Compute singular vectors in factored form as well.
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has row dimension N = NL + NR + 1,
+ and column dimension M = N + SQRE.
+
+ D (input/output) DOUBLE PRECISION array, dimension ( NL+NR+1 ).
+         On entry D(1:NL) contains the singular values of the
+ upper block, and D(NL+2:N) contains the singular values
+ of the lower block. On exit D(1:N) contains the singular
+ values of the modified matrix.
+
+ VF (input/output) DOUBLE PRECISION array, dimension ( M )
+ On entry, VF(1:NL+1) contains the first components of all
+ right singular vectors of the upper block; and VF(NL+2:M)
+ contains the first components of all right singular vectors
+ of the lower block. On exit, VF contains the first components
+ of all right singular vectors of the bidiagonal matrix.
+
+ VL (input/output) DOUBLE PRECISION array, dimension ( M )
+ On entry, VL(1:NL+1) contains the last components of all
+ right singular vectors of the upper block; and VL(NL+2:M)
+ contains the last components of all right singular vectors of
+ the lower block. On exit, VL contains the last components of
+ all right singular vectors of the bidiagonal matrix.
+
+ ALPHA (input) DOUBLE PRECISION
+ Contains the diagonal element associated with the added row.
+
+ BETA (input) DOUBLE PRECISION
+ Contains the off-diagonal element associated with the added
+ row.
+
+ IDXQ (output) INTEGER array, dimension ( N )
+ This contains the permutation which will reintegrate the
+ subproblem just solved back into sorted order, i.e.
+ D( IDXQ( I = 1, N ) ) will be in ascending order.
+
+ PERM (output) INTEGER array, dimension ( N )
+ The permutations (from deflation and sorting) to be applied
+ to each block. Not referenced if ICOMPQ = 0.
+
+ GIVPTR (output) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem. Not referenced if ICOMPQ = 0.
+
+ GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 )
+         Each pair of numbers indicates a pair of columns involved
+ in a Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGCOL (input) INTEGER
+         The leading dimension of GIVCOL, must be at least N.
+
+ GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+ Each number indicates the C or S value to be used in the
+ corresponding Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGNUM (input) INTEGER
+ The leading dimension of GIVNUM and POLES, must be at least N.
+
+ POLES (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+ On exit, POLES(1,*) is an array containing the new singular
+ values obtained from solving the secular equation, and
+ POLES(2,*) is an array containing the poles in the secular
+ equation. Not referenced if ICOMPQ = 0.
+
+ DIFL (output) DOUBLE PRECISION array, dimension ( N )
+ On exit, DIFL(I) is the distance between the I-th updated
+ (undeflated) singular value and the I-th (undeflated) old
+ singular value.
+
+ DIFR (output) DOUBLE PRECISION array,
+ dimension ( LDGNUM, 2 ) if ICOMPQ = 1 and
+ dimension ( N ) if ICOMPQ = 0.
+ On exit, DIFR(I, 1) is the distance between the I-th updated
+ (undeflated) singular value and the (I+1)-th (undeflated) old
+ singular value.
+
+ If ICOMPQ = 1, DIFR(1:K,2) is an array containing the
+ normalizing factors for the right singular vector matrix.
+
+ See DLASD8 for details on DIFL and DIFR.
+
+ Z (output) DOUBLE PRECISION array, dimension ( M )
+ The first elements of this array contain the components
+ of the deflation-adjusted updating row vector.
+
+ K (output) INTEGER
+ Contains the dimension of the non-deflated matrix. This is
+ the order of the related secular equation. 1 <= K <= N.
+
+ C (output) DOUBLE PRECISION
+ C contains garbage if SQRE = 0 and the C-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ S (output) DOUBLE PRECISION
+ S contains garbage if SQRE = 0 and the S-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension ( 4 * M )
+
+ IWORK (workspace) INTEGER array, dimension ( 3 * N )
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --vf;
+ --vl;
+ --idxq;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ --difl;
+ --difr;
+ --z__;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -4;
+ } else if (*ldgcol < n) {
+ *info = -14;
+ } else if (*ldgnum < n) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD6", &i__1);
+ return 0;
+ }
+
+/*
+ The following values are for bookkeeping purposes only. They are
+ integer pointers which indicate the portion of the workspace
+ used by a particular array in DLASD7 and DLASD8.
+*/
+
+ isigma = 1;
+ iw = isigma + n;
+ ivfw = iw + m;
+ ivlw = ivfw + m;
+
+ idx = 1;
+ idxc = idx + n;
+ idxp = idxc + n;
+
+/*
+ Scale.
+
+ Computing MAX
+*/
+ d__1 = abs(*alpha), d__2 = abs(*beta);
+ orgnrm = max(d__1,d__2);
+ d__[*nl + 1] = 0.;
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((d__1 = d__[i__], abs(d__1)) > orgnrm) {
+ orgnrm = (d__1 = d__[i__], abs(d__1));
+ }
+/* L10: */
+ }
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b2865, &n, &c__1, &d__[1], &n,
+ info);
+ *alpha /= orgnrm;
+ *beta /= orgnrm;
+
+/* Sort and Deflate singular values. */
+
+ dlasd7_(icompq, nl, nr, sqre, k, &d__[1], &z__[1], &work[iw], &vf[1], &
+ work[ivfw], &vl[1], &work[ivlw], alpha, beta, &work[isigma], &
+ iwork[idx], &iwork[idxp], &idxq[1], &perm[1], givptr, &givcol[
+ givcol_offset], ldgcol, &givnum[givnum_offset], ldgnum, c__, s,
+ info);
+
+/* Solve Secular Equation, compute DIFL, DIFR, and update VF, VL. */
+
+ dlasd8_(icompq, k, &d__[1], &z__[1], &vf[1], &vl[1], &difl[1], &difr[1],
+ ldgnum, &work[isigma], &work[iw], info);
+
+/* Save the poles if ICOMPQ = 1. */
+
+ if (*icompq == 1) {
+ dcopy_(k, &d__[1], &c__1, &poles[poles_dim1 + 1], &c__1);
+ dcopy_(k, &work[isigma], &c__1, &poles[((poles_dim1) << (1)) + 1], &
+ c__1);
+ }
+
+/* Unscale. */
+
+ dlascl_("G", &c__0, &c__0, &c_b2865, &orgnrm, &n, &c__1, &d__[1], &n,
+ info);
+
+/* Prepare the IDXQ sorting permutation. */
+
+ n1 = *k;
+ n2 = n - *k;
+ dlamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
+
+ return 0;
+
+/* End of DLASD6 */
+
+} /* dlasd6_ */
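+
+/*
+  Illustrative sketch, not part of LAPACK: the DLAMRG call that ends
+  DLASD6 produces a 1-based merge permutation for two sorted runs of D.
+  The helper below restates that idea in plain C under the simplifying
+  assumption that both runs are ascending and walked forward (DLASD6
+  itself walks the second run with stride -1); the function name is ours.
+*/
+static void example_merge_permutation(integer n1, integer n2,
+        const doublereal *d, integer *index)
+{
+    integer i = 0, j = n1, k = 0;
+
+    /* Take the smaller head element first; store 1-based positions so
+       that d[index[k] - 1] is ascending for k = 0 .. n1+n2-1. */
+    while (i < n1 && j < n1 + n2) {
+        if (d[i] <= d[j]) {
+            index[k++] = ++i;
+        } else {
+            index[k++] = ++j;
+        }
+    }
+    while (i < n1) {
+        index[k++] = ++i;
+    }
+    while (j < n1 + n2) {
+        index[k++] = ++j;
+    }
+}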
+
+/* Subroutine */ int dlasd7_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *k, doublereal *d__, doublereal *z__,
+ doublereal *zw, doublereal *vf, doublereal *vfw, doublereal *vl,
+ doublereal *vlw, doublereal *alpha, doublereal *beta, doublereal *
+ dsigma, integer *idx, integer *idxp, integer *idxq, integer *perm,
+ integer *givptr, integer *givcol, integer *ldgcol, doublereal *givnum,
+ integer *ldgnum, doublereal *c__, doublereal *s, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ static integer i__, j, m, n, k2;
+ static doublereal z1;
+ static integer jp;
+ static doublereal eps, tau, tol;
+ static integer nlp1, nlp2, idxi, idxj;
+ extern /* Subroutine */ int drot_(integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *);
+ static integer idxjp;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static integer jprev;
+
+ extern /* Subroutine */ int dlamrg_(integer *, integer *, doublereal *,
+ integer *, integer *, integer *), xerbla_(char *, integer *);
+ static doublereal hlftol;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLASD7 merges the two sets of singular values together into a single
+ sorted set. Then it tries to deflate the size of the problem. There
+ are two ways in which deflation can occur: when two or more singular
+ values are close together or if there is a tiny entry in the Z
+ vector. For each such occurrence the order of the related
+ secular equation problem is reduced by one.
+
+ DLASD7 is called from DLASD6.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed
+ in compact form, as follows:
+ = 0: Compute singular values only.
+ = 1: Compute singular vectors of upper
+ bidiagonal matrix in compact form.
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has
+ N = NL + NR + 1 rows and
+ M = N + SQRE >= N columns.
+
+ K (output) INTEGER
+ Contains the dimension of the non-deflated matrix; this is
+ the order of the related secular equation. 1 <= K <= N.
+
+ D (input/output) DOUBLE PRECISION array, dimension ( N )
+ On entry D contains the singular values of the two submatrices
+ to be combined. On exit D contains the trailing (N-K) updated
+ singular values (those which were deflated) sorted into
+ increasing order.
+
+ Z (output) DOUBLE PRECISION array, dimension ( M )
+ On exit Z contains the updating row vector in the secular
+ equation.
+
+ ZW (workspace) DOUBLE PRECISION array, dimension ( M )
+ Workspace for Z.
+
+ VF (input/output) DOUBLE PRECISION array, dimension ( M )
+ On entry, VF(1:NL+1) contains the first components of all
+ right singular vectors of the upper block; and VF(NL+2:M)
+ contains the first components of all right singular vectors
+ of the lower block. On exit, VF contains the first components
+ of all right singular vectors of the bidiagonal matrix.
+
+ VFW (workspace) DOUBLE PRECISION array, dimension ( M )
+ Workspace for VF.
+
+ VL (input/output) DOUBLE PRECISION array, dimension ( M )
+ On entry, VL(1:NL+1) contains the last components of all
+ right singular vectors of the upper block; and VL(NL+2:M)
+ contains the last components of all right singular vectors
+ of the lower block. On exit, VL contains the last components
+ of all right singular vectors of the bidiagonal matrix.
+
+ VLW (workspace) DOUBLE PRECISION array, dimension ( M )
+ Workspace for VL.
+
+ ALPHA (input) DOUBLE PRECISION
+ Contains the diagonal element associated with the added row.
+
+ BETA (input) DOUBLE PRECISION
+ Contains the off-diagonal element associated with the added
+ row.
+
+ DSIGMA (output) DOUBLE PRECISION array, dimension ( N )
+ Contains a copy of the diagonal elements (K-1 singular values
+ and one zero) in the secular equation.
+
+ IDX (workspace) INTEGER array, dimension ( N )
+ This will contain the permutation used to sort the contents of
+ D into ascending order.
+
+ IDXP (workspace) INTEGER array, dimension ( N )
+ This will contain the permutation used to place deflated
+ values of D at the end of the array. On output IDXP(2:K)
+ points to the nondeflated D-values and IDXP(K+1:N)
+ points to the deflated singular values.
+
+ IDXQ (input) INTEGER array, dimension ( N )
+ This contains the permutation which separately sorts the two
+ sub-problems in D into ascending order. Note that entries in
+ the first half of this permutation must first be moved one
+ position backward; and entries in the second half
+ must first have NL+1 added to their values.
+
+ PERM (output) INTEGER array, dimension ( N )
+ The permutations (from deflation and sorting) to be applied
+ to each singular block. Not referenced if ICOMPQ = 0.
+
+ GIVPTR (output) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem. Not referenced if ICOMPQ = 0.
+
+ GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 )
+ Each pair of numbers indicates a pair of columns involved
+ in a Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGCOL (input) INTEGER
+ The leading dimension of GIVCOL, must be at least N.
+
+ GIVNUM (output) DOUBLE PRECISION array, dimension ( LDGNUM, 2 )
+ Each number indicates the C or S value to be used in the
+ corresponding Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGNUM (input) INTEGER
+ The leading dimension of GIVNUM, must be at least N.
+
+ C (output) DOUBLE PRECISION
+ C contains garbage if SQRE = 0 and the C-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ S (output) DOUBLE PRECISION
+ S contains garbage if SQRE = 0 and the S-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ --zw;
+ --vf;
+ --vfw;
+ --vl;
+ --vlw;
+ --dsigma;
+ --idx;
+ --idxp;
+ --idxq;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+
+ /* Function Body */
+ *info = 0;
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -4;
+ } else if (*ldgcol < n) {
+ *info = -22;
+ } else if (*ldgnum < n) {
+ *info = -24;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD7", &i__1);
+ return 0;
+ }
+
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+ if (*icompq == 1) {
+ *givptr = 0;
+ }
+
+/*
+ Generate the first part of the vector Z and move the singular
+ values in the first part of D one position backward.
+*/
+
+ z1 = *alpha * vl[nlp1];
+ vl[nlp1] = 0.;
+ tau = vf[nlp1];
+ for (i__ = *nl; i__ >= 1; --i__) {
+ z__[i__ + 1] = *alpha * vl[i__];
+ vl[i__] = 0.;
+ vf[i__ + 1] = vf[i__];
+ d__[i__ + 1] = d__[i__];
+ idxq[i__ + 1] = idxq[i__] + 1;
+/* L10: */
+ }
+ vf[1] = tau;
+
+/* Generate the second part of the vector Z. */
+
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ z__[i__] = *beta * vf[i__];
+ vf[i__] = 0.;
+/* L20: */
+ }
+
+/* Sort the singular values into increasing order */
+
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ idxq[i__] += nlp1;
+/* L30: */
+ }
+
+/* DSIGMA, ZW, VFW, and VLW are used as storage space. */
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dsigma[i__] = d__[idxq[i__]];
+ zw[i__] = z__[idxq[i__]];
+ vfw[i__] = vf[idxq[i__]];
+ vlw[i__] = vl[idxq[i__]];
+/* L40: */
+ }
+
+ dlamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ idxi = idx[i__] + 1;
+ d__[i__] = dsigma[idxi];
+ z__[i__] = zw[idxi];
+ vf[i__] = vfw[idxi];
+ vl[i__] = vlw[idxi];
+/* L50: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ eps = EPSILON;
+/* Computing MAX */
+ d__1 = abs(*alpha), d__2 = abs(*beta);
+ tol = max(d__1,d__2);
+/* Computing MAX */
+ d__2 = (d__1 = d__[n], abs(d__1));
+ tol = eps * 64. * max(d__2,tol);
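+/* TOL = 64 * EPS * max( |ALPHA|, |BETA|, |D(N)| ). */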
+
+/*
+ There are 2 kinds of deflation -- first a value in the z-vector
+ is small, second two (or more) singular values are very close
+ together (their difference is small).
+
+ If the value in the z-vector is small, we simply permute the
+ array so that the corresponding singular value is moved to the
+ end.
+
+ If two values in the D-vector are close, we perform a two-sided
+ rotation designed to make one of the corresponding z-vector
+ entries zero, and then permute the array so that the deflated
+ singular value is moved to the end.
+
+ If there are multiple singular values then the problem deflates.
+ Here the number of equal singular values is found. As each equal
+ singular value is found, an elementary reflector is computed to
+ rotate the corresponding singular subspace so that the
+ corresponding components of Z are zero in this new basis.
+*/
+
+ *k = 1;
+ k2 = n + 1;
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ if (j == n) {
+ goto L100;
+ }
+ } else {
+ jprev = j;
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ j = jprev;
+L80:
+ ++j;
+ if (j > n) {
+ goto L90;
+ }
+ if ((d__1 = z__[j], abs(d__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ } else {
+
+/* Check if singular values are close enough to allow deflation. */
+
+ if ((d__1 = d__[j] - d__[jprev], abs(d__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ *s = z__[jprev];
+ *c__ = z__[j];
+
+/*
+ Find sqrt(a**2+b**2) without overflow or
+ destructive underflow.
+*/
+
+ tau = dlapy2_(c__, s);
+ z__[j] = tau;
+ z__[jprev] = 0.;
+ *c__ /= tau;
+ *s = -(*s) / tau;
+
+/* Record the appropriate Givens rotation */
+
+ if (*icompq == 1) {
+ ++(*givptr);
+ idxjp = idxq[idx[jprev] + 1];
+ idxj = idxq[idx[j] + 1];
+ if (idxjp <= nlp1) {
+ --idxjp;
+ }
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ givcol[*givptr + ((givcol_dim1) << (1))] = idxjp;
+ givcol[*givptr + givcol_dim1] = idxj;
+ givnum[*givptr + ((givnum_dim1) << (1))] = *c__;
+ givnum[*givptr + givnum_dim1] = *s;
+ }
+ drot_(&c__1, &vf[jprev], &c__1, &vf[j], &c__1, c__, s);
+ drot_(&c__1, &vl[jprev], &c__1, &vl[j], &c__1, c__, s);
+ --k2;
+ idxp[k2] = jprev;
+ jprev = j;
+ } else {
+ ++(*k);
+ zw[*k] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+ jprev = j;
+ }
+ }
+ goto L80;
+L90:
+
+/* Record the last singular value. */
+
+ ++(*k);
+ zw[*k] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+
+L100:
+
+/*
+ Sort the singular values into DSIGMA. The singular values which
+ were not deflated go into the first K slots of DSIGMA, except
+ that DSIGMA(1) is treated separately.
+*/
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ dsigma[j] = d__[jp];
+ vfw[j] = vf[jp];
+ vlw[j] = vl[jp];
+/* L110: */
+ }
+ if (*icompq == 1) {
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ perm[j] = idxq[idx[jp] + 1];
+ if (perm[j] <= nlp1) {
+ --perm[j];
+ }
+/* L120: */
+ }
+ }
+
+/*
+ The deflated singular values go back into the last N - K slots of
+ D.
+*/
+
+ i__1 = n - *k;
+ dcopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
+
+/*
+ Determine DSIGMA(1), DSIGMA(2), Z(1), VF(1), VL(1), VF(M), and
+ VL(M).
+*/
+
+ dsigma[1] = 0.;
+ hlftol = tol / 2.;
+ if (abs(dsigma[2]) <= hlftol) {
+ dsigma[2] = hlftol;
+ }
+ if (m > n) {
+ z__[1] = dlapy2_(&z1, &z__[m]);
+ if (z__[1] <= tol) {
+ *c__ = 1.;
+ *s = 0.;
+ z__[1] = tol;
+ } else {
+ *c__ = z1 / z__[1];
+ *s = -z__[m] / z__[1];
+ }
+ drot_(&c__1, &vf[m], &c__1, &vf[1], &c__1, c__, s);
+ drot_(&c__1, &vl[m], &c__1, &vl[1], &c__1, c__, s);
+ } else {
+ if (abs(z1) <= tol) {
+ z__[1] = tol;
+ } else {
+ z__[1] = z1;
+ }
+ }
+
+/* Restore Z, VF, and VL. */
+
+ i__1 = *k - 1;
+ dcopy_(&i__1, &zw[2], &c__1, &z__[2], &c__1);
+ i__1 = n - 1;
+ dcopy_(&i__1, &vfw[2], &c__1, &vf[2], &c__1);
+ i__1 = n - 1;
+ dcopy_(&i__1, &vlw[2], &c__1, &vl[2], &c__1);
+
+ return 0;
+
+/* End of DLASD7 */
+
+} /* dlasd7_ */
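+
+/*
+  Illustrative sketch, not part of LAPACK: the two deflation tests
+  DLASD7 applies, restated on 0-based arrays.  TOL follows the
+  "allowable deflation tolerance" computation above; for simplicity the
+  close-value test compares adjacent entries only, whereas DLASD7
+  compares against the previous surviving entry.  Names are ours.
+*/
+static integer example_count_deflations(integer n, const doublereal *d,
+        const doublereal *z, doublereal alpha, doublereal beta,
+        doublereal eps)
+{
+    doublereal tol;
+    integer j, ndeflated = 0;
+
+    tol = abs(alpha) >= abs(beta) ? abs(alpha) : abs(beta);
+    if (abs(d[n - 1]) > tol) {
+        tol = abs(d[n - 1]);
+    }
+    tol *= eps * 64.;
+
+    for (j = 1; j < n; ++j) {
+        if (abs(z[j]) <= tol) {
+            ++ndeflated;                     /* tiny Z component */
+        } else if (abs(d[j] - d[j - 1]) <= tol) {
+            ++ndeflated;                     /* nearly equal values */
+        }
+    }
+    return ndeflated;
+}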
+
+/* Subroutine */ int dlasd8_(integer *icompq, integer *k, doublereal *d__,
+ doublereal *z__, doublereal *vf, doublereal *vl, doublereal *difl,
+ doublereal *difr, integer *lddifr, doublereal *dsigma, doublereal *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer difr_dim1, difr_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static integer i__, j;
+ static doublereal dj, rho;
+ static integer iwk1, iwk2, iwk3;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ static doublereal temp;
+ extern doublereal dnrm2_(integer *, doublereal *, integer *);
+ static integer iwk2i, iwk3i;
+ static doublereal diflj, difrj, dsigj;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ extern doublereal dlamc3_(doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasd4_(integer *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *), dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlaset_(char *, integer *, integer
+ *, doublereal *, doublereal *, doublereal *, integer *),
+ xerbla_(char *, integer *);
+ static doublereal dsigjp;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLASD8 finds the square roots of the roots of the secular equation,
+ as defined by the values in DSIGMA and Z. It makes the appropriate
+ calls to DLASD4, and stores, for each element in D, the distance
+ to its two nearest poles (elements in DSIGMA). It also updates
+ the arrays VF and VL, the first and last components of all the
+ right singular vectors of the original bidiagonal matrix.
+
+ DLASD8 is called from DLASD6.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed in
+ factored form in the calling routine:
+ = 0: Compute singular values only.
+ = 1: Compute singular vectors in factored form as well.
+
+ K (input) INTEGER
+ The number of terms in the rational function to be solved
+ by DLASD4. K >= 1.
+
+ D (output) DOUBLE PRECISION array, dimension ( K )
+ On output, D contains the updated singular values.
+
+ Z (input) DOUBLE PRECISION array, dimension ( K )
+ The first K elements of this array contain the components
+ of the deflation-adjusted updating row vector.
+
+ VF (input/output) DOUBLE PRECISION array, dimension ( K )
+ On entry, VF contains information passed through DLASD7.
+ On exit, VF contains the first K components of the first
+ components of all right singular vectors of the bidiagonal
+ matrix.
+
+ VL (input/output) DOUBLE PRECISION array, dimension ( K )
+ On entry, VL contains information passed through DLASD7.
+ On exit, VL contains the first K components of the last
+ components of all right singular vectors of the bidiagonal
+ matrix.
+
+ DIFL (output) DOUBLE PRECISION array, dimension ( K )
+ On exit, DIFL(I) = D(I) - DSIGMA(I).
+
+ DIFR (output) DOUBLE PRECISION array,
+ dimension ( LDDIFR, 2 ) if ICOMPQ = 1 and
+ dimension ( K ) if ICOMPQ = 0.
+ On exit, DIFR(I,1) = D(I) - DSIGMA(I+1); DIFR(K,1) is not
+ defined and will not be referenced.
+
+ If ICOMPQ = 1, DIFR(1:K,2) is an array containing the
+ normalizing factors for the right singular vector matrix.
+
+ LDDIFR (input) INTEGER
+ The leading dimension of DIFR, must be at least K.
+
+ DSIGMA (input) DOUBLE PRECISION array, dimension ( K )
+ The first K elements of this array contain the old roots
+ of the deflated updating problem. These are the poles
+ of the secular equation.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension at least 3 * K
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ --vf;
+ --vl;
+ --difl;
+ difr_dim1 = *lddifr;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ --dsigma;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*k < 1) {
+ *info = -2;
+ } else if (*lddifr < *k) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASD8", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 1) {
+ d__[1] = abs(z__[1]);
+ difl[1] = d__[1];
+ if (*icompq == 1) {
+ difl[2] = 1.;
+ difr[((difr_dim1) << (1)) + 1] = 1.;
+ }
+ return 0;
+ }
+
+/*
+ Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can
+ be computed with high relative accuracy (barring over/underflow).
+ This is a problem on machines without a guard digit in
+ add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+ The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I),
+ which on any of these machines zeros out the bottommost
+ bit of DSIGMA(I) if it is 1; this makes the subsequent
+ subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation
+ occurs. On binary machines with a guard digit (almost all
+ machines) it does not change DSIGMA(I) at all. On hexadecimal
+ and decimal machines with a guard digit, it slightly
+ changes the bottommost bits of DSIGMA(I). It does not account
+ for hexadecimal or decimal machines without guard digits
+ (we know of none). We use a subroutine call to compute
+ 2*DSIGMA(I) to prevent optimizing compilers from eliminating
+ this code.
+*/
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dsigma[i__] = dlamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
+/* L10: */
+ }
+
+/* Bookkeeping. */
+
+ iwk1 = 1;
+ iwk2 = iwk1 + *k;
+ iwk3 = iwk2 + *k;
+ iwk2i = iwk2 - 1;
+ iwk3i = iwk3 - 1;
+
+/* Normalize Z. */
+
+ rho = dnrm2_(k, &z__[1], &c__1);
+ dlascl_("G", &c__0, &c__0, &rho, &c_b2865, k, &c__1, &z__[1], k, info);
+ rho *= rho;
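+/*
+   Z now has unit 2-norm (C_B2865 appears to be the constant 1. used
+   elsewhere in this file) and RHO holds the squared 2-norm of the
+   original Z, the rank-one update form DLASD4 expects.
+*/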
+
+/* Initialize WORK(IWK3). */
+
+ dlaset_("A", k, &c__1, &c_b2865, &c_b2865, &work[iwk3], k);
+
+/*
+ Compute the updated singular values, the arrays DIFL, DIFR,
+ and the updated Z.
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dlasd4_(k, &j, &dsigma[1], &z__[1], &work[iwk1], &rho, &d__[j], &work[
+ iwk2], info);
+
+/* If the root finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ return 0;
+ }
+ work[iwk3i + j] = work[iwk3i + j] * work[j] * work[iwk2i + j];
+ difl[j] = -work[j];
+ difr[j + difr_dim1] = -work[j + 1];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
+ i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
+ j]);
+/* L20: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
+ i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
+ j]);
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Compute updated Z. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__2 = sqrt((d__1 = work[iwk3i + i__], abs(d__1)));
+ z__[i__] = d_sign(&d__2, &z__[i__]);
+/* L50: */
+ }
+
+/* Update VF and VL. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = d__[j];
+ dsigj = -dsigma[j];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -dsigma[j + 1];
+ }
+ work[j] = -z__[j] / diflj / (dsigma[j] + dj);
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigj) - diflj) / (
+ dsigma[i__] + dj);
+/* L60: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[i__] = z__[i__] / (dlamc3_(&dsigma[i__], &dsigjp) + difrj) /
+ (dsigma[i__] + dj);
+/* L70: */
+ }
+ temp = dnrm2_(k, &work[1], &c__1);
+ work[iwk2i + j] = ddot_(k, &work[1], &c__1, &vf[1], &c__1) / temp;
+ work[iwk3i + j] = ddot_(k, &work[1], &c__1, &vl[1], &c__1) / temp;
+ if (*icompq == 1) {
+ difr[j + ((difr_dim1) << (1))] = temp;
+ }
+/* L80: */
+ }
+
+ dcopy_(k, &work[iwk2], &c__1, &vf[1], &c__1);
+ dcopy_(k, &work[iwk3], &c__1, &vl[1], &c__1);
+
+ return 0;
+
+/* End of DLASD8 */
+
+} /* dlasd8_ */
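+
+/*
+  Illustrative sketch, not part of LAPACK: the secular function whose
+  roots DLASD4 finds on behalf of DLASD8,
+      f(s) = 1 + RHO * sum_k Z(k)^2 / ((DSIGMA(k) - s)*(DSIGMA(k) + s)).
+  The factored denominator mirrors the paired divisions in the loops
+  above, which avoid forming DSIGMA(k)^2 - s^2 directly.  The function
+  name and 0-based indexing are ours.
+*/
+static doublereal example_secular_f(integer k, const doublereal *dsigma,
+        const doublereal *z, doublereal rho, doublereal s)
+{
+    doublereal f = 1.;
+    integer i;
+
+    for (i = 0; i < k; ++i) {
+        f += rho * z[i] * z[i] / ((dsigma[i] - s) * (dsigma[i] + s));
+    }
+    return f;
+}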
+
+/* Subroutine */ int dlasda_(integer *icompq, integer *smlsiz, integer *n,
+ integer *sqre, doublereal *d__, doublereal *e, doublereal *u, integer
+ *ldu, doublereal *vt, integer *k, doublereal *difl, doublereal *difr,
+ doublereal *z__, doublereal *poles, integer *givptr, integer *givcol,
+ integer *ldgcol, integer *perm, doublereal *givnum, doublereal *c__,
+ doublereal *s, doublereal *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1,
+ difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset,
+ poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset,
+ z_dim1, z_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, j, m, i1, ic, lf, nd, ll, nl, vf, nr, vl, im1, ncc,
+ nlf, nrf, vfi, iwk, vli, lvl, nru, ndb1, nlp1, lvl2, nrp1;
+ static doublereal beta;
+ static integer idxq, nlvl;
+ static doublereal alpha;
+ static integer inode, ndiml, ndimr, idxqi, itemp;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static integer sqrei;
+ extern /* Subroutine */ int dlasd6_(integer *, integer *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *, integer *);
+ static integer nwork1, nwork2;
+ extern /* Subroutine */ int dlasdq_(char *, integer *, integer *, integer
+ *, integer *, integer *, doublereal *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *), dlasdt_(integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *), dlaset_(
+ char *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *), xerbla_(char *, integer *);
+ static integer smlszp;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ Using a divide and conquer approach, DLASDA computes the singular
+ value decomposition (SVD) of a real upper bidiagonal N-by-M matrix
+ B with diagonal D and offdiagonal E, where M = N + SQRE. The
+ algorithm computes the singular values in the SVD B = U * S * VT.
+ The orthogonal matrices U and VT are optionally computed in
+ compact form.
+
+ A related subroutine, DLASD0, computes the singular values and
+ the singular vectors in explicit form.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed
+ in compact form, as follows
+ = 0: Compute singular values only.
+ = 1: Compute singular vectors of upper bidiagonal
+ matrix in compact form.
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The row dimension of the upper bidiagonal matrix. This is
+ also the dimension of the main diagonal array D.
+
+ SQRE (input) INTEGER
+ Specifies the column dimension of the bidiagonal matrix.
+ = 0: The bidiagonal matrix has column dimension M = N;
+ = 1: The bidiagonal matrix has column dimension M = N + 1.
+
+ D (input/output) DOUBLE PRECISION array, dimension ( N )
+ On entry D contains the main diagonal of the bidiagonal
+ matrix. On exit D, if INFO = 0, contains its singular values.
+
+ E (input) DOUBLE PRECISION array, dimension ( M-1 )
+ Contains the subdiagonal entries of the bidiagonal matrix.
+ On exit, E has been destroyed.
+
+ U (output) DOUBLE PRECISION array,
+ dimension ( LDU, SMLSIZ ) if ICOMPQ = 1, and not referenced
+ if ICOMPQ = 0. If ICOMPQ = 1, on exit, U contains the left
+ singular vector matrices of all subproblems at the bottom
+ level.
+
+ LDU (input) INTEGER, LDU >= N.
+ The leading dimension of arrays U, VT, DIFL, DIFR, POLES,
+ GIVNUM, and Z.
+
+ VT (output) DOUBLE PRECISION array,
+ dimension ( LDU, SMLSIZ+1 ) if ICOMPQ = 1, and not referenced
+ if ICOMPQ = 0. If ICOMPQ = 1, on exit, VT' contains the right
+ singular vector matrices of all subproblems at the bottom
+ level.
+
+ K (output) INTEGER array,
+ dimension ( N ) if ICOMPQ = 1 and dimension 1 if ICOMPQ = 0.
+ If ICOMPQ = 1, on exit, K(I) is the dimension of the I-th
+ secular equation on the computation tree.
+
+ DIFL (output) DOUBLE PRECISION array, dimension ( LDU, NLVL ),
+ where NLVL = floor(log_2 (N/SMLSIZ)).
+
+ DIFR (output) DOUBLE PRECISION array,
+ dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1 and
+ dimension ( N ) if ICOMPQ = 0.
+ If ICOMPQ = 1, on exit, DIFL(1:N, I) and DIFR(1:N, 2 * I - 1)
+ record distances between singular values on the I-th
+ level and singular values on the (I -1)-th level, and
+ DIFR(1:N, 2 * I ) contains the normalizing factors for
+ the right singular vector matrix. See DLASD8 for details.
+
+ Z (output) DOUBLE PRECISION array,
+ dimension ( LDU, NLVL ) if ICOMPQ = 1 and
+ dimension ( N ) if ICOMPQ = 0.
+ The first K elements of Z(1, I) contain the components of
+ the deflation-adjusted updating row vector for subproblems
+ on the I-th level.
+
+ POLES (output) DOUBLE PRECISION array,
+ dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not referenced
+ if ICOMPQ = 0. If ICOMPQ = 1, on exit, POLES(1, 2*I - 1) and
+ POLES(1, 2*I) contain the new and old singular values
+ involved in the secular equations on the I-th level.
+
+ GIVPTR (output) INTEGER array,
+ dimension ( N ) if ICOMPQ = 1, and not referenced if
+ ICOMPQ = 0. If ICOMPQ = 1, on exit, GIVPTR( I ) records
+ the number of Givens rotations performed on the I-th
+ problem on the computation tree.
+
+ GIVCOL (output) INTEGER array,
+ dimension ( LDGCOL, 2 * NLVL ) if ICOMPQ = 1, and not
+ referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I,
+ GIVCOL(1, 2 *I - 1) and GIVCOL(1, 2 *I) record the locations
+ of Givens rotations performed on the I-th level on the
+ computation tree.
+
+ LDGCOL (input) INTEGER, LDGCOL >= N.
+ The leading dimension of arrays GIVCOL and PERM.
+
+ PERM (output) INTEGER array,
+ dimension ( LDGCOL, NLVL ) if ICOMPQ = 1, and not referenced
+ if ICOMPQ = 0. If ICOMPQ = 1, on exit, PERM(1, I) records
+ permutations done on the I-th level of the computation tree.
+
+ GIVNUM (output) DOUBLE PRECISION array,
+ dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not
+ referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I,
+ GIVNUM(1, 2 *I - 1) and GIVNUM(1, 2 *I) record the C- and S-
+ values of Givens rotations performed on the I-th level on
+ the computation tree.
+
+ C (output) DOUBLE PRECISION array,
+ dimension ( N ) if ICOMPQ = 1, and dimension 1 if ICOMPQ = 0.
+ If ICOMPQ = 1 and the I-th subproblem is not square, on exit,
+ C( I ) contains the C-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ S (output) DOUBLE PRECISION array, dimension ( N ) if
+ ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1
+ and the I-th subproblem is not square, on exit, S( I )
+ contains the S-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension
+ (6 * N + (SMLSIZ + 1)*(SMLSIZ + 1)).
+
+ IWORK (workspace) INTEGER array.
+ Dimension must be at least (7 * N).
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -4;
+ } else if (*ldu < *n + *sqre) {
+ *info = -8;
+ } else if (*ldgcol < *n) {
+ *info = -17;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASDA", &i__1);
+ return 0;
+ }
+
+ m = *n + *sqre;
+
+/* If the input matrix is too small, call DLASDQ to find the SVD. */
+
+ if (*n <= *smlsiz) {
+ if (*icompq == 0) {
+ dlasdq_("U", sqre, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
+ vt_offset], ldu, &u[u_offset], ldu, &u[u_offset], ldu, &
+ work[1], info);
+ } else {
+ dlasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
+ , ldu, &u[u_offset], ldu, &u[u_offset], ldu, &work[1],
+ info);
+ }
+ return 0;
+ }
+
+/* Book-keeping and set up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+ idxq = ndimr + *n;
+ iwk = idxq + *n;
+
+ ncc = 0;
+ nru = 0;
+
+ smlszp = *smlsiz + 1;
+ vf = 1;
+ vl = vf + m;
+ nwork1 = vl + m;
+ nwork2 = nwork1 + smlszp * smlszp;
+
+ dlasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/*
+ for the nodes on bottom level of the tree, solve
+ their subproblems by DLASDQ.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+ IC : center row of each node
+ NL : number of rows of left subproblem
+ NR : number of rows of right subproblem
+ NLF: starting row of the left subproblem
+ NRF: starting row of the right subproblem
+*/
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nlp1 = nl + 1;
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ idxqi = idxq + nlf - 2;
+ vfi = vf + nlf - 1;
+ vli = vl + nlf - 1;
+ sqrei = 1;
+ if (*icompq == 0) {
+ dlaset_("A", &nlp1, &nlp1, &c_b2879, &c_b2865, &work[nwork1], &
+ smlszp);
+ dlasdq_("U", &sqrei, &nl, &nlp1, &nru, &ncc, &d__[nlf], &e[nlf], &
+ work[nwork1], &smlszp, &work[nwork2], &nl, &work[nwork2],
+ &nl, &work[nwork2], info);
+ itemp = nwork1 + nl * smlszp;
+ dcopy_(&nlp1, &work[nwork1], &c__1, &work[vfi], &c__1);
+ dcopy_(&nlp1, &work[itemp], &c__1, &work[vli], &c__1);
+ } else {
+ dlaset_("A", &nl, &nl, &c_b2879, &c_b2865, &u[nlf + u_dim1], ldu);
+ dlaset_("A", &nlp1, &nlp1, &c_b2879, &c_b2865, &vt[nlf + vt_dim1],
+ ldu);
+ dlasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &
+ vt[nlf + vt_dim1], ldu, &u[nlf + u_dim1], ldu, &u[nlf +
+ u_dim1], ldu, &work[nwork1], info);
+ dcopy_(&nlp1, &vt[nlf + vt_dim1], &c__1, &work[vfi], &c__1);
+ dcopy_(&nlp1, &vt[nlf + nlp1 * vt_dim1], &c__1, &work[vli], &c__1)
+ ;
+ }
+ if (*info != 0) {
+ return 0;
+ }
+ i__2 = nl;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[idxqi + j] = j;
+/* L10: */
+ }
+ if (i__ == nd && *sqre == 0) {
+ sqrei = 0;
+ } else {
+ sqrei = 1;
+ }
+ idxqi += nlp1;
+ vfi += nlp1;
+ vli += nlp1;
+ nrp1 = nr + sqrei;
+ if (*icompq == 0) {
+ dlaset_("A", &nrp1, &nrp1, &c_b2879, &c_b2865, &work[nwork1], &
+ smlszp);
+ dlasdq_("U", &sqrei, &nr, &nrp1, &nru, &ncc, &d__[nrf], &e[nrf], &
+ work[nwork1], &smlszp, &work[nwork2], &nr, &work[nwork2],
+ &nr, &work[nwork2], info);
+ itemp = nwork1 + (nrp1 - 1) * smlszp;
+ dcopy_(&nrp1, &work[nwork1], &c__1, &work[vfi], &c__1);
+ dcopy_(&nrp1, &work[itemp], &c__1, &work[vli], &c__1);
+ } else {
+ dlaset_("A", &nr, &nr, &c_b2879, &c_b2865, &u[nrf + u_dim1], ldu);
+ dlaset_("A", &nrp1, &nrp1, &c_b2879, &c_b2865, &vt[nrf + vt_dim1],
+ ldu);
+ dlasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &
+ vt[nrf + vt_dim1], ldu, &u[nrf + u_dim1], ldu, &u[nrf +
+ u_dim1], ldu, &work[nwork1], info);
+ dcopy_(&nrp1, &vt[nrf + vt_dim1], &c__1, &work[vfi], &c__1);
+ dcopy_(&nrp1, &vt[nrf + nrp1 * vt_dim1], &c__1, &work[vli], &c__1)
+ ;
+ }
+ if (*info != 0) {
+ return 0;
+ }
+ i__2 = nr;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[idxqi + j] = j;
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Now conquer each subproblem bottom-up. */
+
+ j = pow_ii(&c__2, &nlvl);
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = ((lvl) << (1)) - 1;
+
+/*
+ Find the first node LF and last node LL on
+ the current level LVL.
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = ((lf) << (1)) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqrei = *sqre;
+ } else {
+ sqrei = 1;
+ }
+ vfi = vf + nlf - 1;
+ vli = vl + nlf - 1;
+ idxqi = idxq + nlf - 1;
+ alpha = d__[ic];
+ beta = e[ic];
+ if (*icompq == 0) {
+ dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
+ work[vli], &alpha, &beta, &iwork[idxqi], &perm[
+ perm_offset], &givptr[1], &givcol[givcol_offset],
+ ldgcol, &givnum[givnum_offset], ldu, &poles[
+ poles_offset], &difl[difl_offset], &difr[difr_offset],
+ &z__[z_offset], &k[1], &c__[1], &s[1], &work[nwork1],
+ &iwork[iwk], info);
+ } else {
+ --j;
+ dlasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
+ work[vli], &alpha, &beta, &iwork[idxqi], &perm[nlf +
+ lvl * perm_dim1], &givptr[j], &givcol[nlf + lvl2 *
+ givcol_dim1], ldgcol, &givnum[nlf + lvl2 *
+ givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], &
+ difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 *
+ difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[j],
+ &s[j], &work[nwork1], &iwork[iwk], info);
+ }
+ if (*info != 0) {
+ return 0;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+
+ return 0;
+
+/* End of DLASDA */
+
+} /* dlasda_ */
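+
+/*
+  Illustrative sketch, not part of LAPACK: the workspace sizes the
+  argument list above prescribes for DLASDA.  The helper name is ours;
+  it simply restates WORK >= 6*N + (SMLSIZ+1)^2 and IWORK >= 7*N.
+*/
+static void example_dlasda_workspace(integer n, integer smlsiz,
+        integer *lwork, integer *liwork)
+{
+    *lwork = n * 6 + (smlsiz + 1) * (smlsiz + 1);   /* doublereal slots */
+    *liwork = n * 7;                                /* integer slots */
+}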
+
+/* Subroutine */ int dlasdq_(char *uplo, integer *sqre, integer *n, integer *
+ ncvt, integer *nru, integer *ncc, doublereal *d__, doublereal *e,
+ doublereal *vt, integer *ldvt, doublereal *u, integer *ldu,
+ doublereal *c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2;
+
+ /* Local variables */
+ static integer i__, j;
+ static doublereal r__, cs, sn;
+ static integer np1, isub;
+ static doublereal smin;
+ static integer sqre1;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *), dswap_(integer *, doublereal *, integer *
+ , doublereal *, integer *);
+ static integer iuplo;
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *), xerbla_(char *,
+ integer *), dbdsqr_(char *, integer *, integer *, integer
+ *, integer *, doublereal *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ static logical rotate;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DLASDQ computes the singular value decomposition (SVD) of a real
+ (upper or lower) bidiagonal matrix with diagonal D and offdiagonal
+ E, accumulating the transformations if desired. Letting B denote
+ the input bidiagonal matrix, the algorithm computes orthogonal
+ matrices Q and P such that B = Q * S * P' (P' denotes the transpose
+ of P). The singular values S are overwritten on D.
+
+ The input matrix U is changed to U * Q if desired.
+ The input matrix VT is changed to P' * VT if desired.
+ The input matrix C is changed to Q' * C if desired.
+
+ See "Computing Small Singular Values of Bidiagonal Matrices With
+ Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan,
+ LAPACK Working Note #3, for a detailed description of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ On entry, UPLO specifies whether the input bidiagonal matrix
+ is upper or lower bidiagonal, and whether it is square or
+ not.
+ UPLO = 'U' or 'u' B is upper bidiagonal.
+ UPLO = 'L' or 'l' B is lower bidiagonal.
+
+ SQRE (input) INTEGER
+ = 0: then the input matrix is N-by-N.
+ = 1: then the input matrix is N-by-(N+1) if UPLO = 'U' and
+ (N+1)-by-N if UPLO = 'L'.
+
+ The bidiagonal matrix has
+ N rows and
+ M = N + SQRE >= N columns.
+
+ N (input) INTEGER
+ On entry, N specifies the number of rows and columns
+ in the matrix. N must be at least 0.
+
+ NCVT (input) INTEGER
+ On entry, NCVT specifies the number of columns of
+ the matrix VT. NCVT must be at least 0.
+
+ NRU (input) INTEGER
+ On entry, NRU specifies the number of rows of
+ the matrix U. NRU must be at least 0.
+
+ NCC (input) INTEGER
+ On entry, NCC specifies the number of columns of
+ the matrix C. NCC must be at least 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, D contains the diagonal entries of the
+ bidiagonal matrix whose SVD is desired. On normal exit,
+ D contains the singular values in ascending order.
+
+ E (input/output) DOUBLE PRECISION array,
+ dimension (N-1) if SQRE = 0 and (N) if SQRE = 1.
+ On entry, the entries of E contain the offdiagonal entries
+ of the bidiagonal matrix whose SVD is desired. On normal
+ exit, E will contain 0. If the algorithm does not converge,
+ D and E will contain the diagonal and superdiagonal entries
+ of a bidiagonal matrix orthogonally equivalent to the one
+ given as input.
+
+ VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT)
+ On entry, contains a matrix which on exit has been
+ premultiplied by P', dimension N-by-NCVT if SQRE = 0
+ and (N+1)-by-NCVT if SQRE = 1 (not referenced if NCVT=0).
+
+ LDVT (input) INTEGER
+ On entry, LDVT specifies the leading dimension of VT as
+ declared in the calling (sub) program. LDVT must be at
+ least 1. If NCVT is nonzero LDVT must also be at least N.
+
+ U (input/output) DOUBLE PRECISION array, dimension (LDU, N)
+ On entry, contains a matrix which on exit has been
+ postmultiplied by Q, dimension NRU-by-N if SQRE = 0
+ and NRU-by-(N+1) if SQRE = 1 (not referenced if NRU=0).
+
+ LDU (input) INTEGER
+ On entry, LDU specifies the leading dimension of U as
+ declared in the calling (sub) program. LDU must be at
+ least max( 1, NRU ) .
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC)
+ On entry, contains an N-by-NCC matrix which on exit
+ has been premultiplied by Q' dimension N-by-NCC if SQRE = 0
+ and (N+1)-by-NCC if SQRE = 1 (not referenced if NCC=0).
+
+ LDC (input) INTEGER
+ On entry, LDC specifies the leading dimension of C as
+ declared in the calling (sub) program. LDC must be at
+ least 1. If NCC is nonzero, LDC must also be at least N.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (4*N)
+ Workspace. Only referenced if one of NCVT, NRU, or NCC is
+ nonzero, and if N is at least 2.
+
+ INFO (output) INTEGER
+ On exit, a value of 0 indicates a successful exit.
+ If INFO < 0, argument number -INFO is illegal.
+ If INFO > 0, the algorithm did not converge, and INFO
+ specifies how many superdiagonals did not converge.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ iuplo = 0;
+ if (lsame_(uplo, "U")) {
+ iuplo = 1;
+ }
+ if (lsame_(uplo, "L")) {
+ iuplo = 2;
+ }
+ if (iuplo == 0) {
+ *info = -1;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ncvt < 0) {
+ *info = -4;
+ } else if (*nru < 0) {
+ *info = -5;
+ } else if (*ncc < 0) {
+ *info = -6;
+ } else if ((*ncvt == 0 && *ldvt < 1) || (*ncvt > 0 && *ldvt < max(1,*n)))
+ {
+ *info = -10;
+ } else if (*ldu < max(1,*nru)) {
+ *info = -12;
+ } else if ((*ncc == 0 && *ldc < 1) || (*ncc > 0 && *ldc < max(1,*n))) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASDQ", &i__1);
+ return 0;
+ }
+ if (*n == 0) {
+ return 0;
+ }
+
+/* ROTATE is true if any singular vectors are desired, false otherwise */
+
+ rotate = ((*ncvt > 0) || (*nru > 0)) || (*ncc > 0);
+ np1 = *n + 1;
+ sqre1 = *sqre;
+
+/*
+ If matrix non-square upper bidiagonal, rotate to be lower
+ bidiagonal. The rotations are on the right.
+*/
+
+ if (iuplo == 1 && sqre1 == 1) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (rotate) {
+ work[i__] = cs;
+ work[*n + i__] = sn;
+ }
+/* L10: */
+ }
+ dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
+ d__[*n] = r__;
+ e[*n] = 0.;
+ if (rotate) {
+ work[*n] = cs;
+ work[*n + *n] = sn;
+ }
+ iuplo = 2;
+ sqre1 = 0;
+
+/* Update singular vectors if desired. */
+
+ if (*ncvt > 0) {
+ dlasr_("L", "V", "F", &np1, ncvt, &work[1], &work[np1], &vt[
+ vt_offset], ldvt);
+ }
+ }
+
+/*
+ If matrix lower bidiagonal, rotate to be upper bidiagonal
+ by applying Givens rotations on the left.
+*/
+
+ if (iuplo == 2) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (rotate) {
+ work[i__] = cs;
+ work[*n + i__] = sn;
+ }
+/* L20: */
+ }
+
+/*
+ If matrix (N+1)-by-N lower bidiagonal, one additional
+ rotation is needed.
+*/
+
+ if (sqre1 == 1) {
+ dlartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
+ d__[*n] = r__;
+ if (rotate) {
+ work[*n] = cs;
+ work[*n + *n] = sn;
+ }
+ }
+
+/* Update singular vectors if desired. */
+
+ if (*nru > 0) {
+ if (sqre1 == 0) {
+ dlasr_("R", "V", "F", nru, n, &work[1], &work[np1], &u[
+ u_offset], ldu);
+ } else {
+ dlasr_("R", "V", "F", nru, &np1, &work[1], &work[np1], &u[
+ u_offset], ldu);
+ }
+ }
+ if (*ncc > 0) {
+ if (sqre1 == 0) {
+ dlasr_("L", "V", "F", n, ncc, &work[1], &work[np1], &c__[
+ c_offset], ldc);
+ } else {
+ dlasr_("L", "V", "F", &np1, ncc, &work[1], &work[np1], &c__[
+ c_offset], ldc);
+ }
+ }
+ }
+
+/*
+ Call DBDSQR to compute the SVD of the reduced real
+ N-by-N upper bidiagonal matrix.
+*/
+
+ dbdsqr_("U", n, ncvt, nru, ncc, &d__[1], &e[1], &vt[vt_offset], ldvt, &u[
+ u_offset], ldu, &c__[c_offset], ldc, &work[1], info);
+
+/*
+ Sort the singular values into ascending order (insertion sort on
+ singular values, but only one transposition per singular vector)
+*/
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for smallest D(I). */
+
+ isub = i__;
+ smin = d__[i__];
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ if (d__[j] < smin) {
+ isub = j;
+ smin = d__[j];
+ }
+/* L30: */
+ }
+ if (isub != i__) {
+
+/* Swap singular values and vectors. */
+
+ d__[isub] = d__[i__];
+ d__[i__] = smin;
+ if (*ncvt > 0) {
+ dswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[i__ + vt_dim1],
+ ldvt);
+ }
+ if (*nru > 0) {
+ dswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[i__ * u_dim1 + 1]
+ , &c__1);
+ }
+ if (*ncc > 0) {
+ dswap_(ncc, &c__[isub + c_dim1], ldc, &c__[i__ + c_dim1], ldc)
+ ;
+ }
+ }
+/* L40: */
+ }
+
+ return 0;
+
+/* End of DLASDQ */
+
+} /* dlasdq_ */
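+
+/*
+  Illustrative sketch, not part of LAPACK: calling DLASDQ above for
+  singular values only.  With NCVT = NRU = NCC = 0 the vector arguments
+  are not referenced, but their leading dimensions must still be >= 1.
+  The example matrix and variable names are ours.
+*/
+static void example_dlasdq_values_only(void)
+{
+    doublereal d[3] = {3., 2., 1.};      /* diagonal of a 3x3 bidiagonal */
+    doublereal e[2] = {.5, .25};         /* superdiagonal, SQRE = 0 */
+    doublereal dummy[1], work[12];       /* WORK has dimension 4*N */
+    integer n = 3, sqre = 0, nz = 0, ld1 = 1, info;
+
+    dlasdq_("U", &sqre, &n, &nz, &nz, &nz, d, e, dummy, &ld1, dummy,
+            &ld1, dummy, &ld1, work, &info);
+    /* On success (info == 0), d holds the singular values in
+       ascending order. */
+}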
+
+/* Subroutine */ int dlasdt_(integer *n, integer *lvl, integer *nd, integer *
+ inode, integer *ndiml, integer *ndimr, integer *msub)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ static integer i__, il, ir, maxn;
+ static doublereal temp;
+ static integer nlvl, llst, ncrnt;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLASDT creates a tree of subproblems for bidiagonal divide and
+ conquer.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ On entry, the number of diagonal elements of the
+ bidiagonal matrix.
+
+ LVL (output) INTEGER
+ On exit, the number of levels on the computation tree.
+
+ ND (output) INTEGER
+ On exit, the number of nodes on the tree.
+
+ INODE (output) INTEGER array, dimension ( N )
+ On exit, centers of subproblems.
+
+ NDIML (output) INTEGER array, dimension ( N )
+ On exit, row dimensions of left children.
+
+ NDIMR (output) INTEGER array, dimension ( N )
+ On exit, row dimensions of right children.
+
+ MSUB (input) INTEGER.
+ On entry, the maximum row dimension of any subproblem at
+ the bottom of the tree.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Find the number of levels on the tree.
+*/
+
+ /* Parameter adjustments */
+ --ndimr;
+ --ndiml;
+ --inode;
+
+ /* Function Body */
+ maxn = max(1,*n);
+ temp = log((doublereal) maxn / (doublereal) (*msub + 1)) / log(2.);
+ *lvl = (integer) temp + 1;
+
+ i__ = *n / 2;
+ inode[1] = i__ + 1;
+ ndiml[1] = i__;
+ ndimr[1] = *n - i__ - 1;
+ il = 0;
+ ir = 1;
+ llst = 1;
+ i__1 = *lvl - 1;
+ for (nlvl = 1; nlvl <= i__1; ++nlvl) {
+
+/*
+ Constructing the tree at (NLVL+1)-st level. The number of
+ nodes created on this level is LLST * 2.
+*/
+
+ i__2 = llst - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ il += 2;
+ ir += 2;
+ ncrnt = llst + i__;
+ ndiml[il] = ndiml[ncrnt] / 2;
+ ndimr[il] = ndiml[ncrnt] - ndiml[il] - 1;
+ inode[il] = inode[ncrnt] - ndimr[il] - 1;
+ ndiml[ir] = ndimr[ncrnt] / 2;
+ ndimr[ir] = ndimr[ncrnt] - ndiml[ir] - 1;
+ inode[ir] = inode[ncrnt] + ndiml[ir] + 1;
+/* L10: */
+ }
+ llst <<= 1;
+/* L20: */
+ }
+ *nd = ((llst) << (1)) - 1;
+
+ return 0;
+
+/* End of DLASDT */
+
+} /* dlasdt_ */
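+
+/*
+  Illustrative sketch, not part of LAPACK: a small call to DLASDT
+  above.  With N = 100 and MSUB = 25 the level count is
+  floor(log2(100/26)) + 1 = 2, giving ND = 2^2 - 1 = 3 nodes.  The
+  variable names and sizes are ours.
+*/
+static void example_dlasdt(void)
+{
+    integer n = 100, msub = 25, lvl, nd;
+    integer inode[100], ndiml[100], ndimr[100];
+
+    dlasdt_(&n, &lvl, &nd, inode, ndiml, ndimr, &msub);
+    /* inode[0] is the center row of the root problem (here 51);
+       ndiml/ndimr give the row counts of each node's children. */
+}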
+
+/* Subroutine */ int dlaset_(char *uplo, integer *m, integer *n, doublereal *
+ alpha, doublereal *beta, doublereal *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLASET initializes an m-by-n matrix A to BETA on the diagonal and
+ ALPHA on the offdiagonals.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies the part of the matrix A to be set.
+ = 'U': Upper triangular part is set; the strictly lower
+ triangular part of A is not changed.
+ = 'L': Lower triangular part is set; the strictly upper
+ triangular part of A is not changed.
+ Otherwise: All of the matrix A is set.
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ ALPHA (input) DOUBLE PRECISION
+ The constant to which the offdiagonal elements are to be set.
+
+ BETA (input) DOUBLE PRECISION
+ The constant to which the diagonal elements are to be set.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On exit, the leading m-by-n submatrix of A is set as follows:
+
+ if UPLO = 'U', A(i,j) = ALPHA, 1<=i<=j-1, 1<=j<=n,
+ if UPLO = 'L', A(i,j) = ALPHA, j+1<=i<=m, 1<=j<=n,
+ otherwise, A(i,j) = ALPHA, 1<=i<=m, 1<=j<=n, i.ne.j,
+
+ and, for all UPLO, A(i,i) = BETA, 1<=i<=min(m,n).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+
+/*
+ Set the strictly upper triangular or trapezoidal part of the
+ array to ALPHA.
+*/
+
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j - 1;
+ i__2 = min(i__3,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = *alpha;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+
+/*
+ Set the strictly lower triangular or trapezoidal part of the
+ array to ALPHA.
+*/
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = *alpha;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ } else {
+
+/* Set the leading m-by-n submatrix to ALPHA. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = *alpha;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+/* Set the first min(M,N) diagonal elements to BETA. */
+
+ i__1 = min(*m,*n);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ a[i__ + i__ * a_dim1] = *beta;
+/* L70: */
+ }
+
+ return 0;
+
+/* End of DLASET */
+
+} /* dlaset_ */
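+
+/*
+  Illustrative sketch, not part of LAPACK: using DLASET above to build
+  a 3x3 identity matrix, much as the bottom-level setup in DLASDA does
+  before calling DLASDQ.  The array and variable names are ours.
+*/
+static void example_dlaset_identity(void)
+{
+    doublereal a[9];                 /* 3x3, column-major */
+    doublereal zero = 0., one = 1.;
+    integer n = 3, lda = 3;
+
+    /* ALPHA = 0 fills the off-diagonal, BETA = 1 fills the diagonal. */
+    dlaset_("A", &n, &n, &zero, &one, a, &lda);
+}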
+
+/* Subroutine */ int dlasq1_(integer *n, doublereal *d__, doublereal *e,
+ doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__;
+ static doublereal eps;
+ extern /* Subroutine */ int dlas2_(doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *);
+ static doublereal scale;
+ static integer iinfo;
+ static doublereal sigmn;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static doublereal sigmx;
+ extern /* Subroutine */ int dlasq2_(integer *, doublereal *, integer *);
+
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ static doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), dlasrt_(
+ char *, integer *, doublereal *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DLASQ1 computes the singular values of a real N-by-N bidiagonal
+ matrix with diagonal D and off-diagonal E. The singular values
+ are computed to high relative accuracy, in the absence of
+ denormalization, underflow and overflow. The algorithm was first
+ presented in
+
+ "Accurate singular values and differential qd algorithms" by K. V.
+ Fernando and B. N. Parlett, Numer. Math., Vol-67, No. 2, pp. 191-230,
+ 1994,
+
+ and the present implementation is described in "An implementation of
+ the dqds Algorithm (Positive Case)", LAPACK Working Note.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of rows and columns in the matrix. N >= 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, D contains the diagonal elements of the
+ bidiagonal matrix whose SVD is desired. On normal exit,
+ D contains the singular values in decreasing order.
+
+ E (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, elements E(1:N-1) contain the off-diagonal elements
+ of the bidiagonal matrix whose SVD is desired.
+ On exit, E is overwritten.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (4*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: the algorithm failed
+ = 1, a split was marked by a positive value in E
+ = 2, current block of Z not diagonalized after 30*N
+ iterations (in inner while loop)
+ = 3, termination criterion of outer while loop not met
+ (program created more than N unreduced blocks)
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --work;
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -2;
+ i__1 = -(*info);
+ xerbla_("DLASQ1", &i__1);
+ return 0;
+ } else if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+ d__[1] = abs(d__[1]);
+ return 0;
+ } else if (*n == 2) {
+ dlas2_(&d__[1], &e[1], &d__[2], &sigmn, &sigmx);
+ d__[1] = sigmx;
+ d__[2] = sigmn;
+ return 0;
+ }
+
+/* Estimate the largest singular value. */
+
+ sigmx = 0.;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = (d__1 = d__[i__], abs(d__1));
+/* Computing MAX */
+ d__2 = sigmx, d__3 = (d__1 = e[i__], abs(d__1));
+ sigmx = max(d__2,d__3);
+/* L10: */
+ }
+ d__[*n] = (d__1 = d__[*n], abs(d__1));
+
+/* Early return if SIGMX is zero (matrix is already diagonal). */
+
+ if (sigmx == 0.) {
+ dlasrt_("D", n, &d__[1], &iinfo);
+ return 0;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ d__1 = sigmx, d__2 = d__[i__];
+ sigmx = max(d__1,d__2);
+/* L20: */
+ }
+
+/*
+ Copy D and E into WORK (in the Z format) and scale (squaring the
+ input data makes scaling by a power of the radix pointless).
+*/
+
+ eps = PRECISION;
+ safmin = SAFEMINIMUM;
+ scale = sqrt(eps / safmin);
+ dcopy_(n, &d__[1], &c__1, &work[1], &c__2);
+ i__1 = *n - 1;
+ dcopy_(&i__1, &e[1], &c__1, &work[2], &c__2);
+ i__1 = ((*n) << (1)) - 1;
+ i__2 = ((*n) << (1)) - 1;
+ dlascl_("G", &c__0, &c__0, &sigmx, &scale, &i__1, &c__1, &work[1], &i__2,
+ &iinfo);
+
+/* Compute the q's and e's. */
+
+ i__1 = ((*n) << (1)) - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+ d__1 = work[i__];
+ work[i__] = d__1 * d__1;
+/* L30: */
+ }
+ work[*n * 2] = 0.;
+
+ dlasq2_(n, &work[1], info);
+
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = sqrt(work[i__]);
+/* L40: */
+ }
+ dlascl_("G", &c__0, &c__0, &scale, &sigmx, n, &c__1, &d__[1], n, &
+ iinfo);
+ }
+
+ return 0;
+
+/* End of DLASQ1 */
+
+} /* dlasq1_ */
+
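+/*
+   Minimal usage sketch (editorial addition, not in the original
+   Fortran): singular values of the 3-by-3 bidiagonal matrix with
+   diagonal (1, 2, 3) and off-diagonal (0.5, 0.5), assuming the f2c
+   typedefs from f2c.h. WORK needs at least 4*N entries; on success
+   D holds the singular values in decreasing order.
+
+       integer n = 3, info;
+       doublereal d[3] = {1., 2., 3.};
+       doublereal e[3] = {.5, .5, 0.};
+       doublereal work[12];
+       dlasq1_(&n, d, e, work, &info);
+*/
+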
+/* Subroutine */ int dlasq2_(integer *n, doublereal *z__, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal d__, e;
+ static integer k;
+ static doublereal s, t;
+ static integer i0, i4, n0, pp;
+ static doublereal eps, tol;
+ static integer ipn4;
+ static doublereal tol2;
+ static logical ieee;
+ static integer nbig;
+ static doublereal dmin__, emin, emax;
+ static integer ndiv, iter;
+ static doublereal qmin, temp, qmax, zmax;
+ static integer splt, nfail;
+ static doublereal desig, trace, sigma;
+ static integer iinfo;
+ extern /* Subroutine */ int dlasq3_(integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, doublereal *,
+ integer *, integer *, integer *, logical *);
+
+ static integer iwhila, iwhilb;
+ static doublereal oldemn, safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+ integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DLASQ2 computes all the eigenvalues of the symmetric positive
+ definite tridiagonal matrix associated with the qd array Z to high
+ relative accuracy, in the absence of denormalization, underflow
+ and overflow.
+
+ To see the relation of Z to the tridiagonal matrix, let L be a
+ unit lower bidiagonal matrix with subdiagonals Z(2,4,6,,..) and
+ let U be an upper bidiagonal matrix with 1's above and diagonal
+ Z(1,3,5,,..). The tridiagonal is L*U or, if you prefer, the
+ symmetric tridiagonal to which it is similar.
+
+ Note: DLASQ2 defines a logical variable, IEEE, which is true
+ on machines that follow the IEEE-754 floating-point standard in
+ their handling of infinities and NaNs, and false otherwise. This
+ variable is passed to DLASQ3.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of rows and columns in the matrix. N >= 0.
+
+ Z (workspace) DOUBLE PRECISION array, dimension ( 4*N )
+ On entry Z holds the qd array. On exit, entries 1 to N hold
+ the eigenvalues in decreasing order, Z( 2*N+1 ) holds the
+ trace, and Z( 2*N+2 ) holds the sum of the eigenvalues. If
+ N > 2, then Z( 2*N+3 ) holds the iteration count, Z( 2*N+4 )
+ holds NDIV/N^2, and Z( 2*N+5 ) holds the percentage of
+ shifts that failed.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if the i-th argument is a scalar and had an illegal
+ value, then INFO = -i, if the i-th argument is an
+ array and the j-entry had an illegal value, then
+ INFO = -(i*100+j)
+ > 0: the algorithm failed
+ = 1, a split was marked by a positive value in E
+ = 2, current block of Z not diagonalized after 30*N
+ iterations (in inner while loop)
+ = 3, termination criterion of outer while loop not met
+ (program created more than N unreduced blocks)
+
+ Further Details
+ ===============
+ Local Variables: I0:N0 defines a current unreduced segment of Z.
+ The shifts are accumulated in SIGMA. Iteration count is in ITER.
+ Ping-pong is controlled by PP (alternates between 0 and 1).
+
+ =====================================================================
+
+
+ Test the input arguments.
+ (in case DLASQ2 is not called by DLASQ1)
+*/
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ *info = 0;
+ eps = PRECISION;
+ safmin = SAFEMINIMUM;
+ tol = eps * 100.;
+/* Computing 2nd power */
+ d__1 = tol;
+ tol2 = d__1 * d__1;
+
+ if (*n < 0) {
+ *info = -1;
+ xerbla_("DLASQ2", &c__1);
+ return 0;
+ } else if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+
+/* 1-by-1 case. */
+
+ if (z__[1] < 0.) {
+ *info = -201;
+ xerbla_("DLASQ2", &c__2);
+ }
+ return 0;
+ } else if (*n == 2) {
+
+/* 2-by-2 case. */
+
+ if ((z__[2] < 0.) || (z__[3] < 0.)) {
+ *info = -2;
+ xerbla_("DLASQ2", &c__2);
+ return 0;
+ } else if (z__[3] > z__[1]) {
+ d__ = z__[3];
+ z__[3] = z__[1];
+ z__[1] = d__;
+ }
+ z__[5] = z__[1] + z__[2] + z__[3];
+ if (z__[2] > z__[3] * tol2) {
+ t = (z__[1] - z__[3] + z__[2]) * .5;
+ s = z__[3] * (z__[2] / t);
+ if (s <= t) {
+ s = z__[3] * (z__[2] / (t * (sqrt(s / t + 1.) + 1.)));
+ } else {
+ s = z__[3] * (z__[2] / (t + sqrt(t) * sqrt(t + s)));
+ }
+ t = z__[1] + (s + z__[2]);
+ z__[3] *= z__[1] / t;
+ z__[1] = t;
+ }
+ z__[2] = z__[3];
+ z__[6] = z__[2] + z__[1];
+ return 0;
+ }
+
+/* Check for negative data and compute sums of q's and e's. */
+
+ z__[*n * 2] = 0.;
+ emin = z__[2];
+ qmax = 0.;
+ zmax = 0.;
+ d__ = 0.;
+ e = 0.;
+
+ i__1 = (*n - 1) << (1);
+ for (k = 1; k <= i__1; k += 2) {
+ if (z__[k] < 0.) {
+ *info = -(k + 200);
+ xerbla_("DLASQ2", &c__2);
+ return 0;
+ } else if (z__[k + 1] < 0.) {
+ *info = -(k + 201);
+ xerbla_("DLASQ2", &c__2);
+ return 0;
+ }
+ d__ += z__[k];
+ e += z__[k + 1];
+/* Computing MAX */
+ d__1 = qmax, d__2 = z__[k];
+ qmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[k + 1];
+ emin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = max(qmax,zmax), d__2 = z__[k + 1];
+ zmax = max(d__1,d__2);
+/* L10: */
+ }
+ if (z__[((*n) << (1)) - 1] < 0.) {
+ *info = -(((*n) << (1)) + 199);
+ xerbla_("DLASQ2", &c__2);
+ return 0;
+ }
+ d__ += z__[((*n) << (1)) - 1];
+/* Computing MAX */
+ d__1 = qmax, d__2 = z__[((*n) << (1)) - 1];
+ qmax = max(d__1,d__2);
+ zmax = max(qmax,zmax);
+
+/* Check for diagonality. */
+
+ if (e == 0.) {
+ i__1 = *n;
+ for (k = 2; k <= i__1; ++k) {
+ z__[k] = z__[((k) << (1)) - 1];
+/* L20: */
+ }
+ dlasrt_("D", n, &z__[1], &iinfo);
+ z__[((*n) << (1)) - 1] = d__;
+ return 0;
+ }
+
+ trace = d__ + e;
+
+/* Check for zero data. */
+
+ if (trace == 0.) {
+ z__[((*n) << (1)) - 1] = 0.;
+ return 0;
+ }
+
+/* Check whether the machine is IEEE conformable. */
+
+ ieee = ilaenv_(&c__10, "DLASQ2", "N", &c__1, &c__2, &c__3, &c__4, (ftnlen)
+ 6, (ftnlen)1) == 1 && ilaenv_(&c__11, "DLASQ2", "N", &c__1, &c__2,
+ &c__3, &c__4, (ftnlen)6, (ftnlen)1) == 1;
+
+/* Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). */
+
+ for (k = (*n) << (1); k >= 2; k += -2) {
+ z__[k * 2] = 0.;
+ z__[((k) << (1)) - 1] = z__[k];
+ z__[((k) << (1)) - 2] = 0.;
+ z__[((k) << (1)) - 3] = z__[k - 1];
+/* L30: */
+ }
+
+ i0 = 1;
+ n0 = *n;
+
+/* Reverse the qd-array, if warranted. */
+
+ if (z__[((i0) << (2)) - 3] * 1.5 < z__[((n0) << (2)) - 3]) {
+ ipn4 = (i0 + n0) << (2);
+ i__1 = (i0 + n0 - 1) << (1);
+ for (i4 = (i0) << (2); i4 <= i__1; i4 += 4) {
+ temp = z__[i4 - 3];
+ z__[i4 - 3] = z__[ipn4 - i4 - 3];
+ z__[ipn4 - i4 - 3] = temp;
+ temp = z__[i4 - 1];
+ z__[i4 - 1] = z__[ipn4 - i4 - 5];
+ z__[ipn4 - i4 - 5] = temp;
+/* L40: */
+ }
+ }
+
+/* Initial split checking via dqd and Li's test. */
+
+ pp = 0;
+
+ for (k = 1; k <= 2; ++k) {
+
+ d__ = z__[((n0) << (2)) + pp - 3];
+ i__1 = ((i0) << (2)) + pp;
+ for (i4 = ((n0 - 1) << (2)) + pp; i4 >= i__1; i4 += -4) {
+ if (z__[i4 - 1] <= tol2 * d__) {
+ z__[i4 - 1] = -0.;
+ d__ = z__[i4 - 3];
+ } else {
+ d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1]));
+ }
+/* L50: */
+ }
+
+/* dqd maps Z to ZZ plus Li's test. */
+
+ emin = z__[((i0) << (2)) + pp + 1];
+ d__ = z__[((i0) << (2)) + pp - 3];
+ i__1 = ((n0 - 1) << (2)) + pp;
+ for (i4 = ((i0) << (2)) + pp; i4 <= i__1; i4 += 4) {
+ z__[i4 - ((pp) << (1)) - 2] = d__ + z__[i4 - 1];
+ if (z__[i4 - 1] <= tol2 * d__) {
+ z__[i4 - 1] = -0.;
+ z__[i4 - ((pp) << (1)) - 2] = d__;
+ z__[i4 - ((pp) << (1))] = 0.;
+ d__ = z__[i4 + 1];
+ } else if (safmin * z__[i4 + 1] < z__[i4 - ((pp) << (1)) - 2] &&
+ safmin * z__[i4 - ((pp) << (1)) - 2] < z__[i4 + 1]) {
+ temp = z__[i4 + 1] / z__[i4 - ((pp) << (1)) - 2];
+ z__[i4 - ((pp) << (1))] = z__[i4 - 1] * temp;
+ d__ *= temp;
+ } else {
+ z__[i4 - ((pp) << (1))] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4
+ - ((pp) << (1)) - 2]);
+ d__ = z__[i4 + 1] * (d__ / z__[i4 - ((pp) << (1)) - 2]);
+ }
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[i4 - ((pp) << (1))];
+ emin = min(d__1,d__2);
+/* L60: */
+ }
+ z__[((n0) << (2)) - pp - 2] = d__;
+
+/* Now find qmax. */
+
+ qmax = z__[((i0) << (2)) - pp - 2];
+ i__1 = ((n0) << (2)) - pp - 2;
+ for (i4 = ((i0) << (2)) - pp + 2; i4 <= i__1; i4 += 4) {
+/* Computing MAX */
+ d__1 = qmax, d__2 = z__[i4];
+ qmax = max(d__1,d__2);
+/* L70: */
+ }
+
+/* Prepare for the next iteration on K. */
+
+ pp = 1 - pp;
+/* L80: */
+ }
+
+ iter = 2;
+ nfail = 0;
+ ndiv = (n0 - i0) << (1);
+
+ i__1 = *n + 1;
+ for (iwhila = 1; iwhila <= i__1; ++iwhila) {
+ if (n0 < 1) {
+ goto L150;
+ }
+
+/*
+ While array unfinished do
+
+ E(N0) holds the value of SIGMA when submatrix in I0:N0
+ splits from the rest of the array, but is negated.
+*/
+
+ desig = 0.;
+ if (n0 == *n) {
+ sigma = 0.;
+ } else {
+ sigma = -z__[((n0) << (2)) - 1];
+ }
+ if (sigma < 0.) {
+ *info = 1;
+ return 0;
+ }
+
+/*
+ Find last unreduced submatrix's top index I0, find QMAX and
+ EMIN. Find Gershgorin-type bound if Q's much greater than E's.
+*/
+
+ emax = 0.;
+ if (n0 > i0) {
+ emin = (d__1 = z__[((n0) << (2)) - 5], abs(d__1));
+ } else {
+ emin = 0.;
+ }
+ qmin = z__[((n0) << (2)) - 3];
+ qmax = qmin;
+ for (i4 = (n0) << (2); i4 >= 8; i4 += -4) {
+ if (z__[i4 - 5] <= 0.) {
+ goto L100;
+ }
+ if (qmin >= emax * 4.) {
+/* Computing MIN */
+ d__1 = qmin, d__2 = z__[i4 - 3];
+ qmin = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = emax, d__2 = z__[i4 - 5];
+ emax = max(d__1,d__2);
+ }
+/* Computing MAX */
+ d__1 = qmax, d__2 = z__[i4 - 7] + z__[i4 - 5];
+ qmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[i4 - 5];
+ emin = min(d__1,d__2);
+/* L90: */
+ }
+ i4 = 4;
+
+L100:
+ i0 = i4 / 4;
+
+/* Store EMIN for passing to DLASQ3. */
+
+ z__[((n0) << (2)) - 1] = emin;
+
+/*
+ Put -(initial shift) into DMIN.
+
+ Computing MAX
+*/
+ d__1 = 0., d__2 = qmin - sqrt(qmin) * 2. * sqrt(emax);
+ dmin__ = -max(d__1,d__2);
+
+/* Now I0:N0 is unreduced. PP = 0 for ping, PP = 1 for pong. */
+
+ pp = 0;
+
+ nbig = (n0 - i0 + 1) * 30;
+ i__2 = nbig;
+ for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) {
+ if (i0 > n0) {
+ goto L130;
+ }
+
+/* While submatrix unfinished take a good dqds step. */
+
+ dlasq3_(&i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, &
+ nfail, &iter, &ndiv, &ieee);
+
+ pp = 1 - pp;
+
+/* When EMIN is very small check for splits. */
+
+ if (pp == 0 && n0 - i0 >= 3) {
+ if ((z__[n0 * 4] <= tol2 * qmax) || (z__[((n0) << (2)) - 1] <=
+ tol2 * sigma)) {
+ splt = i0 - 1;
+ qmax = z__[((i0) << (2)) - 3];
+ emin = z__[((i0) << (2)) - 1];
+ oldemn = z__[i0 * 4];
+ i__3 = (n0 - 3) << (2);
+ for (i4 = (i0) << (2); i4 <= i__3; i4 += 4) {
+ if ((z__[i4] <= tol2 * z__[i4 - 3]) || (z__[i4 - 1] <=
+ tol2 * sigma)) {
+ z__[i4 - 1] = -sigma;
+ splt = i4 / 4;
+ qmax = 0.;
+ emin = z__[i4 + 3];
+ oldemn = z__[i4 + 4];
+ } else {
+/* Computing MAX */
+ d__1 = qmax, d__2 = z__[i4 + 1];
+ qmax = max(d__1,d__2);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[i4 - 1];
+ emin = min(d__1,d__2);
+/* Computing MIN */
+ d__1 = oldemn, d__2 = z__[i4];
+ oldemn = min(d__1,d__2);
+ }
+/* L110: */
+ }
+ z__[((n0) << (2)) - 1] = emin;
+ z__[n0 * 4] = oldemn;
+ i0 = splt + 1;
+ }
+ }
+
+/* L120: */
+ }
+
+ *info = 2;
+ return 0;
+
+/* end IWHILB */
+
+L130:
+
+/* L140: */
+ ;
+ }
+
+ *info = 3;
+ return 0;
+
+/* end IWHILA */
+
+L150:
+
+/* Move q's to the front. */
+
+ i__1 = *n;
+ for (k = 2; k <= i__1; ++k) {
+ z__[k] = z__[((k) << (2)) - 3];
+/* L160: */
+ }
+
+/* Sort and compute sum of eigenvalues. */
+
+ dlasrt_("D", n, &z__[1], &iinfo);
+
+ e = 0.;
+ for (k = *n; k >= 1; --k) {
+ e += z__[k];
+/* L170: */
+ }
+
+/* Store trace, sum(eigenvalues) and information on performance. */
+
+ z__[((*n) << (1)) + 1] = trace;
+ z__[((*n) << (1)) + 2] = e;
+ z__[((*n) << (1)) + 3] = (doublereal) iter;
+/* Computing 2nd power */
+ i__1 = *n;
+ z__[((*n) << (1)) + 4] = (doublereal) ndiv / (doublereal) (i__1 * i__1);
+ z__[((*n) << (1)) + 5] = nfail * 100. / (doublereal) iter;
+ return 0;
+
+/* End of DLASQ2 */
+
+} /* dlasq2_ */
+
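+/*
+   Sketch of the qd input layout (editorial addition, not in the
+   original Fortran), mirroring what DLASQ1 does above: Z interleaves
+   the squared diagonal and off-diagonal entries of a bidiagonal
+   matrix, Z = (d1^2, e1^2, d2^2, e2^2, ...), with Z(2*N) zeroed, and
+   must provide 4*N entries of scratch. On success Z(1..N) holds the
+   eigenvalues, i.e. the squared singular values, in decreasing
+   order. The f2c typedefs are assumed to come from f2c.h.
+
+       integer n = 3, info, k;
+       doublereal d[3] = {1., 2., 3.}, e[2] = {.5, .5};
+       doublereal z[12];
+       for (k = 0; k < n - 1; ++k) {
+           z[2 * k] = d[k] * d[k];
+           z[2 * k + 1] = e[k] * e[k];
+       }
+       z[2 * n - 2] = d[n - 1] * d[n - 1];
+       z[2 * n - 1] = 0.;
+       dlasq2_(&n, z, &info);
+*/
+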
+/* Subroutine */ int dlasq3_(integer *i0, integer *n0, doublereal *z__,
+ integer *pp, doublereal *dmin__, doublereal *sigma, doublereal *desig,
+ doublereal *qmax, integer *nfail, integer *iter, integer *ndiv,
+ logical *ieee)
+{
+ /* Initialized data */
+
+ static integer ttype = 0;
+ static doublereal dmin1 = 0.;
+ static doublereal dmin2 = 0.;
+ static doublereal dn = 0.;
+ static doublereal dn1 = 0.;
+ static doublereal dn2 = 0.;
+ static doublereal tau = 0.;
+
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal s, t;
+ static integer j4, nn;
+ static doublereal eps, tol;
+ static integer n0in, ipn4;
+ static doublereal tol2, temp;
+ extern /* Subroutine */ int dlasq4_(integer *, integer *, doublereal *,
+ integer *, integer *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *)
+ , dlasq5_(integer *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, logical *), dlasq6_(
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *);
+
+ static doublereal safmin;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ May 17, 2000
+
+
+ Purpose
+ =======
+
+ DLASQ3 checks for deflation, computes a shift (TAU) and calls dqds.
+ In case of failure it changes shifts, and tries again until output
+ is positive.
+
+ Arguments
+ =========
+
+ I0 (input) INTEGER
+ First index.
+
+ N0 (input) INTEGER
+ Last index.
+
+ Z (input) DOUBLE PRECISION array, dimension ( 4*N )
+ Z holds the qd array.
+
+ PP (input) INTEGER
+ PP=0 for ping, PP=1 for pong.
+
+ DMIN (output) DOUBLE PRECISION
+ Minimum value of d.
+
+ SIGMA (output) DOUBLE PRECISION
+ Sum of shifts used in current segment.
+
+ DESIG (input/output) DOUBLE PRECISION
+ Lower order part of SIGMA
+
+ QMAX (input) DOUBLE PRECISION
+ Maximum value of q.
+
+ NFAIL (output) INTEGER
+ Number of times shift was too big.
+
+ ITER (output) INTEGER
+ Number of iterations.
+
+ NDIV (output) INTEGER
+ Number of divisions.
+
+ TTYPE (output) INTEGER
+ Shift type. (In this C translation TTYPE is kept as a
+ static local variable of DLASQ3 rather than an argument.)
+
+ IEEE (input) LOGICAL
+ Flag for IEEE or non-IEEE arithmetic (passed to DLASQ5).
+
+ =====================================================================
+*/
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+
+ n0in = *n0;
+ eps = PRECISION;
+ safmin = SAFEMINIMUM;
+ tol = eps * 100.;
+/* Computing 2nd power */
+ d__1 = tol;
+ tol2 = d__1 * d__1;
+
+/* Check for deflation. */
+
+L10:
+
+ if (*n0 < *i0) {
+ return 0;
+ }
+ if (*n0 == *i0) {
+ goto L20;
+ }
+ nn = ((*n0) << (2)) + *pp;
+ if (*n0 == *i0 + 1) {
+ goto L40;
+ }
+
+/* Check whether E(N0-1) is negligible, 1 eigenvalue. */
+
+ if (z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) && z__[nn - ((*pp) << (1))
+ - 4] > tol2 * z__[nn - 7]) {
+ goto L30;
+ }
+
+L20:
+
+ z__[((*n0) << (2)) - 3] = z__[((*n0) << (2)) + *pp - 3] + *sigma;
+ --(*n0);
+ goto L10;
+
+/* Check whether E(N0-2) is negligible, 2 eigenvalues. */
+
+L30:
+
+ if (z__[nn - 9] > tol2 * *sigma && z__[nn - ((*pp) << (1)) - 8] > tol2 *
+ z__[nn - 11]) {
+ goto L50;
+ }
+
+L40:
+
+ if (z__[nn - 3] > z__[nn - 7]) {
+ s = z__[nn - 3];
+ z__[nn - 3] = z__[nn - 7];
+ z__[nn - 7] = s;
+ }
+ if (z__[nn - 5] > z__[nn - 3] * tol2) {
+ t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5;
+ s = z__[nn - 3] * (z__[nn - 5] / t);
+ if (s <= t) {
+ s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt(s / t + 1.) + 1.)));
+ } else {
+ s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt(t) * sqrt(t + s)));
+ }
+ t = z__[nn - 7] + (s + z__[nn - 5]);
+ z__[nn - 3] *= z__[nn - 7] / t;
+ z__[nn - 7] = t;
+ }
+ z__[((*n0) << (2)) - 7] = z__[nn - 7] + *sigma;
+ z__[((*n0) << (2)) - 3] = z__[nn - 3] + *sigma;
+ *n0 += -2;
+ goto L10;
+
+L50:
+
+/* Reverse the qd-array, if warranted. */
+
+ if ((*dmin__ <= 0.) || (*n0 < n0in)) {
+ if (z__[((*i0) << (2)) + *pp - 3] * 1.5 < z__[((*n0) << (2)) + *pp -
+ 3]) {
+ ipn4 = (*i0 + *n0) << (2);
+ i__1 = (*i0 + *n0 - 1) << (1);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ temp = z__[j4 - 3];
+ z__[j4 - 3] = z__[ipn4 - j4 - 3];
+ z__[ipn4 - j4 - 3] = temp;
+ temp = z__[j4 - 2];
+ z__[j4 - 2] = z__[ipn4 - j4 - 2];
+ z__[ipn4 - j4 - 2] = temp;
+ temp = z__[j4 - 1];
+ z__[j4 - 1] = z__[ipn4 - j4 - 5];
+ z__[ipn4 - j4 - 5] = temp;
+ temp = z__[j4];
+ z__[j4] = z__[ipn4 - j4 - 4];
+ z__[ipn4 - j4 - 4] = temp;
+/* L60: */
+ }
+ if (*n0 - *i0 <= 4) {
+ z__[((*n0) << (2)) + *pp - 1] = z__[((*i0) << (2)) + *pp - 1];
+ z__[((*n0) << (2)) - *pp] = z__[((*i0) << (2)) - *pp];
+ }
+/* Computing MIN */
+ d__1 = dmin2, d__2 = z__[((*n0) << (2)) + *pp - 1];
+ dmin2 = min(d__1,d__2);
+/* Computing MIN */
+ d__1 = z__[((*n0) << (2)) + *pp - 1], d__2 = z__[((*i0) << (2)) +
+ *pp - 1], d__1 = min(d__1,d__2), d__2 = z__[((*i0) << (2))
+ + *pp + 3];
+ z__[((*n0) << (2)) + *pp - 1] = min(d__1,d__2);
+/* Computing MIN */
+ d__1 = z__[((*n0) << (2)) - *pp], d__2 = z__[((*i0) << (2)) - *pp]
+ , d__1 = min(d__1,d__2), d__2 = z__[((*i0) << (2)) - *pp
+ + 4];
+ z__[((*n0) << (2)) - *pp] = min(d__1,d__2);
+/* Computing MAX */
+ d__1 = *qmax, d__2 = z__[((*i0) << (2)) + *pp - 3], d__1 = max(
+ d__1,d__2), d__2 = z__[((*i0) << (2)) + *pp + 1];
+ *qmax = max(d__1,d__2);
+ *dmin__ = -0.;
+ }
+ }
+
+/*
+ L70:
+
+ Computing MIN
+*/
+ d__1 = z__[((*n0) << (2)) + *pp - 1], d__2 = z__[((*n0) << (2)) + *pp - 9]
+ , d__1 = min(d__1,d__2), d__2 = dmin2 + z__[((*n0) << (2)) - *pp];
+ if ((*dmin__ < 0.) || (safmin * *qmax < min(d__1,d__2))) {
+
+/* Choose a shift. */
+
+ dlasq4_(i0, n0, &z__[1], pp, &n0in, dmin__, &dmin1, &dmin2, &dn, &dn1,
+ &dn2, &tau, &ttype);
+
+/* Call dqds until DMIN > 0. */
+
+L80:
+
+ dlasq5_(i0, n0, &z__[1], pp, &tau, dmin__, &dmin1, &dmin2, &dn, &dn1,
+ &dn2, ieee);
+
+ *ndiv += *n0 - *i0 + 2;
+ ++(*iter);
+
+/* Check status. */
+
+ if (*dmin__ >= 0. && dmin1 > 0.) {
+
+/* Success. */
+
+ goto L100;
+
+ } else if (*dmin__ < 0. && dmin1 > 0. && z__[((*n0 - 1) << (2)) - *pp]
+ < tol * (*sigma + dn1) && abs(dn) < tol * *sigma) {
+
+/* Convergence hidden by negative DN. */
+
+ z__[((*n0 - 1) << (2)) - *pp + 2] = 0.;
+ *dmin__ = 0.;
+ goto L100;
+ } else if (*dmin__ < 0.) {
+
+/* TAU too big. Select new TAU and try again. */
+
+ ++(*nfail);
+ if (ttype < -22) {
+
+/* Failed twice. Play it safe. */
+
+ tau = 0.;
+ } else if (dmin1 > 0.) {
+
+/* Late failure. Gives excellent shift. */
+
+ tau = (tau + *dmin__) * (1. - eps * 2.);
+ ttype += -11;
+ } else {
+
+/* Early failure. Divide by 4. */
+
+ tau *= .25;
+ ttype += -12;
+ }
+ goto L80;
+ } else if (*dmin__ != *dmin__) {
+
+/* NaN. */
+
+ tau = 0.;
+ goto L80;
+ } else {
+
+/* Possible underflow. Play it safe. */
+
+ goto L90;
+ }
+ }
+
+/* Risk of underflow. */
+
+L90:
+ dlasq6_(i0, n0, &z__[1], pp, dmin__, &dmin1, &dmin2, &dn, &dn1, &dn2);
+ *ndiv += *n0 - *i0 + 2;
+ ++(*iter);
+ tau = 0.;
+
+L100:
+ if (tau < *sigma) {
+ *desig += tau;
+ t = *sigma + *desig;
+ *desig -= t - *sigma;
+ } else {
+ t = *sigma + tau;
+ *desig = *sigma - (t - tau) + *desig;
+ }
+ *sigma = t;
+
+ return 0;
+
+/* End of DLASQ3 */
+
+} /* dlasq3_ */
+
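+/*
+   Flow summary of the routine above (editorial comment, not in the
+   original Fortran): labels L10-L40 peel one or two converged
+   eigenvalues off the bottom of the segment (deflation), L50
+   optionally reverses the qd array, DLASQ4 then proposes a shift
+   TAU, and DLASQ5 is retried with safer shifts until DMIN comes out
+   non-negative, falling back to the zero-shift DLASQ6 when
+   underflow threatens.
+*/
+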
+/* Subroutine */ int dlasq4_(integer *i0, integer *n0, doublereal *z__,
+ integer *pp, integer *n0in, doublereal *dmin__, doublereal *dmin1,
+ doublereal *dmin2, doublereal *dn, doublereal *dn1, doublereal *dn2,
+ doublereal *tau, integer *ttype)
+{
+ /* Initialized data */
+
+ static doublereal g = 0.;
+
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal s, a2, b1, b2;
+ static integer i4, nn, np;
+ static doublereal gam, gap1, gap2;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DLASQ4 computes an approximation TAU to the smallest eigenvalue
+ using values of d from the previous transform.
+
+ Arguments
+ =========
+
+ I0 (input) INTEGER
+ First index.
+
+ N0 (input) INTEGER
+ Last index.
+
+ Z (input) DOUBLE PRECISION array, dimension ( 4*N )
+ Z holds the qd array.
+
+ PP (input) INTEGER
+ PP=0 for ping, PP=1 for pong.
+
+ N0IN (input) INTEGER
+ The value of N0 at start of EIGTEST.
+
+ DMIN (input) DOUBLE PRECISION
+ Minimum value of d.
+
+ DMIN1 (input) DOUBLE PRECISION
+ Minimum value of d, excluding D( N0 ).
+
+ DMIN2 (input) DOUBLE PRECISION
+ Minimum value of d, excluding D( N0 ) and D( N0-1 ).
+
+ DN (input) DOUBLE PRECISION
+ d(N)
+
+ DN1 (input) DOUBLE PRECISION
+ d(N-1)
+
+ DN2 (input) DOUBLE PRECISION
+ d(N-2)
+
+ TAU (output) DOUBLE PRECISION
+ This is the shift.
+
+ TTYPE (output) INTEGER
+ Shift type.
+
+ Further Details
+ ===============
+ CNST1 = 9/16
+
+ =====================================================================
+*/
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+
+/*
+ A negative DMIN forces the shift to take that absolute value
+ TTYPE records the type of shift.
+*/
+
+ if (*dmin__ <= 0.) {
+ *tau = -(*dmin__);
+ *ttype = -1;
+ return 0;
+ }
+
+ nn = ((*n0) << (2)) + *pp;
+ if (*n0in == *n0) {
+
+/* No eigenvalues deflated. */
+
+ if ((*dmin__ == *dn) || (*dmin__ == *dn1)) {
+
+ b1 = sqrt(z__[nn - 3]) * sqrt(z__[nn - 5]);
+ b2 = sqrt(z__[nn - 7]) * sqrt(z__[nn - 9]);
+ a2 = z__[nn - 7] + z__[nn - 5];
+
+/* Cases 2 and 3. */
+
+ if (*dmin__ == *dn && *dmin1 == *dn1) {
+ gap2 = *dmin2 - a2 - *dmin2 * .25;
+ if (gap2 > 0. && gap2 > b2) {
+ gap1 = a2 - *dn - b2 / gap2 * b2;
+ } else {
+ gap1 = a2 - *dn - (b1 + b2);
+ }
+ if (gap1 > 0. && gap1 > b1) {
+/* Computing MAX */
+ d__1 = *dn - b1 / gap1 * b1, d__2 = *dmin__ * .5;
+ s = max(d__1,d__2);
+ *ttype = -2;
+ } else {
+ s = 0.;
+ if (*dn > b1) {
+ s = *dn - b1;
+ }
+ if (a2 > b1 + b2) {
+/* Computing MIN */
+ d__1 = s, d__2 = a2 - (b1 + b2);
+ s = min(d__1,d__2);
+ }
+/* Computing MAX */
+ d__1 = s, d__2 = *dmin__ * .333;
+ s = max(d__1,d__2);
+ *ttype = -3;
+ }
+ } else {
+
+/* Case 4. */
+
+ *ttype = -4;
+ s = *dmin__ * .25;
+ if (*dmin__ == *dn) {
+ gam = *dn;
+ a2 = 0.;
+ if (z__[nn - 5] > z__[nn - 7]) {
+ return 0;
+ }
+ b2 = z__[nn - 5] / z__[nn - 7];
+ np = nn - 9;
+ } else {
+ np = nn - ((*pp) << (1));
+ b2 = z__[np - 2];
+ gam = *dn1;
+ if (z__[np - 4] > z__[np - 2]) {
+ return 0;
+ }
+ a2 = z__[np - 4] / z__[np - 2];
+ if (z__[nn - 9] > z__[nn - 11]) {
+ return 0;
+ }
+ b2 = z__[nn - 9] / z__[nn - 11];
+ np = nn - 13;
+ }
+
+/* Approximate contribution to norm squared from I < NN-1. */
+
+ a2 += b2;
+ i__1 = ((*i0) << (2)) - 1 + *pp;
+ for (i4 = np; i4 >= i__1; i4 += -4) {
+ if (b2 == 0.) {
+ goto L20;
+ }
+ b1 = b2;
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b2 *= z__[i4] / z__[i4 - 2];
+ a2 += b2;
+ if ((max(b2,b1) * 100. < a2) || (.563 < a2)) {
+ goto L20;
+ }
+/* L10: */
+ }
+L20:
+ a2 *= 1.05;
+
+/* Rayleigh quotient residual bound. */
+
+ if (a2 < .563) {
+ s = gam * (1. - sqrt(a2)) / (a2 + 1.);
+ }
+ }
+ } else if (*dmin__ == *dn2) {
+
+/* Case 5. */
+
+ *ttype = -5;
+ s = *dmin__ * .25;
+
+/* Compute contribution to norm squared from I > NN-2. */
+
+ np = nn - ((*pp) << (1));
+ b1 = z__[np - 2];
+ b2 = z__[np - 6];
+ gam = *dn2;
+ if ((z__[np - 8] > b2) || (z__[np - 4] > b1)) {
+ return 0;
+ }
+ a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.);
+
+/* Approximate contribution to norm squared from I < NN-2. */
+
+ if (*n0 - *i0 > 2) {
+ b2 = z__[nn - 13] / z__[nn - 15];
+ a2 += b2;
+ i__1 = ((*i0) << (2)) - 1 + *pp;
+ for (i4 = nn - 17; i4 >= i__1; i4 += -4) {
+ if (b2 == 0.) {
+ goto L40;
+ }
+ b1 = b2;
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b2 *= z__[i4] / z__[i4 - 2];
+ a2 += b2;
+ if ((max(b2,b1) * 100. < a2) || (.563 < a2)) {
+ goto L40;
+ }
+/* L30: */
+ }
+L40:
+ a2 *= 1.05;
+ }
+
+ if (a2 < .563) {
+ s = gam * (1. - sqrt(a2)) / (a2 + 1.);
+ }
+ } else {
+
+/* Case 6, no information to guide us. */
+
+ if (*ttype == -6) {
+ g += (1. - g) * .333;
+ } else if (*ttype == -18) {
+ g = .083250000000000005;
+ } else {
+ g = .25;
+ }
+ s = g * *dmin__;
+ *ttype = -6;
+ }
+
+ } else if (*n0in == *n0 + 1) {
+
+/* One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. */
+
+ if (*dmin1 == *dn1 && *dmin2 == *dn2) {
+
+/* Cases 7 and 8. */
+
+ *ttype = -7;
+ s = *dmin1 * .333;
+ if (z__[nn - 5] > z__[nn - 7]) {
+ return 0;
+ }
+ b1 = z__[nn - 5] / z__[nn - 7];
+ b2 = b1;
+ if (b2 == 0.) {
+ goto L60;
+ }
+ i__1 = ((*i0) << (2)) - 1 + *pp;
+ for (i4 = ((*n0) << (2)) - 9 + *pp; i4 >= i__1; i4 += -4) {
+ a2 = b1;
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b1 *= z__[i4] / z__[i4 - 2];
+ b2 += b1;
+ if (max(b1,a2) * 100. < b2) {
+ goto L60;
+ }
+/* L50: */
+ }
+L60:
+ b2 = sqrt(b2 * 1.05);
+/* Computing 2nd power */
+ d__1 = b2;
+ a2 = *dmin1 / (d__1 * d__1 + 1.);
+ gap2 = *dmin2 * .5 - a2;
+ if (gap2 > 0. && gap2 > b2 * a2) {
+/* Computing MAX */
+ d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2);
+ s = max(d__1,d__2);
+ } else {
+/* Computing MAX */
+ d__1 = s, d__2 = a2 * (1. - b2 * 1.01);
+ s = max(d__1,d__2);
+ *ttype = -8;
+ }
+ } else {
+
+/* Case 9. */
+
+ s = *dmin1 * .25;
+ if (*dmin1 == *dn1) {
+ s = *dmin1 * .5;
+ }
+ *ttype = -9;
+ }
+
+ } else if (*n0in == *n0 + 2) {
+
+/*
+ Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN.
+
+ Cases 10 and 11.
+*/
+
+ if (*dmin2 == *dn2 && z__[nn - 5] * 2. < z__[nn - 7]) {
+ *ttype = -10;
+ s = *dmin2 * .333;
+ if (z__[nn - 5] > z__[nn - 7]) {
+ return 0;
+ }
+ b1 = z__[nn - 5] / z__[nn - 7];
+ b2 = b1;
+ if (b2 == 0.) {
+ goto L80;
+ }
+ i__1 = ((*i0) << (2)) - 1 + *pp;
+ for (i4 = ((*n0) << (2)) - 9 + *pp; i4 >= i__1; i4 += -4) {
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b1 *= z__[i4] / z__[i4 - 2];
+ b2 += b1;
+ if (b1 * 100. < b2) {
+ goto L80;
+ }
+/* L70: */
+ }
+L80:
+ b2 = sqrt(b2 * 1.05);
+/* Computing 2nd power */
+ d__1 = b2;
+ a2 = *dmin2 / (d__1 * d__1 + 1.);
+ gap2 = z__[nn - 7] + z__[nn - 9] - sqrt(z__[nn - 11]) * sqrt(z__[
+ nn - 9]) - a2;
+ if (gap2 > 0. && gap2 > b2 * a2) {
+/* Computing MAX */
+ d__1 = s, d__2 = a2 * (1. - a2 * 1.01 * (b2 / gap2) * b2);
+ s = max(d__1,d__2);
+ } else {
+/* Computing MAX */
+ d__1 = s, d__2 = a2 * (1. - b2 * 1.01);
+ s = max(d__1,d__2);
+ }
+ } else {
+ s = *dmin2 * .25;
+ *ttype = -11;
+ }
+ } else if (*n0in > *n0 + 2) {
+
+/* Case 12, more than two eigenvalues deflated. No information. */
+
+ s = 0.;
+ *ttype = -12;
+ }
+
+ *tau = s;
+ return 0;
+
+/* End of DLASQ4 */
+
+} /* dlasq4_ */
+
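+/*
+   Editorial note on the magic constants above (not in the original
+   Fortran): the literal .563 is CNST1 = 9/16 from the Further
+   Details section, the cutoff on the accumulated norm estimate A2,
+   and 1.05 is the safety growth factor applied to A2 before the
+   Rayleigh quotient bound s = gam*(1 - sqrt(a2))/(a2 + 1) is used.
+*/
+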
+/* Subroutine */ int dlasq5_(integer *i0, integer *n0, doublereal *z__,
+ integer *pp, doublereal *tau, doublereal *dmin__, doublereal *dmin1,
+ doublereal *dmin2, doublereal *dn, doublereal *dnm1, doublereal *dnm2,
+ logical *ieee)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ static doublereal d__;
+ static integer j4, j4p2;
+ static doublereal emin, temp;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ May 17, 2000
+
+
+ Purpose
+ =======
+
+ DLASQ5 computes one dqds transform in ping-pong form: one
+ version for IEEE machines, another for non-IEEE machines.
+
+ Arguments
+ =========
+
+ I0 (input) INTEGER
+ First index.
+
+ N0 (input) INTEGER
+ Last index.
+
+ Z (input) DOUBLE PRECISION array, dimension ( 4*N )
+ Z holds the qd array. EMIN is stored in Z(4*N0) to avoid
+ an extra argument.
+
+ PP (input) INTEGER
+ PP=0 for ping, PP=1 for pong.
+
+ TAU (input) DOUBLE PRECISION
+ This is the shift.
+
+ DMIN (output) DOUBLE PRECISION
+ Minimum value of d.
+
+ DMIN1 (output) DOUBLE PRECISION
+ Minimum value of d, excluding D( N0 ).
+
+ DMIN2 (output) DOUBLE PRECISION
+ Minimum value of d, excluding D( N0 ) and D( N0-1 ).
+
+ DN (output) DOUBLE PRECISION
+ d(N0), the last value of d.
+
+ DNM1 (output) DOUBLE PRECISION
+ d(N0-1).
+
+ DNM2 (output) DOUBLE PRECISION
+ d(N0-2).
+
+ IEEE (input) LOGICAL
+ Flag for IEEE or non-IEEE arithmetic.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ if (*n0 - *i0 - 1 <= 0) {
+ return 0;
+ }
+
+ j4 = ((*i0) << (2)) + *pp - 3;
+ emin = z__[j4 + 4];
+ d__ = z__[j4] - *tau;
+ *dmin__ = d__;
+ *dmin1 = -z__[j4];
+
+ if (*ieee) {
+
+/* Code for IEEE arithmetic. */
+
+ if (*pp == 0) {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 2] = d__ + z__[j4 - 1];
+ temp = z__[j4 + 1] / z__[j4 - 2];
+ d__ = d__ * temp - *tau;
+ *dmin__ = min(*dmin__,d__);
+ z__[j4] = z__[j4 - 1] * temp;
+/* Computing MIN */
+ d__1 = z__[j4];
+ emin = min(d__1,emin);
+/* L10: */
+ }
+ } else {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 3] = d__ + z__[j4];
+ temp = z__[j4 + 2] / z__[j4 - 3];
+ d__ = d__ * temp - *tau;
+ *dmin__ = min(*dmin__,d__);
+ z__[j4 - 1] = z__[j4] * temp;
+/* Computing MIN */
+ d__1 = z__[j4 - 1];
+ emin = min(d__1,emin);
+/* L20: */
+ }
+ }
+
+/* Unroll last two steps. */
+
+ *dnm2 = d__;
+ *dmin2 = *dmin__;
+ j4 = ((*n0 - 2) << (2)) - *pp;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm2 + z__[j4p2];
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
+ *dmin__ = min(*dmin__,*dnm1);
+
+ *dmin1 = *dmin__;
+ j4 += 4;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm1 + z__[j4p2];
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
+ *dmin__ = min(*dmin__,*dn);
+
+ } else {
+
+/* Code for non IEEE arithmetic. */
+
+ if (*pp == 0) {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 2] = d__ + z__[j4 - 1];
+ if (d__ < 0.) {
+ return 0;
+ } else {
+ z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
+ d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau;
+ }
+ *dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[j4];
+ emin = min(d__1,d__2);
+/* L30: */
+ }
+ } else {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 3] = d__ + z__[j4];
+ if (d__ < 0.) {
+ return 0;
+ } else {
+ z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
+ d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau;
+ }
+ *dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[j4 - 1];
+ emin = min(d__1,d__2);
+/* L40: */
+ }
+ }
+
+/* Unroll last two steps. */
+
+ *dnm2 = d__;
+ *dmin2 = *dmin__;
+ j4 = ((*n0 - 2) << (2)) - *pp;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm2 + z__[j4p2];
+ if (*dnm2 < 0.) {
+ return 0;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
+ }
+ *dmin__ = min(*dmin__,*dnm1);
+
+ *dmin1 = *dmin__;
+ j4 += 4;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm1 + z__[j4p2];
+ if (*dnm1 < 0.) {
+ return 0;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
+ }
+ *dmin__ = min(*dmin__,*dn);
+
+ }
+
+ z__[j4 + 2] = *dn;
+ z__[((*n0) << (2)) - *pp] = emin;
+ return 0;
+
+/* End of DLASQ5 */
+
+} /* dlasq5_ */
+
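+/*
+   The dqds recurrence implemented above, written out for the PP = 0
+   (ping) layout (editorial comment, not in the original Fortran):
+   with shift tau and d(i0) = q(i0) - tau,
+
+       qq(i)  = d(i) + e(i)
+       ee(i)  = e(i) * ( q(i+1) / qq(i) )
+       d(i+1) = d(i) * ( q(i+1) / qq(i) ) - tau
+
+   The IEEE branch computes the common factor temp = q(i+1)/qq(i)
+   once per step; the non-IEEE branch orders the divisions so it can
+   return early as soon as d goes negative.
+*/
+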
+/* Subroutine */ int dlasq6_(integer *i0, integer *n0, doublereal *z__,
+ integer *pp, doublereal *dmin__, doublereal *dmin1, doublereal *dmin2,
+ doublereal *dn, doublereal *dnm1, doublereal *dnm2)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2;
+
+ /* Local variables */
+ static doublereal d__;
+ static integer j4, j4p2;
+ static doublereal emin, temp;
+
+ static doublereal safmin;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ DLASQ6 computes one dqd (shift equal to zero) transform in
+ ping-pong form, with protection against underflow and overflow.
+
+ Arguments
+ =========
+
+ I0 (input) INTEGER
+ First index.
+
+ N0 (input) INTEGER
+ Last index.
+
+ Z (input) DOUBLE PRECISION array, dimension ( 4*N )
+ Z holds the qd array. EMIN is stored in Z(4*N0) to avoid
+ an extra argument.
+
+ PP (input) INTEGER
+ PP=0 for ping, PP=1 for pong.
+
+ DMIN (output) DOUBLE PRECISION
+ Minimum value of d.
+
+ DMIN1 (output) DOUBLE PRECISION
+ Minimum value of d, excluding D( N0 ).
+
+ DMIN2 (output) DOUBLE PRECISION
+ Minimum value of d, excluding D( N0 ) and D( N0-1 ).
+
+ DN (output) DOUBLE PRECISION
+ d(N0), the last value of d.
+
+ DNM1 (output) DOUBLE PRECISION
+ d(N0-1).
+
+ DNM2 (output) DOUBLE PRECISION
+ d(N0-2).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ if (*n0 - *i0 - 1 <= 0) {
+ return 0;
+ }
+
+ safmin = SAFEMINIMUM;
+ j4 = ((*i0) << (2)) + *pp - 3;
+ emin = z__[j4 + 4];
+ d__ = z__[j4];
+ *dmin__ = d__;
+
+ if (*pp == 0) {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 2] = d__ + z__[j4 - 1];
+ if (z__[j4 - 2] == 0.) {
+ z__[j4] = 0.;
+ d__ = z__[j4 + 1];
+ *dmin__ = d__;
+ emin = 0.;
+ } else if (safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4
+ - 2] < z__[j4 + 1]) {
+ temp = z__[j4 + 1] / z__[j4 - 2];
+ z__[j4] = z__[j4 - 1] * temp;
+ d__ *= temp;
+ } else {
+ z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
+ d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]);
+ }
+ *dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[j4];
+ emin = min(d__1,d__2);
+/* L10: */
+ }
+ } else {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 3] = d__ + z__[j4];
+ if (z__[j4 - 3] == 0.) {
+ z__[j4 - 1] = 0.;
+ d__ = z__[j4 + 2];
+ *dmin__ = d__;
+ emin = 0.;
+ } else if (safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4
+ - 3] < z__[j4 + 2]) {
+ temp = z__[j4 + 2] / z__[j4 - 3];
+ z__[j4 - 1] = z__[j4] * temp;
+ d__ *= temp;
+ } else {
+ z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
+ d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]);
+ }
+ *dmin__ = min(*dmin__,d__);
+/* Computing MIN */
+ d__1 = emin, d__2 = z__[j4 - 1];
+ emin = min(d__1,d__2);
+/* L20: */
+ }
+ }
+
+/* Unroll last two steps. */
+
+ *dnm2 = d__;
+ *dmin2 = *dmin__;
+ j4 = ((*n0 - 2) << (2)) - *pp;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm2 + z__[j4p2];
+ if (z__[j4 - 2] == 0.) {
+ z__[j4] = 0.;
+ *dnm1 = z__[j4p2 + 2];
+ *dmin__ = *dnm1;
+ emin = 0.;
+ } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
+ z__[j4p2 + 2]) {
+ temp = z__[j4p2 + 2] / z__[j4 - 2];
+ z__[j4] = z__[j4p2] * temp;
+ *dnm1 = *dnm2 * temp;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]);
+ }
+ *dmin__ = min(*dmin__,*dnm1);
+
+ *dmin1 = *dmin__;
+ j4 += 4;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm1 + z__[j4p2];
+ if (z__[j4 - 2] == 0.) {
+ z__[j4] = 0.;
+ *dn = z__[j4p2 + 2];
+ *dmin__ = *dn;
+ emin = 0.;
+ } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
+ z__[j4p2 + 2]) {
+ temp = z__[j4p2 + 2] / z__[j4 - 2];
+ z__[j4] = z__[j4p2] * temp;
+ *dn = *dnm1 * temp;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]);
+ }
+ *dmin__ = min(*dmin__,*dn);
+
+ z__[j4 + 2] = *dn;
+ z__[((*n0) << (2)) - *pp] = emin;
+ return 0;
+
+/* End of DLASQ6 */
+
+} /* dlasq6_ */
+
+/* Subroutine */ int dlasr_(char *side, char *pivot, char *direct, integer *m,
+ integer *n, doublereal *c__, doublereal *s, doublereal *a, integer *
+ lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, info;
+ static doublereal temp;
+ extern logical lsame_(char *, char *);
+ static doublereal ctemp, stemp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLASR performs the transformation
+
+ A := P*A, when SIDE = 'L' or 'l' ( Left-hand side )
+
+ A := A*P', when SIDE = 'R' or 'r' ( Right-hand side )
+
+ where A is an m by n real matrix and P is an orthogonal matrix,
+ consisting of a sequence of plane rotations determined by the
+ parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l'
+ and z = n when SIDE = 'R' or 'r' ):
+
+ When DIRECT = 'F' or 'f' ( Forward sequence ) then
+
+ P = P( z - 1 )*...*P( 2 )*P( 1 ),
+
+ and when DIRECT = 'B' or 'b' ( Backward sequence ) then
+
+ P = P( 1 )*P( 2 )*...*P( z - 1 ),
+
+ where P( k ) is a plane rotation matrix for the following planes:
+
+ when PIVOT = 'V' or 'v' ( Variable pivot ),
+ the plane ( k, k + 1 )
+
+ when PIVOT = 'T' or 't' ( Top pivot ),
+ the plane ( 1, k + 1 )
+
+ when PIVOT = 'B' or 'b' ( Bottom pivot ),
+ the plane ( k, z )
+
+ c( k ) and s( k ) must contain the cosine and sine that define the
+ matrix P( k ). The two by two plane rotation part of the matrix
+ P( k ), R( k ), is assumed to be of the form
+
+ R( k ) = ( c( k ) s( k ) ).
+ ( -s( k ) c( k ) )
+
+ This version vectorises across rows of the array A when SIDE = 'L'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ Specifies whether the plane rotation matrix P is applied to
+ A on the left or the right.
+ = 'L': Left, compute A := P*A
+ = 'R': Right, compute A := A*P'
+
+ DIRECT (input) CHARACTER*1
+ Specifies whether P is a forward or backward sequence of
+ plane rotations.
+ = 'F': Forward, P = P( z - 1 )*...*P( 2 )*P( 1 )
+ = 'B': Backward, P = P( 1 )*P( 2 )*...*P( z - 1 )
+
+ PIVOT (input) CHARACTER*1
+ Specifies the plane for which P(k) is a plane rotation
+ matrix.
+ = 'V': Variable pivot, the plane (k,k+1)
+ = 'T': Top pivot, the plane (1,k+1)
+ = 'B': Bottom pivot, the plane (k,z)
+
+ M (input) INTEGER
+ The number of rows of the matrix A. If m <= 1, an immediate
+ return is effected.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. If n <= 1, an
+ immediate return is effected.
+
+ C, S (input) DOUBLE PRECISION arrays, dimension
+ (M-1) if SIDE = 'L'
+ (N-1) if SIDE = 'R'
+ c(k) and s(k) contain the cosine and sine that define the
+ matrix P(k). The two by two plane rotation part of the
+ matrix P(k), R(k), is assumed to be of the form
+ R( k ) = ( c( k ) s( k ) ).
+ ( -s( k ) c( k ) )
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ The m by n matrix A. On exit, A is overwritten by P*A if
+ SIDE = 'L' or by A*P' if SIDE = 'R'.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --c__;
+ --s;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! ((lsame_(side, "L")) || (lsame_(side, "R")))) {
+ info = 1;
+ } else if (! (((lsame_(pivot, "V")) || (lsame_(
+ pivot, "T"))) || (lsame_(pivot, "B")))) {
+ info = 2;
+ } else if (! ((lsame_(direct, "F")) || (lsame_(
+ direct, "B")))) {
+ info = 3;
+ } else if (*m < 0) {
+ info = 4;
+ } else if (*n < 0) {
+ info = 5;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("DLASR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form P * A */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + 1 + i__ * a_dim1];
+ a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
+ a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
+ + i__ * a_dim1];
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + 1 + i__ * a_dim1];
+ a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
+ a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
+ + i__ * a_dim1];
+/* L30: */
+ }
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
+ i__ * a_dim1 + 1];
+ a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
+ i__ * a_dim1 + 1];
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
+ i__ * a_dim1 + 1];
+ a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
+ i__ * a_dim1 + 1];
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
+ + ctemp * temp;
+ a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
+ a_dim1] - stemp * temp;
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
+ + ctemp * temp;
+ a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
+ a_dim1] - stemp * temp;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ }
+ }
+ } else if (lsame_(side, "R")) {
+
+/* Form A * P' */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + (j + 1) * a_dim1];
+ a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
+ a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
+ i__ + j * a_dim1];
+/* L130: */
+ }
+ }
+/* L140: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + (j + 1) * a_dim1];
+ a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
+ a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
+ i__ + j * a_dim1];
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
+ i__ + a_dim1];
+ a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
+ a_dim1];
+/* L170: */
+ }
+ }
+/* L180: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
+ i__ + a_dim1];
+ a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
+ a_dim1];
+/* L190: */
+ }
+ }
+/* L200: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
+ + ctemp * temp;
+ a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
+ a_dim1] - stemp * temp;
+/* L210: */
+ }
+ }
+/* L220: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.) || (stemp != 0.)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
+ + ctemp * temp;
+ a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
+ a_dim1] - stemp * temp;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DLASR */
+
+} /* dlasr_ */
+
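+/*
+   Minimal usage sketch (editorial addition, not in the original
+   Fortran): apply one plane rotation from the left to a 2-by-2
+   column-major matrix, A := P(1)*A, using the variable-pivot
+   forward sequence with (c, s) = (0.8, 0.6). The f2c typedefs are
+   assumed to come from f2c.h.
+
+       integer m = 2, n = 2, lda = 2;
+       doublereal c[1] = {.8}, s[1] = {.6};
+       doublereal a[4] = {1., 0., 0., 1.};
+       dlasr_("L", "V", "F", &m, &n, c, s, a, &lda);
+*/
+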
+/* Subroutine */ int dlasrt_(char *id, integer *n, doublereal *d__, integer *
+ info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j;
+ static doublereal d1, d2, d3;
+ static integer dir;
+ static doublereal tmp;
+ static integer endd;
+ extern logical lsame_(char *, char *);
+ static integer stack[64] /* was [2][32] */;
+ static doublereal dmnmx;
+ static integer start;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static integer stkpnt;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ Sort the numbers in D in increasing order (if ID = 'I') or
+ in decreasing order (if ID = 'D' ).
+
+ Use Quick Sort, reverting to Insertion sort on arrays of
+ size <= 20. Dimension of STACK limits N to about 2**32.
+
+ Arguments
+ =========
+
+ ID (input) CHARACTER*1
+ = 'I': sort D in increasing order;
+ = 'D': sort D in decreasing order.
+
+ N (input) INTEGER
+ The length of the array D.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the array to be sorted.
+ On exit, D has been sorted into increasing order
+ (D(1) <= ... <= D(N) ) or into decreasing order
+ (D(1) >= ... >= D(N) ), depending on ID.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ dir = -1;
+ if (lsame_(id, "D")) {
+ dir = 0;
+ } else if (lsame_(id, "I")) {
+ dir = 1;
+ }
+ if (dir == -1) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLASRT", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ return 0;
+ }
+
+ stkpnt = 1;
+ stack[0] = 1;
+ stack[1] = *n;
+L10:
+ start = stack[((stkpnt) << (1)) - 2];
+ endd = stack[((stkpnt) << (1)) - 1];
+ --stkpnt;
+ if (endd - start <= 20 && endd - start > 0) {
+
+/* Do Insertion sort on D( START:ENDD ) */
+
+ if (dir == 0) {
+
+/* Sort into decreasing order */
+
+ i__1 = endd;
+ for (i__ = start + 1; i__ <= i__1; ++i__) {
+ i__2 = start + 1;
+ for (j = i__; j >= i__2; --j) {
+ if (d__[j] > d__[j - 1]) {
+ dmnmx = d__[j];
+ d__[j] = d__[j - 1];
+ d__[j - 1] = dmnmx;
+ } else {
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ ;
+ }
+
+ } else {
+
+/* Sort into increasing order */
+
+ i__1 = endd;
+ for (i__ = start + 1; i__ <= i__1; ++i__) {
+ i__2 = start + 1;
+ for (j = i__; j >= i__2; --j) {
+ if (d__[j] < d__[j - 1]) {
+ dmnmx = d__[j];
+ d__[j] = d__[j - 1];
+ d__[j - 1] = dmnmx;
+ } else {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ ;
+ }
+
+ }
+
+ } else if (endd - start > 20) {
+
+/*
+ Partition D( START:ENDD ) and stack parts, largest one first
+
+ Choose partition entry as median of 3
+*/
+
+ d1 = d__[start];
+ d2 = d__[endd];
+ i__ = (start + endd) / 2;
+ d3 = d__[i__];
+ if (d1 < d2) {
+ if (d3 < d1) {
+ dmnmx = d1;
+ } else if (d3 < d2) {
+ dmnmx = d3;
+ } else {
+ dmnmx = d2;
+ }
+ } else {
+ if (d3 < d2) {
+ dmnmx = d2;
+ } else if (d3 < d1) {
+ dmnmx = d3;
+ } else {
+ dmnmx = d1;
+ }
+ }
+
+ if (dir == 0) {
+
+/* Sort into decreasing order */
+
+ i__ = start - 1;
+ j = endd + 1;
+L60:
+L70:
+ --j;
+ if (d__[j] < dmnmx) {
+ goto L70;
+ }
+L80:
+ ++i__;
+ if (d__[i__] > dmnmx) {
+ goto L80;
+ }
+ if (i__ < j) {
+ tmp = d__[i__];
+ d__[i__] = d__[j];
+ d__[j] = tmp;
+ goto L60;
+ }
+ if (j - start > endd - j - 1) {
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = start;
+ stack[((stkpnt) << (1)) - 1] = j;
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = j + 1;
+ stack[((stkpnt) << (1)) - 1] = endd;
+ } else {
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = j + 1;
+ stack[((stkpnt) << (1)) - 1] = endd;
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = start;
+ stack[((stkpnt) << (1)) - 1] = j;
+ }
+ } else {
+
+/* Sort into increasing order */
+
+ i__ = start - 1;
+ j = endd + 1;
+L90:
+L100:
+ --j;
+ if (d__[j] > dmnmx) {
+ goto L100;
+ }
+L110:
+ ++i__;
+ if (d__[i__] < dmnmx) {
+ goto L110;
+ }
+ if (i__ < j) {
+ tmp = d__[i__];
+ d__[i__] = d__[j];
+ d__[j] = tmp;
+ goto L90;
+ }
+ if (j - start > endd - j - 1) {
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = start;
+ stack[((stkpnt) << (1)) - 1] = j;
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = j + 1;
+ stack[((stkpnt) << (1)) - 1] = endd;
+ } else {
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = j + 1;
+ stack[((stkpnt) << (1)) - 1] = endd;
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = start;
+ stack[((stkpnt) << (1)) - 1] = j;
+ }
+ }
+ }
+ if (stkpnt > 0) {
+ goto L10;
+ }
+ return 0;
+
+/* End of DLASRT */
+
+} /* dlasrt_ */
+
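+/*
+   Minimal usage sketch (editorial addition, not in the original
+   Fortran): sort five values into decreasing order in place; INFO
+   reports an invalid ID or N. The f2c typedefs are assumed to come
+   from f2c.h.
+
+       integer n = 5, info;
+       doublereal d[5] = {3., 1., 4., 1., 5.};
+       dlasrt_("D", &n, d, &info);
+*/
+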
+/* Subroutine */ int dlassq_(integer *n, doublereal *x, integer *incx,
+ doublereal *scale, doublereal *sumsq)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ doublereal d__1;
+
+ /* Local variables */
+ static integer ix;
+ static doublereal absxi;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLASSQ returns the values scl and smsq such that
+
+ ( scl**2 )*smsq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq,
+
+ where x( i ) = X( 1 + ( i - 1 )*INCX ). The value of sumsq is
+ assumed to be non-negative and scl returns the value
+
+ scl = max( scale, abs( x( i ) ) ).
+
+ scale and sumsq must be supplied in SCALE and SUMSQ and
+ scl and smsq are overwritten on SCALE and SUMSQ respectively.
+
+ The routine makes only one pass through the vector x.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of elements to be used from the vector X.
+
+ X (input) DOUBLE PRECISION array, dimension (N)
+ The vector for which a scaled sum of squares is computed.
+ x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n.
+
+ INCX (input) INTEGER
+ The increment between successive values of the vector X.
+ INCX > 0.
+
+ SCALE (input/output) DOUBLE PRECISION
+ On entry, the value scale in the equation above.
+ On exit, SCALE is overwritten with scl , the scaling factor
+ for the sum of squares.
+
+ SUMSQ (input/output) DOUBLE PRECISION
+ On entry, the value sumsq in the equation above.
+ On exit, SUMSQ is overwritten with smsq , the basic sum of
+ squares from which scl has been factored out.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n > 0) {
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ if (x[ix] != 0.) {
+ absxi = (d__1 = x[ix], abs(d__1));
+ if (*scale < absxi) {
+/* Computing 2nd power */
+ d__1 = *scale / absxi;
+ *sumsq = *sumsq * (d__1 * d__1) + 1;
+ *scale = absxi;
+ } else {
+/* Computing 2nd power */
+ d__1 = absxi / *scale;
+ *sumsq += d__1 * d__1;
+ }
+ }
+/* L10: */
+ }
+ }
+ return 0;
+
+/* End of DLASSQ */
+
+} /* dlassq_ */
+
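+/*
+   Minimal usage sketch (editorial addition, not in the original
+   Fortran): the usual overflow-safe 2-norm idiom built on dlassq_:
+   start from SCALE = 0, SUMSQ = 1 and recover the norm as
+   scale*sqrt(sumsq). For the vector (3, 4, 12) this yields 13.
+
+       integer n = 3, incx = 1;
+       doublereal x[3] = {3., 4., 12.};
+       doublereal scale = 0., sumsq = 1.;
+       doublereal nrm2;
+       dlassq_(&n, x, &incx, &scale, &sumsq);
+       nrm2 = scale * sqrt(sumsq);
+*/
+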
+/* Subroutine */ int dlasv2_(doublereal *f, doublereal *g, doublereal *h__,
+ doublereal *ssmin, doublereal *ssmax, doublereal *snr, doublereal *
+ csr, doublereal *snl, doublereal *csl)
+{
+ /* System generated locals */
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static doublereal a, d__, l, m, r__, s, t, fa, ga, ha, ft, gt, ht, mm, tt,
+ clt, crt, slt, srt;
+ static integer pmax;
+ static doublereal temp;
+ static logical swap;
+ static doublereal tsign;
+
+ static logical gasmal;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLASV2 computes the singular value decomposition of a 2-by-2
+ triangular matrix
+ [ F G ]
+ [ 0 H ].
+ On return, abs(SSMAX) is the larger singular value, abs(SSMIN) is the
+ smaller singular value, and (CSL,SNL) and (CSR,SNR) are the left and
+ right singular vectors for abs(SSMAX), giving the decomposition
+
+ [ CSL SNL ] [ F G ] [ CSR -SNR ] = [ SSMAX 0 ]
+ [-SNL CSL ] [ 0 H ] [ SNR CSR ] [ 0 SSMIN ].
+
+ Arguments
+ =========
+
+ F (input) DOUBLE PRECISION
+ The (1,1) element of the 2-by-2 matrix.
+
+ G (input) DOUBLE PRECISION
+ The (1,2) element of the 2-by-2 matrix.
+
+ H (input) DOUBLE PRECISION
+ The (2,2) element of the 2-by-2 matrix.
+
+ SSMIN (output) DOUBLE PRECISION
+ abs(SSMIN) is the smaller singular value.
+
+ SSMAX (output) DOUBLE PRECISION
+ abs(SSMAX) is the larger singular value.
+
+ SNL (output) DOUBLE PRECISION
+ CSL (output) DOUBLE PRECISION
+ The vector (CSL, SNL) is a unit left singular vector for the
+ singular value abs(SSMAX).
+
+ SNR (output) DOUBLE PRECISION
+ CSR (output) DOUBLE PRECISION
+ The vector (CSR, SNR) is a unit right singular vector for the
+ singular value abs(SSMAX).
+
+ Further Details
+ ===============
+
+ Any input parameter may be aliased with any output parameter.
+
+ Barring over/underflow and assuming a guard digit in subtraction, all
+ output quantities are correct to within a few units in the last
+ place (ulps).
+
+ In IEEE arithmetic, the code works correctly if one matrix element is
+ infinite.
+
+ Overflow will not occur unless the largest singular value itself
+ overflows or is within a few ulps of overflow. (On machines with
+ partial overflow, like the Cray, overflow may occur if the largest
+ singular value is within a factor of 2 of overflow.)
+
+ Underflow is harmless if underflow is gradual. Otherwise, results
+ may correspond to a matrix modified by perturbations of size near
+ the underflow threshold.
+
+ =====================================================================
+*/
+
+
+ ft = *f;
+ fa = abs(ft);
+ ht = *h__;
+ ha = abs(*h__);
+
+/*
+ PMAX points to the maximum absolute element of matrix
+ PMAX = 1 if F largest in absolute values
+ PMAX = 2 if G largest in absolute values
+ PMAX = 3 if H largest in absolute values
+*/
+
+ pmax = 1;
+ swap = ha > fa;
+ if (swap) {
+ pmax = 3;
+ temp = ft;
+ ft = ht;
+ ht = temp;
+ temp = fa;
+ fa = ha;
+ ha = temp;
+
+/* Now FA .ge. HA */
+
+ }
+ gt = *g;
+ ga = abs(gt);
+ if (ga == 0.) {
+
+/* Diagonal matrix */
+
+ *ssmin = ha;
+ *ssmax = fa;
+ clt = 1.;
+ crt = 1.;
+ slt = 0.;
+ srt = 0.;
+ } else {
+ gasmal = TRUE_;
+ if (ga > fa) {
+ pmax = 2;
+ if (fa / ga < EPSILON) {
+
+/* Case of very large GA */
+
+ gasmal = FALSE_;
+ *ssmax = ga;
+ if (ha > 1.) {
+ *ssmin = fa / (ga / ha);
+ } else {
+ *ssmin = fa / ga * ha;
+ }
+ clt = 1.;
+ slt = ht / gt;
+ srt = 1.;
+ crt = ft / gt;
+ }
+ }
+ if (gasmal) {
+
+/* Normal case */
+
+ d__ = fa - ha;
+ if (d__ == fa) {
+
+/* Copes with infinite F or H */
+
+ l = 1.;
+ } else {
+ l = d__ / fa;
+ }
+
+/* Note that 0 .le. L .le. 1 */
+
+ m = gt / ft;
+
+/* Note that abs(M) .le. 1/macheps */
+
+ t = 2. - l;
+
+/* Note that T .ge. 1 */
+
+ mm = m * m;
+ tt = t * t;
+ s = sqrt(tt + mm);
+
+/* Note that 1 .le. S .le. 1 + 1/macheps */
+
+ if (l == 0.) {
+ r__ = abs(m);
+ } else {
+ r__ = sqrt(l * l + mm);
+ }
+
+/* Note that 0 .le. R .le. 1 + 1/macheps */
+
+ a = (s + r__) * .5;
+
+/* Note that 1 .le. A .le. 1 + abs(M) */
+
+ *ssmin = ha / a;
+ *ssmax = fa * a;
+ if (mm == 0.) {
+
+/* Note that M is very tiny */
+
+ if (l == 0.) {
+ t = d_sign(&c_b5654, &ft) * d_sign(&c_b2865, &gt);
+ } else {
+ t = gt / d_sign(&d__, &ft) + m / t;
+ }
+ } else {
+ t = (m / (s + t) + m / (r__ + l)) * (a + 1.);
+ }
+ l = sqrt(t * t + 4.);
+ crt = 2. / l;
+ srt = t / l;
+ clt = (crt + srt * m) / a;
+ slt = ht / ft * srt / a;
+ }
+ }
+ if (swap) {
+ *csl = srt;
+ *snl = crt;
+ *csr = slt;
+ *snr = clt;
+ } else {
+ *csl = clt;
+ *snl = slt;
+ *csr = crt;
+ *snr = srt;
+ }
+
+/* Correct signs of SSMAX and SSMIN */
+
+ if (pmax == 1) {
+ tsign = d_sign(&c_b2865, csr) * d_sign(&c_b2865, csl) * d_sign(&
+ c_b2865, f);
+ }
+ if (pmax == 2) {
+ tsign = d_sign(&c_b2865, snr) * d_sign(&c_b2865, csl) * d_sign(&
+ c_b2865, g);
+ }
+ if (pmax == 3) {
+ tsign = d_sign(&c_b2865, snr) * d_sign(&c_b2865, snl) * d_sign(&
+ c_b2865, h__);
+ }
+ *ssmax = d_sign(ssmax, &tsign);
+ d__1 = tsign * d_sign(&c_b2865, f) * d_sign(&c_b2865, h__);
+ *ssmin = d_sign(ssmin, &d__1);
+ return 0;
+
+/* End of DLASV2 */
+
+} /* dlasv2_ */
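+
+/*
+   Illustrative sketch (hypothetical names, not from the source): the
+   2-by-2 triangular SVD as used inside bidiagonal QR sweeps.
+
+       doublereal f = 3., g = 4., h = 5.;
+       doublereal ssmin, ssmax, snr, csr, snl, csl;
+       dlasv2_(&f, &g, &h, &ssmin, &ssmax, &snr, &csr, &snl, &csl);
+
+   On return the rotations satisfy
+       [ csl snl ] [ f g ] [ csr -snr ] = [ ssmax   0   ]
+       [-snl csl ] [ 0 h ] [ snr  csr ]   [   0   ssmin ]
+   with abs(ssmax) >= abs(ssmin).
+*/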
+
+/* Subroutine */ int dlaswp_(integer *n, doublereal *a, integer *lda, integer
+ *k1, integer *k2, integer *ipiv, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc;
+ static doublereal temp;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DLASWP performs a series of row interchanges on the matrix A.
+ One row interchange is initiated for each of rows K1 through K2 of A.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of columns of the matrix A.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the matrix of column dimension N to which the row
+ interchanges will be applied.
+ On exit, the permuted matrix.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+
+ K1 (input) INTEGER
+ The first element of IPIV for which a row interchange will
+ be done.
+
+ K2 (input) INTEGER
+ The last element of IPIV for which a row interchange will
+ be done.
+
+ IPIV (input) INTEGER array, dimension (K2*abs(INCX))
+ The vector of pivot indices. Only the elements in positions
+ K1 through K2 of IPIV are accessed.
+ IPIV(K) = L implies rows K and L are to be interchanged.
+
+ INCX (input) INTEGER
+ The increment between successive values of IPIV. If INCX
+ is negative, the pivots are applied in reverse order.
+
+ Further Details
+ ===============
+
+ Modified by
+ R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA
+
+ =====================================================================
+
+
+ Interchange row I with row IPIV(I) for each of rows K1 through K2.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ if (*incx > 0) {
+ ix0 = *k1;
+ i1 = *k1;
+ i2 = *k2;
+ inc = 1;
+ } else if (*incx < 0) {
+ ix0 = (1 - *k2) * *incx + 1;
+ i1 = *k2;
+ i2 = *k1;
+ inc = -1;
+ } else {
+ return 0;
+ }
+
+ n32 = (*n / 32) << (5);
+ if (n32 != 0) {
+ i__1 = n32;
+ for (j = 1; j <= i__1; j += 32) {
+ ix = ix0;
+ i__2 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
+ {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__4 = j + 31;
+ for (k = j; k <= i__4; ++k) {
+ temp = a[i__ + k * a_dim1];
+ a[i__ + k * a_dim1] = a[ip + k * a_dim1];
+ a[ip + k * a_dim1] = temp;
+/* L10: */
+ }
+ }
+ ix += *incx;
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ if (n32 != *n) {
+ ++n32;
+ ix = ix0;
+ i__1 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__2 = *n;
+ for (k = n32; k <= i__2; ++k) {
+ temp = a[i__ + k * a_dim1];
+ a[i__ + k * a_dim1] = a[ip + k * a_dim1];
+ a[ip + k * a_dim1] = temp;
+/* L40: */
+ }
+ }
+ ix += *incx;
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of DLASWP */
+
+} /* dlaswp_ */
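+
+/*
+   Illustrative sketch (hypothetical names): after an LU factorization
+   with pivot vector ipiv (as produced by DGETRF), the same row
+   interchanges can be applied to an n-by-nrhs matrix B:
+
+       integer ione = 1;
+       dlaswp_(&nrhs, b, &ldb, &ione, &n, ipiv, &ione);
+
+   Calling it again with incx = -1 applies the interchanges in reverse
+   order, undoing the permutation.
+*/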
+
+/* Subroutine */ int dlatrd_(char *uplo, integer *n, integer *nb, doublereal *
+ a, integer *lda, doublereal *e, doublereal *tau, doublereal *w,
+ integer *ldw)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, iw;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ static doublereal alpha;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *), daxpy_(integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *),
+ dsymv_(char *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *), dlarfg_(integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DLATRD reduces NB rows and columns of a real symmetric matrix A to
+ symmetric tridiagonal form by an orthogonal similarity
+ transformation Q' * A * Q, and returns the matrices V and W which are
+ needed to apply the transformation to the unreduced part of A.
+
+ If UPLO = 'U', DLATRD reduces the last NB rows and columns of a
+ matrix, of which the upper triangle is supplied;
+ if UPLO = 'L', DLATRD reduces the first NB rows and columns of a
+ matrix, of which the lower triangle is supplied.
+
+ This is an auxiliary routine called by DSYTRD.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER
+ Specifies whether the upper or lower triangular part of the
+ symmetric matrix A is stored:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the matrix A.
+
+ NB (input) INTEGER
+ The number of rows and columns to be reduced.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ n-by-n upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n-by-n lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+ On exit:
+ if UPLO = 'U', the last NB columns have been reduced to
+ tridiagonal form, with the diagonal elements overwriting
+ the diagonal elements of A; the elements above the diagonal,
+ with the array TAU, represent the orthogonal matrix Q as a
+ product of elementary reflectors;
+ if UPLO = 'L', the first NB columns have been reduced to
+ tridiagonal form, with the diagonal elements overwriting
+ the diagonal elements of A; the elements below the diagonal,
+ with the array TAU, represent the orthogonal matrix Q as a
+ product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ E (output) DOUBLE PRECISION array, dimension (N-1)
+ If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal
+ elements of the last NB columns of the reduced matrix;
+ if UPLO = 'L', E(1:nb) contains the subdiagonal elements of
+ the first NB columns of the reduced matrix.
+
+ TAU (output) DOUBLE PRECISION array, dimension (N-1)
+ The scalar factors of the elementary reflectors, stored in
+ TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'.
+ See Further Details.
+
+ W (output) DOUBLE PRECISION array, dimension (LDW,NB)
+ The n-by-nb matrix W required to update the unreduced part
+ of A.
+
+ LDW (input) INTEGER
+ The leading dimension of the array W. LDW >= max(1,N).
+
+ Further Details
+ ===============
+
+ If UPLO = 'U', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(n) H(n-1) . . . H(n-nb+1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i),
+ and tau in TAU(i-1).
+
+ If UPLO = 'L', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(1) H(2) . . . H(nb).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i),
+ and tau in TAU(i).
+
+ The elements of the vectors v together form the n-by-nb matrix V
+ which is needed, with W, to apply the transformation to the unreduced
+ part of the matrix, using a symmetric rank-2k update of the form:
+ A := A - V*W' - W*V'.
+
+ The contents of A on exit are illustrated by the following examples
+ with n = 5 and nb = 2:
+
+ if UPLO = 'U': if UPLO = 'L':
+
+ ( a a a v4 v5 ) ( d )
+ ( a a v4 v5 ) ( 1 d )
+ ( a 1 v5 ) ( v1 1 a )
+ ( d 1 ) ( v1 v2 a a )
+ ( d ) ( v1 v2 a a a )
+
+ where d denotes a diagonal element of the reduced matrix, a denotes
+ an element of the original matrix that is unchanged, and vi denotes
+ an element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --e;
+ --tau;
+ w_dim1 = *ldw;
+ w_offset = 1 + w_dim1;
+ w -= w_offset;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (lsame_(uplo, "U")) {
+
+/* Reduce last NB columns of upper triangle */
+
+ i__1 = *n - *nb + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ iw = i__ - *n + *nb;
+ if (i__ < *n) {
+
+/* Update A(1:i,i) */
+
+ i__2 = *n - i__;
+ dgemv_("No transpose", &i__, &i__2, &c_b3001, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
+ c_b2865, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ dgemv_("No transpose", &i__, &i__2, &c_b3001, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b2865, &a[i__ * a_dim1 + 1], &c__1);
+ }
+ if (i__ > 1) {
+
+/*
+ Generate elementary reflector H(i) to annihilate
+ A(1:i-2,i)
+*/
+
+ i__2 = i__ - 1;
+ dlarfg_(&i__2, &a[i__ - 1 + i__ * a_dim1], &a[i__ * a_dim1 +
+ 1], &c__1, &tau[i__ - 1]);
+ e[i__ - 1] = a[i__ - 1 + i__ * a_dim1];
+ a[i__ - 1 + i__ * a_dim1] = 1.;
+
+/* Compute W(1:i-1,i) */
+
+ i__2 = i__ - 1;
+ dsymv_("Upper", &i__2, &c_b2865, &a[a_offset], lda, &a[i__ *
+ a_dim1 + 1], &c__1, &c_b2879, &w[iw * w_dim1 + 1], &
+ c__1);
+ if (i__ < *n) {
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &c__1, &
+ c_b2879, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &a[(i__ +
+ 1) * a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1],
+ &c__1, &c_b2865, &w[iw * w_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, &
+ c_b2879, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &w[(iw + 1)
+ * w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
+ c__1, &c_b2865, &w[iw * w_dim1 + 1], &c__1);
+ }
+ i__2 = i__ - 1;
+ dscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ alpha = tau[i__ - 1] * -.5 * ddot_(&i__2, &w[iw * w_dim1 + 1],
+ &c__1, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ daxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw *
+ w_dim1 + 1], &c__1);
+ }
+
+/* L10: */
+ }
+ } else {
+
+/* Reduce first NB columns of lower triangle */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:n,i) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &a[i__ + a_dim1],
+ lda, &w[i__ + w_dim1], ldw, &c_b2865, &a[i__ + i__ *
+ a_dim1], &c__1);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &w[i__ + w_dim1],
+ ldw, &a[i__ + a_dim1], lda, &c_b2865, &a[i__ + i__ *
+ a_dim1], &c__1);
+ if (i__ < *n) {
+
+/*
+ Generate elementary reflector H(i) to annihilate
+ A(i+2:n,i)
+*/
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) +
+ i__ * a_dim1], &c__1, &tau[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Compute W(i+1:n,i) */
+
+ i__2 = *n - i__;
+ dsymv_("Lower", &i__2, &c_b2865, &a[i__ + 1 + (i__ + 1) *
+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b2879, &w[i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &w[i__ + 1 +
+ w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b2879, &w[i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &a[i__ + 1 +
+ a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b2865, &
+ w[i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &a[i__ + 1 +
+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b2879, &w[i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &w[i__ + 1 +
+ w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b2865, &
+ w[i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ dscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ alpha = tau[i__] * -.5 * ddot_(&i__2, &w[i__ + 1 + i__ *
+ w_dim1], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+ i__2 = *n - i__;
+ daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ }
+
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of DLATRD */
+
+} /* dlatrd_ */
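+
+/*
+   Illustrative sketch (hypothetical names, mirroring the DSYTRD
+   pattern): reduce an nb-column panel of the lower triangle, then fold
+   the rank-2*nb correction A := A - V*W' - W*V' into the trailing
+   block with a single Level 3 call:
+
+       dlatrd_("L", &n, &nb, a, &lda, e, tau, w, &ldw);
+       dsyr2k_("L", "N", &nrest, &nb, &minus_one, v, &lda,
+               wrest, &ldw, &one, arest, &lda);
+
+   where nrest = n - nb and v, wrest, arest are placeholders for the
+   sub-blocks below the reduced panel.
+*/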
+
+/* Subroutine */ int dlauu2_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__;
+ static doublereal aii;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLAUU2 computes the product U * U' or L' * L, where the triangular
+ factor U or L is stored in the upper or lower triangular part of
+ the array A.
+
+ If UPLO = 'U' or 'u' then the upper triangle of the result is stored,
+ overwriting the factor U in A.
+ If UPLO = 'L' or 'l' then the lower triangle of the result is stored,
+ overwriting the factor L in A.
+
+ This is the unblocked form of the algorithm, calling Level 2 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the triangular factor stored in the array A
+ is upper or lower triangular:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the triangular factor U or L. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the triangular factor U or L.
+ On exit, if UPLO = 'U', the upper triangle of A is
+ overwritten with the upper triangle of the product U * U';
+ if UPLO = 'L', the lower triangle of A is overwritten with
+ the lower triangle of the product L' * L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAUU2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the product U * U'. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aii = a[i__ + i__ * a_dim1];
+ if (i__ < *n) {
+ i__2 = *n - i__ + 1;
+ a[i__ + i__ * a_dim1] = ddot_(&i__2, &a[i__ + i__ * a_dim1],
+ lda, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ dgemv_("No transpose", &i__2, &i__3, &c_b2865, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ aii, &a[i__ * a_dim1 + 1], &c__1);
+ } else {
+ dscal_(&i__, &aii, &a[i__ * a_dim1 + 1], &c__1);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the product L' * L. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aii = a[i__ + i__ * a_dim1];
+ if (i__ < *n) {
+ i__2 = *n - i__ + 1;
+ a[i__ + i__ * a_dim1] = ddot_(&i__2, &a[i__ + i__ * a_dim1], &
+ c__1, &a[i__ + i__ * a_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ dgemv_("Transpose", &i__2, &i__3, &c_b2865, &a[i__ + 1 +
+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &aii,
+ &a[i__ + a_dim1], lda);
+ } else {
+ dscal_(&i__, &aii, &a[i__ + a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of DLAUU2 */
+
+} /* dlauu2_ */
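+
+/*
+   Illustrative sketch (hypothetical names): overwriting the upper
+   triangle of A, holding a triangular factor U, with U * U' in place:
+
+       integer info;
+       dlauu2_("U", &n, a, &lda, &info);
+
+   DLAUUM below wraps this unblocked kernel in Level 3 BLAS blocking.
+*/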
+
+/* Subroutine */ int dlauum_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, ib, nb;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *), dlauu2_(char *, integer *,
+ doublereal *, integer *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DLAUUM computes the product U * U' or L' * L, where the triangular
+ factor U or L is stored in the upper or lower triangular part of
+ the array A.
+
+ If UPLO = 'U' or 'u' then the upper triangle of the result is stored,
+ overwriting the factor U in A.
+ If UPLO = 'L' or 'l' then the lower triangle of the result is stored,
+ overwriting the factor L in A.
+
+ This is the blocked form of the algorithm, calling Level 3 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the triangular factor stored in the array A
+ is upper or lower triangular:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the triangular factor U or L. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the triangular factor U or L.
+ On exit, if UPLO = 'U', the upper triangle of A is
+ overwritten with the upper triangle of the product U * U';
+ if UPLO = 'L', the lower triangle of A is overwritten with
+ the lower triangle of the product L' * L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DLAUUM", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "DLAUUM", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+
+ if ((nb <= 1) || (nb >= *n)) {
+
+/* Use unblocked code */
+
+ dlauu2_(uplo, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute the product U * U'. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - i__ + 1;
+ ib = min(i__3,i__4);
+ i__3 = i__ - 1;
+ dtrmm_("Right", "Upper", "Transpose", "Non-unit", &i__3, &ib,
+ &c_b2865, &a[i__ + i__ * a_dim1], lda, &a[i__ *
+ a_dim1 + 1], lda);
+ dlauu2_("Upper", &ib, &a[i__ + i__ * a_dim1], lda, info);
+ if (i__ + ib <= *n) {
+ i__3 = i__ - 1;
+ i__4 = *n - i__ - ib + 1;
+ dgemm_("No transpose", "Transpose", &i__3, &ib, &i__4, &
+ c_b2865, &a[(i__ + ib) * a_dim1 + 1], lda, &a[i__
+ + (i__ + ib) * a_dim1], lda, &c_b2865, &a[i__ *
+ a_dim1 + 1], lda);
+ i__3 = *n - i__ - ib + 1;
+ dsyrk_("Upper", "No transpose", &ib, &i__3, &c_b2865, &a[
+ i__ + (i__ + ib) * a_dim1], lda, &c_b2865, &a[i__
+ + i__ * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the product L' * L. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - i__ + 1;
+ ib = min(i__3,i__4);
+ i__3 = i__ - 1;
+ dtrmm_("Left", "Lower", "Transpose", "Non-unit", &ib, &i__3, &
+ c_b2865, &a[i__ + i__ * a_dim1], lda, &a[i__ + a_dim1]
+ , lda);
+ dlauu2_("Lower", &ib, &a[i__ + i__ * a_dim1], lda, info);
+ if (i__ + ib <= *n) {
+ i__3 = i__ - 1;
+ i__4 = *n - i__ - ib + 1;
+ dgemm_("Transpose", "No transpose", &ib, &i__3, &i__4, &
+ c_b2865, &a[i__ + ib + i__ * a_dim1], lda, &a[i__
+ + ib + a_dim1], lda, &c_b2865, &a[i__ + a_dim1],
+ lda);
+ i__3 = *n - i__ - ib + 1;
+ dsyrk_("Lower", "Transpose", &ib, &i__3, &c_b2865, &a[i__
+ + ib + i__ * a_dim1], lda, &c_b2865, &a[i__ + i__
+ * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DLAUUM */
+
+} /* dlauum_ */
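+
+/*
+   Illustrative sketch (hypothetical names, following the DPOTRI
+   pattern): given the Cholesky factor U of a positive definite matrix,
+   inv(A) = inv(U) * inv(U)' is formed by a triangular inversion
+   followed by DLAUUM (dtrtri_ assumed available from this translation):
+
+       dtrtri_("U", "N", &n, a, &lda, &info);
+       dlauum_("U", &n, a, &lda, &info);
+*/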
+
+/* Subroutine */ int dorg2r_(integer *m, integer *n, integer *k, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dlarf_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DORG2R generates an m by n real matrix Q with orthonormal columns,
+ which is defined as the first n columns of a product of k elementary
+ reflectors of order m
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by DGEQRF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. M >= N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. N >= K >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the i-th column must contain the vector which
+ defines the elementary reflector H(i), for i = 1,2,...,k, as
+ returned by DGEQRF in the first k columns of its array
+ argument A.
+ On exit, the m-by-n matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEQRF.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if ((*n < 0) || (*n > *m)) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORG2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Initialise columns k+1:n to columns of the unit matrix */
+
+ i__1 = *n;
+ for (j = *k + 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = 1; l <= i__2; ++l) {
+ a[l + j * a_dim1] = 0.;
+/* L10: */
+ }
+ a[j + j * a_dim1] = 1.;
+/* L20: */
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the left */
+
+ if (i__ < *n) {
+ a[i__ + i__ * a_dim1] = 1.;
+ i__1 = *m - i__ + 1;
+ i__2 = *n - i__;
+ dlarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ }
+ if (i__ < *m) {
+ i__1 = *m - i__;
+ d__1 = -tau[i__];
+ dscal_(&i__1, &d__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+ }
+ a[i__ + i__ * a_dim1] = 1. - tau[i__];
+
+/* Set A(1:i-1,i) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ a[l + i__ * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of DORG2R */
+
+} /* dorg2r_ */
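+
+/*
+   Illustrative sketch (hypothetical names): forming Q explicitly after
+   an unblocked QR factorization, with k = min(m,n) reflectors:
+
+       dgeqr2_(&m, &n, a, &lda, tau, work, &info);
+       dorg2r_(&m, &n, &k, a, &lda, tau, work, &info);
+
+   The first n columns of Q then overwrite A; DORGQR further below is
+   the blocked driver built on this kernel.
+*/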
+
+/* Subroutine */ int dorgbr_(char *vect, integer *m, integer *n, integer *k,
+ doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, nb, mn;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ static logical wantq;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int dorglq_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *), dorgqr_(integer *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, integer *);
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORGBR generates one of the real orthogonal matrices Q or P**T
+ determined by DGEBRD when reducing a real matrix A to bidiagonal
+ form: A = Q * B * P**T. Q and P**T are defined as products of
+ elementary reflectors H(i) or G(i) respectively.
+
+ If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q
+ is of order M:
+ if m >= k, Q = H(1) H(2) . . . H(k) and DORGBR returns the first n
+ columns of Q, where m >= n >= k;
+ if m < k, Q = H(1) H(2) . . . H(m-1) and DORGBR returns Q as an
+ M-by-M matrix.
+
+ If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**T
+ is of order N:
+ if k < n, P**T = G(k) . . . G(2) G(1) and DORGBR returns the first m
+ rows of P**T, where n >= m >= k;
+ if k >= n, P**T = G(n-1) . . . G(2) G(1) and DORGBR returns P**T as
+ an N-by-N matrix.
+
+ Arguments
+ =========
+
+ VECT (input) CHARACTER*1
+ Specifies whether the matrix Q or the matrix P**T is
+ required, as defined in the transformation applied by DGEBRD:
+ = 'Q': generate Q;
+ = 'P': generate P**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix Q or P**T to be returned.
+ M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q or P**T to be returned.
+ N >= 0.
+ If VECT = 'Q', M >= N >= min(M,K);
+ if VECT = 'P', N >= M >= min(N,K).
+
+ K (input) INTEGER
+ If VECT = 'Q', the number of columns in the original M-by-K
+ matrix reduced by DGEBRD.
+ If VECT = 'P', the number of rows in the original K-by-N
+ matrix reduced by DGEBRD.
+ K >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the vectors which define the elementary reflectors,
+ as returned by DGEBRD.
+ On exit, the M-by-N matrix Q or P**T.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) DOUBLE PRECISION array, dimension
+ (min(M,K)) if VECT = 'Q'
+ (min(N,K)) if VECT = 'P'
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i) or G(i), which determines Q or P**T, as
+ returned by DGEBRD in its array argument TAUQ or TAUP.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,min(M,N)).
+ For optimum performance LWORK >= min(M,N)*NB, where NB
+ is the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ wantq = lsame_(vect, "Q");
+ mn = min(*m,*n);
+ lquery = *lwork == -1;
+ if (! wantq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (((*n < 0) || (wantq && ((*n > *m) || (*n < min(*m,*k))))) || (!
+ wantq && ((*m > *n) || (*m < min(*n,*k))))) {
+ *info = -3;
+ } else if (*k < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*lwork < max(1,mn) && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+ if (wantq) {
+ nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ } else {
+ nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ }
+ lwkopt = max(1,mn) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORGBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ if (wantq) {
+
+/*
+ Form Q, determined by a call to DGEBRD to reduce an m-by-k
+ matrix
+*/
+
+ if (*m >= *k) {
+
+/* If m >= k, assume m >= n >= k */
+
+ dorgqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/*
+ If m < k, assume m = n
+
+ Shift the vectors which define the elementary reflectors one
+ column to the right, and set the first row and column of Q
+ to those of the unit matrix
+*/
+
+ for (j = *m; j >= 2; --j) {
+ a[j * a_dim1 + 1] = 0.;
+ i__1 = *m;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ a[a_dim1 + 1] = 1.;
+ i__1 = *m;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ a[i__ + a_dim1] = 0.;
+/* L30: */
+ }
+ if (*m > 1) {
+
+/* Form Q(2:m,2:m) */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ dorgqr_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
+ tau[1], &work[1], lwork, &iinfo);
+ }
+ }
+ } else {
+
+/*
+ Form P', determined by a call to DGEBRD to reduce a k-by-n
+ matrix
+*/
+
+ if (*k < *n) {
+
+/* If k < n, assume k <= m <= n */
+
+ dorglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/*
+ If k >= n, assume m = n
+
+ Shift the vectors which define the elementary reflectors one
+ row downward, and set the first row and column of P' to
+ those of the unit matrix
+*/
+
+ a[a_dim1 + 1] = 1.;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ a[i__ + a_dim1] = 0.;
+/* L40: */
+ }
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ for (i__ = j - 1; i__ >= 2; --i__) {
+ a[i__ + j * a_dim1] = a[i__ - 1 + j * a_dim1];
+/* L50: */
+ }
+ a[j * a_dim1 + 1] = 0.;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Form P'(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ dorglq_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
+ tau[1], &work[1], lwork, &iinfo);
+ }
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORGBR */
+
+} /* dorgbr_ */
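+
+/*
+   Illustrative sketch (hypothetical names): after DGEBRD reduces an
+   m-by-n matrix (m >= n) to bidiagonal form A = Q * B * P**T, the
+   first n columns of Q can be materialized in place:
+
+       dgebrd_(&m, &n, a, &lda, d, e, tauq, taup, work, &lwork, &info);
+       dorgbr_("Q", &m, &n, &n, a, &lda, tauq, work, &lwork, &info);
+
+   Generating P**T instead uses VECT = "P" with the TAUP scalars and
+   needs its own copy of the reduced A.
+*/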
+
+/* Subroutine */ int dorghr_(integer *n, integer *ilo, integer *ihi,
+ doublereal *a, integer *lda, doublereal *tau, doublereal *work,
+ integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, nb, nh, iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ integer *);
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORGHR generates a real orthogonal matrix Q which is defined as the
+ product of IHI-ILO elementary reflectors of order N, as returned by
+ DGEHRD:
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix Q. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ ILO and IHI must have the same values as in the previous call
+ of DGEHRD. Q is equal to the unit matrix except in the
+ submatrix Q(ilo+1:ihi,ilo+1:ihi).
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the vectors which define the elementary reflectors,
+ as returned by DGEHRD.
+ On exit, the N-by-N orthogonal matrix Q.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (input) DOUBLE PRECISION array, dimension (N-1)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEHRD.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= IHI-ILO.
+ For optimum performance LWORK >= (IHI-ILO)*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nh = *ihi - *ilo;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -2;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < max(1,nh) && ! lquery) {
+ *info = -8;
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "DORGQR", " ", &nh, &nh, &nh, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ lwkopt = max(1,nh) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORGHR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+/*
+ Shift the vectors which define the elementary reflectors one
+ column to the right, and set the first ilo and the last n-ihi
+ rows and columns to those of the unit matrix
+*/
+
+ i__1 = *ilo + 1;
+ for (j = *ihi; j >= i__1; --j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L10: */
+ }
+ i__2 = *ihi;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
+/* L20: */
+ }
+ i__2 = *n;
+ for (i__ = *ihi + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ i__1 = *ilo;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L50: */
+ }
+ a[j + j * a_dim1] = 1.;
+/* L60: */
+ }
+ i__1 = *n;
+ for (j = *ihi + 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L70: */
+ }
+ a[j + j * a_dim1] = 1.;
+/* L80: */
+ }
+
+ if (nh > 0) {
+
+/* Generate Q(ilo+1:ihi,ilo+1:ihi) */
+
+ dorgqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[*
+ ilo], &work[1], lwork, &iinfo);
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORGHR */
+
+} /* dorghr_ */
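+
+/*
+   Illustrative sketch (hypothetical names): the usual pairing with
+   DGEHRD in an eigenvalue driver; A is reduced to Hessenberg form and
+   the accumulated Q is then generated in place:
+
+       dgehrd_(&n, &ilo, &ihi, a, &lda, tau, work, &lwork, &info);
+       dorghr_(&n, &ilo, &ihi, a, &lda, tau, work, &lwork, &info);
+*/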
+
+/* Subroutine */ int dorgl2_(integer *m, integer *n, integer *k, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublereal d__1;
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *), dlarf_(char *, integer *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORGL2 generates an m by n real matrix Q with orthonormal rows,
+ which is defined as the first m rows of a product of k elementary
+ reflectors of order n
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by DGELQF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. N >= M.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. M >= K >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the i-th row must contain the vector which defines
+ the elementary reflector H(i), for i = 1,2,...,k, as returned
+ by DGELQF in the first k rows of its array argument A.
+ On exit, the m-by-n matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGELQF.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (M)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *m)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORGL2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ return 0;
+ }
+
+ if (*k < *m) {
+
+/* Initialise rows k+1:m to rows of the unit matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = *k + 1; l <= i__2; ++l) {
+ a[l + j * a_dim1] = 0.;
+/* L10: */
+ }
+ if (j > *k && j <= *m) {
+ a[j + j * a_dim1] = 1.;
+ }
+/* L20: */
+ }
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the right */
+
+ if (i__ < *n) {
+ if (i__ < *m) {
+ a[i__ + i__ * a_dim1] = 1.;
+ i__1 = *m - i__;
+ i__2 = *n - i__ + 1;
+ dlarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, &
+ tau[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ }
+ i__1 = *n - i__;
+ d__1 = -tau[i__];
+ dscal_(&i__1, &d__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+ a[i__ + i__ * a_dim1] = 1. - tau[i__];
+
+/* Set A(i,1:i-1) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ a[i__ + l * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of DORGL2 */
+
+} /* dorgl2_ */
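+
+/*
+   Illustrative sketch (hypothetical names): forming the first m rows
+   of Q after an unblocked LQ factorization, with k = min(m,n):
+
+       dgelq2_(&m, &n, a, &lda, tau, work, &info);
+       dorgl2_(&m, &n, &k, a, &lda, tau, work, &info);
+
+   DORGLQ below provides the blocked equivalent.
+*/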
+
+/* Subroutine */ int dorglq_(integer *m, integer *n, integer *k, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int dorgl2_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *),
+ dlarfb_(char *, char *, char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORGLQ generates an M-by-N real matrix Q with orthonormal rows,
+ which is defined as the first M rows of a product of K elementary
+ reflectors of order N
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by DGELQF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. N >= M.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. M >= K >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the i-th row must contain the vector which defines
+ the elementary reflector H(i), for i = 1,2,...,k, as returned
+ by DGELQF in the first k rows of its array argument A.
+ On exit, the M-by-N matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGELQF.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,M).
+ For optimum performance LWORK >= M*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "DORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
+ lwkopt = max(1,*m) * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *m)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*lwork < max(1,*m) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORGLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if (nb > 1 && nb < *k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "DORGLQ", " ", m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORGLQ", " ", m, n, k, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/*
+ Use blocked code after the last block.
+ The first kk rows are handled by the block method.
+*/
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(kk+1:m,1:kk) to zero. */
+
+ i__1 = kk;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = kk + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *m) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ dorgl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *m) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__2 = *n - i__ + 1;
+ dlarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i+ib:m,i:n) from the right */
+
+ i__2 = *m - i__ - ib + 1;
+ i__3 = *n - i__ + 1;
+ dlarfb_("Right", "Transpose", "Forward", "Rowwise", &i__2, &
+ i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+ 1], &ldwork);
+ }
+
+/* Apply H' to columns i:n of current block */
+
+ i__2 = *n - i__ + 1;
+ dorgl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set columns 1:i-1 of current block to zero */
+
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + ib - 1;
+ for (l = i__; l <= i__3; ++l) {
+ a[l + j * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1] = (doublereal) iws;
+ return 0;
+
+/* End of DORGLQ */
+
+} /* dorglq_ */
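+
+/*
+   Illustrative sketch (hypothetical names), including the LWORK = -1
+   workspace query convention documented above:
+
+       integer lwork = -1, info;
+       doublereal wkopt;
+       dgelqf_(&m, &n, a, &lda, tau, &wkopt, &lwork, &info);
+       lwork = (integer) wkopt;
+       (allocate a doublereal array work of length lwork)
+       dgelqf_(&m, &n, a, &lda, tau, work, &lwork, &info);
+       dorglq_(&m, &n, &k, a, &lda, tau, work, &lwork, &info);
+*/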
+
+/* Subroutine */ int dorgqr_(integer *m, integer *n, integer *k, doublereal *
+ a, integer *lda, doublereal *tau, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int dorg2r_(integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *),
+ dlarfb_(char *, char *, char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, integer *, doublereal *, integer *), dlarft_(char *, char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORGQR generates an M-by-N real matrix Q with orthonormal columns,
+ which is defined as the first N columns of a product of K elementary
+ reflectors of order M
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by DGEQRF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. M >= N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. N >= K >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the i-th column must contain the vector which
+ defines the elementary reflector H(i), for i = 1,2,...,k, as
+ returned by DGEQRF in the first k columns of its array
+ argument A.
+ On exit, the M-by-N matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEQRF.
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument has an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "DORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
+ lwkopt = max(1,*n) * nb;
+ work[1] = (doublereal) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if ((*n < 0) || (*n > *m)) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORGQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < *k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "DORGQR", " ", m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORGQR", " ", m, n, k, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/*
+ Use blocked code after the last block.
+ The first kk columns are handled by the block method.
+*/
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(1:kk,kk+1:n) to zero. */
+
+ i__1 = *n;
+ for (j = kk + 1; j <= i__1; ++j) {
+ i__2 = kk;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *n) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ dorg2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *n) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__2 = *m - i__ + 1;
+ dlarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i:m,i+ib:n) from the left */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__ - ib + 1;
+ dlarfb_("Left", "No transpose", "Forward", "Columnwise", &
+ i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+ 1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &
+ work[ib + 1], &ldwork);
+ }
+
+/* Apply H to rows i:m of current block */
+
+ i__2 = *m - i__ + 1;
+ dorg2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set rows 1:i-1 of current block to zero */
+
+ i__2 = i__ + ib - 1;
+ for (j = i__; j <= i__2; ++j) {
+ i__3 = i__ - 1;
+ for (l = 1; l <= i__3; ++l) {
+ a[l + j * a_dim1] = 0.;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1] = (doublereal) iws;
+ return 0;
+
+/* End of DORGQR */
+
+} /* dorgqr_ */
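+
+/*
+   A minimal usage sketch of the LWORK = -1 workspace query described
+   above, assuming an m-by-n array a holding a QR factorization from
+   dgeqrf_ and its k scalar factors in tau (integer and doublereal are
+   the f2c typedefs used throughout this file; error checking omitted):
+
+       integer lwork = -1, info;
+       doublereal wkopt, *work;
+       dorgqr_(&m, &n, &k, a, &lda, tau, &wkopt, &lwork, &info);
+       lwork = (integer) wkopt;
+       work = (doublereal *) malloc(lwork * sizeof(doublereal));
+       dorgqr_(&m, &n, &k, a, &lda, tau, work, &lwork, &info);
+*/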
+
+/* Subroutine */ int dorm2l_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, i1, i2, i3, mi, ni, nq;
+ static doublereal aii;
+ static logical left;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DORM2L overwrites the general real m by n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q'* C if SIDE = 'L' and TRANS = 'T', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'T',
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by DGEQLF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'T': apply Q' (Transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGEQLF in the last k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEQLF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORM2L", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ return 0;
+ }
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[nq - *k + i__ + i__ * a_dim1];
+ a[nq - *k + i__ + i__ * a_dim1] = 1.;
+ dlarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &tau[i__], &c__[
+ c_offset], ldc, &work[1]);
+ a[nq - *k + i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of DORM2L */
+
+} /* dorm2l_ */
+
+/* Subroutine */ int dorm2r_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ static doublereal aii;
+ static logical left;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DORM2R overwrites the general real m by n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q'* C if SIDE = 'L' and TRANS = 'T', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'T',
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by DGEQRF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'T': apply Q' (Transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGEQRF in the first k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEQRF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORM2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ return 0;
+ }
+
+ if ((left && ! notran) || (! left && notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+ dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of DORM2R */
+
+} /* dorm2r_ */
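+
+/*
+   Note on the aii save/restore pattern in dorm2l_ and dorm2r_ above (and
+   dorml2_ below): each reflector is H(i) = I - tau(i)*v*v**T, where v has
+   a unit component and only its remaining entries are stored in A.
+   Temporarily writing 1. into the entry that corresponds to v's unit
+   component lets dlarf_ read v in place from column i (or row i in
+   dorml2_) of A; the saved value aii is then restored afterwards.
+*/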
+
+/* Subroutine */ int dormbr_(char *vect, char *side, char *trans, integer *m,
+ integer *n, integer *k, doublereal *a, integer *lda, doublereal *tau,
+ doublereal *c__, integer *ldc, doublereal *work, integer *lwork,
+ integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2];
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i1, i2, nb, mi, ni, nq, nw;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int dormlq_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ static logical notran;
+ extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ static logical applyq;
+ static char transt[1];
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ If VECT = 'Q', DORMBR overwrites the general real M-by-N matrix C
+ with
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ If VECT = 'P', DORMBR overwrites the general real M-by-N matrix C
+ with
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': P * C C * P
+ TRANS = 'T': P**T * C C * P**T
+
+ Here Q and P**T are the orthogonal matrices determined by DGEBRD when
+ reducing a real matrix A to bidiagonal form: A = Q * B * P**T. Q and
+ P**T are defined as products of elementary reflectors H(i) and G(i)
+ respectively.
+
+ Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the
+ order of the orthogonal matrix Q or P**T that is applied.
+
+ If VECT = 'Q', A is assumed to have been an NQ-by-K matrix:
+ if nq >= k, Q = H(1) H(2) . . . H(k);
+ if nq < k, Q = H(1) H(2) . . . H(nq-1).
+
+ If VECT = 'P', A is assumed to have been a K-by-NQ matrix:
+ if k < nq, P = G(1) G(2) . . . G(k);
+ if k >= nq, P = G(1) G(2) . . . G(nq-1).
+
+ Arguments
+ =========
+
+ VECT (input) CHARACTER*1
+ = 'Q': apply Q or Q**T;
+ = 'P': apply P or P**T.
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q, Q**T, P or P**T from the Left;
+ = 'R': apply Q, Q**T, P or P**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q or P;
+ = 'T': Transpose, apply Q**T or P**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ If VECT = 'Q', the number of columns in the original
+ matrix reduced by DGEBRD.
+ If VECT = 'P', the number of rows in the original
+ matrix reduced by DGEBRD.
+ K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension
+ (LDA,min(nq,K)) if VECT = 'Q'
+ (LDA,nq) if VECT = 'P'
+ The vectors which define the elementary reflectors H(i) and
+ G(i), whose products determine the matrices Q and P, as
+ returned by DGEBRD.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If VECT = 'Q', LDA >= max(1,nq);
+ if VECT = 'P', LDA >= max(1,min(nq,K)).
+
+ TAU (input) DOUBLE PRECISION array, dimension (min(nq,K))
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i) or G(i) which determines Q or P, as returned
+ by DGEBRD in the array argument TAUQ or TAUP.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q
+ or P*C or P**T*C or C*P or C*P**T.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ applyq = lsame_(vect, "Q");
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q or P and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! applyq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (! left && ! lsame_(side, "R")) {
+ *info = -2;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*k < 0) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = min(nq,*k);
+ if ((applyq && *lda < max(1,nq)) || (! applyq && *lda < max(i__1,i__2)
+ )) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info == 0) {
+ if (applyq) {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__1, n, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__1, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "DORMLQ", ch__1, &i__1, n, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "DORMLQ", ch__1, m, &i__1, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ work[1] = 1.;
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+
+ if (applyq) {
+
+/* Apply Q */
+
+ if (nq >= *k) {
+
+/* Q was determined by a call to DGEBRD with nq >= k */
+
+ dormqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* Q was determined by a call to DGEBRD with nq < k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+            dormqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda,
+                    &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork,
+                    &iinfo);

+ }
+ } else {
+
+/* Apply P */
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+ if (nq > *k) {
+
+/* P was determined by a call to DGEBRD with nq > k */
+
+ dormlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* P was determined by a call to DGEBRD with nq <= k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ dormlq_(side, transt, &mi, &ni, &i__1, &a[((a_dim1) << (1)) + 1],
+ lda, &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1],
+ lwork, &iinfo);
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMBR */
+
+} /* dormbr_ */
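+
+/*
+   A usage sketch, assuming dgebrd_ has reduced an m-by-n matrix (m >= n)
+   held in a to bidiagonal form with the column reflectors' scalar factors
+   in tauq, and c is an m-by-nrhs matrix; this applies Q**T from the left
+   without ever forming Q explicitly:
+
+       dormbr_("Q", "L", "T", &m, &nrhs, &n, a, &lda, tauq,
+               c, &ldc, work, &lwork, &info);
+*/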
+
+/* Subroutine */ int dorml2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ static doublereal aii;
+ static logical left;
+ extern /* Subroutine */ int dlarf_(char *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DORML2 overwrites the general real m by n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q'* C if SIDE = 'L' and TRANS = 'T', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'T',
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by DGELQF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'T': apply Q' (Transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension
+ (LDA,M) if SIDE = 'L',
+ (LDA,N) if SIDE = 'R'
+ The i-th row must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGELQF in the first k rows of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,K).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGELQF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORML2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ return 0;
+ }
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.;
+ dlarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of DORML2 */
+
+} /* dorml2_ */
+
+/* Subroutine */ int dormlq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ static doublereal t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+    extern /* Subroutine */ int dorml2_(char *, char *, integer *, integer *,
+            integer *, doublereal *, integer *, doublereal *, doublereal *,
+            integer *, doublereal *, integer *),
+            dlarfb_(char *, char *, char *, char *, integer *, integer *,
+            integer *, doublereal *, integer *, doublereal *, integer *,
+            doublereal *, integer *, doublereal *, integer *),
+            dlarft_(char *, char *, integer *, integer *, doublereal *,
+            integer *, doublereal *, doublereal *, integer *),
+            xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical notran;
+ static integer ldwork;
+ static char transt[1];
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORMLQ overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by DGELQF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension
+ (LDA,M) if SIDE = 'L',
+ (LDA,N) if SIDE = 'R'
+ The i-th row must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGELQF in the first k rows of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,K).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGELQF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DORMLQ", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORMLQ", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if ((nb < nbmin) || (nb >= *k)) {
+
+/* Use unblocked code */
+
+ dorml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__4 = nq - i__ + 1;
+ dlarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1],
+ lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ dlarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__
+ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1],
+ ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMLQ */
+
+} /* dormlq_ */
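+
+/*
+   A usage sketch, assuming an LQ factorization from dgelqf_ of an
+   m-by-n matrix (m <= n) is stored in a and tau; this applies Q**T to an
+   n-by-nrhs matrix x from the left, as done when computing minimum-norm
+   solutions of underdetermined systems:
+
+       dormlq_("L", "T", &n, &nrhs, &m, a, &lda, tau, x, &ldx,
+               work, &lwork, &info);
+*/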
+
+/* Subroutine */ int dormql_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ static doublereal t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+    extern /* Subroutine */ int dorm2l_(char *, char *, integer *, integer *,
+            integer *, doublereal *, integer *, doublereal *, doublereal *,
+            integer *, doublereal *, integer *),
+            dlarfb_(char *, char *, char *, char *, integer *, integer *,
+            integer *, doublereal *, integer *, doublereal *, integer *,
+            doublereal *, integer *, doublereal *, integer *),
+            dlarft_(char *, char *, integer *, integer *, doublereal *,
+            integer *, doublereal *, doublereal *, integer *),
+            xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical notran;
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORMQL overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by DGEQLF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGEQLF in the last k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEQLF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQL", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMQL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQL", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if ((nb < nbmin) || (nb >= *k)) {
+
+/* Use unblocked code */
+
+ dorm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i+ib-1) . . . H(i+1) H(i)
+*/
+
+ i__4 = nq - *k + i__ + ib - 1;
+ dlarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1]
+ , lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ dlarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, &
+ work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMQL */
+
+} /* dormql_ */
+
+/* Subroutine */ int dormqr_(char *side, char *trans, integer *m, integer *n,
+ integer *k, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ static doublereal t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+    extern /* Subroutine */ int dorm2r_(char *, char *, integer *, integer *,
+            integer *, doublereal *, integer *, doublereal *, doublereal *,
+            integer *, doublereal *, integer *),
+            dlarfb_(char *, char *, char *, char *, integer *, integer *,
+            integer *, doublereal *, integer *, doublereal *, integer *,
+            doublereal *, integer *, doublereal *, integer *),
+            dlarft_(char *, char *, integer *, integer *, doublereal *,
+            integer *, doublereal *, doublereal *, integer *),
+            xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical notran;
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORMQR overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by DGEQRF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ DGEQRF in the first k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) DOUBLE PRECISION array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DGEQRF.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "DORMQR", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DORMQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "DORMQR", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if ((nb < nbmin) || (nb >= *k)) {
+
+/* Use unblocked code */
+
+ dorm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && ! notran) || (! left && notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__4 = nq - i__ + 1;
+ dlarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], t, &c__65)
+ ;
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ dlarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc *
+ c_dim1], ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMQR */
+
+} /* dormqr_ */
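+
+/*
+   A least-squares sketch, assuming dgeqrf_ and the BLAS dtrsm_ are
+   available from the same translated sources: for an m-by-n matrix a
+   with m >= n and an m-by-nrhs right-hand side b, apply Q**T to b and
+   then solve against the upper triangular factor R left in a; rows 1..n
+   of b then hold the solution (with doublereal one = 1.):
+
+       dgeqrf_(&m, &n, a, &lda, tau, work, &lwork, &info);
+       dormqr_("L", "T", &m, &nrhs, &n, a, &lda, tau, b, &ldb,
+               work, &lwork, &info);
+       dtrsm_("L", "U", "N", "N", &n, &nrhs, &one, a, &lda, b, &ldb);
+*/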
+
+/* Subroutine */ int dormtr_(char *side, char *uplo, char *trans, integer *m,
+ integer *n, doublereal *a, integer *lda, doublereal *tau, doublereal *
+ c__, integer *ldc, doublereal *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i1, i2, nb, mi, ni, nq, nw;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int dormql_(char *, char *, integer *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *),
+ dormqr_(char *, char *, integer *, integer *, integer *,
+ doublereal *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *, integer *);
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DORMTR overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix of order nq, with nq = m if
+ SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of
+ nq-1 elementary reflectors, as returned by DSYTRD:
+
+ if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1);
+
+ if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1).
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A contains elementary reflectors
+ from DSYTRD;
+ = 'L': Lower triangle of A contains elementary reflectors
+ from DSYTRD.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension
+ (LDA,M) if SIDE = 'L'
+ (LDA,N) if SIDE = 'R'
+ The vectors which define the elementary reflectors, as
+ returned by DSYTRD.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'.
+
+ TAU (input) DOUBLE PRECISION array, dimension
+ (M-1) if SIDE = 'L'
+ (N-1) if SIDE = 'R'
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by DSYTRD.
+
+ C (input/output) DOUBLE PRECISION array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ if (upper) {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "DORMQL", ch__1, &i__2, n, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "DORMQL", ch__1, m, &i__2, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, &i__2, n, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "DORMQR", ch__1, m, &i__2, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("DORMTR", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (nq == 1)) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to DSYTRD with UPLO = 'U' */
+
+ i__2 = nq - 1;
+ dormql_(side, trans, &mi, &ni, &i__2, &a[((a_dim1) << (1)) + 1], lda,
+ &tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo);
+ } else {
+
+/* Q was determined by a call to DSYTRD with UPLO = 'L' */
+
+ if (left) {
+ i1 = 2;
+ i2 = 1;
+ } else {
+ i1 = 1;
+ i2 = 2;
+ }
+ i__2 = nq - 1;
+ dormqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], &
+ c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DORMTR */
+
+} /* dormtr_ */
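+
+/*
+   A usage sketch, assuming dsytrd_ (UPLO = 'U') reduced a symmetric
+   n-by-n matrix in a to tridiagonal form T = Q**T * A * Q, and z holds
+   eigenvectors of T from a tridiagonal eigensolver; this back-transforms
+   them into eigenvectors of A by forming Q * Z:
+
+       dormtr_("L", "U", "N", &n, &n, a, &lda, tau, z, &ldz,
+               work, &lwork, &info);
+*/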
+
+/* Subroutine */ int dpotf2_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer j;
+ static doublereal ajj;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DPOTF2 computes the Cholesky factorization of a real symmetric
+ positive definite matrix A.
+
+ The factorization has the form
+ A = U' * U , if UPLO = 'U', or
+ A = L * L', if UPLO = 'L',
+ where U is an upper triangular matrix and L is lower triangular.
+
+ This is the unblocked version of the algorithm, calling Level 2 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the upper or lower triangular part of the
+ symmetric matrix A is stored.
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ n by n upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n by n lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+
+ On exit, if INFO = 0, the factor U or L from the Cholesky
+ factorization A = U'*U or A = L*L'.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+ > 0: if INFO = k, the leading minor of order k is not
+ positive definite, and the factorization could not be
+ completed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPOTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j - 1;
+ ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j * a_dim1 + 1], &c__1,
+ &a[j * a_dim1 + 1], &c__1);
+ if (ajj <= 0.) {
+ a[j + j * a_dim1] = ajj;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of row J. */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ i__3 = *n - j;
+ dgemv_("Transpose", &i__2, &i__3, &c_b3001, &a[(j + 1) *
+ a_dim1 + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b2865,
+ &a[j + (j + 1) * a_dim1], lda);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ dscal_(&i__2, &d__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j - 1;
+ ajj = a[j + j * a_dim1] - ddot_(&i__2, &a[j + a_dim1], lda, &a[j
+ + a_dim1], lda);
+ if (ajj <= 0.) {
+ a[j + j * a_dim1] = ajj;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of column J. */
+
+ if (j < *n) {
+ i__2 = *n - j;
+ i__3 = j - 1;
+ dgemv_("No transpose", &i__2, &i__3, &c_b3001, &a[j + 1 +
+ a_dim1], lda, &a[j + a_dim1], lda, &c_b2865, &a[j + 1
+ + j * a_dim1], &c__1);
+ i__2 = *n - j;
+ d__1 = 1. / ajj;
+ dscal_(&i__2, &d__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ goto L40;
+
+L30:
+ *info = j;
+
+L40:
+ return 0;
+
+/* End of DPOTF2 */
+
+} /* dpotf2_ */
+
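+/*
+   The column loop above is the classical unblocked Cholesky recurrence.
+   A minimal standalone sketch of the same UPLO = 'L' recurrence on a
+   plain row-major C array follows; chol_lower is a hypothetical name
+   for illustration only, not part of this file (compile it separately).
+*/
+
+#include <math.h>
+
+/* Factor an n x n symmetric positive definite row-major matrix in place
+   as A = L*L', using the same column-by-column recurrence as DPOTF2.
+   Returns 0 on success, or j (1-based) if the leading minor of order j
+   is not positive definite. */
+static int chol_lower(int n, double *a)
+{
+    for (int j = 0; j < n; ++j) {
+        double ajj = a[j*n + j];
+        for (int k = 0; k < j; ++k)        /* the ddot_ step above */
+            ajj -= a[j*n + k] * a[j*n + k];
+        if (ajj <= 0.0)
+            return j + 1;                  /* not positive definite */
+        ajj = sqrt(ajj);
+        a[j*n + j] = ajj;
+        for (int i = j + 1; i < n; ++i) {  /* the dgemv_/dscal_ step above */
+            double s = a[i*n + j];
+            for (int k = 0; k < j; ++k)
+                s -= a[i*n + k] * a[j*n + k];
+            a[i*n + j] = s / ajj;
+        }
+    }
+    return 0;
+}
+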
+/* Subroutine */ int dpotrf_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer j, jb, nb;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int dsyrk_(char *, char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *), dpotf2_(char *, integer *,
+ doublereal *, integer *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ DPOTRF computes the Cholesky factorization of a real symmetric
+ positive definite matrix A.
+
+ The factorization has the form
+ A = U**T * U, if UPLO = 'U', or
+ A = L * L**T, if UPLO = 'L',
+ where U is an upper triangular matrix and L is lower triangular.
+
+ This is the block version of the algorithm, calling Level 3 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ N-by-N upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading N-by-N lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+
+ On exit, if INFO = 0, the factor U or L from the Cholesky
+ factorization A = U**T*U or A = L*L**T.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, the leading minor of order i is not
+ positive definite, and the factorization could not be
+ completed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPOTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "DPOTRF", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ if ((nb <= 1) || (nb >= *n)) {
+
+/* Use unblocked code. */
+
+ dpotf2_(uplo, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code. */
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/*
+ Update and factorize the current diagonal block and test
+ for non-positive-definiteness.
+
+ Computing MIN
+*/
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ dsyrk_("Upper", "Transpose", &jb, &i__3, &c_b3001, &a[j *
+ a_dim1 + 1], lda, &c_b2865, &a[j + j * a_dim1], lda);
+ dpotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block row. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ dgemm_("Transpose", "No transpose", &jb, &i__3, &i__4, &
+ c_b3001, &a[j * a_dim1 + 1], lda, &a[(j + jb) *
+ a_dim1 + 1], lda, &c_b2865, &a[j + (j + jb) *
+ a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ dtrsm_("Left", "Upper", "Transpose", "Non-unit", &jb, &
+ i__3, &c_b2865, &a[j + j * a_dim1], lda, &a[j + (
+ j + jb) * a_dim1], lda);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/*
+ Update and factorize the current diagonal block and test
+ for non-positive-definiteness.
+
+ Computing MIN
+*/
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ dsyrk_("Lower", "No transpose", &jb, &i__3, &c_b3001, &a[j +
+ a_dim1], lda, &c_b2865, &a[j + j * a_dim1], lda);
+ dpotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block column. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ dgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &
+ c_b3001, &a[j + jb + a_dim1], lda, &a[j + a_dim1],
+ lda, &c_b2865, &a[j + jb + j * a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ dtrsm_("Right", "Lower", "Transpose", "Non-unit", &i__3, &
+ jb, &c_b2865, &a[j + j * a_dim1], lda, &a[j + jb
+ + j * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+ goto L40;
+
+L30:
+ *info = *info + j - 1;
+
+L40:
+ return 0;
+
+/* End of DPOTRF */
+
+} /* dpotrf_ */
+
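+/*
+   A minimal calling sketch for dpotrf_ from C, assuming the usual f2c
+   typedefs (the integer typedef below must match the f2c.h actually in
+   use) and Fortran column-major storage. The 3 x 3 matrix is an
+   arbitrary SPD example; compile the sketch separately from this file.
+*/
+
+#include <stdio.h>
+
+typedef int integer;                /* assumption: match f2c.h */
+typedef double doublereal;
+extern int dpotrf_(char *uplo, integer *n, doublereal *a, integer *lda,
+                   integer *info);
+
+int main(void)
+{
+    /* Column-major 3 x 3 symmetric positive definite matrix. */
+    doublereal a[9] = { 4., 2., 2.,
+                        2., 5., 1.,
+                        2., 1., 6. };
+    integer n = 3, lda = 3, info;
+
+    dpotrf_("L", &n, a, &lda, &info);   /* A = L*L', L in lower triangle */
+    if (info > 0)
+        printf("leading minor of order %d is not positive definite\n",
+               (int) info);
+    else if (info == 0)
+        printf("L(1,1) = %g\n", a[0]);  /* sqrt(4) = 2 */
+    return 0;
+}
+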
+/* Subroutine */ int dpotri_(char *uplo, integer *n, doublereal *a, integer *
+ lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), dlauum_(
+ char *, integer *, doublereal *, integer *, integer *),
+ dtrtri_(char *, char *, integer *, doublereal *, integer *,
+ integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ DPOTRI computes the inverse of a real symmetric positive definite
+ matrix A using the Cholesky factorization A = U**T*U or A = L*L**T
+ computed by DPOTRF.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the triangular factor U or L from the Cholesky
+ factorization A = U**T*U or A = L*L**T, as computed by
+ DPOTRF.
+ On exit, the upper or lower triangle of the (symmetric)
+ inverse of A, overwriting the input factor U or L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, the (i,i) element of the factor U or L is
+ zero, and the inverse could not be computed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPOTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+ dtrtri_(uplo, "Non-unit", n, &a[a_offset], lda, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+/* Form inv(U)*inv(U)' or inv(L)'*inv(L). */
+
+ dlauum_(uplo, n, &a[a_offset], lda, info);
+
+ return 0;
+
+/* End of DPOTRI */
+
+} /* dpotri_ */
+
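+/*
+   Intended call sequence for dpotri_: it consumes the factor produced
+   by dpotrf_, and only the UPLO triangle of inv(A) is written back.
+   spd_invert below is a hypothetical standalone helper under the same
+   f2c typedef assumptions as the previous sketch.
+*/
+
+typedef int integer;                /* assumption: match f2c.h */
+typedef double doublereal;
+extern int dpotrf_(char *, integer *, doublereal *, integer *, integer *);
+extern int dpotri_(char *, integer *, doublereal *, integer *, integer *);
+
+/* Invert a column-major SPD matrix in place; only the lower triangle
+   of inv(A) is valid on return. Returns the LAPACK-style info code. */
+static int spd_invert(integer n, doublereal *a, integer lda)
+{
+    integer info;
+    dpotrf_("L", &n, a, &lda, &info);   /* factor A = L*L' */
+    if (info != 0)
+        return (int) info;              /* not SPD, or bad argument */
+    dpotri_("L", &n, a, &lda, &info);   /* overwrite factor with inv(A) */
+    return (int) info;
+}
+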
+/* Subroutine */ int dpotrs_(char *uplo, integer *n, integer *nrhs,
+ doublereal *a, integer *lda, doublereal *b, integer *ldb, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ DPOTRS solves a system of linear equations A*X = B with a symmetric
+ positive definite matrix A using the Cholesky factorization
+ A = U**T*U or A = L*L**T computed by DPOTRF.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrix B. NRHS >= 0.
+
+ A (input) DOUBLE PRECISION array, dimension (LDA,N)
+ The triangular factor U or L from the Cholesky factorization
+ A = U**T*U or A = L*L**T, as computed by DPOTRF.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ B (input/output) DOUBLE PRECISION array, dimension (LDB,NRHS)
+ On entry, the right hand side matrix B.
+ On exit, the solution matrix X.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DPOTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*n == 0) || (*nrhs == 0)) {
+ return 0;
+ }
+
+ if (upper) {
+
+/*
+ Solve A*X = B where A = U'*U.
+
+ Solve U'*X = B, overwriting B with X.
+*/
+
+ dtrsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b2865, &
+ a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b2865,
+ &a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/*
+ Solve A*X = B where A = L*L'.
+
+ Solve L*X = B, overwriting B with X.
+*/
+
+ dtrsm_("Left", "Lower", "No transpose", "Non-unit", n, nrhs, &c_b2865,
+ &a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ dtrsm_("Left", "Lower", "Transpose", "Non-unit", n, nrhs, &c_b2865, &
+ a[a_offset], lda, &b[b_offset], ldb);
+ }
+
+ return 0;
+
+/* End of DPOTRS */
+
+} /* dpotrs_ */
+
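+/*
+   Factor once, then solve: a standalone sketch combining dpotrf_ and
+   dpotrs_ for a single right-hand side (f2c typedefs assumed as in the
+   earlier sketches; the numbers are an arbitrary worked example).
+*/
+
+typedef int integer;                /* assumption: match f2c.h */
+typedef double doublereal;
+extern int dpotrf_(char *, integer *, doublereal *, integer *, integer *);
+extern int dpotrs_(char *, integer *, integer *, doublereal *, integer *,
+                   doublereal *, integer *, integer *);
+
+static int solve_spd_example(void)
+{
+    doublereal a[4] = { 4., 2.,     /* column-major 2 x 2 SPD matrix */
+                        2., 3. };
+    doublereal b[2] = { 10., 8. };  /* right-hand side */
+    integer n = 2, nrhs = 1, lda = 2, ldb = 2, info;
+
+    dpotrf_("U", &n, a, &lda, &info);
+    if (info != 0)
+        return (int) info;
+    dpotrs_("U", &n, &nrhs, a, &lda, b, &ldb, &info);
+    /* b now holds the solution x = (1.75, 1.5). */
+    return (int) info;
+}
+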
+/* Subroutine */ int dstedc_(char *compz, integer *n, doublereal *d__,
+ doublereal *e, doublereal *z__, integer *ldz, doublereal *work,
+ integer *lwork, integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j, k, m;
+ static doublereal p;
+ static integer ii, end, lgn;
+ static doublereal eps, tiny;
+ extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *,
+ integer *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static integer lwmin;
+ extern /* Subroutine */ int dlaed0_(integer *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, doublereal *, integer *, integer *);
+ static integer start;
+
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlacpy_(char *, integer *, integer
+ *, doublereal *, integer *, doublereal *, integer *),
+ dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
+ doublereal *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *), dlasrt_(char *, integer *, doublereal *, integer *);
+ static integer liwmin, icompz;
+ extern /* Subroutine */ int dsteqr_(char *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *);
+ static doublereal orgnrm;
+ static logical lquery;
+ static integer smlsiz, dtrtrw, storez;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DSTEDC computes all eigenvalues and, optionally, eigenvectors of a
+ symmetric tridiagonal matrix using the divide and conquer method.
+ The eigenvectors of a full or band real symmetric matrix can also be
+ found if DSYTRD or DSPTRD or DSBTRD has been used to reduce this
+ matrix to tridiagonal form.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none. See DLAED3 for details.
+
+ Arguments
+ =========
+
+ COMPZ (input) CHARACTER*1
+ = 'N': Compute eigenvalues only.
+ = 'I': Compute eigenvectors of tridiagonal matrix also.
+ = 'V': Compute eigenvectors of original dense symmetric
+ matrix also. On entry, Z contains the orthogonal
+ matrix used to reduce the original matrix to
+ tridiagonal form.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the diagonal elements of the tridiagonal matrix.
+ On exit, if INFO = 0, the eigenvalues in ascending order.
+
+ E (input/output) DOUBLE PRECISION array, dimension (N-1)
+ On entry, the subdiagonal elements of the tridiagonal matrix.
+ On exit, E has been destroyed.
+
+ Z (input/output) DOUBLE PRECISION array, dimension (LDZ,N)
+ On entry, if COMPZ = 'V', then Z contains the orthogonal
+ matrix used in the reduction to tridiagonal form.
+ On exit, if INFO = 0, then if COMPZ = 'V', Z contains the
+ orthonormal eigenvectors of the original symmetric matrix,
+ and if COMPZ = 'I', Z contains the orthonormal eigenvectors
+ of the symmetric tridiagonal matrix.
+ If COMPZ = 'N', then Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= 1.
+ If eigenvectors are desired, then LDZ >= max(1,N).
+
+ WORK (workspace/output) DOUBLE PRECISION array,
+ dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If COMPZ = 'N' or N <= 1 then LWORK must be at least 1.
+ If COMPZ = 'V' and N > 1 then LWORK must be at least
+ ( 1 + 3*N + 2*N*lg N + 3*N**2 ),
+ where lg( N ) = smallest integer k such
+ that 2**k >= N.
+ If COMPZ = 'I' and N > 1 then LWORK must be at least
+ ( 1 + 4*N + N**2 ).
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ IWORK (workspace/output) INTEGER array, dimension (LIWORK)
+ On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
+
+ LIWORK (input) INTEGER
+ The dimension of the array IWORK.
+ If COMPZ = 'N' or N <= 1 then LIWORK must be at least 1.
+ If COMPZ = 'V' and N > 1 then LIWORK must be at least
+ ( 6 + 6*N + 5*N*lg N ).
+ If COMPZ = 'I' and N > 1 then LIWORK must be at least
+ ( 3 + 5*N ).
+
+ If LIWORK = -1, then a workspace query is assumed; the
+ routine only calculates the optimal size of the IWORK array,
+ returns this value as the first entry of the IWORK array, and
+ no error message related to LIWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute an eigenvalue while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through mod(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+ Modified by Francoise Tisseur, University of Tennessee.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = (*lwork == -1) || (*liwork == -1);
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if ((*n <= 1) || (icompz <= 0)) {
+ liwmin = 1;
+ lwmin = 1;
+ } else {
+ lgn = (integer) (log((doublereal) (*n)) / log(2.));
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (icompz == 1) {
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = *n * 3 + 1 + ((*n) << (1)) * lgn + i__1 * i__1 * 3;
+ liwmin = *n * 6 + 6 + *n * 5 * lgn;
+ } else if (icompz == 2) {
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = ((*n) << (2)) + 1 + i__1 * i__1;
+ liwmin = *n * 5 + 3;
+ }
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if ((*ldz < 1) || (icompz > 0 && *ldz < max(1,*n))) {
+ *info = -6;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -8;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ work[1] = (doublereal) lwmin;
+ iwork[1] = liwmin;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSTEDC", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ if (icompz != 0) {
+ z__[z_dim1 + 1] = 1.;
+ }
+ return 0;
+ }
+
+ smlsiz = ilaenv_(&c__9, "DSTEDC", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+
+/*
+ If the following conditional clause is removed, then the routine
+ will use the Divide and Conquer routine to compute only the
+ eigenvalues, which requires (3N + 3N**2) real workspace and
+ (2 + 5N + 2N lg(N)) integer workspace.
+ Since on many architectures DSTERF is much faster than any other
+ algorithm for finding eigenvalues only, it is used here
+ as the default.
+
+ If COMPZ = 'N', use DSTERF to compute the eigenvalues.
+*/
+
+ if (icompz == 0) {
+ dsterf_(n, &d__[1], &e[1], info);
+ return 0;
+ }
+
+/*
+ If N is smaller than the minimum divide size (SMLSIZ+1), then
+ solve the problem with another solver.
+*/
+
+ if (*n <= smlsiz) {
+ if (icompz == 0) {
+ dsterf_(n, &d__[1], &e[1], info);
+ return 0;
+ } else if (icompz == 2) {
+ dsteqr_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1],
+ info);
+ return 0;
+ } else {
+ dsteqr_("V", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1],
+ info);
+ return 0;
+ }
+ }
+
+/*
+ If COMPZ = 'V', the Z matrix must be stored elsewhere for later
+ use.
+*/
+
+ if (icompz == 1) {
+ storez = *n * *n + 1;
+ } else {
+ storez = 1;
+ }
+
+ if (icompz == 2) {
+ dlaset_("Full", n, n, &c_b2879, &c_b2865, &z__[z_offset], ldz);
+ }
+
+/* Scale. */
+
+ orgnrm = dlanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.) {
+ return 0;
+ }
+
+ eps = EPSILON;
+
+ start = 1;
+
+/* while ( START <= N ) */
+
+L10:
+ if (start <= *n) {
+
+/*
+ Let END be the position of the next subdiagonal entry such that
+ E( END ) <= TINY or END = N if no such subdiagonal exists. The
+ matrix identified by the elements between START and END
+ constitutes an independent sub-problem.
+*/
+
+ end = start;
+L20:
+ if (end < *n) {
+ tiny = eps * sqrt((d__1 = d__[end], abs(d__1))) * sqrt((d__2 =
+ d__[end + 1], abs(d__2)));
+ if ((d__1 = e[end], abs(d__1)) > tiny) {
+ ++end;
+ goto L20;
+ }
+ }
+
+/* (Sub) Problem determined. Compute its size and solve it. */
+
+ m = end - start + 1;
+ if (m == 1) {
+ start = end + 1;
+ goto L10;
+ }
+ if (m > smlsiz) {
+ *info = smlsiz;
+
+/* Scale. */
+
+ orgnrm = dlanst_("M", &m, &d__[start], &e[start]);
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b2865, &m, &c__1, &d__[
+ start], &m, info);
+ i__1 = m - 1;
+ i__2 = m - 1;
+ dlascl_("G", &c__0, &c__0, &orgnrm, &c_b2865, &i__1, &c__1, &e[
+ start], &i__2, info);
+
+ if (icompz == 1) {
+ dtrtrw = 1;
+ } else {
+ dtrtrw = start;
+ }
+ dlaed0_(&icompz, n, &m, &d__[start], &e[start], &z__[dtrtrw +
+ start * z_dim1], ldz, &work[1], n, &work[storez], &iwork[
+ 1], info);
+ if (*info != 0) {
+ *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info % (m
+ + 1) + start - 1;
+ return 0;
+ }
+
+/* Scale back. */
+
+ dlascl_("G", &c__0, &c__0, &c_b2865, &orgnrm, &m, &c__1, &d__[
+ start], &m, info);
+
+ } else {
+ if (icompz == 1) {
+
+/*
+ Since QR won't update a Z matrix which is larger than the
+ length of D, we must solve the sub-problem in a workspace and
+ then multiply back into Z.
+*/
+
+ dsteqr_("I", &m, &d__[start], &e[start], &work[1], &m, &work[
+ m * m + 1], info);
+ dlacpy_("A", n, &m, &z__[start * z_dim1 + 1], ldz, &work[
+ storez], n);
+                dgemm_("N", "N", n, &m, &m, &c_b2865, &work[storez], n, &
+ work[1], &m, &c_b2879, &z__[start * z_dim1 + 1], ldz);
+ } else if (icompz == 2) {
+ dsteqr_("I", &m, &d__[start], &e[start], &z__[start + start *
+ z_dim1], ldz, &work[1], info);
+ } else {
+ dsterf_(&m, &d__[start], &e[start], info);
+ }
+ if (*info != 0) {
+ *info = start * (*n + 1) + end;
+ return 0;
+ }
+ }
+
+ start = end + 1;
+ goto L10;
+ }
+
+/*
+ endwhile
+
+ If the problem split any number of times, then the eigenvalues
+ will not be properly ordered. Here we permute the eigenvalues
+ (and the associated eigenvectors) into ascending order.
+*/
+
+ if (m != *n) {
+ if (icompz == 0) {
+
+/* Use Quick Sort */
+
+ dlasrt_("I", n, &d__[1], info);
+
+ } else {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L30: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1
+ + 1], &c__1);
+ }
+/* L40: */
+ }
+ }
+ }
+
+ work[1] = (doublereal) lwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of DSTEDC */
+
+} /* dstedc_ */
+
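+/*
+   The LWORK = -1 / LIWORK = -1 convention documented above sizes the
+   work arrays at run time instead of evaluating the lg(N) formulas by
+   hand. A standalone sketch (f2c typedefs assumed; tridiag_eig is an
+   illustrative name, not part of this file):
+*/
+
+#include <stdlib.h>
+
+typedef int integer;                /* assumption: match f2c.h */
+typedef double doublereal;
+extern int dstedc_(char *, integer *, doublereal *, doublereal *,
+                   doublereal *, integer *, doublereal *, integer *,
+                   integer *, integer *, integer *);
+
+/* Eigenvalues and eigenvectors of a symmetric tridiagonal matrix:
+   d holds the diagonal, e the subdiagonal; z (column-major n x n)
+   receives the eigenvectors. Returns the LAPACK-style info code. */
+static int tridiag_eig(integer n, doublereal *d, doublereal *e,
+                       doublereal *z, integer ldz)
+{
+    doublereal wkopt;
+    integer iwkopt, lwork = -1, liwork = -1, info;
+
+    /* Workspace query: only wkopt and iwkopt are written. */
+    dstedc_("I", &n, d, e, z, &ldz, &wkopt, &lwork, &iwkopt, &liwork,
+            &info);
+    if (info != 0)
+        return (int) info;
+
+    lwork = (integer) wkopt;
+    liwork = iwkopt;
+    doublereal *work = malloc(lwork * sizeof *work);
+    integer *iwork = malloc(liwork * sizeof *iwork);
+    if (work == NULL || iwork == NULL) {
+        free(work);
+        free(iwork);
+        return -1;                      /* allocation failure (ad hoc code) */
+    }
+
+    dstedc_("I", &n, d, e, z, &ldz, work, &lwork, iwork, &liwork, &info);
+    free(work);
+    free(iwork);
+    return (int) info;
+}
+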
+/* Subroutine */ int dsteqr_(char *compz, integer *n, doublereal *d__,
+ doublereal *e, doublereal *z__, integer *ldz, doublereal *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ doublereal d__1, d__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static doublereal b, c__, f, g;
+ static integer i__, j, k, l, m;
+ static doublereal p, r__, s;
+ static integer l1, ii, mm, lm1, mm1, nm1;
+ static doublereal rt1, rt2, eps;
+ static integer lsv;
+ static doublereal tst, eps2;
+ static integer lend, jtot;
+ extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dlasr_(char *, char *, char *, integer *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *);
+ static doublereal anorm;
+ extern /* Subroutine */ int dswap_(integer *, doublereal *, integer *,
+ doublereal *, integer *), dlaev2_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *, doublereal *,
+ doublereal *);
+ static integer lendm1, lendp1;
+
+ static integer iscale;
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dlaset_(char *, integer *, integer
+ *, doublereal *, doublereal *, doublereal *, integer *);
+ static doublereal safmin;
+ extern /* Subroutine */ int dlartg_(doublereal *, doublereal *,
+ doublereal *, doublereal *, doublereal *);
+ static doublereal safmax;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+ integer *);
+ static integer lendsv;
+ static doublereal ssfmin;
+ static integer nmaxit, icompz;
+ static doublereal ssfmax;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ DSTEQR computes all eigenvalues and, optionally, eigenvectors of a
+ symmetric tridiagonal matrix using the implicit QL or QR method.
+ The eigenvectors of a full or band symmetric matrix can also be found
+ if DSYTRD or DSPTRD or DSBTRD has been used to reduce this matrix to
+ tridiagonal form.
+
+ Arguments
+ =========
+
+ COMPZ (input) CHARACTER*1
+ = 'N': Compute eigenvalues only.
+ = 'V': Compute eigenvalues and eigenvectors of the original
+ symmetric matrix. On entry, Z must contain the
+ orthogonal matrix used to reduce the original matrix
+ to tridiagonal form.
+ = 'I': Compute eigenvalues and eigenvectors of the
+ tridiagonal matrix. Z is initialized to the identity
+ matrix.
+
+ N (input) INTEGER
+ The order of the matrix. N >= 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the diagonal elements of the tridiagonal matrix.
+ On exit, if INFO = 0, the eigenvalues in ascending order.
+
+ E (input/output) DOUBLE PRECISION array, dimension (N-1)
+ On entry, the (n-1) subdiagonal elements of the tridiagonal
+ matrix.
+ On exit, E has been destroyed.
+
+ Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N)
+ On entry, if COMPZ = 'V', then Z contains the orthogonal
+ matrix used in the reduction to tridiagonal form.
+ On exit, if INFO = 0, then if COMPZ = 'V', Z contains the
+ orthonormal eigenvectors of the original symmetric matrix,
+ and if COMPZ = 'I', Z contains the orthonormal eigenvectors
+ of the symmetric tridiagonal matrix.
+ If COMPZ = 'N', then Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= 1, and if
+ eigenvectors are desired, then LDZ >= max(1,N).
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (max(1,2*N-2))
+ If COMPZ = 'N', then WORK is not referenced.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: the algorithm has failed to find all the eigenvalues in
+ a total of 30*N iterations; if INFO = i, then i
+ elements of E have not converged to zero; on exit, D
+ and E contain the elements of a symmetric tridiagonal
+ matrix which is orthogonally similar to the original
+ matrix.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if ((*ldz < 1) || (icompz > 0 && *ldz < max(1,*n))) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSTEQR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (icompz == 2) {
+ z__[z_dim1 + 1] = 1.;
+ }
+ return 0;
+ }
+
+/* Determine the unit roundoff and over/underflow thresholds. */
+
+ eps = EPSILON;
+/* Computing 2nd power */
+ d__1 = eps;
+ eps2 = d__1 * d__1;
+ safmin = SAFEMINIMUM;
+ safmax = 1. / safmin;
+ ssfmax = sqrt(safmax) / 3.;
+ ssfmin = sqrt(safmin) / eps2;
+
+/*
+ Compute the eigenvalues and eigenvectors of the tridiagonal
+ matrix.
+*/
+
+ if (icompz == 2) {
+ dlaset_("Full", n, n, &c_b2879, &c_b2865, &z__[z_offset], ldz);
+ }
+
+ nmaxit = *n * 30;
+ jtot = 0;
+
+/*
+ Determine where the matrix splits and choose QL or QR iteration
+ for each block, according to whether top or bottom diagonal
+ element is smaller.
+*/
+
+ l1 = 1;
+ nm1 = *n - 1;
+
+L10:
+ if (l1 > *n) {
+ goto L160;
+ }
+ if (l1 > 1) {
+ e[l1 - 1] = 0.;
+ }
+ if (l1 <= nm1) {
+ i__1 = nm1;
+ for (m = l1; m <= i__1; ++m) {
+ tst = (d__1 = e[m], abs(d__1));
+ if (tst == 0.) {
+ goto L30;
+ }
+ if (tst <= sqrt((d__1 = d__[m], abs(d__1))) * sqrt((d__2 = d__[m
+ + 1], abs(d__2))) * eps) {
+ e[m] = 0.;
+ goto L30;
+ }
+/* L20: */
+ }
+ }
+ m = *n;
+
+L30:
+ l = l1;
+ lsv = l;
+ lend = m;
+ lendsv = lend;
+ l1 = m + 1;
+ if (lend == l) {
+ goto L10;
+ }
+
+/* Scale submatrix in rows and columns L to LEND */
+
+ i__1 = lend - l + 1;
+ anorm = dlanst_("I", &i__1, &d__[l], &e[l]);
+ iscale = 0;
+ if (anorm == 0.) {
+ goto L10;
+ }
+ if (anorm > ssfmax) {
+ iscale = 1;
+ i__1 = lend - l + 1;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+ info);
+ } else if (anorm < ssfmin) {
+ iscale = 2;
+ i__1 = lend - l + 1;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+ info);
+ }
+
+/* Choose between QL and QR iteration */
+
+ if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) {
+ lend = lsv;
+ l = lendsv;
+ }
+
+ if (lend > l) {
+
+/*
+ QL Iteration
+
+ Look for small subdiagonal element.
+*/
+
+L40:
+ if (l != lend) {
+ lendm1 = lend - 1;
+ i__1 = lendm1;
+ for (m = l; m <= i__1; ++m) {
+/* Computing 2nd power */
+ d__2 = (d__1 = e[m], abs(d__1));
+ tst = d__2 * d__2;
+ if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
+ + 1], abs(d__2)) + safmin) {
+ goto L60;
+ }
+/* L50: */
+ }
+ }
+
+ m = lend;
+
+L60:
+ if (m < lend) {
+ e[m] = 0.;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L80;
+ }
+
+/*
+       If remaining matrix is 2-by-2, use DLAE2 or DLAEV2
+ to compute its eigensystem.
+*/
+
+ if (m == l + 1) {
+ if (icompz > 0) {
+ dlaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
+ work[l] = c__;
+ work[*n - 1 + l] = s;
+ dlasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
+ z__[l * z_dim1 + 1], ldz);
+ } else {
+ dlae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
+ }
+ d__[l] = rt1;
+ d__[l + 1] = rt2;
+ e[l] = 0.;
+ l += 2;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l + 1] - p) / (e[l] * 2.);
+ r__ = dlapy2_(&g, &c_b2865);
+ g = d__[m] - p + e[l] / (g + d_sign(&r__, &g));
+
+ s = 1.;
+ c__ = 1.;
+ p = 0.;
+
+/* Inner loop */
+
+ mm1 = m - 1;
+ i__1 = l;
+ for (i__ = mm1; i__ >= i__1; --i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ dlartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m - 1) {
+ e[i__ + 1] = r__;
+ }
+ g = d__[i__ + 1] - p;
+ r__ = (d__[i__] - g) * s + c__ * 2. * b;
+ p = s * r__;
+ d__[i__ + 1] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = -s;
+ }
+
+/* L70: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = m - l + 1;
+ dlasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[l] = g;
+ goto L40;
+
+/* Eigenvalue found. */
+
+L80:
+ d__[l] = p;
+
+ ++l;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+
+ } else {
+
+/*
+ QR Iteration
+
+ Look for small superdiagonal element.
+*/
+
+L90:
+ if (l != lend) {
+ lendp1 = lend + 1;
+ i__1 = lendp1;
+ for (m = l; m >= i__1; --m) {
+/* Computing 2nd power */
+ d__2 = (d__1 = e[m - 1], abs(d__1));
+ tst = d__2 * d__2;
+ if (tst <= eps2 * (d__1 = d__[m], abs(d__1)) * (d__2 = d__[m
+ - 1], abs(d__2)) + safmin) {
+ goto L110;
+ }
+/* L100: */
+ }
+ }
+
+ m = lend;
+
+L110:
+ if (m > lend) {
+ e[m - 1] = 0.;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L130;
+ }
+
+/*
+       If remaining matrix is 2-by-2, use DLAE2 or DLAEV2
+ to compute its eigensystem.
+*/
+
+ if (m == l - 1) {
+ if (icompz > 0) {
+ dlaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s)
+ ;
+ work[m] = c__;
+ work[*n - 1 + m] = s;
+ dlasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
+ z__[(l - 1) * z_dim1 + 1], ldz);
+ } else {
+ dlae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
+ }
+ d__[l - 1] = rt1;
+ d__[l] = rt2;
+ e[l - 1] = 0.;
+ l += -2;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l - 1] - p) / (e[l - 1] * 2.);
+ r__ = dlapy2_(&g, &c_b2865);
+ g = d__[m] - p + e[l - 1] / (g + d_sign(&r__, &g));
+
+ s = 1.;
+ c__ = 1.;
+ p = 0.;
+
+/* Inner loop */
+
+ lm1 = l - 1;
+ i__1 = lm1;
+ for (i__ = m; i__ <= i__1; ++i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ dlartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m) {
+ e[i__ - 1] = r__;
+ }
+ g = d__[i__] - p;
+ r__ = (d__[i__ + 1] - g) * s + c__ * 2. * b;
+ p = s * r__;
+ d__[i__] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = s;
+ }
+
+/* L120: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = l - m + 1;
+ dlasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[lm1] = g;
+ goto L90;
+
+/* Eigenvalue found. */
+
+L130:
+ d__[l] = p;
+
+ --l;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+
+ }
+
+/* Undo scaling if necessary */
+
+L140:
+ if (iscale == 1) {
+ i__1 = lendsv - lsv + 1;
+ dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ } else if (iscale == 2) {
+ i__1 = lendsv - lsv + 1;
+ dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ }
+
+/*
+ Check for no convergence to an eigenvalue after a total
+ of N*MAXIT iterations.
+*/
+
+ if (jtot < nmaxit) {
+ goto L10;
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.) {
+ ++(*info);
+ }
+/* L150: */
+ }
+ goto L190;
+
+/* Order eigenvalues and eigenvectors. */
+
+L160:
+ if (icompz == 0) {
+
+/* Use Quick Sort */
+
+ dlasrt_("I", n, &d__[1], info);
+
+ } else {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L170: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ dswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
+ &c__1);
+ }
+/* L180: */
+ }
+ }
+
+L190:
+ return 0;
+
+/* End of DSTEQR */
+
+} /* dsteqr_ */
+
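+/*
+   The 2-by-2 deflation step in the loop above rests on the closed form
+   for the eigenvalues of a symmetric 2 x 2 matrix, which DLAE2/DLAEV2
+   evaluate with careful scaling. Ignoring that scaling, the underlying
+   formula is just the following; eig2x2 is an illustration, not a
+   substitute for dlae2_/dlaev2_.
+*/
+
+#include <math.h>
+
+/* Eigenvalues of [ a  b ; b  c ]: (a+c)/2 +- sqrt(((a-c)/2)^2 + b^2).
+   rt1 receives the algebraically larger eigenvalue (DLAE2 instead
+   orders the two by absolute value). */
+static void eig2x2(double a, double b, double c, double *rt1, double *rt2)
+{
+    double mid = 0.5 * (a + c);
+    double rad = hypot(0.5 * (a - c), b);   /* like DLAPY2, avoids overflow */
+    *rt1 = mid + rad;
+    *rt2 = mid - rad;
+}
+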
+/* Subroutine */ int dsterf_(integer *n, doublereal *d__, doublereal *e,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ doublereal d__1, d__2, d__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal), d_sign(doublereal *, doublereal *);
+
+ /* Local variables */
+ static doublereal c__;
+ static integer i__, l, m;
+ static doublereal p, r__, s;
+ static integer l1;
+ static doublereal bb, rt1, rt2, eps, rte;
+ static integer lsv;
+ static doublereal eps2, oldc;
+ static integer lend, jtot;
+ extern /* Subroutine */ int dlae2_(doublereal *, doublereal *, doublereal
+ *, doublereal *, doublereal *);
+ static doublereal gamma, alpha, sigma, anorm;
+
+ static integer iscale;
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *);
+ static doublereal oldgam, safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static doublereal safmax;
+ extern doublereal dlanst_(char *, integer *, doublereal *, doublereal *);
+ extern /* Subroutine */ int dlasrt_(char *, integer *, doublereal *,
+ integer *);
+ static integer lendsv;
+ static doublereal ssfmin;
+ static integer nmaxit;
+ static doublereal ssfmax;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DSTERF computes all eigenvalues of a symmetric tridiagonal matrix
+ using the Pal-Walker-Kahan variant of the QL or QR algorithm.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix. N >= 0.
+
+ D (input/output) DOUBLE PRECISION array, dimension (N)
+ On entry, the n diagonal elements of the tridiagonal matrix.
+ On exit, if INFO = 0, the eigenvalues in ascending order.
+
+ E (input/output) DOUBLE PRECISION array, dimension (N-1)
+ On entry, the (n-1) subdiagonal elements of the tridiagonal
+ matrix.
+ On exit, E has been destroyed.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: the algorithm failed to find all of the eigenvalues in
+ a total of 30*N iterations; if INFO = i, then i
+ elements of E have not converged to zero.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n < 0) {
+ *info = -1;
+ i__1 = -(*info);
+ xerbla_("DSTERF", &i__1);
+ return 0;
+ }
+ if (*n <= 1) {
+ return 0;
+ }
+
+/* Determine the unit roundoff for this environment. */
+
+ eps = EPSILON;
+/* Computing 2nd power */
+ d__1 = eps;
+ eps2 = d__1 * d__1;
+ safmin = SAFEMINIMUM;
+ safmax = 1. / safmin;
+ ssfmax = sqrt(safmax) / 3.;
+ ssfmin = sqrt(safmin) / eps2;
+
+/* Compute the eigenvalues of the tridiagonal matrix. */
+
+ nmaxit = *n * 30;
+ sigma = 0.;
+ jtot = 0;
+
+/*
+ Determine where the matrix splits and choose QL or QR iteration
+ for each block, according to whether top or bottom diagonal
+ element is smaller.
+*/
+
+ l1 = 1;
+
+L10:
+ if (l1 > *n) {
+ goto L170;
+ }
+ if (l1 > 1) {
+ e[l1 - 1] = 0.;
+ }
+ i__1 = *n - 1;
+ for (m = l1; m <= i__1; ++m) {
+ if ((d__3 = e[m], abs(d__3)) <= sqrt((d__1 = d__[m], abs(d__1))) *
+ sqrt((d__2 = d__[m + 1], abs(d__2))) * eps) {
+ e[m] = 0.;
+ goto L30;
+ }
+/* L20: */
+ }
+ m = *n;
+
+L30:
+ l = l1;
+ lsv = l;
+ lend = m;
+ lendsv = lend;
+ l1 = m + 1;
+ if (lend == l) {
+ goto L10;
+ }
+
+/* Scale submatrix in rows and columns L to LEND */
+
+ i__1 = lend - l + 1;
+ anorm = dlanst_("I", &i__1, &d__[l], &e[l]);
+ iscale = 0;
+ if (anorm > ssfmax) {
+ iscale = 1;
+ i__1 = lend - l + 1;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+ info);
+ } else if (anorm < ssfmin) {
+ iscale = 2;
+ i__1 = lend - l + 1;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ dlascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+ info);
+ }
+
+ i__1 = lend - 1;
+ for (i__ = l; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+ d__1 = e[i__];
+ e[i__] = d__1 * d__1;
+/* L40: */
+ }
+
+/* Choose between QL and QR iteration */
+
+ if ((d__1 = d__[lend], abs(d__1)) < (d__2 = d__[l], abs(d__2))) {
+ lend = lsv;
+ l = lendsv;
+ }
+
+ if (lend >= l) {
+
+/*
+ QL Iteration
+
+ Look for small subdiagonal element.
+*/
+
+L50:
+ if (l != lend) {
+ i__1 = lend - 1;
+ for (m = l; m <= i__1; ++m) {
+ if ((d__2 = e[m], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m
+ + 1], abs(d__1))) {
+ goto L70;
+ }
+/* L60: */
+ }
+ }
+ m = lend;
+
+L70:
+ if (m < lend) {
+ e[m] = 0.;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L90;
+ }
+
+/*
+ If remaining matrix is 2 by 2, use DLAE2 to compute its
+ eigenvalues.
+*/
+
+ if (m == l + 1) {
+ rte = sqrt(e[l]);
+ dlae2_(&d__[l], &rte, &d__[l + 1], &rt1, &rt2);
+ d__[l] = rt1;
+ d__[l + 1] = rt2;
+ e[l] = 0.;
+ l += 2;
+ if (l <= lend) {
+ goto L50;
+ }
+ goto L150;
+ }
+
+ if (jtot == nmaxit) {
+ goto L150;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ rte = sqrt(e[l]);
+ sigma = (d__[l + 1] - p) / (rte * 2.);
+ r__ = dlapy2_(&sigma, &c_b2865);
+ sigma = p - rte / (sigma + d_sign(&r__, &sigma));
+
+ c__ = 1.;
+ s = 0.;
+ gamma = d__[m] - sigma;
+ p = gamma * gamma;
+
+/* Inner loop */
+
+ i__1 = l;
+ for (i__ = m - 1; i__ >= i__1; --i__) {
+ bb = e[i__];
+ r__ = p + bb;
+ if (i__ != m - 1) {
+ e[i__ + 1] = s * r__;
+ }
+ oldc = c__;
+ c__ = p / r__;
+ s = bb / r__;
+ oldgam = gamma;
+ alpha = d__[i__];
+ gamma = c__ * (alpha - sigma) - s * oldgam;
+ d__[i__ + 1] = oldgam + (alpha - gamma);
+ if (c__ != 0.) {
+ p = gamma * gamma / c__;
+ } else {
+ p = oldc * bb;
+ }
+/* L80: */
+ }
+
+ e[l] = s * p;
+ d__[l] = sigma + gamma;
+ goto L50;
+
+/* Eigenvalue found. */
+
+L90:
+ d__[l] = p;
+
+ ++l;
+ if (l <= lend) {
+ goto L50;
+ }
+ goto L150;
+
+ } else {
+
+/*
+ QR Iteration
+
+ Look for small superdiagonal element.
+*/
+
+L100:
+ i__1 = lend + 1;
+ for (m = l; m >= i__1; --m) {
+ if ((d__2 = e[m - 1], abs(d__2)) <= eps2 * (d__1 = d__[m] * d__[m
+ - 1], abs(d__1))) {
+ goto L120;
+ }
+/* L110: */
+ }
+ m = lend;
+
+L120:
+ if (m > lend) {
+ e[m - 1] = 0.;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L140;
+ }
+
+/*
+ If remaining matrix is 2 by 2, use DLAE2 to compute its
+ eigenvalues.
+*/
+
+ if (m == l - 1) {
+ rte = sqrt(e[l - 1]);
+ dlae2_(&d__[l], &rte, &d__[l - 1], &rt1, &rt2);
+ d__[l] = rt1;
+ d__[l - 1] = rt2;
+ e[l - 1] = 0.;
+ l += -2;
+ if (l >= lend) {
+ goto L100;
+ }
+ goto L150;
+ }
+
+ if (jtot == nmaxit) {
+ goto L150;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ rte = sqrt(e[l - 1]);
+ sigma = (d__[l - 1] - p) / (rte * 2.);
+ r__ = dlapy2_(&sigma, &c_b2865);
+ sigma = p - rte / (sigma + d_sign(&r__, &sigma));
+
+ c__ = 1.;
+ s = 0.;
+ gamma = d__[m] - sigma;
+ p = gamma * gamma;
+
+/* Inner loop */
+
+ i__1 = l - 1;
+ for (i__ = m; i__ <= i__1; ++i__) {
+ bb = e[i__];
+ r__ = p + bb;
+ if (i__ != m) {
+ e[i__ - 1] = s * r__;
+ }
+ oldc = c__;
+ c__ = p / r__;
+ s = bb / r__;
+ oldgam = gamma;
+ alpha = d__[i__ + 1];
+ gamma = c__ * (alpha - sigma) - s * oldgam;
+ d__[i__] = oldgam + (alpha - gamma);
+ if (c__ != 0.) {
+ p = gamma * gamma / c__;
+ } else {
+ p = oldc * bb;
+ }
+/* L130: */
+ }
+
+ e[l - 1] = s * p;
+ d__[l] = sigma + gamma;
+ goto L100;
+
+/* Eigenvalue found. */
+
+L140:
+ d__[l] = p;
+
+ --l;
+ if (l >= lend) {
+ goto L100;
+ }
+ goto L150;
+
+ }
+
+/* Undo scaling if necessary */
+
+L150:
+ if (iscale == 1) {
+ i__1 = lendsv - lsv + 1;
+ dlascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ }
+ if (iscale == 2) {
+ i__1 = lendsv - lsv + 1;
+ dlascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ }
+
+/*
+ Check for no convergence to an eigenvalue after a total
+ of N*MAXIT iterations.
+*/
+
+ if (jtot < nmaxit) {
+ goto L10;
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.) {
+ ++(*info);
+ }
+/* L160: */
+ }
+ goto L180;
+
+/* Sort eigenvalues in increasing order. */
+
+L170:
+ dlasrt_("I", n, &d__[1], info);
+
+L180:
+ return 0;
+
+/* End of DSTERF */
+
+} /* dsterf_ */
+
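+/*
+   dsterf_ has the simplest interface of the routines in this hunk. A
+   standalone illustrative call (f2c typedefs assumed; an arbitrary
+   3 x 3 tridiagonal matrix with a known spectrum):
+*/
+
+typedef int integer;                /* assumption: match f2c.h */
+typedef double doublereal;
+extern int dsterf_(integer *, doublereal *, doublereal *, integer *);
+
+static int sterf_example(void)
+{
+    doublereal d[3] = { 2., 2., 2. };   /* diagonal */
+    doublereal e[2] = { 1., 1. };       /* subdiagonal */
+    integer n = 3, info;
+
+    dsterf_(&n, d, e, &info);
+    /* On success d holds the eigenvalues in ascending order; for this
+       matrix they are 2 - sqrt(2), 2, 2 + sqrt(2). */
+    return (int) info;
+}
+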
+/* Subroutine */ int dsyevd_(char *jobz, char *uplo, integer *n, doublereal *
+ a, integer *lda, doublereal *w, doublereal *work, integer *lwork,
+ integer *iwork, integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static doublereal eps;
+ static integer inde;
+ static doublereal anrm, rmin, rmax;
+ static integer lopt;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ static doublereal sigma;
+ extern logical lsame_(char *, char *);
+ static integer iinfo, lwmin, liopt;
+ static logical lower, wantz;
+ static integer indwk2, llwrk2;
+
+ static integer iscale;
+ extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, integer *, doublereal *,
+ integer *, integer *), dstedc_(char *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ integer *, integer *, integer *, integer *), dlacpy_(
+ char *, integer *, integer *, doublereal *, integer *, doublereal
+ *, integer *);
+ static doublereal safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static doublereal bignum;
+ static integer indtau;
+ extern /* Subroutine */ int dsterf_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern doublereal dlansy_(char *, char *, integer *, doublereal *,
+ integer *, doublereal *);
+ static integer indwrk, liwmin;
+ extern /* Subroutine */ int dormtr_(char *, char *, char *, integer *,
+ integer *, doublereal *, integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *, integer *), dsytrd_(char *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, doublereal *, doublereal *, integer *,
+ integer *);
+ static integer llwork;
+ static doublereal smlnum;
+ static logical lquery;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DSYEVD computes all eigenvalues and, optionally, eigenvectors of a
+ real symmetric matrix A. If eigenvectors are desired, it uses a
+ divide and conquer algorithm.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+    Because it makes heavy use of Level 3 BLAS, DSYEVD needs N**2 more
+    workspace than DSYEVX.
+
+ Arguments
+ =========
+
+ JOBZ (input) CHARACTER*1
+ = 'N': Compute eigenvalues only;
+ = 'V': Compute eigenvalues and eigenvectors.
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA, N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the
+ leading N-by-N upper triangular part of A contains the
+ upper triangular part of the matrix A. If UPLO = 'L',
+ the leading N-by-N lower triangular part of A contains
+ the lower triangular part of the matrix A.
+ On exit, if JOBZ = 'V', then if INFO = 0, A contains the
+ orthonormal eigenvectors of the matrix A.
+ If JOBZ = 'N', then on exit the lower triangle (if UPLO='L')
+ or the upper triangle (if UPLO='U') of A, including the
+ diagonal, is destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ W (output) DOUBLE PRECISION array, dimension (N)
+ If INFO = 0, the eigenvalues in ascending order.
+
+ WORK (workspace/output) DOUBLE PRECISION array,
+ dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If N <= 1, LWORK must be at least 1.
+ If JOBZ = 'N' and N > 1, LWORK must be at least 2*N+1.
+ If JOBZ = 'V' and N > 1, LWORK must be at least
+ 1 + 6*N + 2*N**2.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ IWORK (workspace/output) INTEGER array, dimension (LIWORK)
+ On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
+
+ LIWORK (input) INTEGER
+ The dimension of the array IWORK.
+ If N <= 1, LIWORK must be at least 1.
+ If JOBZ = 'N' and N > 1, LIWORK must be at least 1.
+ If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N.
+
+ If LIWORK = -1, then a workspace query is assumed; the
+ routine only calculates the optimal size of the IWORK array,
+ returns this value as the first entry of the IWORK array, and
+ no error message related to LIWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, the algorithm failed to converge; i
+ off-diagonal elements of an intermediate tridiagonal
+ form did not converge to zero.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+ Modified by Francoise Tisseur, University of Tennessee.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lower = lsame_(uplo, "L");
+ lquery = (*lwork == -1) || (*liwork == -1);
+
+ *info = 0;
+ if (*n <= 1) {
+ liwmin = 1;
+ lwmin = 1;
+ lopt = lwmin;
+ liopt = liwmin;
+ } else {
+ if (wantz) {
+ liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = *n * 6 + 1 + ((i__1 * i__1) << (1));
+ } else {
+ liwmin = 1;
+ lwmin = ((*n) << (1)) + 1;
+ }
+ lopt = lwmin;
+ liopt = liwmin;
+ }
+ if (! ((wantz) || (lsame_(jobz, "N")))) {
+ *info = -1;
+ } else if (! ((lower) || (lsame_(uplo, "U")))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -8;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ work[1] = (doublereal) lopt;
+ iwork[1] = liopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYEVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ w[1] = a[a_dim1 + 1];
+ if (wantz) {
+ a[a_dim1 + 1] = 1.;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = SAFEMINIMUM;
+ eps = PRECISION;
+ smlnum = safmin / eps;
+ bignum = 1. / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ anrm = dlansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
+ iscale = 0;
+ if (anrm > 0. && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ dlascl_(uplo, &c__0, &c__0, &c_b2865, &sigma, n, n, &a[a_offset], lda,
+ info);
+ }
+
+/* Call DSYTRD to reduce symmetric matrix to tridiagonal form. */
+
+ inde = 1;
+ indtau = inde + *n;
+ indwrk = indtau + *n;
+ llwork = *lwork - indwrk + 1;
+ indwk2 = indwrk + *n * *n;
+ llwrk2 = *lwork - indwk2 + 1;
+
+ dsytrd_(uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], &
+ work[indwrk], &llwork, &iinfo);
+ lopt = (integer) (((*n) << (1)) + work[indwrk]);
+
+/*
+ For eigenvalues only, call DSTERF. For eigenvectors, first call
+ DSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the
+ tridiagonal matrix, then call DORMTR to multiply it by the
+ Householder transformations stored in A.
+*/
+
+ if (! wantz) {
+ dsterf_(n, &w[1], &work[inde], info);
+ } else {
+ dstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], &
+ llwrk2, &iwork[1], liwork, info);
+ dormtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[
+ indwrk], n, &work[indwk2], &llwrk2, &iinfo);
+ dlacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda);
+/*
+ Computing MAX
+ Computing 2nd power
+*/
+ i__3 = *n;
+ i__1 = lopt, i__2 = *n * 6 + 1 + ((i__3 * i__3) << (1));
+ lopt = max(i__1,i__2);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ d__1 = 1. / sigma;
+ dscal_(n, &d__1, &w[1], &c__1);
+ }
+
+ work[1] = (doublereal) lopt;
+ iwork[1] = liopt;
+
+ return 0;
+
+/* End of DSYEVD */
+
+} /* dsyevd_ */
+
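+/*
+   A usage sketch for the driver, again leaning on the LWORK = -1 query
+   rather than the closed-form minimum (f2c typedefs assumed; sym_eig is
+   an illustrative name and error handling is trimmed to essentials).
+*/
+
+#include <stdlib.h>
+
+typedef int integer;                /* assumption: match f2c.h */
+typedef double doublereal;
+extern int dsyevd_(char *, char *, integer *, doublereal *, integer *,
+                   doublereal *, doublereal *, integer *, integer *,
+                   integer *, integer *);
+
+/* All eigenvalues (w) and eigenvectors (overwriting a, column-major)
+   of a symmetric matrix stored in the lower triangle of a. */
+static int sym_eig(integer n, doublereal *a, integer lda, doublereal *w)
+{
+    doublereal wkopt;
+    integer iwkopt, lwork = -1, liwork = -1, info;
+
+    dsyevd_("V", "L", &n, a, &lda, w, &wkopt, &lwork, &iwkopt, &liwork,
+            &info);
+    if (info != 0)
+        return (int) info;
+
+    lwork = (integer) wkopt;
+    liwork = iwkopt;
+    doublereal *work = malloc(lwork * sizeof *work);
+    integer *iwork = malloc(liwork * sizeof *iwork);
+    if (work == NULL || iwork == NULL) {
+        free(work);
+        free(iwork);
+        return -1;                      /* allocation failure (ad hoc code) */
+    }
+
+    dsyevd_("V", "L", &n, a, &lda, w, work, &lwork, iwork, &liwork, &info);
+    free(work);
+    free(iwork);
+    return (int) info;
+}
+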
+/* Subroutine */ int dsytd2_(char *uplo, integer *n, doublereal *a, integer *
+ lda, doublereal *d__, doublereal *e, doublereal *tau, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ static doublereal taui;
+ extern /* Subroutine */ int dsyr2_(char *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ static doublereal alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int dsymv_(char *, integer *, doublereal *,
+ doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlarfg_(integer *, doublereal *,
+ doublereal *, integer *, doublereal *), xerbla_(char *, integer *
+ );
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ DSYTD2 reduces a real symmetric matrix A to symmetric tridiagonal
+ form T by an orthogonal similarity transformation: Q' * A * Q = T.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the upper or lower triangular part of the
+ symmetric matrix A is stored:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ n-by-n upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n-by-n lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+ On exit, if UPLO = 'U', the diagonal and first superdiagonal
+ of A are overwritten by the corresponding elements of the
+ tridiagonal matrix T, and the elements above the first
+ superdiagonal, with the array TAU, represent the orthogonal
+ matrix Q as a product of elementary reflectors; if UPLO
+ = 'L', the diagonal and first subdiagonal of A are over-
+ written by the corresponding elements of the tridiagonal
+ matrix T, and the elements below the first subdiagonal, with
+ the array TAU, represent the orthogonal matrix Q as a product
+ of elementary reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ D (output) DOUBLE PRECISION array, dimension (N)
+ The diagonal elements of the tridiagonal matrix T:
+ D(i) = A(i,i).
+
+ E (output) DOUBLE PRECISION array, dimension (N-1)
+ The off-diagonal elements of the tridiagonal matrix T:
+ E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
+
+ TAU (output) DOUBLE PRECISION array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ If UPLO = 'U', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(n-1) . . . H(2) H(1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
+ A(1:i-1,i+1), and tau in TAU(i).
+
+ If UPLO = 'L', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(1) H(2) . . . H(n-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
+ and tau in TAU(i).
+
+ The contents of A on exit are illustrated by the following examples
+ with n = 5:
+
+ if UPLO = 'U': if UPLO = 'L':
+
+ ( d e v2 v3 v4 ) ( d )
+ ( d e v3 v4 ) ( e d )
+ ( d e v4 ) ( v1 e d )
+ ( d e ) ( v1 v2 e d )
+ ( d ) ( v1 v2 v3 e d )
+
+ where d and e denote diagonal and off-diagonal elements of T, and vi
+ denotes an element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYTD2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Reduce the upper triangle of A */
+
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/*
+ Generate elementary reflector H(i) = I - tau * v * v'
+ to annihilate A(1:i-1,i+1)
+*/
+
+ dlarfg_(&i__, &a[i__ + (i__ + 1) * a_dim1], &a[(i__ + 1) * a_dim1
+ + 1], &c__1, &taui);
+ e[i__] = a[i__ + (i__ + 1) * a_dim1];
+
+ if (taui != 0.) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+ a[i__ + (i__ + 1) * a_dim1] = 1.;
+
+/* Compute x := tau * A * v storing x in TAU(1:i) */
+
+ dsymv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) *
+ a_dim1 + 1], &c__1, &c_b2879, &tau[1], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+ alpha = taui * -.5 * ddot_(&i__, &tau[1], &c__1, &a[(i__ + 1)
+ * a_dim1 + 1], &c__1);
+ daxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[
+ 1], &c__1);
+
+/*
+ Apply the transformation as a rank-2 update:
+ A := A - v * w' - w * v'
+*/
+
+ dsyr2_(uplo, &i__, &c_b3001, &a[(i__ + 1) * a_dim1 + 1], &
+ c__1, &tau[1], &c__1, &a[a_offset], lda);
+
+ a[i__ + (i__ + 1) * a_dim1] = e[i__];
+ }
+ d__[i__ + 1] = a[i__ + 1 + (i__ + 1) * a_dim1];
+ tau[i__] = taui;
+/* L10: */
+ }
+ d__[1] = a[a_dim1 + 1];
+ } else {
+
+/* Reduce the lower triangle of A */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+ Generate elementary reflector H(i) = I - tau * v * v'
+ to annihilate A(i+2:n,i)
+*/
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ dlarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ *
+ a_dim1], &c__1, &taui);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+
+ if (taui != 0.) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+ a[i__ + 1 + i__ * a_dim1] = 1.;
+
+/* Compute x := tau * A * v, storing x in TAU(i:n-1) */
+
+ i__2 = *n - i__;
+ dsymv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b2879, &
+ tau[i__], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+ i__2 = *n - i__;
+ alpha = taui * -.5 * ddot_(&i__2, &tau[i__], &c__1, &a[i__ +
+ 1 + i__ * a_dim1], &c__1);
+ i__2 = *n - i__;
+ daxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+ i__], &c__1);
+
+/*
+ Apply the transformation as a rank-2 update:
+ A := A - v * w' - w * v'
+*/
+
+ i__2 = *n - i__;
+ dsyr2_(uplo, &i__2, &c_b3001, &a[i__ + 1 + i__ * a_dim1], &
+ c__1, &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) *
+ a_dim1], lda);
+
+ a[i__ + 1 + i__ * a_dim1] = e[i__];
+ }
+ d__[i__] = a[i__ + i__ * a_dim1];
+ tau[i__] = taui;
+/* L20: */
+ }
+ d__[*n] = a[*n + *n * a_dim1];
+ }
+
+ return 0;
+
+/* End of DSYTD2 */
+
+} /* dsytd2_ */
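+
+/*
+    A minimal sketch, not compiled: the elementary reflector
+    H = I - tau * v * v' described in the Further Details above, applied
+    to a single vector, i.e. y := y - tau * (v'*y) * v.  dsytd2_ applies
+    the same reflectors from both sides of A via dsymv_/dsyr2_ rather
+    than in this explicit form.
+*/
+#if 0
+static void apply_reflector(integer n, doublereal tau,
+                            const doublereal *v, doublereal *y)
+{
+    doublereal dot = 0.;
+    integer k;
+    for (k = 0; k < n; ++k)
+        dot += v[k] * y[k];               /* v' * y */
+    for (k = 0; k < n; ++k)
+        y[k] -= tau * dot * v[k];         /* y := y - tau * (v'*y) * v */
+}
+#endif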
+
+/* Subroutine */ int dsytrd_(char *uplo, integer *n, doublereal *a, integer *
+ lda, doublereal *d__, doublereal *e, doublereal *tau, doublereal *
+ work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, nb, kk, nx, iws;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ static logical upper;
+ extern /* Subroutine */ int dsytd2_(char *, integer *, doublereal *,
+ integer *, doublereal *, doublereal *, doublereal *, integer *), dsyr2k_(char *, char *, integer *, integer *, doublereal
+ *, doublereal *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, integer *), dlatrd_(char *,
+ integer *, integer *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *), xerbla_(char *,
+ integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DSYTRD reduces a real symmetric matrix A to real symmetric
+ tridiagonal form T by an orthogonal similarity transformation:
+ Q**T * A * Q = T.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ N-by-N upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading N-by-N lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+ On exit, if UPLO = 'U', the diagonal and first superdiagonal
+ of A are overwritten by the corresponding elements of the
+ tridiagonal matrix T, and the elements above the first
+ superdiagonal, with the array TAU, represent the orthogonal
+ matrix Q as a product of elementary reflectors; if UPLO
+ = 'L', the diagonal and first subdiagonal of A are over-
+ written by the corresponding elements of the tridiagonal
+ matrix T, and the elements below the first subdiagonal, with
+ the array TAU, represent the orthogonal matrix Q as a product
+ of elementary reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ D (output) DOUBLE PRECISION array, dimension (N)
+ The diagonal elements of the tridiagonal matrix T:
+ D(i) = A(i,i).
+
+ E (output) DOUBLE PRECISION array, dimension (N-1)
+ The off-diagonal elements of the tridiagonal matrix T:
+ E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
+
+ TAU (output) DOUBLE PRECISION array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) DOUBLE PRECISION array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= 1.
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ If UPLO = 'U', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(n-1) . . . H(2) H(1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
+ A(1:i-1,i+1), and tau in TAU(i).
+
+ If UPLO = 'L', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(1) H(2) . . . H(n-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
+ and tau in TAU(i).
+
+ The contents of A on exit are illustrated by the following examples
+ with n = 5:
+
+ if UPLO = 'U': if UPLO = 'L':
+
+ ( d e v2 v3 v4 ) ( d )
+ ( d e v3 v4 ) ( e d )
+ ( d e v4 ) ( v1 e d )
+ ( d e ) ( v1 v2 e d )
+ ( d ) ( v1 v2 v3 e d )
+
+ where d and e denote diagonal and off-diagonal elements of T, and vi
+ denotes an element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6,
+ (ftnlen)1);
+ lwkopt = *n * nb;
+ work[1] = (doublereal) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DSYTRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1] = 1.;
+ return 0;
+ }
+
+ nx = *n;
+ iws = 1;
+ if (nb > 1 && nb < *n) {
+
+/*
+ Determine when to cross over from blocked to unblocked code
+ (last block is always handled by unblocked code).
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "DSYTRD", uplo, n, &c_n1, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *n) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: determine the
+ minimum value of NB, and reduce NB or force use of
+ unblocked code by setting NX = N.
+
+ Computing MAX
+*/
+ i__1 = *lwork / ldwork;
+ nb = max(i__1,1);
+ nbmin = ilaenv_(&c__2, "DSYTRD", uplo, n, &c_n1, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ if (nb < nbmin) {
+ nx = *n;
+ }
+ }
+ } else {
+ nx = *n;
+ }
+ } else {
+ nb = 1;
+ }
+
+ if (upper) {
+
+/*
+ Reduce the upper triangle of A.
+ Columns 1:kk are handled by the unblocked method.
+*/
+
+ kk = *n - (*n - nx + nb - 1) / nb * nb;
+ i__1 = kk + 1;
+ i__2 = -nb;
+ for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+
+/*
+ Reduce columns i:i+nb-1 to tridiagonal form and form the
+ matrix W which is needed to update the unreduced part of
+ the matrix
+*/
+
+ i__3 = i__ + nb - 1;
+ dlatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], &
+ work[1], &ldwork);
+
+/*
+ Update the unreduced submatrix A(1:i-1,1:i-1), using an
+ update of the form: A := A - V*W' - W*V'
+*/
+
+ i__3 = i__ - 1;
+ dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b3001, &a[i__ *
+ a_dim1 + 1], lda, &work[1], &ldwork, &c_b2865, &a[
+ a_offset], lda);
+
+/*
+ Copy superdiagonal elements back into A, and diagonal
+ elements into D
+*/
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j - 1 + j * a_dim1] = e[j - 1];
+ d__[j] = a[j + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ dsytd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo);
+ } else {
+
+/* Reduce the lower triangle of A */
+
+ i__2 = *n - nx;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+
+/*
+ Reduce columns i:i+nb-1 to tridiagonal form and form the
+ matrix W which is needed to update the unreduced part of
+ the matrix
+*/
+
+ i__3 = *n - i__ + 1;
+ dlatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], &
+ tau[i__], &work[1], &ldwork);
+
+/*
+ Update the unreduced submatrix A(i+nb:n,i+nb:n), using
+ an update of the form: A := A - V*W' - W*V'
+*/
+
+ i__3 = *n - i__ - nb + 1;
+ dsyr2k_(uplo, "No transpose", &i__3, &nb, &c_b3001, &a[i__ + nb +
+ i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b2865, &a[
+ i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/*
+ Copy subdiagonal elements back into A, and diagonal
+ elements into D
+*/
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + 1 + j * a_dim1] = e[j];
+ d__[j] = a[j + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ i__1 = *n - i__ + 1;
+ dsytd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__],
+ &tau[i__], &iinfo);
+ }
+
+ work[1] = (doublereal) lwkopt;
+ return 0;
+
+/* End of DSYTRD */
+
+} /* dsytrd_ */
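+
+/*
+    A minimal sketch, not compiled: the LWORK = -1 workspace query
+    documented above.  The reduction is skipped and the optimal LWORK
+    comes back in WORK(1) (work[0] in C indexing); the caller can then
+    allocate and call again.  Uses the dsytrd_ signature as declared
+    above.
+*/
+#if 0
+static integer dsytrd_optimal_lwork(char *uplo, integer n, doublereal *a,
+                                    integer lda, doublereal *d,
+                                    doublereal *e, doublereal *tau)
+{
+    doublereal query;
+    integer lwork = -1, info;
+    dsytrd_(uplo, &n, a, &lda, d, e, tau, &query, &lwork, &info);
+    return info == 0 ? (integer) query : -1;
+}
+#endif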
+
+/* Subroutine */ int dtrevc_(char *side, char *howmny, logical *select,
+ integer *n, doublereal *t, integer *ldt, doublereal *vl, integer *
+ ldvl, doublereal *vr, integer *ldvr, integer *mm, integer *m,
+ doublereal *work, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3;
+ doublereal d__1, d__2, d__3, d__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j, k;
+ static doublereal x[4] /* was [2][2] */;
+ static integer j1, j2, n2, ii, ki, ip, is;
+ static doublereal wi, wr, rec, ulp, beta, emax;
+ static logical pair;
+ extern doublereal ddot_(integer *, doublereal *, integer *, doublereal *,
+ integer *);
+ static logical allv;
+ static integer ierr;
+ static doublereal unfl, ovfl, smin;
+ static logical over;
+ static doublereal vmax;
+ static integer jnxt;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ static doublereal scale;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dgemv_(char *, integer *, integer *,
+ doublereal *, doublereal *, integer *, doublereal *, integer *,
+ doublereal *, doublereal *, integer *);
+ static doublereal remax;
+ extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
+ doublereal *, integer *);
+ static logical leftv, bothv;
+ extern /* Subroutine */ int daxpy_(integer *, doublereal *, doublereal *,
+ integer *, doublereal *, integer *);
+ static doublereal vcrit;
+ static logical somev;
+ static doublereal xnorm;
+ extern /* Subroutine */ int dlaln2_(logical *, integer *, integer *,
+ doublereal *, doublereal *, doublereal *, integer *, doublereal *,
+ doublereal *, doublereal *, integer *, doublereal *, doublereal *
+ , doublereal *, integer *, doublereal *, doublereal *, integer *),
+ dlabad_(doublereal *, doublereal *);
+
+ extern integer idamax_(integer *, doublereal *, integer *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static doublereal bignum;
+ static logical rightv;
+ static doublereal smlnum;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ DTREVC computes some or all of the right and/or left eigenvectors of
+ a real upper quasi-triangular matrix T.
+
+ The right eigenvector x and the left eigenvector y of T corresponding
+ to an eigenvalue w are defined by:
+
+ T*x = w*x, y'*T = w*y'
+
+ where y' denotes the conjugate transpose of the vector y.
+
+ If all eigenvectors are requested, the routine may either return the
+ matrices X and/or Y of right or left eigenvectors of T, or the
+ products Q*X and/or Q*Y, where Q is an input orthogonal
+ matrix. If T was obtained from the real Schur factorization of an
+ original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of
+ right or left eigenvectors of A.
+
+ T must be in Schur canonical form (as returned by DHSEQR), that is,
+ block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each
+ 2-by-2 diagonal block has its diagonal elements equal and its
+ off-diagonal elements of opposite sign. Corresponding to each 2-by-2
+ diagonal block is a complex conjugate pair of eigenvalues and
+ eigenvectors; only one eigenvector of the pair is computed, namely
+ the one corresponding to the eigenvalue with positive imaginary part.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'R': compute right eigenvectors only;
+ = 'L': compute left eigenvectors only;
+ = 'B': compute both right and left eigenvectors.
+
+ HOWMNY (input) CHARACTER*1
+ = 'A': compute all right and/or left eigenvectors;
+ = 'B': compute all right and/or left eigenvectors,
+ and backtransform them using the input matrices
+ supplied in VR and/or VL;
+ = 'S': compute selected right and/or left eigenvectors,
+ specified by the logical array SELECT.
+
+ SELECT (input/output) LOGICAL array, dimension (N)
+ If HOWMNY = 'S', SELECT specifies the eigenvectors to be
+ computed.
+ If HOWMNY = 'A' or 'B', SELECT is not referenced.
+ To select the real eigenvector corresponding to a real
+ eigenvalue w(j), SELECT(j) must be set to .TRUE.. To select
+ the complex eigenvector corresponding to a complex conjugate
+ pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must be
+ set to .TRUE.; then on exit SELECT(j) is .TRUE. and
+ SELECT(j+1) is .FALSE..
+
+ N (input) INTEGER
+ The order of the matrix T. N >= 0.
+
+ T (input) DOUBLE PRECISION array, dimension (LDT,N)
+ The upper quasi-triangular matrix T in Schur canonical form.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= max(1,N).
+
+ VL (input/output) DOUBLE PRECISION array, dimension (LDVL,MM)
+ On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must
+ contain an N-by-N matrix Q (usually the orthogonal matrix Q
+ of Schur vectors returned by DHSEQR).
+ On exit, if SIDE = 'L' or 'B', VL contains:
+ if HOWMNY = 'A', the matrix Y of left eigenvectors of T;
+ VL has the same quasi-lower triangular form
+ as T'. If T(i,i) is a real eigenvalue, then
+ the i-th column VL(i) of VL is its
+ corresponding eigenvector. If T(i:i+1,i:i+1)
+ is a 2-by-2 block whose eigenvalues are
+ complex-conjugate eigenvalues of T, then
+ VL(i)+sqrt(-1)*VL(i+1) is the complex
+ eigenvector corresponding to the eigenvalue
+ with positive imaginary part.
+ if HOWMNY = 'B', the matrix Q*Y;
+ if HOWMNY = 'S', the left eigenvectors of T specified by
+ SELECT, stored consecutively in the columns
+ of VL, in the same order as their
+ eigenvalues.
+ A complex eigenvector corresponding to a complex eigenvalue
+ is stored in two consecutive columns, the first holding the
+ real part, and the second the imaginary part.
+ If SIDE = 'R', VL is not referenced.
+
+ LDVL (input) INTEGER
+ The leading dimension of the array VL. LDVL >= max(1,N) if
+ SIDE = 'L' or 'B'; LDVL >= 1 otherwise.
+
+ VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM)
+ On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must
+ contain an N-by-N matrix Q (usually the orthogonal matrix Q
+ of Schur vectors returned by DHSEQR).
+ On exit, if SIDE = 'R' or 'B', VR contains:
+ if HOWMNY = 'A', the matrix X of right eigenvectors of T;
+ VR has the same quasi-upper triangular form
+ as T. If T(i,i) is a real eigenvalue, then
+ the i-th column VR(i) of VR is its
+ corresponding eigenvector. If T(i:i+1,i:i+1)
+ is a 2-by-2 block whose eigenvalues are
+ complex-conjugate eigenvalues of T, then
+ VR(i)+sqrt(-1)*VR(i+1) is the complex
+ eigenvector corresponding to the eigenvalue
+ with positive imaginary part.
+ if HOWMNY = 'B', the matrix Q*X;
+ if HOWMNY = 'S', the right eigenvectors of T specified by
+ SELECT, stored consecutively in the columns
+ of VR, in the same order as their
+ eigenvalues.
+ A complex eigenvector corresponding to a complex eigenvalue
+ is stored in two consecutive columns, the first holding the
+ real part and the second the imaginary part.
+ If SIDE = 'L', VR is not referenced.
+
+ LDVR (input) INTEGER
+ The leading dimension of the array VR. LDVR >= max(1,N) if
+ SIDE = 'R' or 'B'; LDVR >= 1 otherwise.
+
+ MM (input) INTEGER
+ The number of columns in the arrays VL and/or VR. MM >= M.
+
+ M (output) INTEGER
+ The number of columns in the arrays VL and/or VR actually
+ used to store the eigenvectors.
+ If HOWMNY = 'A' or 'B', M is set to N.
+ Each selected real eigenvector occupies one column and each
+ selected complex eigenvector occupies two columns.
+
+ WORK (workspace) DOUBLE PRECISION array, dimension (3*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The algorithm used in this program is basically backward (forward)
+ substitution, with scaling to make the code robust against
+ possible overflow.
+
+ Each eigenvector is normalized so that the element of largest
+ magnitude has magnitude 1; here the magnitude of a complex number
+ (x,y) is taken to be |x| + |y|.
+
+ =====================================================================
+
+
+ Decode and test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = (lsame_(side, "R")) || (bothv);
+ leftv = (lsame_(side, "L")) || (bothv);
+
+ allv = lsame_(howmny, "A");
+ over = lsame_(howmny, "B");
+ somev = lsame_(howmny, "S");
+
+ *info = 0;
+ if (! rightv && ! leftv) {
+ *info = -1;
+ } else if (! allv && ! over && ! somev) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if ((*ldvl < 1) || (leftv && *ldvl < *n)) {
+ *info = -8;
+ } else if ((*ldvr < 1) || (rightv && *ldvr < *n)) {
+ *info = -10;
+ } else {
+
+/*
+ Set M to the number of columns required to store the selected
+ eigenvectors, standardize the array SELECT if necessary, and
+ test MM.
+*/
+
+ if (somev) {
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (pair) {
+ pair = FALSE_;
+ select[j] = FALSE_;
+ } else {
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] == 0.) {
+ if (select[j]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if ((select[j]) || (select[j + 1])) {
+ select[j] = TRUE_;
+ *m += 2;
+ }
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ if (*mm < *m) {
+ *info = -11;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTREVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set the constants to control overflow. */
+
+ unfl = SAFEMINIMUM;
+ ovfl = 1. / unfl;
+ dlabad_(&unfl, &ovfl);
+ ulp = PRECISION;
+ smlnum = unfl * (*n / ulp);
+ bignum = (1. - ulp) / smlnum;
+
+/*
+ Compute 1-norm of each column of strictly upper triangular
+ part of T to control overflow in triangular solver.
+*/
+
+ work[1] = 0.;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ work[j] = 0.;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[j] += (d__1 = t[i__ + j * t_dim1], abs(d__1));
+/* L20: */
+ }
+/* L30: */
+ }
+
+/*
+ Index IP is used to specify the real or complex eigenvalue:
+ IP = 0, real eigenvalue,
+ 1, first of conjugate complex pair: (wr,wi)
+ -1, second of conjugate complex pair: (wr,wi)
+*/
+
+ n2 = (*n) << (1);
+
+ if (rightv) {
+
+/* Compute right eigenvectors. */
+
+ ip = 0;
+ is = *m;
+ for (ki = *n; ki >= 1; --ki) {
+
+ if (ip == 1) {
+ goto L130;
+ }
+ if (ki == 1) {
+ goto L40;
+ }
+ if (t[ki + (ki - 1) * t_dim1] == 0.) {
+ goto L40;
+ }
+ ip = -1;
+
+L40:
+ if (somev) {
+ if (ip == 0) {
+ if (! select[ki]) {
+ goto L130;
+ }
+ } else {
+ if (! select[ki - 1]) {
+ goto L130;
+ }
+ }
+ }
+
+/* Compute the KI-th eigenvalue (WR,WI). */
+
+ wr = t[ki + ki * t_dim1];
+ wi = 0.;
+ if (ip != 0) {
+ wi = sqrt((d__1 = t[ki + (ki - 1) * t_dim1], abs(d__1))) *
+ sqrt((d__2 = t[ki - 1 + ki * t_dim1], abs(d__2)));
+ }
+/* Computing MAX */
+ d__1 = ulp * (abs(wr) + abs(wi));
+ smin = max(d__1,smlnum);
+
+ if (ip == 0) {
+
+/* Real right eigenvector */
+
+ work[ki + *n] = 1.;
+
+/* Form right-hand side */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ work[k + *n] = -t[k + ki * t_dim1];
+/* L50: */
+ }
+
+/*
+ Solve the upper quasi-triangular system:
+ (T(1:KI-1,1:KI-1) - WR)*X = SCALE*WORK.
+*/
+
+ jnxt = ki - 1;
+ for (j = ki - 1; j >= 1; --j) {
+ if (j > jnxt) {
+ goto L60;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.) {
+ j1 = j - 1;
+ jnxt = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+ dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b2865, &t[j
+ + j * t_dim1], ldt, &c_b2865, &c_b2865, &work[
+ j + *n], n, &wr, &c_b2879, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/*
+ Scale X(1,1) to avoid overflow when updating
+ the right-hand side.
+*/
+
+ if (xnorm > 1.) {
+ if (work[j] > bignum / xnorm) {
+ x[0] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ }
+ work[j + *n] = x[0];
+
+/* Update right-hand side */
+
+ i__1 = j - 1;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+ dlaln2_(&c_false, &c__2, &c__1, &smin, &c_b2865, &t[j
+ - 1 + (j - 1) * t_dim1], ldt, &c_b2865, &
+ c_b2865, &work[j - 1 + *n], n, &wr, &c_b2879,
+ x, &c__2, &scale, &xnorm, &ierr);
+
+/*
+ Scale X(1,1) and X(2,1) to avoid overflow when
+ updating the right-hand side.
+*/
+
+ if (xnorm > 1.) {
+/* Computing MAX */
+ d__1 = work[j - 1], d__2 = work[j];
+ beta = max(d__1,d__2);
+ if (beta > bignum / xnorm) {
+ x[0] /= xnorm;
+ x[1] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ }
+ work[j - 1 + *n] = x[0];
+ work[j + *n] = x[1];
+
+/* Update right-hand side */
+
+ i__1 = j - 2;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[*n + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[1];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ }
+L60:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ dcopy_(&ki, &work[*n + 1], &c__1, &vr[is * vr_dim1 + 1], &
+ c__1);
+
+ ii = idamax_(&ki, &vr[is * vr_dim1 + 1], &c__1);
+ remax = 1. / (d__1 = vr[ii + is * vr_dim1], abs(d__1));
+ dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ vr[k + is * vr_dim1] = 0.;
+/* L70: */
+ }
+ } else {
+ if (ki > 1) {
+ i__1 = ki - 1;
+ dgemv_("N", n, &i__1, &c_b2865, &vr[vr_offset], ldvr,
+ &work[*n + 1], &c__1, &work[ki + *n], &vr[ki *
+ vr_dim1 + 1], &c__1);
+ }
+
+ ii = idamax_(n, &vr[ki * vr_dim1 + 1], &c__1);
+ remax = 1. / (d__1 = vr[ii + ki * vr_dim1], abs(d__1));
+ dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+ } else {
+
+/*
+ Complex right eigenvector.
+
+ Initial solve
+ [ (T(KI-1,KI-1) T(KI-1,KI) ) - (WR + I* WI)]*X = 0.
+ [ (T(KI,KI-1) T(KI,KI) ) ]
+*/
+
+ if ((d__1 = t[ki - 1 + ki * t_dim1], abs(d__1)) >= (d__2 = t[
+ ki + (ki - 1) * t_dim1], abs(d__2))) {
+ work[ki - 1 + *n] = 1.;
+ work[ki + n2] = wi / t[ki - 1 + ki * t_dim1];
+ } else {
+ work[ki - 1 + *n] = -wi / t[ki + (ki - 1) * t_dim1];
+ work[ki + n2] = 1.;
+ }
+ work[ki + *n] = 0.;
+ work[ki - 1 + n2] = 0.;
+
+/* Form right-hand side */
+
+ i__1 = ki - 2;
+ for (k = 1; k <= i__1; ++k) {
+ work[k + *n] = -work[ki - 1 + *n] * t[k + (ki - 1) *
+ t_dim1];
+ work[k + n2] = -work[ki + n2] * t[k + ki * t_dim1];
+/* L80: */
+ }
+
+/*
+ Solve upper quasi-triangular system:
+ (T(1:KI-2,1:KI-2) - (WR+i*WI))*X = SCALE*(WORK+i*WORK2)
+*/
+
+ jnxt = ki - 2;
+ for (j = ki - 2; j >= 1; --j) {
+ if (j > jnxt) {
+ goto L90;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.) {
+ j1 = j - 1;
+ jnxt = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+ dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b2865, &t[j
+ + j * t_dim1], ldt, &c_b2865, &c_b2865, &work[
+ j + *n], n, &wr, &wi, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/*
+ Scale X(1,1) and X(1,2) to avoid overflow when
+ updating the right-hand side.
+*/
+
+ if (xnorm > 1.) {
+ if (work[j] > bignum / xnorm) {
+ x[0] /= xnorm;
+ x[2] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ dscal_(&ki, &scale, &work[n2 + 1], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+
+/* Update the right-hand side */
+
+ i__1 = j - 1;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ i__1 = j - 1;
+ d__1 = -x[2];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ n2 + 1], &c__1);
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+ dlaln2_(&c_false, &c__2, &c__2, &smin, &c_b2865, &t[j
+ - 1 + (j - 1) * t_dim1], ldt, &c_b2865, &
+ c_b2865, &work[j - 1 + *n], n, &wr, &wi, x, &
+ c__2, &scale, &xnorm, &ierr);
+
+/*
+ Scale X to avoid overflow when updating
+ the right-hand side.
+*/
+
+ if (xnorm > 1.) {
+/* Computing MAX */
+ d__1 = work[j - 1], d__2 = work[j];
+ beta = max(d__1,d__2);
+ if (beta > bignum / xnorm) {
+ rec = 1. / xnorm;
+ x[0] *= rec;
+ x[2] *= rec;
+ x[1] *= rec;
+ x[3] *= rec;
+ scale *= rec;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ dscal_(&ki, &scale, &work[*n + 1], &c__1);
+ dscal_(&ki, &scale, &work[n2 + 1], &c__1);
+ }
+ work[j - 1 + *n] = x[0];
+ work[j + *n] = x[1];
+ work[j - 1 + n2] = x[2];
+ work[j + n2] = x[3];
+
+/* Update the right-hand side */
+
+ i__1 = j - 2;
+ d__1 = -x[0];
+ daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[*n + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[1];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[2];
+ daxpy_(&i__1, &d__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[n2 + 1], &c__1);
+ i__1 = j - 2;
+ d__1 = -x[3];
+ daxpy_(&i__1, &d__1, &t[j * t_dim1 + 1], &c__1, &work[
+ n2 + 1], &c__1);
+ }
+L90:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ dcopy_(&ki, &work[*n + 1], &c__1, &vr[(is - 1) * vr_dim1
+ + 1], &c__1);
+ dcopy_(&ki, &work[n2 + 1], &c__1, &vr[is * vr_dim1 + 1], &
+ c__1);
+
+ emax = 0.;
+ i__1 = ki;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vr[k + (is - 1) * vr_dim1]
+ , abs(d__1)) + (d__2 = vr[k + is * vr_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L100: */
+ }
+
+ remax = 1. / emax;
+ dscal_(&ki, &remax, &vr[(is - 1) * vr_dim1 + 1], &c__1);
+ dscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ vr[k + (is - 1) * vr_dim1] = 0.;
+ vr[k + is * vr_dim1] = 0.;
+/* L110: */
+ }
+
+ } else {
+
+ if (ki > 2) {
+ i__1 = ki - 2;
+ dgemv_("N", n, &i__1, &c_b2865, &vr[vr_offset], ldvr,
+ &work[*n + 1], &c__1, &work[ki - 1 + *n], &vr[
+ (ki - 1) * vr_dim1 + 1], &c__1);
+ i__1 = ki - 2;
+ dgemv_("N", n, &i__1, &c_b2865, &vr[vr_offset], ldvr,
+ &work[n2 + 1], &c__1, &work[ki + n2], &vr[ki *
+ vr_dim1 + 1], &c__1);
+ } else {
+ dscal_(n, &work[ki - 1 + *n], &vr[(ki - 1) * vr_dim1
+ + 1], &c__1);
+ dscal_(n, &work[ki + n2], &vr[ki * vr_dim1 + 1], &
+ c__1);
+ }
+
+ emax = 0.;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vr[k + (ki - 1) * vr_dim1]
+ , abs(d__1)) + (d__2 = vr[k + ki * vr_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L120: */
+ }
+ remax = 1. / emax;
+ dscal_(n, &remax, &vr[(ki - 1) * vr_dim1 + 1], &c__1);
+ dscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+ }
+
+ --is;
+ if (ip != 0) {
+ --is;
+ }
+L130:
+ if (ip == 1) {
+ ip = 0;
+ }
+ if (ip == -1) {
+ ip = 1;
+ }
+/* L140: */
+ }
+ }
+
+ if (leftv) {
+
+/* Compute left eigenvectors. */
+
+ ip = 0;
+ is = 1;
+ i__1 = *n;
+ for (ki = 1; ki <= i__1; ++ki) {
+
+ if (ip == -1) {
+ goto L250;
+ }
+ if (ki == *n) {
+ goto L150;
+ }
+ if (t[ki + 1 + ki * t_dim1] == 0.) {
+ goto L150;
+ }
+ ip = 1;
+
+L150:
+ if (somev) {
+ if (! select[ki]) {
+ goto L250;
+ }
+ }
+
+/* Compute the KI-th eigenvalue (WR,WI). */
+
+ wr = t[ki + ki * t_dim1];
+ wi = 0.;
+ if (ip != 0) {
+ wi = sqrt((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1))) *
+ sqrt((d__2 = t[ki + 1 + ki * t_dim1], abs(d__2)));
+ }
+/* Computing MAX */
+ d__1 = ulp * (abs(wr) + abs(wi));
+ smin = max(d__1,smlnum);
+
+ if (ip == 0) {
+
+/* Real left eigenvector. */
+
+ work[ki + *n] = 1.;
+
+/* Form right-hand side */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ work[k + *n] = -t[ki + k * t_dim1];
+/* L160: */
+ }
+
+/*
+ Solve the quasi-triangular system:
+ (T(KI+1:N,KI+1:N) - WR)'*X = SCALE*WORK
+*/
+
+ vmax = 1.;
+ vcrit = bignum;
+
+ jnxt = ki + 1;
+ i__2 = *n;
+ for (j = ki + 1; j <= i__2; ++j) {
+ if (j < jnxt) {
+ goto L170;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.) {
+ j2 = j + 1;
+ jnxt = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/*
+ 1-by-1 diagonal block
+
+ Scale if necessary to avoid overflow when forming
+ the right-hand side.
+*/
+
+ if (work[j] > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 1;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1],
+ &c__1, &work[ki + 1 + *n], &c__1);
+
+/* Solve (T(J,J)-WR)'*X = WORK */
+
+ dlaln2_(&c_false, &c__1, &c__1, &smin, &c_b2865, &t[j
+ + j * t_dim1], ldt, &c_b2865, &c_b2865, &work[
+ j + *n], n, &wr, &c_b2879, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ }
+ work[j + *n] = x[0];
+/* Computing MAX */
+ d__2 = (d__1 = work[j + *n], abs(d__1));
+ vmax = max(d__2,vmax);
+ vcrit = bignum / vmax;
+
+ } else {
+
+/*
+ 2-by-2 diagonal block
+
+ Scale if necessary to avoid overflow when forming
+ the right-hand side.
+
+ Computing MAX
+*/
+ d__1 = work[j], d__2 = work[j + 1];
+ beta = max(d__1,d__2);
+ if (beta > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 1;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 1 + j * t_dim1],
+ &c__1, &work[ki + 1 + *n], &c__1);
+
+ i__3 = j - ki - 1;
+ work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 1 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 1 + *n], &c__1);
+
+/*
+ Solve
+ [T(J,J)-WR T(J,J+1) ]'* X = SCALE*( WORK1 )
+ [T(J+1,J) T(J+1,J+1)-WR] ( WORK2 )
+*/
+
+ dlaln2_(&c_true, &c__2, &c__1, &smin, &c_b2865, &t[j
+ + j * t_dim1], ldt, &c_b2865, &c_b2865, &work[
+ j + *n], n, &wr, &c_b2879, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + 1 + *n] = x[1];
+
+/* Computing MAX */
+ d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2
+ = work[j + 1 + *n], abs(d__2)), d__3 = max(
+ d__3,d__4);
+ vmax = max(d__3,vmax);
+ vcrit = bignum / vmax;
+
+ }
+L170:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+ if (! over) {
+ i__2 = *n - ki + 1;
+ dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
+ vl_dim1], &c__1);
+
+ i__2 = *n - ki + 1;
+ ii = idamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki -
+ 1;
+ remax = 1. / (d__1 = vl[ii + is * vl_dim1], abs(d__1));
+ i__2 = *n - ki + 1;
+ dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ vl[k + is * vl_dim1] = 0.;
+/* L180: */
+ }
+
+ } else {
+
+ if (ki < *n) {
+ i__2 = *n - ki;
+ dgemv_("N", n, &i__2, &c_b2865, &vl[(ki + 1) *
+ vl_dim1 + 1], ldvl, &work[ki + 1 + *n], &c__1,
+ &work[ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
+ }
+
+ ii = idamax_(n, &vl[ki * vl_dim1 + 1], &c__1);
+ remax = 1. / (d__1 = vl[ii + ki * vl_dim1], abs(d__1));
+ dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+
+ }
+
+ } else {
+
+/*
+ Complex left eigenvector.
+
+ Initial solve:
+ ((T(KI,KI) T(KI,KI+1) )' - (WR - I* WI))*X = 0.
+ ((T(KI+1,KI) T(KI+1,KI+1)) )
+*/
+
+ if ((d__1 = t[ki + (ki + 1) * t_dim1], abs(d__1)) >= (d__2 =
+ t[ki + 1 + ki * t_dim1], abs(d__2))) {
+ work[ki + *n] = wi / t[ki + (ki + 1) * t_dim1];
+ work[ki + 1 + n2] = 1.;
+ } else {
+ work[ki + *n] = 1.;
+ work[ki + 1 + n2] = -wi / t[ki + 1 + ki * t_dim1];
+ }
+ work[ki + 1 + *n] = 0.;
+ work[ki + n2] = 0.;
+
+/* Form right-hand side */
+
+ i__2 = *n;
+ for (k = ki + 2; k <= i__2; ++k) {
+ work[k + *n] = -work[ki + *n] * t[ki + k * t_dim1];
+ work[k + n2] = -work[ki + 1 + n2] * t[ki + 1 + k * t_dim1]
+ ;
+/* L190: */
+ }
+
+/*
+ Solve complex quasi-triangular system:
+ ( T(KI+2:N,KI+2:N) - (WR-i*WI) )'*X = WORK1+i*WORK2
+*/
+
+ vmax = 1.;
+ vcrit = bignum;
+
+ jnxt = ki + 2;
+ i__2 = *n;
+ for (j = ki + 2; j <= i__2; ++j) {
+ if (j < jnxt) {
+ goto L200;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.) {
+ j2 = j + 1;
+ jnxt = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/*
+ 1-by-1 diagonal block
+
+ Scale if necessary to avoid overflow when
+ forming the right-hand side elements.
+*/
+
+ if (work[j] > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + n2], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 2;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + *n], &c__1);
+ i__3 = j - ki - 2;
+ work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + n2], &c__1);
+
+/* Solve (T(J,J)-(WR-i*WI))*(X11+i*X12)= WK+I*WK2 */
+
+ d__1 = -wi;
+ dlaln2_(&c_false, &c__1, &c__2, &smin, &c_b2865, &t[j
+ + j * t_dim1], ldt, &c_b2865, &c_b2865, &work[
+ j + *n], n, &wr, &d__1, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + n2], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+/* Computing MAX */
+ d__3 = (d__1 = work[j + *n], abs(d__1)), d__4 = (d__2
+ = work[j + n2], abs(d__2)), d__3 = max(d__3,
+ d__4);
+ vmax = max(d__3,vmax);
+ vcrit = bignum / vmax;
+
+ } else {
+
+/*
+ 2-by-2 diagonal block
+
+ Scale if necessary to avoid overflow when forming
+ the right-hand side elements.
+
+ Computing MAX
+*/
+ d__1 = work[j], d__2 = work[j + 1];
+ beta = max(d__1,d__2);
+ if (beta > vcrit) {
+ rec = 1. / vmax;
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &rec, &work[ki + n2], &c__1);
+ vmax = 1.;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 2;
+ work[j + *n] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + *n], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + n2] -= ddot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + n2], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + 1 + *n] -= ddot_(&i__3, &t[ki + 2 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 2 + *n], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + 1 + n2] -= ddot_(&i__3, &t[ki + 2 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 2 + n2], &c__1);
+
+/*
+ Solve 2-by-2 complex linear equation
+ ([T(j,j) T(j,j+1) ]'-(wr-i*wi)*I)*X = SCALE*B
+ ([T(j+1,j) T(j+1,j+1)] )
+*/
+
+ d__1 = -wi;
+ dlaln2_(&c_true, &c__2, &c__2, &smin, &c_b2865, &t[j
+ + j * t_dim1], ldt, &c_b2865, &c_b2865, &work[
+ j + *n], n, &wr, &d__1, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.) {
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ dscal_(&i__3, &scale, &work[ki + n2], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+ work[j + 1 + *n] = x[1];
+ work[j + 1 + n2] = x[3];
+/* Computing MAX */
+ d__1 = abs(x[0]), d__2 = abs(x[2]), d__1 = max(d__1,
+ d__2), d__2 = abs(x[1]), d__1 = max(d__1,d__2)
+ , d__2 = abs(x[3]), d__1 = max(d__1,d__2);
+ vmax = max(d__1,vmax);
+ vcrit = bignum / vmax;
+
+ }
+L200:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+/* L210: */
+ if (! over) {
+ i__2 = *n - ki + 1;
+ dcopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
+ vl_dim1], &c__1);
+ i__2 = *n - ki + 1;
+ dcopy_(&i__2, &work[ki + n2], &c__1, &vl[ki + (is + 1) *
+ vl_dim1], &c__1);
+
+ emax = 0.;
+ i__2 = *n;
+ for (k = ki; k <= i__2; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vl[k + is * vl_dim1], abs(
+ d__1)) + (d__2 = vl[k + (is + 1) * vl_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L220: */
+ }
+ remax = 1. / emax;
+ i__2 = *n - ki + 1;
+ dscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+ i__2 = *n - ki + 1;
+ dscal_(&i__2, &remax, &vl[ki + (is + 1) * vl_dim1], &c__1)
+ ;
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ vl[k + is * vl_dim1] = 0.;
+ vl[k + (is + 1) * vl_dim1] = 0.;
+/* L230: */
+ }
+ } else {
+ if (ki < *n - 1) {
+ i__2 = *n - ki - 1;
+ dgemv_("N", n, &i__2, &c_b2865, &vl[(ki + 2) *
+ vl_dim1 + 1], ldvl, &work[ki + 2 + *n], &c__1,
+ &work[ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
+ i__2 = *n - ki - 1;
+ dgemv_("N", n, &i__2, &c_b2865, &vl[(ki + 2) *
+ vl_dim1 + 1], ldvl, &work[ki + 2 + n2], &c__1,
+ &work[ki + 1 + n2], &vl[(ki + 1) * vl_dim1 +
+ 1], &c__1);
+ } else {
+ dscal_(n, &work[ki + *n], &vl[ki * vl_dim1 + 1], &
+ c__1);
+ dscal_(n, &work[ki + 1 + n2], &vl[(ki + 1) * vl_dim1
+ + 1], &c__1);
+ }
+
+ emax = 0.;
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing MAX */
+ d__3 = emax, d__4 = (d__1 = vl[k + ki * vl_dim1], abs(
+ d__1)) + (d__2 = vl[k + (ki + 1) * vl_dim1],
+ abs(d__2));
+ emax = max(d__3,d__4);
+/* L240: */
+ }
+ remax = 1. / emax;
+ dscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+ dscal_(n, &remax, &vl[(ki + 1) * vl_dim1 + 1], &c__1);
+
+ }
+
+ }
+
+ ++is;
+ if (ip != 0) {
+ ++is;
+ }
+L250:
+ if (ip == -1) {
+ ip = 0;
+ }
+ if (ip == 1) {
+ ip = -1;
+ }
+
+/* L260: */
+ }
+
+ }
+
+ return 0;
+
+/* End of DTREVC */
+
+} /* dtrevc_ */
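+
+/*
+    A minimal sketch, not compiled: the normalization rule from the
+    Further Details of dtrevc_ above.  For a complex eigenvector stored
+    as two real columns (re, im), the magnitude of entry k is taken to
+    be |re[k]| + |im[k]|, and both columns are scaled so that the
+    largest such magnitude becomes 1 -- the emax/remax passages in the
+    routine do exactly this.  Assumes a nonzero vector and <math.h> for
+    fabs.
+*/
+#if 0
+static void normalize_complex_pair(integer n, doublereal *re, doublereal *im)
+{
+    doublereal emax = 0., remax;
+    integer k;
+    for (k = 0; k < n; ++k) {
+        doublereal mag = fabs(re[k]) + fabs(im[k]);  /* |x| + |y| */
+        if (mag > emax)
+            emax = mag;
+    }
+    remax = 1. / emax;
+    for (k = 0; k < n; ++k) {
+        re[k] *= remax;
+        im[k] *= remax;
+    }
+}
+#endif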
+
+/* Subroutine */ int dtrti2_(char *uplo, char *diag, integer *n, doublereal *
+ a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer j;
+ static doublereal ajj;
+ extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
+ integer *);
+ extern logical lsame_(char *, char *);
+ static logical upper;
+ extern /* Subroutine */ int dtrmv_(char *, char *, char *, integer *,
+ doublereal *, integer *, doublereal *, integer *), xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ DTRTI2 computes the inverse of a real upper or lower triangular
+ matrix.
+
+ This is the Level 2 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the matrix A is upper or lower triangular.
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ DIAG (input) CHARACTER*1
+ Specifies whether or not the matrix A is unit triangular.
+ = 'N': Non-unit triangular
+ = 'U': Unit triangular
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the triangular matrix A. If UPLO = 'U', the
+ leading n by n upper triangular part of the array A contains
+ the upper triangular matrix, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n by n lower triangular part of the array A contains
+ the lower triangular matrix, and the strictly upper
+ triangular part of A is not referenced. If DIAG = 'U', the
+ diagonal elements of A are also not referenced and are
+ assumed to be 1.
+
+ On exit, the (triangular) inverse of the original matrix, in
+ the same storage format.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTRTI2", &i__1);
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (nounit) {
+ a[j + j * a_dim1] = 1. / a[j + j * a_dim1];
+ ajj = -a[j + j * a_dim1];
+ } else {
+ ajj = -1.;
+ }
+
+/* Compute elements 1:j-1 of j-th column. */
+
+ i__2 = j - 1;
+ dtrmv_("Upper", "No transpose", diag, &i__2, &a[a_offset], lda, &
+ a[j * a_dim1 + 1], &c__1);
+ i__2 = j - 1;
+ dscal_(&i__2, &ajj, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix. */
+
+ for (j = *n; j >= 1; --j) {
+ if (nounit) {
+ a[j + j * a_dim1] = 1. / a[j + j * a_dim1];
+ ajj = -a[j + j * a_dim1];
+ } else {
+ ajj = -1.;
+ }
+ if (j < *n) {
+
+/* Compute elements j+1:n of j-th column. */
+
+ i__1 = *n - j;
+ dtrmv_("Lower", "No transpose", diag, &i__1, &a[j + 1 + (j +
+ 1) * a_dim1], lda, &a[j + 1 + j * a_dim1], &c__1);
+ i__1 = *n - j;
+ dscal_(&i__1, &ajj, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of DTRTI2 */
+
+} /* dtrti2_ */
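+
+/*
+    A minimal sketch, not compiled: the column-by-column inversion idea
+    dtrti2_ uses, written out for a row-major, non-unit upper triangular
+    matrix.  Step j inverts the diagonal entry, multiplies the first j
+    entries of column j by the already-inverted leading block (the
+    dtrmv_ call above), then scales by -1/A(j,j) (the dscal_ call).
+*/
+#if 0
+static void upper_tri_invert(integer n, doublereal *a)   /* a is n x n */
+{
+    integer i, j, k;
+    for (j = 0; j < n; ++j) {
+        doublereal ajj;
+        a[j * n + j] = 1. / a[j * n + j];
+        ajj = -a[j * n + j];
+        /* x := U(0:j-1,0:j-1) * x, where x is column j above the diagonal;
+           row i reads only entries k >= i, so ascending i is safe. */
+        for (i = 0; i < j; ++i) {
+            doublereal s = a[i * n + i] * a[i * n + j];
+            for (k = i + 1; k < j; ++k)
+                s += a[i * n + k] * a[k * n + j];
+            a[i * n + j] = s;
+        }
+        for (i = 0; i < j; ++i)
+            a[i * n + j] *= ajj;                  /* scale by -1/A(j,j) */
+    }
+}
+#endif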
+
+/* Subroutine */ int dtrtri_(char *uplo, char *diag, integer *n, doublereal *
+ a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, i__1, i__2[2], i__3, i__4, i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer j, jb, nb, nn;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int dtrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublereal *, doublereal *, integer *,
+ doublereal *, integer *), dtrsm_(
+ char *, char *, char *, char *, integer *, integer *, doublereal *
+ , doublereal *, integer *, doublereal *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int dtrti2_(char *, char *, integer *, doublereal
+ *, integer *, integer *), xerbla_(char *, integer
+ *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical nounit;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ DTRTRI computes the inverse of a real upper or lower triangular
+ matrix A.
+
+ This is the Level 3 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': A is upper triangular;
+ = 'L': A is lower triangular.
+
+ DIAG (input) CHARACTER*1
+ = 'N': A is non-unit triangular;
+ = 'U': A is unit triangular.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
+ On entry, the triangular matrix A. If UPLO = 'U', the
+ leading N-by-N upper triangular part of the array A contains
+ the upper triangular matrix, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading N-by-N lower triangular part of the array A contains
+ the lower triangular matrix, and the strictly upper
+ triangular part of A is not referenced. If DIAG = 'U', the
+ diagonal elements of A are also not referenced and are
+ assumed to be 1.
+ On exit, the (triangular) inverse of the original matrix, in
+ the same storage format.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, A(i,i) is exactly zero. The triangular
+ matrix is singular and its inverse cannot be computed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("DTRTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity if non-unit. */
+
+ if (nounit) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (a[*info + *info * a_dim1] == 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+ *info = 0;
+ }
+
+/*
+ Determine the block size for this environment.
+
+ Writing concatenation
+*/
+ i__2[0] = 1, a__1[0] = uplo;
+ i__2[1] = 1, a__1[1] = diag;
+ s_cat(ch__1, a__1, i__2, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "DTRTRI", ch__1, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+ if ((nb <= 1) || (nb >= *n)) {
+
+/* Use unblocked code */
+
+ dtrti2_(uplo, diag, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix */
+
+ i__1 = *n;
+ i__3 = nb;
+ for (j = 1; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *n - j + 1;
+ jb = min(i__4,i__5);
+
+/* Compute rows 1:j-1 of current block column */
+
+ i__4 = j - 1;
+ dtrmm_("Left", "Upper", "No transpose", diag, &i__4, &jb, &
+ c_b2865, &a[a_offset], lda, &a[j * a_dim1 + 1], lda);
+ i__4 = j - 1;
+ dtrsm_("Right", "Upper", "No transpose", diag, &i__4, &jb, &
+ c_b3001, &a[j + j * a_dim1], lda, &a[j * a_dim1 + 1],
+ lda);
+
+/* Compute inverse of current diagonal block */
+
+ dtrti2_("Upper", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L20: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix */
+
+ nn = (*n - 1) / nb * nb + 1;
+ i__3 = -nb;
+ for (j = nn; i__3 < 0 ? j >= 1 : j <= 1; j += i__3) {
+/* Computing MIN */
+ i__1 = nb, i__4 = *n - j + 1;
+ jb = min(i__1,i__4);
+ if (j + jb <= *n) {
+
+/* Compute rows j+jb:n of current block column */
+
+ i__1 = *n - j - jb + 1;
+ dtrmm_("Left", "Lower", "No transpose", diag, &i__1, &jb,
+ &c_b2865, &a[j + jb + (j + jb) * a_dim1], lda, &a[
+ j + jb + j * a_dim1], lda);
+ i__1 = *n - j - jb + 1;
+ dtrsm_("Right", "Lower", "No transpose", diag, &i__1, &jb,
+ &c_b3001, &a[j + j * a_dim1], lda, &a[j + jb + j
+ * a_dim1], lda);
+ }
+
+/* Compute inverse of current diagonal block */
+
+ dtrti2_("Lower", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L30: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of DTRTRI */
+
+} /* dtrtri_ */
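+
+/*
+    A minimal sketch, not compiled: calling dtrtri_ as declared above
+    and acting on its INFO convention.  INFO = i > 0 means A(i,i) is
+    exactly zero, so the matrix is singular and no inverse was formed.
+*/
+#if 0
+static int invert_upper_triangular(integer n, doublereal *a, integer lda)
+{
+    char uplo = 'U', diag = 'N';
+    integer info;
+    dtrtri_(&uplo, &diag, &n, a, &lda, &info);
+    if (info > 0)
+        return 1;    /* singular: A(info,info) == 0, no inverse computed */
+    if (info < 0)
+        return -1;   /* argument -info was illegal; xerbla_ was called */
+    return 0;        /* a now holds the triangular inverse in place */
+}
+#endif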
+
+integer ieeeck_(integer *ispec, real *zero, real *one)
+{
+ /* System generated locals */
+ integer ret_val;
+
+ /* Local variables */
+ static real nan1, nan2, nan3, nan4, nan5, nan6, neginf, posinf, negzro,
+ newzro;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1998
+
+
+ Purpose
+ =======
+
+ IEEECK is called from ILAENV to verify that Infinity and
+ possibly NaN arithmetic is safe (i.e. will not trap).
+
+ Arguments
+ =========
+
+ ISPEC (input) INTEGER
+ Specifies whether to test just for infinity arithmetic
+ or whether to test for infinity and NaN arithmetic.
+ = 0: Verify infinity arithmetic only.
+ = 1: Verify infinity and NaN arithmetic.
+
+ ZERO (input) REAL
+ Must contain the value 0.0
+ This is passed to prevent the compiler from optimizing
+ away this code.
+
+ ONE (input) REAL
+ Must contain the value 1.0
+ This is passed to prevent the compiler from optimizing
+ away this code.
+
+ RETURN VALUE: INTEGER
+ = 0: Arithmetic failed to produce the correct answers
+ = 1: Arithmetic produced the correct answers
+*/
+
+ ret_val = 1;
+
+ posinf = *one / *zero;
+ if (posinf <= *one) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ neginf = -(*one) / *zero;
+ if (neginf >= *zero) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ negzro = *one / (neginf + *one);
+ if (negzro != *zero) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ neginf = *one / negzro;
+ if (neginf >= *zero) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ newzro = negzro + *zero;
+ if (newzro != *zero) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ posinf = *one / newzro;
+ if (posinf <= *one) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ neginf *= posinf;
+ if (neginf >= *zero) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ posinf *= posinf;
+ if (posinf <= *one) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+
+/* Return if we were only asked to check infinity arithmetic */
+
+ if (*ispec == 0) {
+ return ret_val;
+ }
+
+ nan1 = posinf + neginf;
+
+ nan2 = posinf / neginf;
+
+ nan3 = posinf / posinf;
+
+ nan4 = posinf * *zero;
+
+ nan5 = neginf * negzro;
+
+ nan6 = nan5 * 0.f;
+
+ if (nan1 == nan1) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ if (nan2 == nan2) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ if (nan3 == nan3) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ if (nan4 == nan4) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ if (nan5 == nan5) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ if (nan6 == nan6) {
+ ret_val = 0;
+ return ret_val;
+ }
+
+ return ret_val;
+} /* ieeeck_ */
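+
+/*
+    A minimal sketch, not compiled: consulting ieeeck_ the way the
+    ISPEC = 10 and 11 branches of ilaenv_ below do.  Passing 0.0f and
+    1.0f through variables keeps a compiler from folding the checks
+    away, as the ZERO/ONE argument comments above require.
+*/
+#if 0
+static int ieee_arithmetic_is_safe(void)
+{
+    real zero = 0.f, one = 1.f;
+    integer ispec = 1;            /* 0: infinity only; 1: infinity and NaN */
+    return ieeeck_(&ispec, &zero, &one) == 1;
+}
+#endif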
+
+integer ilaenv_(integer *ispec, char *name__, char *opts, integer *n1,
+ integer *n2, integer *n3, integer *n4, ftnlen name_len, ftnlen
+ opts_len)
+{
+ /* System generated locals */
+ integer ret_val;
+
+ /* Builtin functions */
+ /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen);
+ integer s_cmp(char *, char *, ftnlen, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ static char c1[1], c2[2], c3[3], c4[2];
+ static integer ic, nb, iz, nx;
+ static logical cname, sname;
+ static integer nbmin;
+ extern integer ieeeck_(integer *, real *, real *);
+ static char subnam[6];
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ ILAENV is called from the LAPACK routines to choose problem-dependent
+ parameters for the local environment. See ISPEC for a description of
+ the parameters.
+
+ This version provides a set of parameters which should give good,
+ but not optimal, performance on many of the currently available
+ computers. Users are encouraged to modify this subroutine to set
+ the tuning parameters for their particular machine using the option
+ and problem size information in the arguments.
+
+ This routine will not function correctly if it is converted to all
+ lower case. Converting it to all upper case is allowed.
+
+ Arguments
+ =========
+
+ ISPEC (input) INTEGER
+ Specifies the parameter to be returned as the value of
+ ILAENV.
+ = 1: the optimal blocksize; if this value is 1, an unblocked
+ algorithm will give the best performance.
+ = 2: the minimum block size for which the block routine
+ should be used; if the usable block size is less than
+ this value, an unblocked routine should be used.
+ = 3: the crossover point (in a block routine, for N less
+ than this value, an unblocked routine should be used)
+ = 4: the number of shifts, used in the nonsymmetric
+ eigenvalue routines
+ = 5: the minimum column dimension for blocking to be used;
+ rectangular blocks must have dimension at least k by m,
+ where k is given by ILAENV(2,...) and m by ILAENV(5,...)
+ = 6: the crossover point for the SVD (when reducing an m by n
+ matrix to bidiagonal form, if max(m,n)/min(m,n) exceeds
+ this value, a QR factorization is used first to reduce
+ the matrix to a triangular form.)
+ = 7: the number of processors
+ = 8: the crossover point for the multishift QR and QZ methods
+ for nonsymmetric eigenvalue problems.
+ = 9: maximum size of the subproblems at the bottom of the
+ computation tree in the divide-and-conquer algorithm
+ (used by xGELSD and xGESDD)
+ =10: ieee NaN arithmetic can be trusted not to trap
+ =11: infinity arithmetic can be trusted not to trap
+
+ NAME (input) CHARACTER*(*)
+ The name of the calling subroutine, in either upper case or
+ lower case.
+
+ OPTS (input) CHARACTER*(*)
+ The character options to the subroutine NAME, concatenated
+ into a single character string. For example, UPLO = 'U',
+ TRANS = 'T', and DIAG = 'N' for a triangular routine would
+ be specified as OPTS = 'UTN'.
+
+ N1 (input) INTEGER
+ N2 (input) INTEGER
+ N3 (input) INTEGER
+ N4 (input) INTEGER
+ Problem dimensions for the subroutine NAME; these may not all
+ be required.
+
+ (ILAENV) (output) INTEGER
+ >= 0: the value of the parameter specified by ISPEC
+ < 0: if ILAENV = -k, the k-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The following conventions have been used when calling ILAENV from the
+ LAPACK routines:
+ 1) OPTS is a concatenation of all of the character options to
+ subroutine NAME, in the same order that they appear in the
+ argument list for NAME, even if they are not used in determining
+ the value of the parameter specified by ISPEC.
+ 2) The problem dimensions N1, N2, N3, N4 are specified in the order
+ that they appear in the argument list for NAME. N1 is used
+ first, N2 second, and so on, and unused problem dimensions are
+ passed a value of -1.
+ 3) The parameter value returned by ILAENV is checked for validity in
+ the calling subroutine. For example, ILAENV is used to retrieve
+ the optimal blocksize for STRTRI as follows:
+
+ NB = ILAENV( 1, 'STRTRI', UPLO // DIAG, N, -1, -1, -1 )
+ IF( NB.LE.1 ) NB = MAX( 1, N )
+
+ =====================================================================
+*/
+
+
+ switch (*ispec) {
+ case 1: goto L100;
+ case 2: goto L100;
+ case 3: goto L100;
+ case 4: goto L400;
+ case 5: goto L500;
+ case 6: goto L600;
+ case 7: goto L700;
+ case 8: goto L800;
+ case 9: goto L900;
+ case 10: goto L1000;
+ case 11: goto L1100;
+ }
+
+/* Invalid value for ISPEC */
+
+ ret_val = -1;
+ return ret_val;
+
+L100:
+
+/* Convert NAME to upper case if the first character is lower case. */
+
+ ret_val = 1;
+ s_copy(subnam, name__, (ftnlen)6, name_len);
+ ic = *(unsigned char *)subnam;
+ iz = 'Z';
+ if ((iz == 90) || (iz == 122)) {
+
+/* ASCII character set */
+
+ if (ic >= 97 && ic <= 122) {
+ *(unsigned char *)subnam = (char) (ic - 32);
+ for (i__ = 2; i__ <= 6; ++i__) {
+ ic = *(unsigned char *)&subnam[i__ - 1];
+ if (ic >= 97 && ic <= 122) {
+ *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32);
+ }
+/* L10: */
+ }
+ }
+
+ } else if ((iz == 233) || (iz == 169)) {
+
+/* EBCDIC character set */
+
+ if (((ic >= 129 && ic <= 137) || (ic >= 145 && ic <= 153)) || (ic >=
+ 162 && ic <= 169)) {
+ *(unsigned char *)subnam = (char) (ic + 64);
+ for (i__ = 2; i__ <= 6; ++i__) {
+ ic = *(unsigned char *)&subnam[i__ - 1];
+ if (((ic >= 129 && ic <= 137) || (ic >= 145 && ic <= 153)) ||
+ (ic >= 162 && ic <= 169)) {
+ *(unsigned char *)&subnam[i__ - 1] = (char) (ic + 64);
+ }
+/* L20: */
+ }
+ }
+
+ } else if ((iz == 218) || (iz == 250)) {
+
+/* Prime machines: ASCII+128 */
+
+ if (ic >= 225 && ic <= 250) {
+ *(unsigned char *)subnam = (char) (ic - 32);
+ for (i__ = 2; i__ <= 6; ++i__) {
+ ic = *(unsigned char *)&subnam[i__ - 1];
+ if (ic >= 225 && ic <= 250) {
+ *(unsigned char *)&subnam[i__ - 1] = (char) (ic - 32);
+ }
+/* L30: */
+ }
+ }
+ }
+
+ *(unsigned char *)c1 = *(unsigned char *)subnam;
+ sname = (*(unsigned char *)c1 == 'S') || (*(unsigned char *)c1 == 'D');
+ cname = (*(unsigned char *)c1 == 'C') || (*(unsigned char *)c1 == 'Z');
+ if (! ((cname) || (sname))) {
+ return ret_val;
+ }
+ s_copy(c2, subnam + 1, (ftnlen)2, (ftnlen)2);
+ s_copy(c3, subnam + 3, (ftnlen)3, (ftnlen)3);
+ s_copy(c4, c3 + 1, (ftnlen)2, (ftnlen)2);
+
+ switch (*ispec) {
+ case 1: goto L110;
+ case 2: goto L200;
+ case 3: goto L300;
+ }
+
+L110:
+
+/*
+ ISPEC = 1: block size
+
+ In these examples, separate code is provided for setting NB for
+ real and complex. We assume that NB will take the same value in
+ single or double precision.
+*/
+
+ nb = 1;
+
+ if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ } else if ((((s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0) || (s_cmp(
+ c3, "RQF", (ftnlen)3, (ftnlen)3) == 0)) || (s_cmp(c3, "LQF", (
+ ftnlen)3, (ftnlen)3) == 0)) || (s_cmp(c3, "QLF", (ftnlen)3, (
+ ftnlen)3) == 0)) {
+ if (sname) {
+ nb = 32;
+ } else {
+ nb = 32;
+ }
+ } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 32;
+ } else {
+ nb = 32;
+ }
+ } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 32;
+ } else {
+ nb = 32;
+ }
+ } else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ }
+ } else if (s_cmp(c2, "PO", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ }
+ } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ } else if (sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
+ nb = 32;
+ } else if (sname && s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0) {
+ nb = 64;
+ }
+ } else if (cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+ nb = 64;
+ } else if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
+ nb = 32;
+ } else if (s_cmp(c3, "GST", (ftnlen)3, (ftnlen)3) == 0) {
+ nb = 64;
+ }
+ } else if (sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (((((((s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0) || (s_cmp(
+ c4, "RQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4,
+ "LQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "QL", (
+ ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "HR", (ftnlen)2,
+ (ftnlen)2) == 0)) || (s_cmp(c4, "TR", (ftnlen)2, (ftnlen)
+ 2) == 0)) || (s_cmp(c4, "BR", (ftnlen)2, (ftnlen)2) == 0))
+ {
+ nb = 32;
+ }
+ } else if (*(unsigned char *)c3 == 'M') {
+ if (((((((s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0) || (s_cmp(
+ c4, "RQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4,
+ "LQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "QL", (
+ ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "HR", (ftnlen)2,
+ (ftnlen)2) == 0)) || (s_cmp(c4, "TR", (ftnlen)2, (ftnlen)
+ 2) == 0)) || (s_cmp(c4, "BR", (ftnlen)2, (ftnlen)2) == 0))
+ {
+ nb = 32;
+ }
+ }
+ } else if (cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (((((((s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0) || (s_cmp(
+ c4, "RQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4,
+ "LQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "QL", (
+ ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "HR", (ftnlen)2,
+ (ftnlen)2) == 0)) || (s_cmp(c4, "TR", (ftnlen)2, (ftnlen)
+ 2) == 0)) || (s_cmp(c4, "BR", (ftnlen)2, (ftnlen)2) == 0))
+ {
+ nb = 32;
+ }
+ } else if (*(unsigned char *)c3 == 'M') {
+ if (((((((s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0) || (s_cmp(
+ c4, "RQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4,
+ "LQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "QL", (
+ ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "HR", (ftnlen)2,
+ (ftnlen)2) == 0)) || (s_cmp(c4, "TR", (ftnlen)2, (ftnlen)
+ 2) == 0)) || (s_cmp(c4, "BR", (ftnlen)2, (ftnlen)2) == 0))
+ {
+ nb = 32;
+ }
+ }
+ } else if (s_cmp(c2, "GB", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ if (*n4 <= 64) {
+ nb = 1;
+ } else {
+ nb = 32;
+ }
+ } else {
+ if (*n4 <= 64) {
+ nb = 1;
+ } else {
+ nb = 32;
+ }
+ }
+ }
+ } else if (s_cmp(c2, "PB", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ if (*n2 <= 64) {
+ nb = 1;
+ } else {
+ nb = 32;
+ }
+ } else {
+ if (*n2 <= 64) {
+ nb = 1;
+ } else {
+ nb = 32;
+ }
+ }
+ }
+ } else if (s_cmp(c2, "TR", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ }
+ } else if (s_cmp(c2, "LA", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "UUM", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nb = 64;
+ } else {
+ nb = 64;
+ }
+ }
+ } else if (sname && s_cmp(c2, "ST", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "EBZ", (ftnlen)3, (ftnlen)3) == 0) {
+ nb = 1;
+ }
+ }
+ ret_val = nb;
+ return ret_val;
+
+L200:
+
+/* ISPEC = 2: minimum block size */
+
+ nbmin = 2;
+ if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) {
+ if ((((s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0) || (s_cmp(c3,
+ "RQF", (ftnlen)3, (ftnlen)3) == 0)) || (s_cmp(c3, "LQF", (
+ ftnlen)3, (ftnlen)3) == 0)) || (s_cmp(c3, "QLF", (ftnlen)3, (
+ ftnlen)3) == 0)) {
+ if (sname) {
+ nbmin = 2;
+ } else {
+ nbmin = 2;
+ }
+ } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nbmin = 2;
+ } else {
+ nbmin = 2;
+ }
+ } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nbmin = 2;
+ } else {
+ nbmin = 2;
+ }
+ } else if (s_cmp(c3, "TRI", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nbmin = 2;
+ } else {
+ nbmin = 2;
+ }
+ }
+ } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRF", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nbmin = 8;
+ } else {
+ nbmin = 8;
+ }
+ } else if (sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
+ nbmin = 2;
+ }
+ } else if (cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
+ nbmin = 2;
+ }
+ } else if (sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (((((((s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0) || (s_cmp(
+ c4, "RQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4,
+ "LQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "QL", (
+ ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "HR", (ftnlen)2,
+ (ftnlen)2) == 0)) || (s_cmp(c4, "TR", (ftnlen)2, (ftnlen)
+ 2) == 0)) || (s_cmp(c4, "BR", (ftnlen)2, (ftnlen)2) == 0))
+ {
+ nbmin = 2;
+ }
+ } else if (*(unsigned char *)c3 == 'M') {
+ if (((((((s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0) || (s_cmp(
+ c4, "RQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4,
+ "LQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "QL", (
+ ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "HR", (ftnlen)2,
+ (ftnlen)2) == 0)) || (s_cmp(c4, "TR", (ftnlen)2, (ftnlen)
+ 2) == 0)) || (s_cmp(c4, "BR", (ftnlen)2, (ftnlen)2) == 0))
+ {
+ nbmin = 2;
+ }
+ }
+ } else if (cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (((((((s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0) || (s_cmp(
+ c4, "RQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4,
+ "LQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "QL", (
+ ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "HR", (ftnlen)2,
+ (ftnlen)2) == 0)) || (s_cmp(c4, "TR", (ftnlen)2, (ftnlen)
+ 2) == 0)) || (s_cmp(c4, "BR", (ftnlen)2, (ftnlen)2) == 0))
+ {
+ nbmin = 2;
+ }
+ } else if (*(unsigned char *)c3 == 'M') {
+ if (((((((s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0) || (s_cmp(
+ c4, "RQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4,
+ "LQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "QL", (
+ ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "HR", (ftnlen)2,
+ (ftnlen)2) == 0)) || (s_cmp(c4, "TR", (ftnlen)2, (ftnlen)
+ 2) == 0)) || (s_cmp(c4, "BR", (ftnlen)2, (ftnlen)2) == 0))
+ {
+ nbmin = 2;
+ }
+ }
+ }
+ ret_val = nbmin;
+ return ret_val;
+
+L300:
+
+/* ISPEC = 3: crossover point */
+
+ nx = 0;
+ if (s_cmp(c2, "GE", (ftnlen)2, (ftnlen)2) == 0) {
+ if ((((s_cmp(c3, "QRF", (ftnlen)3, (ftnlen)3) == 0) || (s_cmp(c3,
+ "RQF", (ftnlen)3, (ftnlen)3) == 0)) || (s_cmp(c3, "LQF", (
+ ftnlen)3, (ftnlen)3) == 0)) || (s_cmp(c3, "QLF", (ftnlen)3, (
+ ftnlen)3) == 0)) {
+ if (sname) {
+ nx = 128;
+ } else {
+ nx = 128;
+ }
+ } else if (s_cmp(c3, "HRD", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nx = 128;
+ } else {
+ nx = 128;
+ }
+ } else if (s_cmp(c3, "BRD", (ftnlen)3, (ftnlen)3) == 0) {
+ if (sname) {
+ nx = 128;
+ } else {
+ nx = 128;
+ }
+ }
+ } else if (s_cmp(c2, "SY", (ftnlen)2, (ftnlen)2) == 0) {
+ if (sname && s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
+ nx = 32;
+ }
+ } else if (cname && s_cmp(c2, "HE", (ftnlen)2, (ftnlen)2) == 0) {
+ if (s_cmp(c3, "TRD", (ftnlen)3, (ftnlen)3) == 0) {
+ nx = 32;
+ }
+ } else if (sname && s_cmp(c2, "OR", (ftnlen)2, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (((((((s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0) || (s_cmp(
+ c4, "RQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4,
+ "LQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "QL", (
+ ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "HR", (ftnlen)2,
+ (ftnlen)2) == 0)) || (s_cmp(c4, "TR", (ftnlen)2, (ftnlen)
+ 2) == 0)) || (s_cmp(c4, "BR", (ftnlen)2, (ftnlen)2) == 0))
+ {
+ nx = 128;
+ }
+ }
+ } else if (cname && s_cmp(c2, "UN", (ftnlen)2, (ftnlen)2) == 0) {
+ if (*(unsigned char *)c3 == 'G') {
+ if (((((((s_cmp(c4, "QR", (ftnlen)2, (ftnlen)2) == 0) || (s_cmp(
+ c4, "RQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4,
+ "LQ", (ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "QL", (
+ ftnlen)2, (ftnlen)2) == 0)) || (s_cmp(c4, "HR", (ftnlen)2,
+ (ftnlen)2) == 0)) || (s_cmp(c4, "TR", (ftnlen)2, (ftnlen)
+ 2) == 0)) || (s_cmp(c4, "BR", (ftnlen)2, (ftnlen)2) == 0))
+ {
+ nx = 128;
+ }
+ }
+ }
+ ret_val = nx;
+ return ret_val;
+
+L400:
+
+/* ISPEC = 4: number of shifts (used by xHSEQR) */
+
+ ret_val = 6;
+ return ret_val;
+
+L500:
+
+/* ISPEC = 5: minimum column dimension (not used) */
+
+ ret_val = 2;
+ return ret_val;
+
+L600:
+
+/* ISPEC = 6: crossover point for SVD (used by xGELSS and xGESVD) */
+
+ ret_val = (integer) ((real) min(*n1,*n2) * 1.6f);
+ return ret_val;
+
+L700:
+
+/* ISPEC = 7: number of processors (not used) */
+
+ ret_val = 1;
+ return ret_val;
+
+L800:
+
+/* ISPEC = 8: crossover point for multishift (used by xHSEQR) */
+
+ ret_val = 50;
+ return ret_val;
+
+L900:
+
+/*
+ ISPEC = 9: maximum size of the subproblems at the bottom of the
+ computation tree in the divide-and-conquer algorithm
+ (used by xGELSD and xGESDD)
+*/
+
+ ret_val = 25;
+ return ret_val;
+
+L1000:
+
+/*
+   ISPEC = 10: ieee NaN arithmetic can be trusted not to trap
+   (set the return value to 0 here to skip the runtime IEEE check)
+*/
+ ret_val = 1;
+ if (ret_val == 1) {
+ ret_val = ieeeck_(&c__0, &c_b320, &c_b1011);
+ }
+ return ret_val;
+
+L1100:
+
+/*
+   ISPEC = 11: infinity arithmetic can be trusted not to trap
+   (set the return value to 0 here to skip the runtime IEEE check)
+*/
+ ret_val = 1;
+ if (ret_val == 1) {
+ ret_val = ieeeck_(&c__1, &c_b320, &c_b1011);
+ }
+ return ret_val;
+
+/* End of ILAENV */
+
+} /* ilaenv_ */
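+
+/*
+   Usage sketch (illustrative only, excluded from compilation): the C
+   analogue of the STRTRI query shown in the Further Details block above,
+   i.e. NB = ILAENV( 1, 'STRTRI', UPLO // DIAG, N, -1, -1, -1 ). The
+   f2c.h header and its integer/ftnlen typedefs are assumptions.
+*/
+#if 0
+#include "f2c.h"
+
+extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+                       integer *, integer *, ftnlen, ftnlen);
+
+static integer strtri_blocksize(integer n)
+{
+    integer ispec = 1;           /* 1: optimal block size */
+    integer unused = -1;         /* unused problem dimensions are -1 */
+    /* OPTS = "UN" is UPLO = 'U' concatenated with DIAG = 'N' */
+    integer nb = ilaenv_(&ispec, "STRTRI", "UN", &n, &unused, &unused,
+                         &unused, (ftnlen)6, (ftnlen)2);
+    /* IF( NB.LE.1 ) NB = MAX( 1, N ), as in the Fortran example above */
+    return nb <= 1 ? (n > 1 ? n : 1) : nb;
+}
+#endif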
+
+/* Subroutine */ int sbdsdc_(char *uplo, char *compq, integer *n, real *d__,
+ real *e, real *u, integer *ldu, real *vt, integer *ldvt, real *q,
+ integer *iq, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double r_sign(real *, real *), log(doublereal);
+
+ /* Local variables */
+ static integer i__, j, k;
+ static real p, r__;
+ static integer z__, ic, ii, kk;
+ static real cs;
+ static integer is, iu;
+ static real sn;
+ static integer nm1;
+ static real eps;
+ static integer ivt, difl, difr, ierr, perm, mlvl, sqre;
+ extern logical lsame_(char *, char *);
+ static integer poles;
+ extern /* Subroutine */ int slasr_(char *, char *, char *, integer *,
+ integer *, real *, real *, real *, integer *);
+ static integer iuplo, nsize, start;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), sswap_(integer *, real *, integer *, real *, integer *
+ ), slasd0_(integer *, integer *, real *, real *, real *, integer *
+ , real *, integer *, integer *, integer *, real *, integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int slasda_(integer *, integer *, integer *,
+ integer *, real *, real *, real *, integer *, real *, integer *,
+ real *, real *, real *, real *, integer *, integer *, integer *,
+ integer *, real *, real *, real *, real *, integer *, integer *),
+ xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ static integer givcol;
+ extern /* Subroutine */ int slasdq_(char *, integer *, integer *, integer
+ *, integer *, integer *, real *, real *, real *, integer *, real *
+ , integer *, real *, integer *, real *, integer *);
+ static integer icompq;
+ extern /* Subroutine */ int slaset_(char *, integer *, integer *, real *,
+ real *, real *, integer *), slartg_(real *, real *, real *
+ , real *, real *);
+ static real orgnrm;
+ static integer givnum;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ static integer givptr, qstart, smlsiz, wstart, smlszp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ December 1, 1999
+
+
+ Purpose
+ =======
+
+ SBDSDC computes the singular value decomposition (SVD) of a real
+ N-by-N (upper or lower) bidiagonal matrix B: B = U * S * VT,
+ using a divide and conquer method, where S is a diagonal matrix
+ with non-negative diagonal elements (the singular values of B), and
+ U and VT are orthogonal matrices of left and right singular vectors,
+ respectively. SBDSDC can be used to compute all singular values,
+ and optionally, singular vectors or singular vectors in compact form.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none. See SLASD3 for details.
+
+ The code currently calls SLASDQ if singular values only are desired.
+ However, it can be slightly modified to compute singular values
+ using the divide and conquer method.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': B is upper bidiagonal.
+ = 'L': B is lower bidiagonal.
+
+ COMPQ (input) CHARACTER*1
+ Specifies whether singular vectors are to be computed
+ as follows:
+ = 'N': Compute singular values only;
+ = 'P': Compute singular values and compute singular
+ vectors in compact form;
+ = 'I': Compute singular values and singular vectors.
+
+ N (input) INTEGER
+ The order of the matrix B. N >= 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the n diagonal elements of the bidiagonal matrix B.
+ On exit, if INFO=0, the singular values of B.
+
+ E (input/output) REAL array, dimension (N)
+ On entry, the elements of E contain the offdiagonal
+ elements of the bidiagonal matrix whose SVD is desired.
+ On exit, E has been destroyed.
+
+ U (output) REAL array, dimension (LDU,N)
+ If COMPQ = 'I', then:
+ On exit, if INFO = 0, U contains the left singular vectors
+ of the bidiagonal matrix.
+ For other values of COMPQ, U is not referenced.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= 1.
+ If singular vectors are desired, then LDU >= max( 1, N ).
+
+ VT (output) REAL array, dimension (LDVT,N)
+ If COMPQ = 'I', then:
+ On exit, if INFO = 0, VT' contains the right singular
+ vectors of the bidiagonal matrix.
+ For other values of COMPQ, VT is not referenced.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= 1.
+ If singular vectors are desired, then LDVT >= max( 1, N ).
+
+ Q (output) REAL array, dimension (LDQ)
+ If COMPQ = 'P', then:
+ On exit, if INFO = 0, Q and IQ contain the left
+ and right singular vectors in a compact form,
+ requiring O(N log N) space instead of 2*N**2.
+ In particular, Q contains all the REAL data in
+ LDQ >= N*(11 + 2*SMLSIZ + 8*INT(LOG_2(N/(SMLSIZ+1))))
+ words of memory, where SMLSIZ is returned by ILAENV and
+ is equal to the maximum size of the subproblems at the
+ bottom of the computation tree (usually about 25).
+ For other values of COMPQ, Q is not referenced.
+
+ IQ (output) INTEGER array, dimension (LDIQ)
+ If COMPQ = 'P', then:
+ On exit, if INFO = 0, Q and IQ contain the left
+ and right singular vectors in a compact form,
+ requiring O(N log N) space instead of 2*N**2.
+ In particular, IQ contains all INTEGER data in
+ LDIQ >= N*(3 + 3*INT(LOG_2(N/(SMLSIZ+1))))
+ words of memory, where SMLSIZ is returned by ILAENV and
+ is equal to the maximum size of the subproblems at the
+ bottom of the computation tree (usually about 25).
+ For other values of COMPQ, IQ is not referenced.
+
+ WORK (workspace) REAL array, dimension (LWORK)
+ If COMPQ = 'N' then LWORK >= (4 * N).
+ If COMPQ = 'P' then LWORK >= (6 * N).
+ If COMPQ = 'I' then LWORK >= (3 * N**2 + 4 * N).
+
+ IWORK (workspace) INTEGER array, dimension (8*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute a singular value.
+ The update process of divide and conquer failed.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --q;
+ --iq;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ iuplo = 0;
+ if (lsame_(uplo, "U")) {
+ iuplo = 1;
+ }
+ if (lsame_(uplo, "L")) {
+ iuplo = 2;
+ }
+ if (lsame_(compq, "N")) {
+ icompq = 0;
+ } else if (lsame_(compq, "P")) {
+ icompq = 1;
+ } else if (lsame_(compq, "I")) {
+ icompq = 2;
+ } else {
+ icompq = -1;
+ }
+ if (iuplo == 0) {
+ *info = -1;
+ } else if (icompq < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if ((*ldu < 1) || (icompq == 2 && *ldu < *n)) {
+ *info = -7;
+ } else if ((*ldvt < 1) || (icompq == 2 && *ldvt < *n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SBDSDC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ smlsiz = ilaenv_(&c__9, "SBDSDC", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+ if (*n == 1) {
+ if (icompq == 1) {
+ q[1] = r_sign(&c_b1011, &d__[1]);
+ q[smlsiz * *n + 1] = 1.f;
+ } else if (icompq == 2) {
+ u[u_dim1 + 1] = r_sign(&c_b1011, &d__[1]);
+ vt[vt_dim1 + 1] = 1.f;
+ }
+ d__[1] = dabs(d__[1]);
+ return 0;
+ }
+ nm1 = *n - 1;
+
+/*
+ If matrix lower bidiagonal, rotate to be upper bidiagonal
+ by applying Givens rotations on the left
+*/
+
+ wstart = 1;
+ qstart = 3;
+ if (icompq == 1) {
+ scopy_(n, &d__[1], &c__1, &q[1], &c__1);
+ i__1 = *n - 1;
+ scopy_(&i__1, &e[1], &c__1, &q[*n + 1], &c__1);
+ }
+ if (iuplo == 2) {
+ qstart = 5;
+ wstart = ((*n) << (1)) - 1;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (icompq == 1) {
+ q[i__ + ((*n) << (1))] = cs;
+ q[i__ + *n * 3] = sn;
+ } else if (icompq == 2) {
+ work[i__] = cs;
+ work[nm1 + i__] = -sn;
+ }
+/* L10: */
+ }
+ }
+
+/* If ICOMPQ = 0, use SLASDQ to compute the singular values. */
+
+ if (icompq == 0) {
+ slasdq_("U", &c__0, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
+ vt_offset], ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
+ wstart], info);
+ goto L40;
+ }
+
+/*
+ If N is smaller than the minimum divide size SMLSIZ, then solve
+ the problem with another solver.
+*/
+
+ if (*n <= smlsiz) {
+ if (icompq == 2) {
+ slaset_("A", n, n, &c_b320, &c_b1011, &u[u_offset], ldu);
+ slaset_("A", n, n, &c_b320, &c_b1011, &vt[vt_offset], ldvt);
+ slasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
+ , ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[
+ wstart], info);
+ } else if (icompq == 1) {
+ iu = 1;
+ ivt = iu + *n;
+ slaset_("A", n, n, &c_b320, &c_b1011, &q[iu + (qstart - 1) * *n],
+ n);
+ slaset_("A", n, n, &c_b320, &c_b1011, &q[ivt + (qstart - 1) * *n],
+ n);
+ slasdq_("U", &c__0, n, n, n, &c__0, &d__[1], &e[1], &q[ivt + (
+ qstart - 1) * *n], n, &q[iu + (qstart - 1) * *n], n, &q[
+ iu + (qstart - 1) * *n], n, &work[wstart], info);
+ }
+ goto L40;
+ }
+
+ if (icompq == 2) {
+ slaset_("A", n, n, &c_b320, &c_b1011, &u[u_offset], ldu);
+ slaset_("A", n, n, &c_b320, &c_b1011, &vt[vt_offset], ldvt)
+ ;
+ }
+
+/* Scale. */
+
+ orgnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.f) {
+ return 0;
+ }
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, n, &c__1, &d__[1], n, &ierr);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, &nm1, &c__1, &e[1], &nm1, &
+ ierr);
+
+ eps = slamch_("Epsilon");
+
+ mlvl = (integer) (log((real) (*n) / (real) (smlsiz + 1)) / log(2.f)) + 1;
+ smlszp = smlsiz + 1;
+
+ if (icompq == 1) {
+ iu = 1;
+ ivt = smlsiz + 1;
+ difl = ivt + smlszp;
+ difr = difl + mlvl;
+ z__ = difr + ((mlvl) << (1));
+ ic = z__ + mlvl;
+ is = ic + 1;
+ poles = is + 1;
+ givnum = poles + ((mlvl) << (1));
+
+ k = 1;
+ givptr = 2;
+ perm = 3;
+ givcol = perm + mlvl;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) < eps) {
+ d__[i__] = r_sign(&eps, &d__[i__]);
+ }
+/* L20: */
+ }
+
+ start = 1;
+ sqre = 0;
+
+ i__1 = nm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (((r__1 = e[i__], dabs(r__1)) < eps) || (i__ == nm1)) {
+
+/*
+ Subproblem found. First determine its size and then
+ apply divide and conquer on it.
+*/
+
+ if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+ nsize = i__ - start + 1;
+ } else if ((r__1 = e[i__], dabs(r__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+ nsize = *n - start + 1;
+ } else {
+
+/*
+ A subproblem with E(NM1) small. This implies a
+ 1-by-1 subproblem at D(N). Solve this 1-by-1 problem
+ first.
+*/
+
+ nsize = i__ - start + 1;
+ if (icompq == 2) {
+ u[*n + *n * u_dim1] = r_sign(&c_b1011, &d__[*n]);
+ vt[*n + *n * vt_dim1] = 1.f;
+ } else if (icompq == 1) {
+ q[*n + (qstart - 1) * *n] = r_sign(&c_b1011, &d__[*n]);
+ q[*n + (smlsiz + qstart - 1) * *n] = 1.f;
+ }
+ d__[*n] = (r__1 = d__[*n], dabs(r__1));
+ }
+ if (icompq == 2) {
+ slasd0_(&nsize, &sqre, &d__[start], &e[start], &u[start +
+ start * u_dim1], ldu, &vt[start + start * vt_dim1],
+ ldvt, &smlsiz, &iwork[1], &work[wstart], info);
+ } else {
+ slasda_(&icompq, &smlsiz, &nsize, &sqre, &d__[start], &e[
+ start], &q[start + (iu + qstart - 2) * *n], n, &q[
+ start + (ivt + qstart - 2) * *n], &iq[start + k * *n],
+ &q[start + (difl + qstart - 2) * *n], &q[start + (
+ difr + qstart - 2) * *n], &q[start + (z__ + qstart -
+ 2) * *n], &q[start + (poles + qstart - 2) * *n], &iq[
+ start + givptr * *n], &iq[start + givcol * *n], n, &
+ iq[start + perm * *n], &q[start + (givnum + qstart -
+ 2) * *n], &q[start + (ic + qstart - 2) * *n], &q[
+ start + (is + qstart - 2) * *n], &work[wstart], &
+ iwork[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+ start = i__ + 1;
+ }
+/* L30: */
+ }
+
+/* Unscale */
+
+ slascl_("G", &c__0, &c__0, &c_b1011, &orgnrm, n, &c__1, &d__[1], n, &ierr);
+L40:
+
+/* Use Selection Sort to minimize swaps of singular vectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ kk = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] > p) {
+ kk = j;
+ p = d__[j];
+ }
+/* L50: */
+ }
+ if (kk != i__) {
+ d__[kk] = d__[i__];
+ d__[i__] = p;
+ if (icompq == 1) {
+ iq[i__] = kk;
+ } else if (icompq == 2) {
+ sswap_(n, &u[i__ * u_dim1 + 1], &c__1, &u[kk * u_dim1 + 1], &
+ c__1);
+ sswap_(n, &vt[i__ + vt_dim1], ldvt, &vt[kk + vt_dim1], ldvt);
+ }
+ } else if (icompq == 1) {
+ iq[i__] = i__;
+ }
+/* L60: */
+ }
+
+/* If ICOMPQ = 1, use IQ(N,1) as the indicator for UPLO */
+
+ if (icompq == 1) {
+ if (iuplo == 1) {
+ iq[*n] = 1;
+ } else {
+ iq[*n] = 0;
+ }
+ }
+
+/*
+ If B is lower bidiagonal, update U by those Givens rotations
+ which rotated B to be upper bidiagonal
+*/
+
+ if (iuplo == 2 && icompq == 2) {
+ slasr_("L", "V", "B", n, n, &work[1], &work[*n], &u[u_offset], ldu);
+ }
+
+ return 0;
+
+/* End of SBDSDC */
+
+} /* sbdsdc_ */
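+
+/*
+   Usage sketch (illustrative only, excluded from compilation): computing
+   the singular values of a small upper bidiagonal matrix with sbdsdc_,
+   COMPQ = 'N'. With that option U, VT, Q and IQ are not referenced, so
+   one-element dummies suffice; the workspace sizes follow the LWORK and
+   IWORK rules quoted above. The f2c.h typedefs are assumptions.
+*/
+#if 0
+#include "f2c.h"
+
+extern int sbdsdc_(char *, char *, integer *, real *, real *, real *,
+                   integer *, real *, integer *, real *, integer *,
+                   real *, integer *, integer *);
+
+static void bidiagonal_singular_values(void)
+{
+    integer n = 3, ldu = 1, ldvt = 1, info = 0;
+    real d[3] = {2.f, 3.f, 1.f};   /* diagonal of B */
+    real e[3] = {1.f, 1.f, 0.f};   /* superdiagonal; E(N) is workspace */
+    real work[12];                 /* LWORK >= 4*N for COMPQ = 'N' */
+    integer iwork[24];             /* dimension 8*N */
+    real udum[1], vtdum[1], qdum[1];
+    integer iqdum[1];
+
+    sbdsdc_("U", "N", &n, d, e, udum, &ldu, vtdum, &ldvt, qdum, iqdum,
+            work, iwork, &info);
+    /* on exit with info == 0, d[] holds the singular values of B */
+}
+#endif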
+
+/* Subroutine */ int sbdsqr_(char *uplo, integer *n, integer *ncvt, integer *
+ nru, integer *ncc, real *d__, real *e, real *vt, integer *ldvt, real *
+ u, integer *ldu, real *c__, integer *ldc, real *work, integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2;
+ real r__1, r__2, r__3, r__4;
+ doublereal d__1;
+
+ /* Builtin functions */
+ double pow_dd(doublereal *, doublereal *), sqrt(doublereal), r_sign(real *
+ , real *);
+
+ /* Local variables */
+ static real f, g, h__;
+ static integer i__, j, m;
+ static real r__, cs;
+ static integer ll;
+ static real sn, mu;
+ static integer nm1, nm12, nm13, lll;
+ static real eps, sll, tol, abse;
+ static integer idir;
+ static real abss;
+ static integer oldm;
+ static real cosl;
+ static integer isub, iter;
+ static real unfl, sinl, cosr, smin, smax, sinr;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *), slas2_(real *, real *, real *, real *,
+ real *);
+ extern logical lsame_(char *, char *);
+ static real oldcs;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ static integer oldll;
+ static real shift, sigmn, oldsn;
+ static integer maxit;
+ static real sminl;
+ extern /* Subroutine */ int slasr_(char *, char *, char *, integer *,
+ integer *, real *, real *, real *, integer *);
+ static real sigmx;
+ static logical lower;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), slasq1_(integer *, real *, real *, real *, integer *),
+ slasv2_(real *, real *, real *, real *, real *, real *, real *,
+ real *, real *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static real sminoa;
+ extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
+ );
+ static real thresh;
+ static logical rotate;
+ static real sminlo, tolmul;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SBDSQR computes the singular value decomposition (SVD) of a real
+ N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P'
+ denotes the transpose of P), where S is a diagonal matrix with
+ non-negative diagonal elements (the singular values of B), and Q
+ and P are orthogonal matrices.
+
+ The routine computes S, and optionally computes U * Q, P' * VT,
+ or Q' * C, for given real input matrices U, VT, and C.
+
+ See "Computing Small Singular Values of Bidiagonal Matrices With
+ Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan,
+ LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. vol. 11,
+ no. 5, pp. 873-912, Sept 1990) and
+ "Accurate singular values and differential qd algorithms," by
+ B. Parlett and V. Fernando, Technical Report CPAM-554, Mathematics
+ Department, University of California at Berkeley, July 1992
+ for a detailed description of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': B is upper bidiagonal;
+ = 'L': B is lower bidiagonal.
+
+ N (input) INTEGER
+ The order of the matrix B. N >= 0.
+
+ NCVT (input) INTEGER
+ The number of columns of the matrix VT. NCVT >= 0.
+
+ NRU (input) INTEGER
+ The number of rows of the matrix U. NRU >= 0.
+
+ NCC (input) INTEGER
+ The number of columns of the matrix C. NCC >= 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the n diagonal elements of the bidiagonal matrix B.
+ On exit, if INFO=0, the singular values of B in decreasing
+ order.
+
+ E (input/output) REAL array, dimension (N)
+ On entry, the elements of E contain the
+ offdiagonal elements of the bidiagonal matrix whose SVD
+ is desired. On normal exit (INFO = 0), E is destroyed.
+ If the algorithm does not converge (INFO > 0), D and E
+ will contain the diagonal and superdiagonal elements of a
+ bidiagonal matrix orthogonally equivalent to the one given
+ as input. E(N) is used for workspace.
+
+ VT (input/output) REAL array, dimension (LDVT, NCVT)
+ On entry, an N-by-NCVT matrix VT.
+ On exit, VT is overwritten by P' * VT.
+ VT is not referenced if NCVT = 0.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT.
+ LDVT >= max(1,N) if NCVT > 0; LDVT >= 1 if NCVT = 0.
+
+ U (input/output) REAL array, dimension (LDU, N)
+ On entry, an NRU-by-N matrix U.
+ On exit, U is overwritten by U * Q.
+ U is not referenced if NRU = 0.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= max(1,NRU).
+
+ C (input/output) REAL array, dimension (LDC, NCC)
+ On entry, an N-by-NCC matrix C.
+ On exit, C is overwritten by Q' * C.
+ C is not referenced if NCC = 0.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C.
+ LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0.
+
+ WORK (workspace) REAL array, dimension (4*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: If INFO = -i, the i-th argument had an illegal value
+ > 0: the algorithm did not converge; D and E contain the
+ elements of a bidiagonal matrix which is orthogonally
+ similar to the input matrix B; if INFO = i, i
+ elements of E have not converged to zero.
+
+ Internal Parameters
+ ===================
+
+ TOLMUL REAL, default = max(10,min(100,EPS**(-1/8)))
+ TOLMUL controls the convergence criterion of the QR loop.
+ If it is positive, TOLMUL*EPS is the desired relative
+ precision in the computed singular values.
+ If it is negative, abs(TOLMUL*EPS*sigma_max) is the
+ desired absolute accuracy in the computed singular
+ values (corresponds to relative accuracy
+ abs(TOLMUL*EPS) in the largest singular value.)
+ abs(TOLMUL) should be between 1 and 1/EPS, and preferably
+ between 10 (for fast convergence) and .1/EPS
+ (for there to be some accuracy in the results).
+ Default is to lose at either one eighth or 2 of the
+ available decimal digits in each computed singular value
+ (whichever is smaller).
+
+ MAXITR INTEGER, default = 6
+ MAXITR controls the maximum number of passes of the
+ algorithm through its inner loop. The algorithms stops
+ (and so fails to converge) if the number of passes
+ through the inner loop exceeds MAXITR*N**2.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lower = lsame_(uplo, "L");
+ if (! lsame_(uplo, "U") && ! lower) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*ncvt < 0) {
+ *info = -3;
+ } else if (*nru < 0) {
+ *info = -4;
+ } else if (*ncc < 0) {
+ *info = -5;
+ } else if ((*ncvt == 0 && *ldvt < 1) || (*ncvt > 0 && *ldvt < max(1,*n)))
+ {
+ *info = -9;
+ } else if (*ldu < max(1,*nru)) {
+ *info = -11;
+ } else if ((*ncc == 0 && *ldc < 1) || (*ncc > 0 && *ldc < max(1,*n))) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SBDSQR", &i__1);
+ return 0;
+ }
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ goto L160;
+ }
+
+/* ROTATE is true if any singular vectors desired, false otherwise */
+
+ rotate = ((*ncvt > 0) || (*nru > 0)) || (*ncc > 0);
+
+/* If no singular vectors desired, use qd algorithm */
+
+ if (! rotate) {
+ slasq1_(n, &d__[1], &e[1], &work[1], info);
+ return 0;
+ }
+
+ nm1 = *n - 1;
+ nm12 = nm1 + nm1;
+ nm13 = nm12 + nm1;
+ idir = 0;
+
+/* Get machine constants */
+
+ eps = slamch_("Epsilon");
+ unfl = slamch_("Safe minimum");
+
+/*
+ If matrix lower bidiagonal, rotate to be upper bidiagonal
+ by applying Givens rotations on the left
+*/
+
+ if (lower) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ work[i__] = cs;
+ work[nm1 + i__] = sn;
+/* L10: */
+ }
+
+/* Update singular vectors if desired */
+
+ if (*nru > 0) {
+ slasr_("R", "V", "F", nru, n, &work[1], &work[*n], &u[u_offset],
+ ldu);
+ }
+ if (*ncc > 0) {
+ slasr_("L", "V", "F", n, ncc, &work[1], &work[*n], &c__[c_offset],
+ ldc);
+ }
+ }
+
+/*
+ Compute singular values to relative accuracy TOL
+ (By setting TOL to be negative, algorithm will compute
+ singular values to absolute accuracy ABS(TOL)*norm(input matrix))
+
+ Computing MAX
+ Computing MIN
+*/
+ d__1 = (doublereal) eps;
+ r__3 = 100.f, r__4 = pow_dd(&d__1, &c_b2944);
+ r__1 = 10.f, r__2 = dmin(r__3,r__4);
+ tolmul = dmax(r__1,r__2);
+ tol = tolmul * eps;
+
+/* Compute approximate maximum, minimum singular values */
+
+ smax = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__2 = smax, r__3 = (r__1 = d__[i__], dabs(r__1));
+ smax = dmax(r__2,r__3);
+/* L20: */
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__2 = smax, r__3 = (r__1 = e[i__], dabs(r__1));
+ smax = dmax(r__2,r__3);
+/* L30: */
+ }
+ sminl = 0.f;
+ if (tol >= 0.f) {
+
+/* Relative accuracy desired */
+
+ sminoa = dabs(d__[1]);
+ if (sminoa == 0.f) {
+ goto L50;
+ }
+ mu = sminoa;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ mu = (r__2 = d__[i__], dabs(r__2)) * (mu / (mu + (r__1 = e[i__ -
+ 1], dabs(r__1))));
+ sminoa = dmin(sminoa,mu);
+ if (sminoa == 0.f) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ sminoa /= sqrt((real) (*n));
+/* Computing MAX */
+ r__1 = tol * sminoa, r__2 = *n * 6 * *n * unfl;
+ thresh = dmax(r__1,r__2);
+ } else {
+
+/*
+ Absolute accuracy desired
+
+ Computing MAX
+*/
+ r__1 = dabs(tol) * smax, r__2 = *n * 6 * *n * unfl;
+ thresh = dmax(r__1,r__2);
+ }
+
+/*
+ Prepare for main iteration loop for the singular values
+ (MAXIT is the maximum number of passes through the inner
+ loop permitted before nonconvergence is signalled.)
+*/
+
+ maxit = *n * 6 * *n;
+ iter = 0;
+ oldll = -1;
+ oldm = -1;
+
+/* M points to last element of unconverged part of matrix */
+
+ m = *n;
+
+/* Begin main iteration loop */
+
+L60:
+
+/* Check for convergence or exceeding iteration count */
+
+ if (m <= 1) {
+ goto L160;
+ }
+ if (iter > maxit) {
+ goto L200;
+ }
+
+/* Find diagonal block of matrix to work on */
+
+ if (tol < 0.f && (r__1 = d__[m], dabs(r__1)) <= thresh) {
+ d__[m] = 0.f;
+ }
+ smax = (r__1 = d__[m], dabs(r__1));
+ smin = smax;
+ i__1 = m - 1;
+ for (lll = 1; lll <= i__1; ++lll) {
+ ll = m - lll;
+ abss = (r__1 = d__[ll], dabs(r__1));
+ abse = (r__1 = e[ll], dabs(r__1));
+ if (tol < 0.f && abss <= thresh) {
+ d__[ll] = 0.f;
+ }
+ if (abse <= thresh) {
+ goto L80;
+ }
+ smin = dmin(smin,abss);
+/* Computing MAX */
+ r__1 = max(smax,abss);
+ smax = dmax(r__1,abse);
+/* L70: */
+ }
+ ll = 0;
+ goto L90;
+L80:
+ e[ll] = 0.f;
+
+/* Matrix splits since E(LL) = 0 */
+
+ if (ll == m - 1) {
+
+/* Convergence of bottom singular value, return to top of loop */
+
+ --m;
+ goto L60;
+ }
+L90:
+ ++ll;
+
+/* E(LL) through E(M-1) are nonzero, E(LL-1) is zero */
+
+ if (ll == m - 1) {
+
+/* 2 by 2 block, handle separately */
+
+ slasv2_(&d__[m - 1], &e[m - 1], &d__[m], &sigmn, &sigmx, &sinr, &cosr,
+ &sinl, &cosl);
+ d__[m - 1] = sigmx;
+ e[m - 1] = 0.f;
+ d__[m] = sigmn;
+
+/* Compute singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ srot_(ncvt, &vt[m - 1 + vt_dim1], ldvt, &vt[m + vt_dim1], ldvt, &
+ cosr, &sinr);
+ }
+ if (*nru > 0) {
+ srot_(nru, &u[(m - 1) * u_dim1 + 1], &c__1, &u[m * u_dim1 + 1], &
+ c__1, &cosl, &sinl);
+ }
+ if (*ncc > 0) {
+ srot_(ncc, &c__[m - 1 + c_dim1], ldc, &c__[m + c_dim1], ldc, &
+ cosl, &sinl);
+ }
+ m += -2;
+ goto L60;
+ }
+
+/*
+ If working on new submatrix, choose shift direction
+ (from larger end diagonal element towards smaller)
+*/
+
+ if ((ll > oldm) || (m < oldll)) {
+ if ((r__1 = d__[ll], dabs(r__1)) >= (r__2 = d__[m], dabs(r__2))) {
+
+/* Chase bulge from top (big end) to bottom (small end) */
+
+ idir = 1;
+ } else {
+
+/* Chase bulge from bottom (big end) to top (small end) */
+
+ idir = 2;
+ }
+ }
+
+/* Apply convergence tests */
+
+ if (idir == 1) {
+
+/*
+ Run convergence test in forward direction
+ First apply standard test to bottom of matrix
+*/
+
+ if (((r__2 = e[m - 1], dabs(r__2)) <= dabs(tol) * (r__1 = d__[m],
+ dabs(r__1))) || (tol < 0.f && (r__3 = e[m - 1], dabs(r__3)) <=
+ thresh)) {
+ e[m - 1] = 0.f;
+ goto L60;
+ }
+
+ if (tol >= 0.f) {
+
+/*
+ If relative accuracy desired,
+ apply convergence criterion forward
+*/
+
+ mu = (r__1 = d__[ll], dabs(r__1));
+ sminl = mu;
+ i__1 = m - 1;
+ for (lll = ll; lll <= i__1; ++lll) {
+ if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) {
+ e[lll] = 0.f;
+ goto L60;
+ }
+ sminlo = sminl;
+ mu = (r__2 = d__[lll + 1], dabs(r__2)) * (mu / (mu + (r__1 =
+ e[lll], dabs(r__1))));
+ sminl = dmin(sminl,mu);
+/* L100: */
+ }
+ }
+
+ } else {
+
+/*
+ Run convergence test in backward direction
+ First apply standard test to top of matrix
+*/
+
+ if (((r__2 = e[ll], dabs(r__2)) <= dabs(tol) * (r__1 = d__[ll], dabs(
+ r__1))) || (tol < 0.f && (r__3 = e[ll], dabs(r__3)) <= thresh)
+ ) {
+ e[ll] = 0.f;
+ goto L60;
+ }
+
+ if (tol >= 0.f) {
+
+/*
+ If relative accuracy desired,
+ apply convergence criterion backward
+*/
+
+ mu = (r__1 = d__[m], dabs(r__1));
+ sminl = mu;
+ i__1 = ll;
+ for (lll = m - 1; lll >= i__1; --lll) {
+ if ((r__1 = e[lll], dabs(r__1)) <= tol * mu) {
+ e[lll] = 0.f;
+ goto L60;
+ }
+ sminlo = sminl;
+ mu = (r__2 = d__[lll], dabs(r__2)) * (mu / (mu + (r__1 = e[
+ lll], dabs(r__1))));
+ sminl = dmin(sminl,mu);
+/* L110: */
+ }
+ }
+ }
+ oldll = ll;
+ oldm = m;
+
+/*
+ Compute shift. First, test if shifting would ruin relative
+ accuracy, and if so set the shift to zero.
+
+ Computing MAX
+*/
+ r__1 = eps, r__2 = tol * .01f;
+ if (tol >= 0.f && *n * tol * (sminl / smax) <= dmax(r__1,r__2)) {
+
+/* Use a zero shift to avoid loss of relative accuracy */
+
+ shift = 0.f;
+ } else {
+
+/* Compute the shift from 2-by-2 block at end of matrix */
+
+ if (idir == 1) {
+ sll = (r__1 = d__[ll], dabs(r__1));
+ slas2_(&d__[m - 1], &e[m - 1], &d__[m], &shift, &r__);
+ } else {
+ sll = (r__1 = d__[m], dabs(r__1));
+ slas2_(&d__[ll], &e[ll], &d__[ll + 1], &shift, &r__);
+ }
+
+/* Test if shift negligible, and if so set to zero */
+
+ if (sll > 0.f) {
+/* Computing 2nd power */
+ r__1 = shift / sll;
+ if (r__1 * r__1 < eps) {
+ shift = 0.f;
+ }
+ }
+ }
+
+/* Increment iteration count */
+
+ iter = iter + m - ll;
+
+/* If SHIFT = 0, do simplified QR iteration */
+
+ if (shift == 0.f) {
+ if (idir == 1) {
+
+/*
+ Chase bulge from top to bottom
+ Save cosines and sines for later singular vector updates
+*/
+
+ cs = 1.f;
+ oldcs = 1.f;
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ r__1 = d__[i__] * cs;
+ slartg_(&r__1, &e[i__], &cs, &sn, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = oldsn * r__;
+ }
+ r__1 = oldcs * r__;
+ r__2 = d__[i__ + 1] * sn;
+ slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
+ work[i__ - ll + 1] = cs;
+ work[i__ - ll + 1 + nm1] = sn;
+ work[i__ - ll + 1 + nm12] = oldcs;
+ work[i__ - ll + 1 + nm13] = oldsn;
+/* L120: */
+ }
+ h__ = d__[m] * cs;
+ d__[m] = h__ * oldcs;
+ e[m - 1] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ slasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
+ + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
+ + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) {
+ e[m - 1] = 0.f;
+ }
+
+ } else {
+
+/*
+ Chase bulge from bottom to top
+ Save cosines and sines for later singular vector updates
+*/
+
+ cs = 1.f;
+ oldcs = 1.f;
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ r__1 = d__[i__] * cs;
+ slartg_(&r__1, &e[i__ - 1], &cs, &sn, &r__);
+ if (i__ < m) {
+ e[i__] = oldsn * r__;
+ }
+ r__1 = oldcs * r__;
+ r__2 = d__[i__ - 1] * sn;
+ slartg_(&r__1, &r__2, &oldcs, &oldsn, &d__[i__]);
+ work[i__ - ll] = cs;
+ work[i__ - ll + nm1] = -sn;
+ work[i__ - ll + nm12] = oldcs;
+ work[i__ - ll + nm13] = -oldsn;
+/* L130: */
+ }
+ h__ = d__[ll] * cs;
+ d__[ll] = h__ * oldcs;
+ e[ll] = h__ * oldsn;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ slasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
+ u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((r__1 = e[ll], dabs(r__1)) <= thresh) {
+ e[ll] = 0.f;
+ }
+ }
+ } else {
+
+/* Use nonzero shift */
+
+ if (idir == 1) {
+
+/*
+ Chase bulge from top to bottom
+ Save cosines and sines for later singular vector updates
+*/
+
+ f = ((r__1 = d__[ll], dabs(r__1)) - shift) * (r_sign(&c_b1011, &
+ d__[ll]) + shift / d__[ll]);
+ g = e[ll];
+ i__1 = m - 1;
+ for (i__ = ll; i__ <= i__1; ++i__) {
+ slartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ > ll) {
+ e[i__ - 1] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__];
+ e[i__] = cosr * e[i__] - sinr * d__[i__];
+ g = sinr * d__[i__ + 1];
+ d__[i__ + 1] = cosr * d__[i__ + 1];
+ slartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__] + sinl * d__[i__ + 1];
+ d__[i__ + 1] = cosl * d__[i__ + 1] - sinl * e[i__];
+ if (i__ < m - 1) {
+ g = sinl * e[i__ + 1];
+ e[i__ + 1] = cosl * e[i__ + 1];
+ }
+ work[i__ - ll + 1] = cosr;
+ work[i__ - ll + 1 + nm1] = sinr;
+ work[i__ - ll + 1 + nm12] = cosl;
+ work[i__ - ll + 1 + nm13] = sinl;
+/* L140: */
+ }
+ e[m - 1] = f;
+
+/* Update singular vectors */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "F", &i__1, ncvt, &work[1], &work[*n], &vt[
+ ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ slasr_("R", "V", "F", nru, &i__1, &work[nm12 + 1], &work[nm13
+ + 1], &u[ll * u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "F", &i__1, ncc, &work[nm12 + 1], &work[nm13
+ + 1], &c__[ll + c_dim1], ldc);
+ }
+
+/* Test convergence */
+
+ if ((r__1 = e[m - 1], dabs(r__1)) <= thresh) {
+ e[m - 1] = 0.f;
+ }
+
+ } else {
+
+/*
+ Chase bulge from bottom to top
+ Save cosines and sines for later singular vector updates
+*/
+
+ f = ((r__1 = d__[m], dabs(r__1)) - shift) * (r_sign(&c_b1011, &
+ d__[m]) + shift / d__[m]);
+ g = e[m - 1];
+ i__1 = ll + 1;
+ for (i__ = m; i__ >= i__1; --i__) {
+ slartg_(&f, &g, &cosr, &sinr, &r__);
+ if (i__ < m) {
+ e[i__] = r__;
+ }
+ f = cosr * d__[i__] + sinr * e[i__ - 1];
+ e[i__ - 1] = cosr * e[i__ - 1] - sinr * d__[i__];
+ g = sinr * d__[i__ - 1];
+ d__[i__ - 1] = cosr * d__[i__ - 1];
+ slartg_(&f, &g, &cosl, &sinl, &r__);
+ d__[i__] = r__;
+ f = cosl * e[i__ - 1] + sinl * d__[i__ - 1];
+ d__[i__ - 1] = cosl * d__[i__ - 1] - sinl * e[i__ - 1];
+ if (i__ > ll + 1) {
+ g = sinl * e[i__ - 2];
+ e[i__ - 2] = cosl * e[i__ - 2];
+ }
+ work[i__ - ll] = cosr;
+ work[i__ - ll + nm1] = -sinr;
+ work[i__ - ll + nm12] = cosl;
+ work[i__ - ll + nm13] = -sinl;
+/* L150: */
+ }
+ e[ll] = f;
+
+/* Test convergence */
+
+ if ((r__1 = e[ll], dabs(r__1)) <= thresh) {
+ e[ll] = 0.f;
+ }
+
+/* Update singular vectors if desired */
+
+ if (*ncvt > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "B", &i__1, ncvt, &work[nm12 + 1], &work[
+ nm13 + 1], &vt[ll + vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ i__1 = m - ll + 1;
+ slasr_("R", "V", "B", nru, &i__1, &work[1], &work[*n], &u[ll *
+ u_dim1 + 1], ldu);
+ }
+ if (*ncc > 0) {
+ i__1 = m - ll + 1;
+ slasr_("L", "V", "B", &i__1, ncc, &work[1], &work[*n], &c__[
+ ll + c_dim1], ldc);
+ }
+ }
+ }
+
+/* QR iteration finished, go back and check convergence */
+
+ goto L60;
+
+/* All singular values converged, so make them positive */
+
+L160:
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] < 0.f) {
+ d__[i__] = -d__[i__];
+
+/* Change sign of singular vectors, if desired */
+
+ if (*ncvt > 0) {
+ sscal_(ncvt, &c_b1290, &vt[i__ + vt_dim1], ldvt);
+ }
+ }
+/* L170: */
+ }
+
+/*
+ Sort the singular values into decreasing order (insertion sort on
+ singular values, but only one transposition per singular vector)
+*/
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for smallest D(I) */
+
+ isub = 1;
+ smin = d__[1];
+ i__2 = *n + 1 - i__;
+ for (j = 2; j <= i__2; ++j) {
+ if (d__[j] <= smin) {
+ isub = j;
+ smin = d__[j];
+ }
+/* L180: */
+ }
+ if (isub != *n + 1 - i__) {
+
+/* Swap singular values and vectors */
+
+ d__[isub] = d__[*n + 1 - i__];
+ d__[*n + 1 - i__] = smin;
+ if (*ncvt > 0) {
+ sswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[*n + 1 - i__ +
+ vt_dim1], ldvt);
+ }
+ if (*nru > 0) {
+ sswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[(*n + 1 - i__) *
+ u_dim1 + 1], &c__1);
+ }
+ if (*ncc > 0) {
+ sswap_(ncc, &c__[isub + c_dim1], ldc, &c__[*n + 1 - i__ +
+ c_dim1], ldc);
+ }
+ }
+/* L190: */
+ }
+ goto L220;
+
+/* Maximum number of iterations exceeded, failure to converge */
+
+L200:
+ *info = 0;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.f) {
+ ++(*info);
+ }
+/* L210: */
+ }
+L220:
+ return 0;
+
+/* End of SBDSQR */
+
+} /* sbdsqr_ */
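+
+/*
+   Usage sketch (illustrative only, excluded from compilation): the same
+   kind of 3-by-3 bidiagonal problem solved with the QR-based sbdsqr_
+   instead of the divide-and-conquer sbdsdc_. With NCVT = NRU = NCC = 0
+   no singular vectors are updated and the routine falls through to the
+   qd algorithm (slasq1_). The f2c.h typedefs are assumptions.
+*/
+#if 0
+#include "f2c.h"
+
+extern int sbdsqr_(char *, integer *, integer *, integer *, integer *,
+                   real *, real *, real *, integer *, real *, integer *,
+                   real *, integer *, real *, integer *);
+
+static void bidiagonal_singular_values_qr(void)
+{
+    integer n = 3, ncvt = 0, nru = 0, ncc = 0;
+    integer ldvt = 1, ldu = 1, ldc = 1, info = 0;
+    real d[3] = {2.f, 3.f, 1.f};   /* diagonal of B */
+    real e[3] = {1.f, 1.f, 0.f};   /* superdiagonal; E(N) is workspace */
+    real work[12];                 /* dimension 4*N */
+    real vtdum[1], udum[1], cdum[1];
+
+    sbdsqr_("U", &n, &ncvt, &nru, &ncc, d, e, vtdum, &ldvt, udum, &ldu,
+            cdum, &ldc, work, &info);
+    /* info == 0: d[] holds the singular values in decreasing order */
+}
+#endif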
+
+/* Subroutine */ int sgebak_(char *job, char *side, integer *n, integer *ilo,
+ integer *ihi, real *scale, integer *m, real *v, integer *ldv, integer
+ *info)
+{
+ /* System generated locals */
+ integer v_dim1, v_offset, i__1;
+
+ /* Local variables */
+ static integer i__, k;
+ static real s;
+ static integer ii;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ static logical leftv;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), xerbla_(char *, integer *);
+ static logical rightv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ SGEBAK forms the right or left eigenvectors of a real general matrix
+ by backward transformation on the computed eigenvectors of the
+ balanced matrix output by SGEBAL.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ Specifies the type of backward transformation required:
+ = 'N', do nothing, return immediately;
+ = 'P', do backward transformation for permutation only;
+ = 'S', do backward transformation for scaling only;
+ = 'B', do backward transformations for both permutation and
+ scaling.
+ JOB must be the same as the argument JOB supplied to SGEBAL.
+
+ SIDE (input) CHARACTER*1
+ = 'R': V contains right eigenvectors;
+ = 'L': V contains left eigenvectors.
+
+ N (input) INTEGER
+ The number of rows of the matrix V. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ The integers ILO and IHI determined by SGEBAL.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ SCALE (input) REAL array, dimension (N)
+ Details of the permutation and scaling factors, as returned
+ by SGEBAL.
+
+ M (input) INTEGER
+ The number of columns of the matrix V. M >= 0.
+
+ V (input/output) REAL array, dimension (LDV,M)
+ On entry, the matrix of right or left eigenvectors to be
+ transformed, as returned by SHSEIN or STREVC.
+ On exit, V is overwritten by the transformed eigenvectors.
+
+ LDV (input) INTEGER
+ The leading dimension of the array V. LDV >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ =====================================================================
+
+
+ Decode and Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --scale;
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+
+ /* Function Body */
+ rightv = lsame_(side, "R");
+ leftv = lsame_(side, "L");
+
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (! rightv && ! leftv) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -4;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -5;
+ } else if (*m < 0) {
+ *info = -7;
+ } else if (*ldv < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEBAK", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*m == 0) {
+ return 0;
+ }
+ if (lsame_(job, "N")) {
+ return 0;
+ }
+
+ if (*ilo == *ihi) {
+ goto L30;
+ }
+
+/* Backward balance */
+
+ if ((lsame_(job, "S")) || (lsame_(job, "B"))) {
+
+ if (rightv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = scale[i__];
+ sscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L10: */
+ }
+ }
+
+ if (leftv) {
+ i__1 = *ihi;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+ s = 1.f / scale[i__];
+ sscal_(m, &s, &v[i__ + v_dim1], ldv);
+/* L20: */
+ }
+ }
+
+ }
+
+/*
+ Backward permutation
+
+ For I = ILO-1 step -1 until 1,
+ IHI+1 step 1 until N do --
+*/
+
+L30:
+ if ((lsame_(job, "P")) || (lsame_(job, "B"))) {
+ if (rightv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L40;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = scale[i__];
+ if (k == i__) {
+ goto L40;
+ }
+ sswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L40:
+ ;
+ }
+ }
+
+ if (leftv) {
+ i__1 = *n;
+ for (ii = 1; ii <= i__1; ++ii) {
+ i__ = ii;
+ if (i__ >= *ilo && i__ <= *ihi) {
+ goto L50;
+ }
+ if (i__ < *ilo) {
+ i__ = *ilo - ii;
+ }
+ k = scale[i__];
+ if (k == i__) {
+ goto L50;
+ }
+ sswap_(m, &v[i__ + v_dim1], ldv, &v[k + v_dim1], ldv);
+L50:
+ ;
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SGEBAK */
+
+} /* sgebak_ */
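+
+/*
+   Usage sketch (illustrative only, excluded from compilation): undoing
+   the balancing applied by sgebal_ on a matrix of computed right
+   eigenvectors, the pairing described in the Purpose section above. The
+   caller is assumed to have obtained ilo, ihi and scale[] from sgebal_
+   and v from an eigenvector routine such as shsein_/strevc_; the f2c.h
+   typedefs are assumptions as well.
+*/
+#if 0
+#include "f2c.h"
+
+extern int sgebak_(char *, char *, integer *, integer *, integer *,
+                   real *, integer *, real *, integer *, integer *);
+
+static integer unbalance_right_vectors(integer n, integer ilo, integer ihi,
+                                       real *scale, integer m, real *v,
+                                       integer ldv)
+{
+    integer info = 0;
+    /* JOB must match the JOB that was passed to sgebal_ ("B" here) */
+    sgebak_("B", "R", &n, &ilo, &ihi, scale, &m, v, &ldv, &info);
+    return info;   /* info = -i flags an illegal value in argument i */
+}
+#endif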
+
+/* Subroutine */ int sgebal_(char *job, integer *n, real *a, integer *lda,
+ integer *ilo, integer *ihi, real *scale, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Local variables */
+ static real c__, f, g;
+ static integer i__, j, k, l, m;
+ static real r__, s, ca, ra;
+ static integer ica, ira, iexc;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sswap_(integer *, real *, integer *, real *, integer *);
+ static real sfmin1, sfmin2, sfmax1, sfmax2;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ static logical noconv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SGEBAL balances a general real matrix A. This involves, first,
+ permuting A by a similarity transformation to isolate eigenvalues
+ in the first 1 to ILO-1 and last IHI+1 to N elements on the
+ diagonal; and second, applying a diagonal similarity transformation
+ to rows and columns ILO to IHI to make the rows and columns as
+ close in norm as possible. Both steps are optional.
+
+ Balancing may reduce the 1-norm of the matrix, and improve the
+ accuracy of the computed eigenvalues and/or eigenvectors.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ Specifies the operations to be performed on A:
+ = 'N': none: simply set ILO = 1, IHI = N, SCALE(I) = 1.0
+ for i = 1,...,N;
+ = 'P': permute only;
+ = 'S': scale only;
+ = 'B': both permute and scale.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the input matrix A.
+ On exit, A is overwritten by the balanced matrix.
+ If JOB = 'N', A is not referenced.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ ILO (output) INTEGER
+ IHI (output) INTEGER
+ ILO and IHI are set to integers such that on exit
+ A(i,j) = 0 if i > j and j = 1,...,ILO-1 or i = IHI+1,...,N.
+ If JOB = 'N' or 'S', ILO = 1 and IHI = N.
+
+ SCALE (output) REAL array, dimension (N)
+ Details of the permutations and scaling factors applied to
+ A. If P(j) is the index of the row and column interchanged
+ with row and column j and D(j) is the scaling factor
+ applied to row and column j, then
+ SCALE(j) = P(j) for j = 1,...,ILO-1
+ = D(j) for j = ILO,...,IHI
+ = P(j) for j = IHI+1,...,N.
+ The order in which the interchanges are made is N to IHI+1,
+ then 1 to ILO-1.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The permutations consist of row and column interchanges which put
+ the matrix in the form
+
+             ( T1   X   Y  )
+     P A P = (  0   B   Z  )
+             (  0   0   T2 )
+
+ where T1 and T2 are upper triangular matrices whose eigenvalues lie
+ along the diagonal. The column indices ILO and IHI mark the starting
+ and ending columns of the submatrix B. Balancing consists of applying
+ a diagonal similarity transformation inv(D) * B * D to make the
+ 1-norms of each row of B and its corresponding column nearly equal.
+ The output matrix is
+
+     ( T1     X*D          Y    )
+     (  0  inv(D)*B*D  inv(D)*Z ).
+     (  0      0           T2   )
+
+ Information about the permutations P and the diagonal matrix D is
+ returned in the vector SCALE.
+
+ This subroutine is based on the EISPACK routine BALANC.
+
+ Modified by Tzu-Yi Chen, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --scale;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEBAL", &i__1);
+ return 0;
+ }
+
+ k = 1;
+ l = *n;
+
+ if (*n == 0) {
+ goto L210;
+ }
+
+ if (lsame_(job, "N")) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ scale[i__] = 1.f;
+/* L10: */
+ }
+ goto L210;
+ }
+
+ if (lsame_(job, "S")) {
+ goto L120;
+ }
+
+/* Permutation to isolate eigenvalues if possible */
+
+ goto L50;
+
+/* Row and column exchange. */
+
+L20:
+ scale[m] = (real) j;
+ if (j == m) {
+ goto L30;
+ }
+
+ sswap_(&l, &a[j * a_dim1 + 1], &c__1, &a[m * a_dim1 + 1], &c__1);
+ i__1 = *n - k + 1;
+ sswap_(&i__1, &a[j + k * a_dim1], lda, &a[m + k * a_dim1], lda);
+
+L30:
+ switch (iexc) {
+ case 1: goto L40;
+ case 2: goto L80;
+ }
+
+/* Search for rows isolating an eigenvalue and push them down. */
+
+L40:
+ if (l == 1) {
+ goto L210;
+ }
+ --l;
+
+L50:
+ for (j = l; j >= 1; --j) {
+
+ i__1 = l;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ == j) {
+ goto L60;
+ }
+ if (a[j + i__ * a_dim1] != 0.f) {
+ goto L70;
+ }
+L60:
+ ;
+ }
+
+ m = l;
+ iexc = 1;
+ goto L20;
+L70:
+ ;
+ }
+
+ goto L90;
+
+/* Search for columns isolating an eigenvalue and push them left. */
+
+L80:
+ ++k;
+
+L90:
+ i__1 = l;
+ for (j = k; j <= i__1; ++j) {
+
+ i__2 = l;
+ for (i__ = k; i__ <= i__2; ++i__) {
+ if (i__ == j) {
+ goto L100;
+ }
+ if (a[i__ + j * a_dim1] != 0.f) {
+ goto L110;
+ }
+L100:
+ ;
+ }
+
+ m = k;
+ iexc = 2;
+ goto L20;
+L110:
+ ;
+ }
+
+L120:
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ scale[i__] = 1.f;
+/* L130: */
+ }
+
+ if (lsame_(job, "P")) {
+ goto L210;
+ }
+
+/*
+ Balance the submatrix in rows K to L.
+
+ Iterative loop for norm reduction
+*/
+
+ sfmin1 = slamch_("S") / slamch_("P");
+ sfmax1 = 1.f / sfmin1;
+ sfmin2 = sfmin1 * 8.f;
+ sfmax2 = 1.f / sfmin2;
+L140:
+ noconv = FALSE_;
+
+ i__1 = l;
+ for (i__ = k; i__ <= i__1; ++i__) {
+ c__ = 0.f;
+ r__ = 0.f;
+
+ i__2 = l;
+ for (j = k; j <= i__2; ++j) {
+ if (j == i__) {
+ goto L150;
+ }
+ c__ += (r__1 = a[j + i__ * a_dim1], dabs(r__1));
+ r__ += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+L150:
+ ;
+ }
+ ica = isamax_(&l, &a[i__ * a_dim1 + 1], &c__1);
+ ca = (r__1 = a[ica + i__ * a_dim1], dabs(r__1));
+ i__2 = *n - k + 1;
+ ira = isamax_(&i__2, &a[i__ + k * a_dim1], lda);
+ ra = (r__1 = a[i__ + (ira + k - 1) * a_dim1], dabs(r__1));
+
+/* Guard against zero C or R due to underflow. */
+
+ if ((c__ == 0.f) || (r__ == 0.f)) {
+ goto L200;
+ }
+ g = r__ / 8.f;
+ f = 1.f;
+ s = c__ + r__;
+L160:
+/* Computing MAX */
+ r__1 = max(f,c__);
+/* Computing MIN */
+ r__2 = min(r__,g);
+ if (((c__ >= g) || (dmax(r__1,ca) >= sfmax2)) || (dmin(r__2,ra) <=
+ sfmin2)) {
+ goto L170;
+ }
+ f *= 8.f;
+ c__ *= 8.f;
+ ca *= 8.f;
+ r__ /= 8.f;
+ g /= 8.f;
+ ra /= 8.f;
+ goto L160;
+
+L170:
+ g = c__ / 8.f;
+L180:
+/* Computing MIN */
+ r__1 = min(f,c__), r__1 = min(r__1,g);
+ if (((g < r__) || (dmax(r__,ra) >= sfmax2)) || (dmin(r__1,ca) <=
+ sfmin2)) {
+ goto L190;
+ }
+ f /= 8.f;
+ c__ /= 8.f;
+ g /= 8.f;
+ ca /= 8.f;
+ r__ *= 8.f;
+ ra *= 8.f;
+ goto L180;
+
+/* Now balance. */
+
+L190:
+ if (c__ + r__ >= s * .95f) {
+ goto L200;
+ }
+ if (f < 1.f && scale[i__] < 1.f) {
+ if (f * scale[i__] <= sfmin1) {
+ goto L200;
+ }
+ }
+ if (f > 1.f && scale[i__] > 1.f) {
+ if (scale[i__] >= sfmax1 / f) {
+ goto L200;
+ }
+ }
+ g = 1.f / f;
+ scale[i__] *= f;
+ noconv = TRUE_;
+
+ i__2 = *n - k + 1;
+ sscal_(&i__2, &g, &a[i__ + k * a_dim1], lda);
+ sscal_(&l, &f, &a[i__ * a_dim1 + 1], &c__1);
+
+L200:
+ ;
+ }
+
+ if (noconv) {
+ goto L140;
+ }
+
+L210:
+ *ilo = k;
+ *ihi = l;
+
+ return 0;
+
+/* End of SGEBAL */
+
+} /* sgebal_ */
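+
+/*
+    Illustrative usage sketch (exposition only; not part of the f2c
+    translation): balancing a matrix before an eigenvalue computation.
+    The 3x3 matrix and all names are hypothetical.
+
+        integer n = 3, lda = 3, ilo, ihi, info;
+        real scale[3];
+        real a[9] = { 1.f, 0.f, 0.f,     // column-major storage
+                      2.f, 3.f, 0.f,
+                      4.f, 5.f, 6.f };
+        sgebal_("B", &n, a, &lda, &ilo, &ihi, scale, &info);
+        // rows/columns ilo..ihi of a hold the balanced submatrix B;
+        // scale records the permutations (outside ilo..ihi) and the
+        // diagonal scaling D (inside), as described for SCALE above.
+*/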
+
+/* Subroutine */ int sgebd2_(integer *m, integer *n, real *a, integer *lda,
+ real *d__, real *e, real *tauq, real *taup, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__;
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *), slarfg_(integer *, real *, real *,
+ integer *, real *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SGEBD2 reduces a real general m by n matrix A to upper or lower
+ bidiagonal form B by an orthogonal transformation: Q' * A * P = B.
+
+ If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns in the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the m by n general matrix to be reduced.
+ On exit,
+ if m >= n, the diagonal and the first superdiagonal are
+ overwritten with the upper bidiagonal matrix B; the
+ elements below the diagonal, with the array TAUQ, represent
+ the orthogonal matrix Q as a product of elementary
+ reflectors, and the elements above the first superdiagonal,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors;
+ if m < n, the diagonal and the first subdiagonal are
+ overwritten with the lower bidiagonal matrix B; the
+ elements below the first subdiagonal, with the array TAUQ,
+ represent the orthogonal matrix Q as a product of
+ elementary reflectors, and the elements above the diagonal,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) REAL array, dimension (min(M,N))
+ The diagonal elements of the bidiagonal matrix B:
+ D(i) = A(i,i).
+
+ E (output) REAL array, dimension (min(M,N)-1)
+ The off-diagonal elements of the bidiagonal matrix B:
+ if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
+ if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
+
+ TAUQ (output) REAL array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix Q. See Further Details.
+
+ TAUP (output) REAL array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix P. See Further Details.
+
+ WORK (workspace) REAL array, dimension (max(M,N))
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ If m >= n,
+
+ Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors;
+ v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i);
+ u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n,
+
+ Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors;
+ v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i);
+ u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The contents of A on exit are illustrated by the following examples:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
+ ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
+ ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
+ ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
+ ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
+ ( v1 v2 v3 v4 v5 )
+
+ where d and e denote diagonal and off-diagonal elements of B, vi
+ denotes an element of the vector defining H(i), and ui an element of
+ the vector defining G(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("SGEBD2", &i__1);
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ *
+ a_dim1], &c__1, &tauq[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.f;
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ slarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tauq[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ a[i__ + i__ * a_dim1] = d__[i__];
+
+ if (i__ < *n) {
+
+/*
+ Generate elementary reflector G(i) to annihilate
+ A(i,i+2:n)
+*/
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ slarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
+ i__3,*n) * a_dim1], lda, &taup[i__]);
+ e[i__] = a[i__ + (i__ + 1) * a_dim1];
+ a[i__ + (i__ + 1) * a_dim1] = 1.f;
+
+/* Apply G(i) to A(i+1:m,i+1:n) from the right */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ slarf_("Right", &i__2, &i__3, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &taup[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &work[1]);
+ a[i__ + (i__ + 1) * a_dim1] = e[i__];
+ } else {
+ taup[i__] = 0.f;
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector G(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) *
+ a_dim1], lda, &taup[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.f;
+
+/* Apply G(i) to A(i+1:m,i:n) from the right */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+/* Computing MIN */
+ i__4 = i__ + 1;
+ slarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &taup[
+ i__], &a[min(i__4,*m) + i__ * a_dim1], lda, &work[1]);
+ a[i__ + i__ * a_dim1] = d__[i__];
+
+ if (i__ < *m) {
+
+/*
+ Generate elementary reflector H(i) to annihilate
+ A(i+2:m,i)
+*/
+
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ slarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) +
+ i__ * a_dim1], &c__1, &tauq[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.f;
+
+/* Apply H(i) to A(i+1:m,i+1:n) from the left */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ slarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &
+ c__1, &tauq[i__], &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &work[1]);
+ a[i__ + 1 + i__ * a_dim1] = e[i__];
+ } else {
+ tauq[i__] = 0.f;
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of SGEBD2 */
+
+} /* sgebd2_ */
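+
+/*
+    Illustrative usage sketch (exposition only): the unblocked reduction
+    above is usually reached through sgebrd_, but it can be called
+    directly for small matrices. Names and sizes are hypothetical; a is
+    a 6x5 column-major matrix filled by the caller.
+
+        integer m = 6, n = 5, lda = 6, info;
+        real d[5], e[4], tauq[5], taup[5], work[6];
+        sgebd2_(&m, &n, a, &lda, d, e, tauq, taup, work, &info);
+        // d[0..4] and e[0..3] now hold the bidiagonal B; the Householder
+        // vectors defining Q and P remain packed in a as documented above.
+*/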
+
+/* Subroutine */ int sgebrd_(integer *m, integer *n, real *a, integer *lda,
+ real *d__, real *e, real *tauq, real *taup, real *work, integer *
+ lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j, nb, nx;
+ static real ws;
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer minmn;
+ extern /* Subroutine */ int sgebd2_(integer *, integer *, real *, integer
+ *, real *, real *, real *, real *, real *, integer *), slabrd_(
+ integer *, integer *, integer *, real *, integer *, real *, real *
+ , real *, real *, real *, integer *, real *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwrkx, ldwrky, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SGEBRD reduces a general real M-by-N matrix A to upper or lower
+ bidiagonal form B by an orthogonal transformation: Q**T * A * P = B.
+
+ If m >= n, B is upper bidiagonal; if m < n, B is lower bidiagonal.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns in the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the M-by-N general matrix to be reduced.
+ On exit,
+ if m >= n, the diagonal and the first superdiagonal are
+ overwritten with the upper bidiagonal matrix B; the
+ elements below the diagonal, with the array TAUQ, represent
+ the orthogonal matrix Q as a product of elementary
+ reflectors, and the elements above the first superdiagonal,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors;
+ if m < n, the diagonal and the first subdiagonal are
+ overwritten with the lower bidiagonal matrix B; the
+ elements below the first subdiagonal, with the array TAUQ,
+ represent the orthogonal matrix Q as a product of
+ elementary reflectors, and the elements above the diagonal,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) REAL array, dimension (min(M,N))
+ The diagonal elements of the bidiagonal matrix B:
+ D(i) = A(i,i).
+
+ E (output) REAL array, dimension (min(M,N)-1)
+ The off-diagonal elements of the bidiagonal matrix B:
+ if m >= n, E(i) = A(i,i+1) for i = 1,2,...,n-1;
+ if m < n, E(i) = A(i+1,i) for i = 1,2,...,m-1.
+
+ TAUQ (output) REAL array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix Q. See Further Details.
+
+ TAUP (output) REAL array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix P. See Further Details.
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The length of the array WORK. LWORK >= max(1,M,N).
+ For optimum performance LWORK >= (M+N)*NB, where NB
+ is the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ If m >= n,
+
+ Q = H(1) H(2) . . . H(n) and P = G(1) G(2) . . . G(n-1)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors;
+ v(1:i-1) = 0, v(i) = 1, and v(i+1:m) is stored on exit in A(i+1:m,i);
+ u(1:i) = 0, u(i+1) = 1, and u(i+2:n) is stored on exit in A(i,i+2:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n,
+
+ Q = H(1) H(2) . . . H(m-1) and P = G(1) G(2) . . . G(m)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors;
+ v(1:i) = 0, v(i+1) = 1, and v(i+2:m) is stored on exit in A(i+2:m,i);
+ u(1:i-1) = 0, u(i) = 1, and u(i+1:n) is stored on exit in A(i,i+1:n);
+ tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The contents of A on exit are illustrated by the following examples:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
+ ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
+ ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
+ ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
+ ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
+ ( v1 v2 v3 v4 v5 )
+
+ where d and e denote diagonal and off-diagonal elements of B, vi
+ denotes an element of the vector defining H(i), and ui an element of
+ the vector defining G(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* Computing MAX */
+ i__1 = 1, i__2 = ilaenv_(&c__1, "SGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = max(i__1,i__2);
+ lwkopt = (*m + *n) * nb;
+ work[1] = (real) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = max(1,*m);
+ if (*lwork < max(i__1,*n) && ! lquery) {
+ *info = -10;
+ }
+ }
+ if (*info < 0) {
+ i__1 = -(*info);
+ xerbla_("SGEBRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ minmn = min(*m,*n);
+ if (minmn == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ ws = (real) max(*m,*n);
+ ldwrkx = *m;
+ ldwrky = *n;
+
+ if (nb > 1 && nb < minmn) {
+
+/*
+ Set the crossover point NX.
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "SGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+
+/* Determine when to switch from blocked to unblocked code. */
+
+ if (nx < minmn) {
+ ws = (real) ((*m + *n) * nb);
+ if ((real) (*lwork) < ws) {
+
+/*
+ Not enough work space for the optimal NB, consider using
+ a smaller block size.
+*/
+
+ nbmin = ilaenv_(&c__2, "SGEBRD", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ if (*lwork >= (*m + *n) * nbmin) {
+ nb = *lwork / (*m + *n);
+ } else {
+ nb = 1;
+ nx = minmn;
+ }
+ }
+ }
+ } else {
+ nx = minmn;
+ }
+
+ i__1 = minmn - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+
+/*
+ Reduce rows and columns i:i+nb-1 to bidiagonal form and return
+ the matrices X and Y which are needed to update the unreduced
+ part of the matrix
+*/
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ + 1;
+ slabrd_(&i__3, &i__4, &nb, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[
+ i__], &tauq[i__], &taup[i__], &work[1], &ldwrkx, &work[ldwrkx
+ * nb + 1], &ldwrky);
+
+/*
+ Update the trailing submatrix A(i+nb:m,i+nb:n), using an update
+ of the form A := A - V*Y' - X*U'
+*/
+
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ sgemm_("No transpose", "Transpose", &i__3, &i__4, &nb, &c_b1290, &a[
+ i__ + nb + i__ * a_dim1], lda, &work[ldwrkx * nb + nb + 1], &
+ ldwrky, &c_b1011, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+ i__3 = *m - i__ - nb + 1;
+ i__4 = *n - i__ - nb + 1;
+ sgemm_("No transpose", "No transpose", &i__3, &i__4, &nb, &c_b1290, &
+ work[nb + 1], &ldwrkx, &a[i__ + (i__ + nb) * a_dim1], lda, &
+ c_b1011, &a[i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/* Copy diagonal and off-diagonal elements of B back into A */
+
+ if (*m >= *n) {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + j * a_dim1] = d__[j];
+ a[j + (j + 1) * a_dim1] = e[j];
+/* L10: */
+ }
+ } else {
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + j * a_dim1] = d__[j];
+ a[j + 1 + j * a_dim1] = e[j];
+/* L20: */
+ }
+ }
+/* L30: */
+ }
+
+/* Use unblocked code to reduce the remainder of the matrix */
+
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ sgebd2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__], &
+ tauq[i__], &taup[i__], &work[1], &iinfo);
+ work[1] = ws;
+ return 0;
+
+/* End of SGEBRD */
+
+} /* sgebrd_ */
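+
+/*
+    Illustrative sketch (exposition only) of the LWORK = -1 workspace
+    query described in the LWORK argument above. Names are hypothetical;
+    a points at an m-by-n column-major matrix owned by the caller.
+
+        integer m = 100, n = 50, lda = 100, lwork = -1, info;
+        real d[50], e[49], tauq[50], taup[50], wkopt, *work;
+        sgebrd_(&m, &n, a, &lda, d, e, tauq, taup, &wkopt, &lwork, &info);
+        lwork = (integer) wkopt;                  // optimal size
+        work = (real *) malloc(lwork * sizeof(real));
+        sgebrd_(&m, &n, a, &lda, d, e, tauq, taup, work, &lwork, &info);
+        free(work);
+*/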
+
+/* Subroutine */ int sgeev_(char *jobvl, char *jobvr, integer *n, real *a,
+ integer *lda, real *wr, real *wi, real *vl, integer *ldvl, real *vr,
+ integer *ldvr, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3, i__4;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, k;
+ static real r__, cs, sn;
+ static integer ihi;
+ static real scl;
+ static integer ilo;
+ static real dum[1], eps;
+ static integer ibal;
+ static char side[1];
+ static integer maxb;
+ static real anrm;
+ static integer ierr, itau, iwrk, nout;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ extern doublereal snrm2_(integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ extern doublereal slapy2_(real *, real *);
+ extern /* Subroutine */ int slabad_(real *, real *);
+ static logical scalea;
+ static real cscale;
+ extern /* Subroutine */ int sgebak_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, integer *, integer *), sgebal_(char *, integer *, real *, integer *,
+ integer *, integer *, real *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int sgehrd_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), xerbla_(char
+ *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical select[1];
+ static real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), slartg_(real *, real *,
+ real *, real *, real *), sorghr_(integer *, integer *, integer *,
+ real *, integer *, real *, real *, integer *, integer *), shseqr_(
+ char *, char *, integer *, integer *, integer *, real *, integer *
+ , real *, real *, real *, integer *, real *, integer *, integer *), strevc_(char *, char *, logical *, integer *,
+ real *, integer *, real *, integer *, real *, integer *, integer *
+ , integer *, real *, integer *);
+ static integer minwrk, maxwrk;
+ static logical wantvl;
+ static real smlnum;
+ static integer hswork;
+ static logical lquery, wantvr;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ December 8, 1999
+
+
+ Purpose
+ =======
+
+ SGEEV computes for an N-by-N real nonsymmetric matrix A, the
+ eigenvalues and, optionally, the left and/or right eigenvectors.
+
+ The right eigenvector v(j) of A satisfies
+ A * v(j) = lambda(j) * v(j)
+ where lambda(j) is its eigenvalue.
+ The left eigenvector u(j) of A satisfies
+ u(j)**H * A = lambda(j) * u(j)**H
+ where u(j)**H denotes the conjugate transpose of u(j).
+
+ The computed eigenvectors are normalized to have Euclidean norm
+ equal to 1 and largest component real.
+
+ Arguments
+ =========
+
+ JOBVL (input) CHARACTER*1
+ = 'N': left eigenvectors of A are not computed;
+ = 'V': left eigenvectors of A are computed.
+
+ JOBVR (input) CHARACTER*1
+ = 'N': right eigenvectors of A are not computed;
+ = 'V': right eigenvectors of A are computed.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the N-by-N matrix A.
+ On exit, A has been overwritten.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ WR (output) REAL array, dimension (N)
+ WI (output) REAL array, dimension (N)
+ WR and WI contain the real and imaginary parts,
+ respectively, of the computed eigenvalues. Complex
+ conjugate pairs of eigenvalues appear consecutively
+ with the eigenvalue having the positive imaginary part
+ first.
+
+ VL (output) REAL array, dimension (LDVL,N)
+ If JOBVL = 'V', the left eigenvectors u(j) are stored one
+ after another in the columns of VL, in the same order
+ as their eigenvalues.
+ If JOBVL = 'N', VL is not referenced.
+ If the j-th eigenvalue is real, then u(j) = VL(:,j),
+ the j-th column of VL.
+ If the j-th and (j+1)-st eigenvalues form a complex
+ conjugate pair, then u(j) = VL(:,j) + i*VL(:,j+1) and
+ u(j+1) = VL(:,j) - i*VL(:,j+1).
+
+ LDVL (input) INTEGER
+ The leading dimension of the array VL. LDVL >= 1; if
+ JOBVL = 'V', LDVL >= N.
+
+ VR (output) REAL array, dimension (LDVR,N)
+ If JOBVR = 'V', the right eigenvectors v(j) are stored one
+ after another in the columns of VR, in the same order
+ as their eigenvalues.
+ If JOBVR = 'N', VR is not referenced.
+ If the j-th eigenvalue is real, then v(j) = VR(:,j),
+ the j-th column of VR.
+ If the j-th and (j+1)-st eigenvalues form a complex
+ conjugate pair, then v(j) = VR(:,j) + i*VR(:,j+1) and
+ v(j+1) = VR(:,j) - i*VR(:,j+1).
+
+ LDVR (input) INTEGER
+ The leading dimension of the array VR. LDVR >= 1; if
+ JOBVR = 'V', LDVR >= N.
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,3*N), and
+ if JOBVL = 'V' or JOBVR = 'V', LWORK >= 4*N. For good
+ performance, LWORK must generally be larger.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = i, the QR algorithm failed to compute all the
+ eigenvalues, and no eigenvectors have been computed;
+ elements i+1:N of WR and WI contain eigenvalues which
+ have converged.
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --wr;
+ --wi;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ lquery = *lwork == -1;
+ wantvl = lsame_(jobvl, "V");
+ wantvr = lsame_(jobvr, "V");
+ if (! wantvl && ! lsame_(jobvl, "N")) {
+ *info = -1;
+ } else if (! wantvr && ! lsame_(jobvr, "N")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if ((*ldvl < 1) || (wantvl && *ldvl < *n)) {
+ *info = -9;
+ } else if ((*ldvr < 1) || (wantvr && *ldvr < *n)) {
+ *info = -11;
+ }
+
+/*
+ Compute workspace
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ NB refers to the optimal block size for the immediately
+ following subroutine, as returned by ILAENV.
+ HSWORK refers to the workspace preferred by SHSEQR, as
+ calculated below. HSWORK is computed assuming ILO=1 and IHI=N,
+ the worst case.)
+*/
+
+ minwrk = 1;
+ if (*info == 0 && ((*lwork >= 1) || (lquery))) {
+ maxwrk = ((*n) << (1)) + *n * ilaenv_(&c__1, "SGEHRD", " ", n, &c__1,
+ n, &c__0, (ftnlen)6, (ftnlen)1);
+ if (! wantvl && ! wantvr) {
+/* Computing MAX */
+ i__1 = 1, i__2 = *n * 3;
+ minwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = ilaenv_(&c__8, "SHSEQR", "EN", n, &c__1, n, &c_n1, (ftnlen)
+ 6, (ftnlen)2);
+ maxb = max(i__1,2);
+/*
+ Computing MIN
+ Computing MAX
+*/
+ i__3 = 2, i__4 = ilaenv_(&c__4, "SHSEQR", "EN", n, &c__1, n, &
+ c_n1, (ftnlen)6, (ftnlen)2);
+ i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
+ k = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = k * (k + 2), i__2 = (*n) << (1);
+ hswork = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *n +
+ hswork;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = 1, i__2 = (*n) << (2);
+ minwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = ((*n) << (1)) + (*n - 1) * ilaenv_(&c__1,
+ "SORGHR", " ", n, &c__1, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = ilaenv_(&c__8, "SHSEQR", "SV", n, &c__1, n, &c_n1, (ftnlen)
+ 6, (ftnlen)2);
+ maxb = max(i__1,2);
+/*
+ Computing MIN
+ Computing MAX
+*/
+ i__3 = 2, i__4 = ilaenv_(&c__4, "SHSEQR", "SV", n, &c__1, n, &
+ c_n1, (ftnlen)6, (ftnlen)2);
+ i__1 = min(maxb,*n), i__2 = max(i__3,i__4);
+ k = min(i__1,i__2);
+/* Computing MAX */
+ i__1 = k * (k + 2), i__2 = (*n) << (1);
+ hswork = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + 1, i__1 = max(i__1,i__2), i__2 = *n +
+ hswork;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = (*n) << (2);
+ maxwrk = max(i__1,i__2);
+ }
+ work[1] = (real) maxwrk;
+ }
+ if (*lwork < minwrk && ! lquery) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEEV ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+ smlnum = sqrt(smlnum) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", n, n, &a[a_offset], lda, dum);
+ scalea = FALSE_;
+ if (anrm > 0.f && anrm < smlnum) {
+ scalea = TRUE_;
+ cscale = smlnum;
+ } else if (anrm > bignum) {
+ scalea = TRUE_;
+ cscale = bignum;
+ }
+ if (scalea) {
+ slascl_("G", &c__0, &c__0, &anrm, &cscale, n, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+/*
+ Balance the matrix
+ (Workspace: need N)
+*/
+
+ ibal = 1;
+ sgebal_("B", n, &a[a_offset], lda, &ilo, &ihi, &work[ibal], &ierr);
+
+/*
+ Reduce to upper Hessenberg form
+ (Workspace: need 3*N, prefer 2*N+N*NB)
+*/
+
+ itau = ibal + *n;
+ iwrk = itau + *n;
+ i__1 = *lwork - iwrk + 1;
+ sgehrd_(n, &ilo, &ihi, &a[a_offset], lda, &work[itau], &work[iwrk], &i__1,
+ &ierr);
+
+ if (wantvl) {
+
+/*
+ Want left eigenvectors
+ Copy Householder vectors to VL
+*/
+
+ *(unsigned char *)side = 'L';
+ slacpy_("L", n, n, &a[a_offset], lda, &vl[vl_offset], ldvl)
+ ;
+
+/*
+ Generate orthogonal matrix in VL
+ (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB)
+*/
+
+ i__1 = *lwork - iwrk + 1;
+ sorghr_(n, &ilo, &ihi, &vl[vl_offset], ldvl, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+
+/*
+ Perform QR iteration, accumulating Schur vectors in VL
+ (Workspace: need N+1, prefer N+HSWORK (see comments) )
+*/
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ shseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+ vl[vl_offset], ldvl, &work[iwrk], &i__1, info);
+
+ if (wantvr) {
+
+/*
+ Want left and right eigenvectors
+ Copy Schur vectors to VR
+*/
+
+ *(unsigned char *)side = 'B';
+ slacpy_("F", n, n, &vl[vl_offset], ldvl, &vr[vr_offset], ldvr);
+ }
+
+ } else if (wantvr) {
+
+/*
+ Want right eigenvectors
+ Copy Householder vectors to VR
+*/
+
+ *(unsigned char *)side = 'R';
+ slacpy_("L", n, n, &a[a_offset], lda, &vr[vr_offset], ldvr)
+ ;
+
+/*
+ Generate orthogonal matrix in VR
+ (Workspace: need 3*N-1, prefer 2*N+(N-1)*NB)
+*/
+
+ i__1 = *lwork - iwrk + 1;
+ sorghr_(n, &ilo, &ihi, &vr[vr_offset], ldvr, &work[itau], &work[iwrk],
+ &i__1, &ierr);
+
+/*
+ Perform QR iteration, accumulating Schur vectors in VR
+ (Workspace: need N+1, prefer N+HSWORK (see comments) )
+*/
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ shseqr_("S", "V", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+ vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
+
+ } else {
+
+/*
+ Compute eigenvalues only
+ (Workspace: need N+1, prefer N+HSWORK (see comments) )
+*/
+
+ iwrk = itau;
+ i__1 = *lwork - iwrk + 1;
+ shseqr_("E", "N", n, &ilo, &ihi, &a[a_offset], lda, &wr[1], &wi[1], &
+ vr[vr_offset], ldvr, &work[iwrk], &i__1, info);
+ }
+
+/* If INFO > 0 from SHSEQR, then quit */
+
+ if (*info > 0) {
+ goto L50;
+ }
+
+ if ((wantvl) || (wantvr)) {
+
+/*
+ Compute left and/or right eigenvectors
+ (Workspace: need 4*N)
+*/
+
+ strevc_(side, "B", select, n, &a[a_offset], lda, &vl[vl_offset], ldvl,
+ &vr[vr_offset], ldvr, n, &nout, &work[iwrk], &ierr);
+ }
+
+ if (wantvl) {
+
+/*
+ Undo balancing of left eigenvectors
+ (Workspace: need N)
+*/
+
+ sgebak_("B", "L", n, &ilo, &ihi, &work[ibal], n, &vl[vl_offset], ldvl,
+ &ierr);
+
+/* Normalize left eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (wi[i__] == 0.f) {
+ scl = 1.f / snrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ } else if (wi[i__] > 0.f) {
+ r__1 = snrm2_(n, &vl[i__ * vl_dim1 + 1], &c__1);
+ r__2 = snrm2_(n, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+ scl = 1.f / slapy2_(&r__1, &r__2);
+ sscal_(n, &scl, &vl[i__ * vl_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vl[(i__ + 1) * vl_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+ r__1 = vl[k + i__ * vl_dim1];
+/* Computing 2nd power */
+ r__2 = vl[k + (i__ + 1) * vl_dim1];
+ work[iwrk + k - 1] = r__1 * r__1 + r__2 * r__2;
+/* L10: */
+ }
+ k = isamax_(n, &work[iwrk], &c__1);
+ slartg_(&vl[k + i__ * vl_dim1], &vl[k + (i__ + 1) * vl_dim1],
+ &cs, &sn, &r__);
+ srot_(n, &vl[i__ * vl_dim1 + 1], &c__1, &vl[(i__ + 1) *
+ vl_dim1 + 1], &c__1, &cs, &sn);
+ vl[k + (i__ + 1) * vl_dim1] = 0.f;
+ }
+/* L20: */
+ }
+ }
+
+ if (wantvr) {
+
+/*
+ Undo balancing of right eigenvectors
+ (Workspace: need N)
+*/
+
+ sgebak_("B", "R", n, &ilo, &ihi, &work[ibal], n, &vr[vr_offset], ldvr,
+ &ierr);
+
+/* Normalize right eigenvectors and make largest component real */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (wi[i__] == 0.f) {
+ scl = 1.f / snrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ } else if (wi[i__] > 0.f) {
+ r__1 = snrm2_(n, &vr[i__ * vr_dim1 + 1], &c__1);
+ r__2 = snrm2_(n, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+ scl = 1.f / slapy2_(&r__1, &r__2);
+ sscal_(n, &scl, &vr[i__ * vr_dim1 + 1], &c__1);
+ sscal_(n, &scl, &vr[(i__ + 1) * vr_dim1 + 1], &c__1);
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing 2nd power */
+ r__1 = vr[k + i__ * vr_dim1];
+/* Computing 2nd power */
+ r__2 = vr[k + (i__ + 1) * vr_dim1];
+ work[iwrk + k - 1] = r__1 * r__1 + r__2 * r__2;
+/* L30: */
+ }
+ k = isamax_(n, &work[iwrk], &c__1);
+ slartg_(&vr[k + i__ * vr_dim1], &vr[k + (i__ + 1) * vr_dim1],
+ &cs, &sn, &r__);
+ srot_(n, &vr[i__ * vr_dim1 + 1], &c__1, &vr[(i__ + 1) *
+ vr_dim1 + 1], &c__1, &cs, &sn);
+ vr[k + (i__ + 1) * vr_dim1] = 0.f;
+ }
+/* L40: */
+ }
+ }
+
+/* Undo scaling if necessary */
+
+L50:
+ if (scalea) {
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[*info +
+ 1], &i__2, &ierr);
+ i__1 = *n - *info;
+/* Computing MAX */
+ i__3 = *n - *info;
+ i__2 = max(i__3,1);
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[*info +
+ 1], &i__2, &ierr);
+ if (*info > 0) {
+ i__1 = ilo - 1;
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wr[1],
+ n, &ierr);
+ i__1 = ilo - 1;
+ slascl_("G", &c__0, &c__0, &cscale, &anrm, &i__1, &c__1, &wi[1],
+ n, &ierr);
+ }
+ }
+
+ work[1] = (real) maxwrk;
+ return 0;
+
+/* End of SGEEV */
+
+} /* sgeev_ */
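+
+/*
+    Illustrative sketch (exposition only): calling the sgeev_ driver and
+    decoding the WR/WI and VR conventions documented above. Names are
+    hypothetical; a is an n-by-n column-major matrix from the caller,
+    and vldum is a dummy since JOBVL = 'N' leaves VL unreferenced.
+
+        integer n = 4, lda = 4, ldvl = 1, ldvr = 4, lwork = -1, info;
+        real wr[4], wi[4], vldum[1], vr[16], wkopt, *work;
+        sgeev_("N", "V", &n, a, &lda, wr, wi, vldum, &ldvl,
+               vr, &ldvr, &wkopt, &lwork, &info);    // workspace query
+        lwork = (integer) wkopt;
+        work = (real *) malloc(lwork * sizeof(real));
+        sgeev_("N", "V", &n, a, &lda, wr, wi, vldum, &ldvl,
+               vr, &ldvr, work, &lwork, &info);
+        // eigenvalue j is wr[j] + i*wi[j]; if wi[j] > 0, columns j and
+        // j+1 of vr hold the real and imaginary parts of v(j), and
+        // v(j+1) is their conjugate, as described for VR above.
+        free(work);
+*/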
+
+/* Subroutine */ int sgehd2_(integer *n, integer *ilo, integer *ihi, real *a,
+ integer *lda, real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__;
+ static real aii;
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *), slarfg_(integer *, real *, real *,
+ integer *, real *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SGEHD2 reduces a real general matrix A to upper Hessenberg form H by
+ an orthogonal similarity transformation: Q' * A * Q = H .
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that A is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to SGEBAL; otherwise they should be
+ set to 1 and N respectively. See Further Details.
+ 1 <= ILO <= IHI <= max(1,N).
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the n by n general matrix to be reduced.
+ On exit, the upper triangle and the first subdiagonal of A
+ are overwritten with the upper Hessenberg matrix H, and the
+ elements below the first subdiagonal, with the array TAU,
+ represent the orthogonal matrix Q as a product of elementary
+ reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (output) REAL array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace) REAL array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of (ihi-ilo) elementary
+ reflectors
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
+ exit in A(i+2:ihi,i), and tau in TAU(i).
+
+ The contents of A are illustrated by the following example, with
+ n = 7, ilo = 2 and ihi = 6:
+
+ on entry, on exit,
+
+     ( a   a   a   a   a   a   a )    ( a   a   h   h   h   h   a )
+     (     a   a   a   a   a   a )    (     a   h   h   h   h   a )
+     (     a   a   a   a   a   a )    (     h   h   h   h   h   h )
+     (     a   a   a   a   a   a )    (     v2  h   h   h   h   h )
+     (     a   a   a   a   a   a )    (     v2  v3  h   h   h   h )
+     (     a   a   a   a   a   a )    (     v2  v3  v4  h   h   h )
+     (                         a )    (                         a )
+
+ where a denotes an element of the original matrix A, h denotes a
+ modified element of the upper Hessenberg matrix H, and vi denotes an
+ element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -2;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEHD2", &i__1);
+ return 0;
+ }
+
+ i__1 = *ihi - 1;
+ for (i__ = *ilo; i__ <= i__1; ++i__) {
+
+/* Compute elementary reflector H(i) to annihilate A(i+2:ihi,i) */
+
+ i__2 = *ihi - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ slarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ *
+ a_dim1], &c__1, &tau[i__]);
+ aii = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.f;
+
+/* Apply H(i) to A(1:ihi,i+1:ihi) from the right */
+
+ i__2 = *ihi - i__;
+ slarf_("Right", ihi, &i__2, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+ i__], &a[(i__ + 1) * a_dim1 + 1], lda, &work[1]);
+
+/* Apply H(i) to A(i+1:ihi,i+1:n) from the left */
+
+ i__2 = *ihi - i__;
+ i__3 = *n - i__;
+ slarf_("Left", &i__2, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + 1 + (i__ + 1) * a_dim1], lda, &work[1]);
+
+ a[i__ + 1 + i__ * a_dim1] = aii;
+/* L10: */
+ }
+
+ return 0;
+
+/* End of SGEHD2 */
+
+} /* sgehd2_ */
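+
+/*
+    Illustrative sketch (exposition only): each reflector used above has
+    the form H(i) = I - tau*v*v', so applying it to a single vector x of
+    length len reduces to one dot product and one scaled update. A
+    hypothetical scalar version:
+
+        real dot = 0.f;
+        integer j;
+        for (j = 0; j < len; ++j)      // w = v' * x
+            dot += v[j] * x[j];
+        for (j = 0; j < len; ++j)      // x := x - tau * v * w
+            x[j] -= tau * v[j] * dot;
+
+    This is essentially what slarf_ does, vectorized over all columns
+    (or rows) of the matrix it is applied to.
+*/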
+
+/* Subroutine */ int sgehrd_(integer *n, integer *ilo, integer *ihi, real *a,
+ integer *lda, real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__;
+ static real t[4160] /* was [65][64] */;
+ static integer ib;
+ static real ei;
+ static integer nb, nh, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *), sgehd2_(integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *), slarfb_(
+ char *, char *, char *, char *, integer *, integer *, integer *,
+ real *, integer *, real *, integer *, real *, integer *, real *,
+ integer *), slahrd_(integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+ , real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SGEHRD reduces a real general matrix A to upper Hessenberg form H by
+ an orthogonal similarity transformation: Q' * A * Q = H .
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that A is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to SGEBAL; otherwise they should be
+ set to 1 and N respectively. See Further Details.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the N-by-N general matrix to be reduced.
+ On exit, the upper triangle and the first subdiagonal of A
+ are overwritten with the upper Hessenberg matrix H, and the
+ elements below the first subdiagonal, with the array TAU,
+ represent the orthogonal matrix Q as a product of elementary
+ reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (output) REAL array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details). Elements 1:ILO-1 and IHI:N-1 of TAU are set to
+ zero.
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The length of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of (ihi-ilo) elementary
+ reflectors
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0, v(i+1) = 1 and v(ihi+1:n) = 0; v(i+2:ihi) is stored on
+ exit in A(i+2:ihi,i), and tau in TAU(i).
+
+ The contents of A are illustrated by the following example, with
+ n = 7, ilo = 2 and ihi = 6:
+
+ on entry, on exit,
+
+     ( a   a   a   a   a   a   a )    ( a   a   h   h   h   h   a )
+     (     a   a   a   a   a   a )    (     a   h   h   h   h   a )
+     (     a   a   a   a   a   a )    (     h   h   h   h   h   h )
+     (     a   a   a   a   a   a )    (     v2  h   h   h   h   h )
+     (     a   a   a   a   a   a )    (     v2  v3  h   h   h   h )
+     (     a   a   a   a   a   a )    (     v2  v3  v4  h   h   h )
+     (                         a )    (                         a )
+
+ where a denotes an element of the original matrix A, h denotes a
+ modified element of the upper Hessenberg matrix H, and vi denotes an
+ element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+/* Computing MIN */
+ i__1 = 64, i__2 = ilaenv_(&c__1, "SGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = min(i__1,i__2);
+ lwkopt = *n * nb;
+ work[1] = (real) lwkopt;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -2;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEHRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Set elements 1:ILO-1 and IHI:N-1 of TAU to zero */
+
+ i__1 = *ilo - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ tau[i__] = 0.f;
+/* L10: */
+ }
+ i__1 = *n - 1;
+ for (i__ = max(1,*ihi); i__ <= i__1; ++i__) {
+ tau[i__] = 0.f;
+/* L20: */
+ }
+
+/* Quick return if possible */
+
+ nh = *ihi - *ilo + 1;
+ if (nh <= 1) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+/*
+ Determine the block size.
+
+ Computing MIN
+*/
+ i__1 = 64, i__2 = ilaenv_(&c__1, "SGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nb = min(i__1,i__2);
+ nbmin = 2;
+ iws = 1;
+ if (nb > 1 && nb < nh) {
+
+/*
+ Determine when to cross over from blocked to unblocked code
+ (last block is always handled by unblocked code).
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "SGEHRD", " ", n, ilo, ihi, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < nh) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ iws = *n * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: determine the
+ minimum value of NB, and reduce NB or force use of
+ unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SGEHRD", " ", n, ilo, ihi, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ if (*lwork >= *n * nbmin) {
+ nb = *lwork / *n;
+ } else {
+ nb = 1;
+ }
+ }
+ }
+ }
+ ldwork = *n;
+
+ if ((nb < nbmin) || (nb >= nh)) {
+
+/* Use unblocked code below */
+
+ i__ = *ilo;
+
+ } else {
+
+/* Use blocked code */
+
+ i__1 = *ihi - 1 - nx;
+ i__2 = nb;
+ for (i__ = *ilo; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *ihi - i__;
+ ib = min(i__3,i__4);
+
+/*
+ Reduce columns i:i+ib-1 to Hessenberg form, returning the
+ matrices V and T of the block reflector H = I - V*T*V'
+ which performs the reduction, and also the matrix Y = A*V*T
+*/
+
+ slahrd_(ihi, &i__, &ib, &a[i__ * a_dim1 + 1], lda, &tau[i__], t, &
+ c__65, &work[1], &ldwork);
+
+/*
+ Apply the block reflector H to A(1:ihi,i+ib:ihi) from the
+ right, computing A := A - Y * V'. V(i+ib,ib-1) must be set
+ to 1.
+*/
+
+ ei = a[i__ + ib + (i__ + ib - 1) * a_dim1];
+ a[i__ + ib + (i__ + ib - 1) * a_dim1] = 1.f;
+ i__3 = *ihi - i__ - ib + 1;
+ sgemm_("No transpose", "Transpose", ihi, &i__3, &ib, &c_b1290, &
+ work[1], &ldwork, &a[i__ + ib + i__ * a_dim1], lda, &
+ c_b1011, &a[(i__ + ib) * a_dim1 + 1], lda);
+ a[i__ + ib + (i__ + ib - 1) * a_dim1] = ei;
+
+/*
+ Apply the block reflector H to A(i+1:ihi,i+ib:n) from the
+ left
+*/
+
+ i__3 = *ihi - i__;
+ i__4 = *n - i__ - ib + 1;
+ slarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
+ i__4, &ib, &a[i__ + 1 + i__ * a_dim1], lda, t, &c__65, &a[
+ i__ + 1 + (i__ + ib) * a_dim1], lda, &work[1], &ldwork);
+/* L30: */
+ }
+ }
+
+/* Use unblocked code to reduce the rest of the matrix */
+
+ sgehd2_(n, &i__, ihi, &a[a_offset], lda, &tau[1], &work[1], &iinfo);
+ work[1] = (real) iws;
+
+ return 0;
+
+/* End of SGEHRD */
+
+} /* sgehrd_ */
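+
+/*
+    Illustrative sketch (exposition only): the sgebal_/sgehrd_ pipeline
+    that feeds the QR iteration in sgeev_ above. Names are hypothetical;
+    a is an n-by-n column-major matrix from the caller.
+
+        integer n = 8, lda = 8, ilo, ihi, lwork = -1, info;
+        real scale[8], tau[7], wkopt, *work;
+        sgebal_("B", &n, a, &lda, &ilo, &ihi, scale, &info);
+        sgehrd_(&n, &ilo, &ihi, a, &lda, tau, &wkopt, &lwork, &info);
+        lwork = (integer) wkopt;                  // workspace query
+        work = (real *) malloc(lwork * sizeof(real));
+        sgehrd_(&n, &ilo, &ihi, a, &lda, tau, work, &lwork, &info);
+        free(work);
+        // a now holds H in its upper Hessenberg part and the reflectors
+        // defining Q below the first subdiagonal, as documented above.
+*/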
+
+/* Subroutine */ int sgelq2_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, k;
+ static real aii;
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *), slarfg_(integer *, real *, real *,
+ integer *, real *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SGELQ2 computes an LQ factorization of a real m by n matrix A:
+ A = L * Q.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the m by n matrix A.
+ On exit, the elements on and below the diagonal of the array
+ contain the m by min(m,n) lower trapezoidal matrix L (L is
+ lower triangular if m <= n); the elements above the diagonal,
+ with the array TAU, represent the orthogonal matrix Q as a
+ product of elementary reflectors (see Further Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) REAL array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace) REAL array, dimension (M)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(k) . . . H(2) H(1), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGELQ2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) * a_dim1]
+ , lda, &tau[i__]);
+ if (i__ < *m) {
+
+/* Apply H(i) to A(i+1:m,i:n) from the right */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.f;
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ slarf_("Right", &i__2, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[
+ i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of SGELQ2 */
+
+} /* sgelq2_ */
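+
+/*
+    Illustrative sketch (exposition only): unpacking L from the output of
+    the factorization above, assuming m <= n so that L is m-by-m lower
+    triangular. a (leading dimension lda) and the m-by-m result array l
+    are hypothetical caller-owned arrays; zero-based C indexing.
+
+        integer i, j;
+        for (j = 0; j < m; ++j)
+            for (i = 0; i < m; ++i)
+                l[i + j * m] = (j <= i) ? a[i + j * lda] : 0.f;
+
+    The elements above the diagonal of a, together with tau, still encode
+    Q as the product of reflectors described in Further Details.
+*/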
+
+/* Subroutine */ int sgelqf_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sgelq2_(integer *, integer *, real *, integer
+ *, real *, real *, integer *), slarfb_(char *, char *, char *,
+ char *, integer *, integer *, integer *, real *, integer *, real *
+ , integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SGELQF computes an LQ factorization of a real M-by-N matrix A:
+ A = L * Q.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, the elements on and below the diagonal of the array
+ contain the m-by-min(m,n) lower trapezoidal matrix L (L is
+ lower triangular if m <= n); the elements above the diagonal,
+ with the array TAU, represent the orthogonal matrix Q as a
+ product of elementary reflectors (see Further Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) REAL array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,M).
+ For optimum performance LWORK >= M*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(k) . . . H(2) H(1), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:n) is stored on exit in A(i,i+1:n),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "SGELQF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ lwkopt = *m * nb;
+ work[1] = (real) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*m) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGELQF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if (nb > 1 && nb < k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SGELQF", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SGELQF", " ", m, n, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+ i__1 = k - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/*
+ Compute the LQ factorization of the current block
+ A(i:i+ib-1,i:n)
+*/
+
+ i__3 = *n - i__ + 1;
+ sgelq2_(&ib, &i__3, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+ 1], &iinfo);
+ if (i__ + ib <= *m) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__3 = *n - i__ + 1;
+ slarft_("Forward", "Rowwise", &i__3, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i+ib:m,i:n) from the right */
+
+ i__3 = *m - i__ - ib + 1;
+ i__4 = *n - i__ + 1;
+ slarfb_("Right", "No transpose", "Forward", "Rowwise", &i__3,
+ &i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+ 1], &ldwork);
+ }
+/* L10: */
+ }
+ } else {
+ i__ = 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+ if (i__ <= k) {
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ sgelq2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+ , &iinfo);
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SGELQF */
+
+} /* sgelqf_ */
+
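+/*
+   Editor's note: a minimal usage sketch (kept under "#if 0" so it is never
+   compiled) of the LWORK = -1 workspace query documented above for SGELQF.
+   The dimensions, the caller-filled contents of `a`, and the bare-bones
+   malloc handling are assumptions made for illustration only; they are not
+   part of the library.
+*/
+#if 0
+#include <stdlib.h>
+
+static void example_sgelqf(void)
+{
+    integer m = 4, n = 6, lda = 4, lwork = -1, info = 0;
+    real a[4 * 6];     /* column-major M-by-N matrix, filled by the caller */
+    real tau[4];       /* min(M,N) scalar factors of the reflectors */
+    real query;
+    real *work;
+
+    /* First call: LWORK = -1 requests only the optimal workspace size,
+       which is returned in the first entry of WORK. */
+    sgelqf_(&m, &n, a, &lda, tau, &query, &lwork, &info);
+
+    lwork = (integer) query;
+    work = (real *) malloc(sizeof(real) * lwork);
+
+    /* Second call performs the actual factorization A = L * Q. */
+    sgelqf_(&m, &n, a, &lda, tau, work, &lwork, &info);
+    /* info == 0 on success; info == -i flags an illegal i-th argument. */
+
+    free(work);
+}
+#endif
+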
+/* Subroutine */ int sgelsd_(integer *m, integer *n, integer *nrhs, real *a,
+ integer *lda, real *b, integer *ldb, real *s, real *rcond, integer *
+ rank, real *work, integer *lwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ static integer ie, il, mm;
+ static real eps, anrm, bnrm;
+ static integer itau, nlvl, iascl, ibscl;
+ static real sfmin;
+ static integer minmn, maxmn, itaup, itauq, mnthr, nwork;
+ extern /* Subroutine */ int slabad_(real *, real *), sgebrd_(integer *,
+ integer *, real *, integer *, real *, real *, real *, real *,
+ real *, integer *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static real bignum;
+ extern /* Subroutine */ int sgelqf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *), slalsd_(char *, integer
+ *, integer *, integer *, real *, real *, real *, integer *, real *
+ , integer *, real *, integer *, integer *), slascl_(char *
+ , integer *, integer *, real *, real *, integer *, integer *,
+ real *, integer *, integer *);
+ static integer wlalsd;
+ extern /* Subroutine */ int sgeqrf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *), slacpy_(char *, integer
+ *, integer *, real *, integer *, real *, integer *),
+ slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *);
+ static integer ldwork;
+ extern /* Subroutine */ int sormbr_(char *, char *, char *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+ , real *, integer *, integer *);
+ static integer minwrk, maxwrk;
+ static real smlnum;
+ extern /* Subroutine */ int sormlq_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ static logical lquery;
+ static integer smlsiz;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SGELSD computes the minimum-norm solution to a real linear least
+ squares problem:
+ minimize 2-norm(| b - A*x |)
+ using the singular value decomposition (SVD) of A. A is an M-by-N
+ matrix which may be rank-deficient.
+
+ Several right hand side vectors b and solution vectors x can be
+ handled in a single call; they are stored as the columns of the
+ M-by-NRHS right hand side matrix B and the N-by-NRHS solution
+ matrix X.
+
+ The problem is solved in three steps:
+ (1) Reduce the coefficient matrix A to bidiagonal form with
+ Householder transformations, reducing the original problem
+ into a "bidiagonal least squares problem" (BLS)
+ (2) Solve the BLS using a divide and conquer approach.
+ (3) Apply back all the Householder transformations to solve
+ the original least squares problem.
+
+ The effective rank of A is determined by treating as zero those
+ singular values which are less than RCOND times the largest singular
+ value.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrices B and X. NRHS >= 0.
+
+ A (input) REAL array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, A has been destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ B (input/output) REAL array, dimension (LDB,NRHS)
+ On entry, the M-by-NRHS right hand side matrix B.
+ On exit, B is overwritten by the N-by-NRHS solution
+ matrix X. If m >= n and RANK = n, the residual
+ sum-of-squares for the solution in the i-th column is given
+ by the sum of squares of elements n+1:m in that column.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,max(M,N)).
+
+ S (output) REAL array, dimension (min(M,N))
+ The singular values of A in decreasing order.
+ The condition number of A in the 2-norm = S(1)/S(min(m,n)).
+
+ RCOND (input) REAL
+ RCOND is used to determine the effective rank of A.
+ Singular values S(i) <= RCOND*S(1) are treated as zero.
+ If RCOND < 0, machine precision is used instead.
+
+ RANK (output) INTEGER
+ The effective rank of A, i.e., the number of singular values
+ which are greater than RCOND*S(1).
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK must be at least 1.
+ The exact minimum amount of workspace needed depends on M,
+ N and NRHS. As long as LWORK is at least
+ 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2,
+ if M is greater than or equal to N or
+ 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2,
+ if M is less than N, the code will execute correctly.
+ SMLSIZ is returned by ILAENV and is equal to the maximum
+ size of the subproblems at the bottom of the computation
+ tree (usually about 25), and
+ NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 )
+ For good performance, LWORK should generally be larger.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ IWORK (workspace) INTEGER array, dimension (LIWORK)
+ LIWORK >= 3 * MINMN * NLVL + 11 * MINMN,
+ where MINMN = MIN( M,N ).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: the algorithm for computing the SVD failed to converge;
+ if INFO = i, i off-diagonal elements of an intermediate
+ bidiagonal form did not converge to zero.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input arguments.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --s;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ maxmn = max(*m,*n);
+ mnthr = ilaenv_(&c__6, "SGELSD", " ", m, n, nrhs, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*ldb < max(1,maxmn)) {
+ *info = -7;
+ }
+
+ smlsiz = ilaenv_(&c__9, "SGELSD", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+
+/*
+ Compute workspace.
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ NB refers to the optimal block size for the immediately
+ following subroutine, as returned by ILAENV.)
+*/
+
+ minwrk = 1;
+ minmn = max(1,minmn);
+/* Computing MAX */
+ i__1 = (integer) (log((real) minmn / (real) (smlsiz + 1)) / log(2.f)) + 1;
+ nlvl = max(i__1,0);
+
+ if (*info == 0) {
+ maxwrk = 0;
+ mm = *m;
+ if (*m >= *n && *m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+ mm = *n;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m,
+ n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n + *nrhs * ilaenv_(&c__1, "SORMQR", "LT",
+ m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+ }
+ if (*m >= *n) {
+
+/*
+ Path 1 - overdetermined or exactly determined.
+
+ Computing MAX
+*/
+ i__1 = maxwrk, i__2 = *n * 3 + (mm + *n) * ilaenv_(&c__1, "SGEBRD"
+ , " ", &mm, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + *nrhs * ilaenv_(&c__1, "SORMBR",
+ "QLT", &mm, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + (*n - 1) * ilaenv_(&c__1, "SORMBR",
+ "PLN", n, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ wlalsd = *n * 9 + ((*n) << (1)) * smlsiz + ((*n) << (3)) * nlvl +
+ *n * *nrhs + i__1 * i__1;
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *n * 3 + wlalsd;
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = *n * 3 + mm, i__2 = *n * 3 + *nrhs, i__1 = max(i__1,i__2),
+ i__2 = *n * 3 + wlalsd;
+ minwrk = max(i__1,i__2);
+ }
+ if (*n > *m) {
+/* Computing 2nd power */
+ i__1 = smlsiz + 1;
+ wlalsd = *m * 9 + ((*m) << (1)) * smlsiz + ((*m) << (3)) * nlvl +
+ *m * *nrhs + i__1 * i__1;
+ if (*n >= mnthr) {
+
+/*
+ Path 2a - underdetermined, with many more columns
+ than rows.
+*/
+
+ maxwrk = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &c_n1,
+ &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + ((*m) << (1))
+ * ilaenv_(&c__1, "SGEBRD", " ", m, m, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + *nrhs *
+ ilaenv_(&c__1, "SORMBR", "QLT", m, nrhs, m, &c_n1, (
+ ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + (*m - 1) *
+ ilaenv_(&c__1, "SORMBR", "PLN", m, nrhs, m, &c_n1, (
+ ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+ if (*nrhs > 1) {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + *m + *m * *nrhs;
+ maxwrk = max(i__1,i__2);
+ } else {
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (1));
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m + *nrhs * ilaenv_(&c__1, "SORMLQ",
+ "LT", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)2);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * *m + ((*m) << (2)) + wlalsd;
+ maxwrk = max(i__1,i__2);
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ maxwrk = *m * 3 + (*n + *m) * ilaenv_(&c__1, "SGEBRD", " ", m,
+ n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *nrhs * ilaenv_(&c__1, "SORMBR"
+ , "QLT", m, nrhs, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR",
+ "PLN", n, nrhs, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ maxwrk = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = *m * 3 + wlalsd;
+ maxwrk = max(i__1,i__2);
+ }
+/* Computing MAX */
+ i__1 = *m * 3 + *nrhs, i__2 = *m * 3 + *m, i__1 = max(i__1,i__2),
+ i__2 = *m * 3 + wlalsd;
+ minwrk = max(i__1,i__2);
+ }
+ minwrk = min(minwrk,maxwrk);
+ work[1] = (real) maxwrk;
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGELSD", &i__1);
+ return 0;
+ } else if (lquery) {
+ goto L10;
+ }
+
+/* Quick return if possible. */
+
+ if ((*m == 0) || (*n == 0)) {
+ *rank = 0;
+ return 0;
+ }
+
+/* Get machine parameters. */
+
+ eps = slamch_("P");
+ sfmin = slamch_("S");
+ smlnum = sfmin / eps;
+ bignum = 1.f / smlnum;
+ slabad_(&smlnum, &bignum);
+
+/* Scale A if max entry outside range [SMLNUM,BIGNUM]. */
+
+ anrm = slange_("M", m, n, &a[a_offset], lda, &work[1]);
+ iascl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 1;
+ } else if (anrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda,
+ info);
+ iascl = 2;
+ } else if (anrm == 0.f) {
+
+/* Matrix all zero. Return zero solution. */
+
+ i__1 = max(*m,*n);
+ slaset_("F", &i__1, nrhs, &c_b320, &c_b320, &b[b_offset], ldb);
+ slaset_("F", &minmn, &c__1, &c_b320, &c_b320, &s[1], &c__1)
+ ;
+ *rank = 0;
+ goto L10;
+ }
+
+/* Scale B if max entry outside range [SMLNUM,BIGNUM]. */
+
+ bnrm = slange_("M", m, nrhs, &b[b_offset], ldb, &work[1]);
+ ibscl = 0;
+ if (bnrm > 0.f && bnrm < smlnum) {
+
+/* Scale matrix norm up to SMLNUM. */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &smlnum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 1;
+ } else if (bnrm > bignum) {
+
+/* Scale matrix norm down to BIGNUM. */
+
+ slascl_("G", &c__0, &c__0, &bnrm, &bignum, m, nrhs, &b[b_offset], ldb,
+ info);
+ ibscl = 2;
+ }
+
+/* If M < N make sure certain entries of B are zero. */
+
+ if (*m < *n) {
+ i__1 = *n - *m;
+ slaset_("F", &i__1, nrhs, &c_b320, &c_b320, &b[*m + 1 + b_dim1], ldb);
+ }
+
+/* Overdetermined case. */
+
+ if (*m >= *n) {
+
+/* Path 1 - overdetermined or exactly determined. */
+
+ mm = *m;
+ if (*m >= mnthr) {
+
+/* Path 1a - overdetermined, with many more rows than columns. */
+
+ mm = *n;
+ itau = 1;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R.
+ (Workspace: need 2*N, prefer N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+
+/*
+ Multiply B by transpose(Q).
+ (Workspace: need N+NRHS, prefer N+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormqr_("L", "T", m, nrhs, n, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below R. */
+
+ if (*n > 1) {
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ slaset_("L", &i__1, &i__2, &c_b320, &c_b320, &a[a_dim1 + 2],
+ lda);
+ }
+ }
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A.
+ (Workspace: need 3*N+MM, prefer 3*N+(MM+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(&mm, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of R.
+ (Workspace: need 3*N+NRHS, prefer 3*N+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "T", &mm, nrhs, n, &a[a_offset], lda, &work[itauq],
+ &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ slalsd_("U", &smlsiz, n, nrhs, &s[1], &work[ie], &b[b_offset], ldb,
+ rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of R. */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "L", "N", n, nrhs, n, &a[a_offset], lda, &work[itaup], &
+ b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *m, i__2 = ((*m) << (1)) - 4, i__1 = max(i__1,i__2), i__1 =
+ max(i__1,*nrhs), i__2 = *n - *m * 3;
+ if (*n >= mnthr && *lwork >= ((*m) << (2)) + *m * *m + max(i__1,i__2))
+ {
+
+/*
+ Path 2a - underdetermined, with many more columns than rows
+ and sufficient workspace for an efficient algorithm.
+*/
+
+ ldwork = *m;
+/*
+ Computing MAX
+ Computing MAX
+*/
+ i__3 = *m, i__4 = ((*m) << (1)) - 4, i__3 = max(i__3,i__4), i__3 =
+ max(i__3,*nrhs), i__4 = *n - *m * 3;
+ i__1 = ((*m) << (2)) + *m * *lda + max(i__3,i__4), i__2 = *m * *
+ lda + *m + *m * *nrhs;
+ if (*lwork >= max(i__1,i__2)) {
+ ldwork = *lda;
+ }
+ itau = 1;
+ nwork = *m + 1;
+
+/*
+ Compute A=L*Q.
+ (Workspace: need 2*M, prefer M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &i__1,
+ info);
+ il = nwork;
+
+/* Copy L to WORK(IL), zeroing out above its diagonal. */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwork);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ slaset_("U", &i__1, &i__2, &c_b320, &c_b320, &work[il + ldwork], &
+ ldwork);
+ ie = il + ldwork * *m;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IL).
+ (Workspace: need M*M+5*M, prefer M*M+4*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(m, m, &work[il], &ldwork, &s[1], &work[ie], &work[itauq],
+ &work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors of L.
+ (Workspace: need M*M+4*M+NRHS, prefer M*M+4*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "T", m, nrhs, m, &work[il], &ldwork, &work[
+ itauq], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ slalsd_("U", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of L. */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "L", "N", m, nrhs, m, &work[il], &ldwork, &work[
+ itaup], &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Zero out below first M rows of B. */
+
+ i__1 = *n - *m;
+ slaset_("F", &i__1, nrhs, &c_b320, &c_b320, &b[*m + 1 + b_dim1],
+ ldb);
+ nwork = itau + *m;
+
+/*
+ Multiply transpose(Q) by B.
+ (Workspace: need M+NRHS, prefer M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormlq_("L", "T", n, nrhs, m, &a[a_offset], lda, &work[itau], &b[
+ b_offset], ldb, &work[nwork], &i__1, info);
+
+ } else {
+
+/* Path 2 - remaining underdetermined cases. */
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize A.
+ (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__1, info);
+
+/*
+ Multiply B by transpose of left bidiagonalizing vectors.
+ (Workspace: need 3*M+NRHS, prefer 3*M+NRHS*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "T", m, nrhs, n, &a[a_offset], lda, &work[itauq]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+/* Solve the bidiagonal least squares problem. */
+
+ slalsd_("L", &smlsiz, m, nrhs, &s[1], &work[ie], &b[b_offset],
+ ldb, rcond, rank, &work[nwork], &iwork[1], info);
+ if (*info != 0) {
+ goto L10;
+ }
+
+/* Multiply B by right bidiagonalizing vectors of A. */
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "L", "N", n, nrhs, m, &a[a_offset], lda, &work[itaup]
+ , &b[b_offset], ldb, &work[nwork], &i__1, info);
+
+ }
+ }
+
+/* Undo scaling. */
+
+ if (iascl == 1) {
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ } else if (iascl == 2) {
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, n, nrhs, &b[b_offset], ldb,
+ info);
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, info);
+ }
+ if (ibscl == 1) {
+ slascl_("G", &c__0, &c__0, &smlnum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ } else if (ibscl == 2) {
+ slascl_("G", &c__0, &c__0, &bignum, &bnrm, n, nrhs, &b[b_offset], ldb,
+ info);
+ }
+
+L10:
+ work[1] = (real) maxwrk;
+ return 0;
+
+/* End of SGELSD */
+
+} /* sgelsd_ */
+
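+/*
+   Editor's note: a minimal usage sketch (not compiled) of the SGELSD driver
+   documented above: solve minimize 2-norm(| b - A*x |) with a workspace
+   query first.  The dimensions, the RCOND choice, the SMLSIZ = 25 guess
+   used to bound LIWORK (the documentation says ILAENV usually returns
+   about 25), and the malloc handling are illustrative assumptions only.
+*/
+#if 0
+#include <stdlib.h>
+#include <math.h>
+
+static void example_sgelsd(void)
+{
+    integer m = 5, n = 3, nrhs = 1, lda = 5, ldb = 5;
+    integer rank = 0, info = 0, lwork = -1, minmn = 3, nlvl, liwork;
+    real a[5 * 3];     /* column-major M-by-N matrix, filled by the caller */
+    real b[5];         /* right hand side; overwritten with the solution x */
+    real s[3];         /* singular values of A, in decreasing order */
+    real rcond = -1.f; /* negative: use machine precision as the cutoff */
+    real query;
+    real *work;
+    integer *iwork;
+
+    /* LIWORK >= 3*MINMN*NLVL + 11*MINMN, with SMLSIZ assumed to be 25. */
+    nlvl = (integer) (log((real) minmn / 26.f) / log(2.f)) + 1;
+    if (nlvl < 0) nlvl = 0;
+    liwork = 3 * minmn * nlvl + 11 * minmn;
+    iwork = (integer *) malloc(sizeof(integer) * liwork);
+
+    /* Workspace query: LWORK = -1 returns the optimal size in WORK(1). */
+    sgelsd_(&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank,
+            &query, &lwork, iwork, &info);
+
+    lwork = (integer) query;
+    work = (real *) malloc(sizeof(real) * lwork);
+
+    /* Actual solve; on exit b holds x and rank holds the effective rank. */
+    sgelsd_(&m, &n, &nrhs, a, &lda, b, &ldb, s, &rcond, &rank,
+            work, &lwork, iwork, &info);
+
+    free(work);
+    free(iwork);
+}
+#endif
+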
+/* Subroutine */ int sgeqr2_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, k;
+ static real aii;
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *), slarfg_(integer *, real *, real *,
+ integer *, real *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SGEQR2 computes a QR factorization of a real m by n matrix A:
+ A = Q * R.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the m by n matrix A.
+ On exit, the elements on and above the diagonal of the array
+ contain the min(m,n) by n upper trapezoidal matrix R (R is
+ upper triangular if m >= n); the elements below the diagonal,
+ with the array TAU, represent the orthogonal matrix Q as a
+ product of elementary reflectors (see Further Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) REAL array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace) REAL array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEQR2", &i__1);
+ return 0;
+ }
+
+ k = min(*m,*n);
+
+ i__1 = k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ * a_dim1]
+ , &c__1, &tau[i__]);
+ if (i__ < *n) {
+
+/* Apply H(i) to A(i:m,i+1:n) from the left */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.f;
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ slarf_("Left", &i__2, &i__3, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of SGEQR2 */
+
+} /* sgeqr2_ */
+
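+/*
+   Editor's note: a minimal usage sketch (not compiled) of the unblocked
+   SGEQR2 documented above.  Unlike the blocked SGEQRF below, there is no
+   workspace query: WORK is a plain length-N scratch array.  The dimensions
+   and the caller-filled contents of `a` are illustrative assumptions only.
+*/
+#if 0
+static void example_sgeqr2(void)
+{
+    integer m = 4, n = 3, lda = 4, info = 0;
+    real a[4 * 3];     /* column-major M-by-N matrix, filled by the caller */
+    real tau[3];       /* min(M,N) scalar factors of the reflectors */
+    real work[3];      /* scratch space of length N */
+
+    sgeqr2_(&m, &n, a, &lda, tau, work, &info);
+    /* On exit: R sits on and above the diagonal of a; the reflectors that
+       represent Q are stored below it, with their scalar factors in tau. */
+}
+#endif
+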
+/* Subroutine */ int sgeqrf_(integer *m, integer *n, real *a, integer *lda,
+ real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, k, ib, nb, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sgeqr2_(integer *, integer *, real *, integer
+ *, real *, real *, integer *), slarfb_(char *, char *, char *,
+ char *, integer *, integer *, integer *, real *, integer *, real *
+ , integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SGEQRF computes a QR factorization of a real M-by-N matrix A:
+ A = Q * R.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit, the elements on and above the diagonal of the array
+ contain the min(M,N)-by-N upper trapezoidal matrix R (R is
+ upper triangular if m >= n); the elements below the diagonal,
+ with the array TAU, represent the orthogonal matrix Q as a
+ product of min(m,n) elementary reflectors (see Further
+ Details).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (output) REAL array, dimension (min(M,N))
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of elementary reflectors
+
+ Q = H(1) H(2) . . . H(k), where k = min(m,n).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
+ and tau in TAU(i).
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "SGEQRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ lwkopt = *n * nb;
+ work[1] = (real) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGEQRF", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ k = min(*m,*n);
+ if (k == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SGEQRF", " ", m, n, &c_n1, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SGEQRF", " ", m, n, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < k && nx < k) {
+
+/* Use blocked code initially */
+
+ i__1 = k - nx;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = k - i__ + 1;
+ ib = min(i__3,nb);
+
+/*
+ Compute the QR factorization of the current block
+ A(i:m,i:i+ib-1)
+*/
+
+ i__3 = *m - i__ + 1;
+ sgeqr2_(&i__3, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[
+ 1], &iinfo);
+ if (i__ + ib <= *n) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__3 = *m - i__ + 1;
+ slarft_("Forward", "Columnwise", &i__3, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i:m,i+ib:n) from the left */
+
+ i__3 = *m - i__ + 1;
+ i__4 = *n - i__ - ib + 1;
+ slarfb_("Left", "Transpose", "Forward", "Columnwise", &i__3, &
+ i__4, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &work[ib
+ + 1], &ldwork);
+ }
+/* L10: */
+ }
+ } else {
+ i__ = 1;
+ }
+
+/* Use unblocked code to factor the last or only block. */
+
+ if (i__ <= k) {
+ i__2 = *m - i__ + 1;
+ i__1 = *n - i__ + 1;
+ sgeqr2_(&i__2, &i__1, &a[i__ + i__ * a_dim1], lda, &tau[i__], &work[1]
+ , &iinfo);
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SGEQRF */
+
+} /* sgeqrf_ */
+
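+/*
+   Editor's note: a minimal sketch (not compiled) of the same LWORK = -1
+   query protocol for SGEQRF; it mirrors the SGELQF example above except
+   that the workspace scales with N rather than M.  Dimensions and error
+   handling are illustrative assumptions only.
+*/
+#if 0
+#include <stdlib.h>
+
+static void example_sgeqrf(void)
+{
+    integer m = 6, n = 4, lda = 6, lwork = -1, info = 0;
+    real a[6 * 4];     /* column-major M-by-N matrix, filled by the caller */
+    real tau[4];       /* min(M,N) scalar factors of the reflectors */
+    real query;
+    real *work;
+
+    sgeqrf_(&m, &n, a, &lda, tau, &query, &lwork, &info);  /* query */
+    lwork = (integer) query;
+    work = (real *) malloc(sizeof(real) * lwork);
+    sgeqrf_(&m, &n, a, &lda, tau, work, &lwork, &info);    /* factorize */
+    free(work);
+}
+#endif
+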
+/* Subroutine */ int sgesdd_(char *jobz, integer *m, integer *n, real *a,
+ integer *lda, real *s, real *u, integer *ldu, real *vt, integer *ldvt,
+ real *work, integer *lwork, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2, i__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, ie, il, ir, iu, blk;
+ static real dum[1], eps;
+ static integer ivt, iscl;
+ static real anrm;
+ static integer idum[1], ierr, itau;
+ extern logical lsame_(char *, char *);
+ static integer chunk;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer minmn, wrkbl, itaup, itauq, mnthr;
+ static logical wntqa;
+ static integer nwork;
+ static logical wntqn, wntqo, wntqs;
+ static integer bdspac;
+ extern /* Subroutine */ int sbdsdc_(char *, char *, integer *, real *,
+ real *, real *, integer *, real *, integer *, real *, integer *,
+ real *, integer *, integer *), sgebrd_(integer *,
+ integer *, real *, integer *, real *, real *, real *, real *,
+ real *, integer *, integer *);
+ extern doublereal slamch_(char *), slange_(char *, integer *,
+ integer *, real *, integer *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static real bignum;
+ extern /* Subroutine */ int sgelqf_(integer *, integer *, real *, integer
+ *, real *, real *, integer *, integer *), slascl_(char *, integer
+ *, integer *, real *, real *, integer *, integer *, real *,
+ integer *, integer *), sgeqrf_(integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), slacpy_(char
+ *, integer *, integer *, real *, integer *, real *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *), sorgbr_(char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, integer *
+ );
+ static integer ldwrkl;
+ extern /* Subroutine */ int sormbr_(char *, char *, char *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+ , real *, integer *, integer *);
+ static integer ldwrkr, minwrk, ldwrku, maxwrk;
+ extern /* Subroutine */ int sorglq_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ static integer ldwkvt;
+ static real smlnum;
+ static logical wntqas;
+ extern /* Subroutine */ int sorgqr_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ static logical lquery;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SGESDD computes the singular value decomposition (SVD) of a real
+ M-by-N matrix A, optionally computing the left and right singular
+ vectors. If singular vectors are desired, it uses a
+ divide-and-conquer algorithm.
+
+ The SVD is written
+
+ A = U * SIGMA * transpose(V)
+
+ where SIGMA is an M-by-N matrix which is zero except for its
+ min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and
+ V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA
+ are the singular values of A; they are real and non-negative, and
+ are returned in descending order. The first min(m,n) columns of
+ U and V are the left and right singular vectors of A.
+
+ Note that the routine returns VT = V**T, not V.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ JOBZ (input) CHARACTER*1
+ Specifies options for computing all or part of the matrix U:
+ = 'A': all M columns of U and all N rows of V**T are
+ returned in the arrays U and VT;
+ = 'S': the first min(M,N) columns of U and the first
+ min(M,N) rows of V**T are returned in the arrays U
+ and VT;
+ = 'O': If M >= N, the first N columns of U are overwritten
+ on the array A and all rows of V**T are returned in
+ the array VT;
+ otherwise, all columns of U are returned in the
+ array U and the first M rows of V**T are overwritten
+ in the array VT;
+ = 'N': no columns of U or rows of V**T are computed.
+
+ M (input) INTEGER
+ The number of rows of the input matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the input matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the M-by-N matrix A.
+ On exit,
+ if JOBZ = 'O', A is overwritten with the first N columns
+ of U (the left singular vectors, stored
+ columnwise) if M >= N;
+ A is overwritten with the first M rows
+ of V**T (the right singular vectors, stored
+ rowwise) otherwise.
+ if JOBZ .ne. 'O', the contents of A are destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ S (output) REAL array, dimension (min(M,N))
+ The singular values of A, sorted so that S(i) >= S(i+1).
+
+ U (output) REAL array, dimension (LDU,UCOL)
+ UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N;
+ UCOL = min(M,N) if JOBZ = 'S'.
+ If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M
+ orthogonal matrix U;
+ if JOBZ = 'S', U contains the first min(M,N) columns of U
+ (the left singular vectors, stored columnwise);
+ if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= 1; if
+ JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M.
+
+ VT (output) REAL array, dimension (LDVT,N)
+ If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the
+ N-by-N orthogonal matrix V**T;
+ if JOBZ = 'S', VT contains the first min(M,N) rows of
+ V**T (the right singular vectors, stored rowwise);
+ if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= 1; if
+ JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N;
+ if JOBZ = 'S', LDVT >= min(M,N).
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK;
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= 1.
+ If JOBZ = 'N',
+ LWORK >= 3*min(M,N) + max(max(M,N),6*min(M,N)).
+ If JOBZ = 'O',
+ LWORK >= 3*min(M,N)*min(M,N) +
+ max(max(M,N),5*min(M,N)*min(M,N)+4*min(M,N)).
+ If JOBZ = 'S' or 'A'
+ LWORK >= 3*min(M,N)*min(M,N) +
+ max(max(M,N),4*min(M,N)*min(M,N)+4*min(M,N)).
+ For good performance, LWORK should generally be larger.
+ If LWORK < 0 but other input arguments are legal, WORK(1)
+ returns the optimal LWORK.
+
+ IWORK (workspace) INTEGER array, dimension (8*min(M,N))
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: SBDSDC did not converge, updating process failed.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
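+/*
+   Editor's note: a minimal usage sketch (kept under "#if 0", never
+   compiled, and so removed by the preprocessor even at this position in
+   the file) of the JOBZ = 'S' economy SVD documented above, using the
+   LWORK = -1 query.  Dimensions, the caller-filled contents of `a`, and
+   the malloc handling are illustrative assumptions only.
+*/
+#if 0
+#include <stdlib.h>
+
+static void example_sgesdd(void)
+{
+    integer m = 5, n = 3, lda = 5, ldu = 5, ldvt = 3;
+    integer lwork = -1, info = 0;
+    real a[5 * 3];     /* column-major M-by-N matrix, filled by the caller */
+    real s[3];         /* singular values, S(i) >= S(i+1) */
+    real u[5 * 3];     /* first min(M,N) left singular vectors */
+    real vt[3 * 3];    /* first min(M,N) rows of V**T */
+    integer iwork[8 * 3];   /* 8*min(M,N) integer scratch */
+    real query;
+    real *work;
+
+    /* Workspace query, then the actual decomposition A = U*SIGMA*V**T. */
+    sgesdd_("S", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,
+            &query, &lwork, iwork, &info);
+    lwork = (integer) query;
+    work = (real *) malloc(sizeof(real) * lwork);
+    sgesdd_("S", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt,
+            work, &lwork, iwork, &info);
+    free(work);
+}
+#endif
+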
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --s;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ minmn = min(*m,*n);
+ mnthr = (integer) (minmn * 11.f / 6.f);
+ wntqa = lsame_(jobz, "A");
+ wntqs = lsame_(jobz, "S");
+ wntqas = (wntqa) || (wntqs);
+ wntqo = lsame_(jobz, "O");
+ wntqn = lsame_(jobz, "N");
+ minwrk = 1;
+ maxwrk = 1;
+ lquery = *lwork == -1;
+
+ if (! ((((wntqa) || (wntqs)) || (wntqo)) || (wntqn))) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (((*ldu < 1) || (wntqas && *ldu < *m)) || (wntqo && *m < *n && *
+ ldu < *m)) {
+ *info = -8;
+ } else if ((((*ldvt < 1) || (wntqa && *ldvt < *n)) || (wntqs && *ldvt <
+ minmn)) || (wntqo && *m >= *n && *ldvt < *n)) {
+ *info = -10;
+ }
+
+/*
+ Compute workspace
+ (Note: Comments in the code beginning "Workspace:" describe the
+ minimal amount of workspace needed at that point in the code,
+ as well as the preferred amount for good performance.
+ NB refers to the optimal block size for the immediately
+ following subroutine, as returned by ILAENV.)
+*/
+
+ if (*info == 0 && *m > 0 && *n > 0) {
+ if (*m >= *n) {
+
+/* Compute space needed for SBDSDC */
+
+ if (wntqn) {
+ bdspac = *n * 7;
+ } else {
+ bdspac = *n * 3 * *n + ((*n) << (2));
+ }
+ if (*m >= mnthr) {
+ if (wntqn) {
+
+/* Path 1 (M much larger than N, JOBZ='N') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "SGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n;
+ maxwrk = max(i__1,i__2);
+ minwrk = bdspac + *n;
+ } else if (wntqo) {
+
+/* Path 2 (M much larger than N, JOBZ='O') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "SORGQR",
+ " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "SGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+ , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + ((*n) << (1)) * *n;
+ minwrk = bdspac + ((*n) << (1)) * *n + *n * 3;
+ } else if (wntqs) {
+
+/* Path 3 (M much larger than N, JOBZ='S') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *n * ilaenv_(&c__1, "SORGQR",
+ " ", m, n, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "SGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+ , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *n * *n;
+ minwrk = bdspac + *n * *n + *n * 3;
+ } else if (wntqa) {
+
+/* Path 4 (M much larger than N, JOBZ='A') */
+
+ wrkbl = *n + *n * ilaenv_(&c__1, "SGEQRF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n + *m * ilaenv_(&c__1, "SORGQR",
+ " ", m, m, n, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + ((*n) << (1)) * ilaenv_(&
+ c__1, "SGEBRD", " ", n, n, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+ , "QLN", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *n * *n;
+ minwrk = bdspac + *n * *n + *n * 3;
+ }
+ } else {
+
+/* Path 5 (M at least N, but not much larger) */
+
+ wrkbl = *n * 3 + (*m + *n) * ilaenv_(&c__1, "SGEBRD", " ", m,
+ n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ if (wntqn) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ } else if (wntqo) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+ , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *n;
+/* Computing MAX */
+ i__1 = *m, i__2 = *n * *n + bdspac;
+ minwrk = *n * 3 + max(i__1,i__2);
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+ , "QLN", m, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *n * 3 + *n * ilaenv_(&c__1, "SORMBR"
+ , "PRT", n, n, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = maxwrk, i__2 = bdspac + *n * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *n * 3 + max(*m,bdspac);
+ }
+ }
+ } else {
+
+/* Compute space needed for SBDSDC */
+
+ if (wntqn) {
+ bdspac = *m * 7;
+ } else {
+ bdspac = *m * 3 * *m + ((*m) << (2));
+ }
+ if (*n >= mnthr) {
+ if (wntqn) {
+
+/* Path 1t (N much larger than M, JOBZ='N') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "SGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m;
+ maxwrk = max(i__1,i__2);
+ minwrk = bdspac + *m;
+ } else if (wntqo) {
+
+/* Path 2t (N much larger than M, JOBZ='O') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "SORGLQ",
+ " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "SGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + ((*m) << (1)) * *m;
+ minwrk = bdspac + ((*m) << (1)) * *m + *m * 3;
+ } else if (wntqs) {
+
+/* Path 3t (N much larger than M, JOBZ='S') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *m * ilaenv_(&c__1, "SORGLQ",
+ " ", m, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "SGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *m;
+ minwrk = bdspac + *m * *m + *m * 3;
+ } else if (wntqa) {
+
+/* Path 4t (N much larger than M, JOBZ='A') */
+
+ wrkbl = *m + *m * ilaenv_(&c__1, "SGELQF", " ", m, n, &
+ c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m + *n * ilaenv_(&c__1, "SORGLQ",
+ " ", n, n, m, &c_n1, (ftnlen)6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + ((*m) << (1)) * ilaenv_(&
+ c__1, "SGEBRD", " ", m, m, &c_n1, &c_n1, (ftnlen)
+ 6, (ftnlen)1);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "QLN", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "PRT", m, m, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *m;
+ minwrk = bdspac + *m * *m + *m * 3;
+ }
+ } else {
+
+/* Path 5t (N greater than M, but not much larger) */
+
+ wrkbl = *m * 3 + (*m + *n) * ilaenv_(&c__1, "SGEBRD", " ", m,
+ n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1);
+ if (wntqn) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ } else if (wntqo) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ wrkbl = max(i__1,i__2);
+ maxwrk = wrkbl + *m * *n;
+/* Computing MAX */
+ i__1 = *n, i__2 = *m * *m + bdspac;
+ minwrk = *m * 3 + max(i__1,i__2);
+ } else if (wntqs) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "PRT", m, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ } else if (wntqa) {
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "QLN", m, m, n, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = *m * 3 + *m * ilaenv_(&c__1, "SORMBR"
+ , "PRT", n, n, m, &c_n1, (ftnlen)6, (ftnlen)3);
+ wrkbl = max(i__1,i__2);
+/* Computing MAX */
+ i__1 = wrkbl, i__2 = bdspac + *m * 3;
+ maxwrk = max(i__1,i__2);
+ minwrk = *m * 3 + max(*n,bdspac);
+ }
+ }
+ }
+ work[1] = (real) maxwrk;
+ }
+
+ if (*lwork < minwrk && ! lquery) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGESDD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ if (*lwork >= 1) {
+ work[1] = 1.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants */
+
+ eps = slamch_("P");
+ smlnum = sqrt(slamch_("S")) / eps;
+ bignum = 1.f / smlnum;
+
+/* Scale A if max element outside range [SMLNUM,BIGNUM] */
+
+ anrm = slange_("M", m, n, &a[a_offset], lda, dum);
+ iscl = 0;
+ if (anrm > 0.f && anrm < smlnum) {
+ iscl = 1;
+ slascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
+ ierr);
+ } else if (anrm > bignum) {
+ iscl = 1;
+ slascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
+ ierr);
+ }
+
+ if (*m >= *n) {
+
+/*
+ A has at least as many rows as columns. If A has sufficiently
+ more rows than columns, first reduce using the QR
+ decomposition (if sufficient workspace available)
+*/
+
+ if (*m >= mnthr) {
+
+ if (wntqn) {
+
+/*
+ Path 1 (M much larger than N, JOBZ='N')
+ No singular vectors to be computed
+*/
+
+ itau = 1;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (Workspace: need 2*N, prefer N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out below R */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ slaset_("L", &i__1, &i__2, &c_b320, &c_b320, &a[a_dim1 + 2],
+ lda);
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A
+ (Workspace: need 4*N, prefer 3*N+2*N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nwork = ie + *n;
+
+/*
+ Perform bidiagonal SVD, computing singular values only
+ (Workspace: need N+BDSPAC)
+*/
+
+ sbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/*
+ Path 2 (M much larger than N, JOBZ = 'O')
+ N left singular vectors to be overwritten on A and
+ N right singular vectors to be computed in VT
+*/
+
+ ir = 1;
+
+/* WORK(IR) is LDWRKR by N */
+
+ if (*lwork >= *lda * *n + *n * *n + *n * 3 + bdspac) {
+ ldwrkr = *lda;
+ } else {
+ ldwrkr = (*lwork - *n * *n - *n * 3 - bdspac) / *n;
+ }
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ slaset_("L", &i__1, &i__2, &c_b320, &c_b320, &work[ir + 1], &
+ ldwrkr);
+
+/*
+ Generate Q in A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in VT, copying result to WORK(IR)
+ (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/* WORK(IU) is N by N */
+
+ iu = nwork;
+ nwork = iu + *n * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in WORK(IU) and computing right
+ singular vectors of bidiagonal matrix in VT
+ (Workspace: need N+N*N+BDSPAC)
+*/
+
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite WORK(IU) by left singular vectors of R
+ and VT by right singular vectors of R
+ (Workspace: need 2*N*N+3*N, prefer 2*N*N+2*N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &work[iu], n, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+
+/*
+ Multiply Q in A by left singular vectors of R in
+ WORK(IU), storing result in WORK(IR) and copying to A
+ (Workspace: need 2*N*N, prefer N*N+M*N)
+*/
+
+ i__1 = *m;
+ i__2 = ldwrkr;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ sgemm_("N", "N", &chunk, n, n, &c_b1011, &a[i__ + a_dim1],
+ lda, &work[iu], n, &c_b320, &work[ir], &ldwrkr);
+ slacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L10: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Path 3 (M much larger than N, JOBZ='S')
+ N left singular vectors to be computed in U and
+ N right singular vectors to be computed in VT
+*/
+
+ ir = 1;
+
+/* WORK(IR) is N by N */
+
+ ldwrkr = *n;
+ itau = ir + ldwrkr * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy R to WORK(IR), zeroing out below it */
+
+ slacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ slaset_("L", &i__2, &i__1, &c_b320, &c_b320, &work[ir + 1], &
+ ldwrkr);
+
+/*
+ Generate Q in A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in WORK(IR)
+ (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need N+BDSPAC)
+*/
+
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of R and VT
+ by right singular vectors of R
+ (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply Q in A by left singular vectors of R in
+ WORK(IR), storing result in U
+ (Workspace: need N*N)
+*/
+
+ slacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr);
+ sgemm_("N", "N", m, n, n, &c_b1011, &a[a_offset], lda, &work[
+ ir], &ldwrkr, &c_b320, &u[u_offset], ldu);
+
+ } else if (wntqa) {
+
+/*
+ Path 4 (M much larger than N, JOBZ='A')
+ M left singular vectors to be computed in U and
+ N right singular vectors to be computed in VT
+*/
+
+ iu = 1;
+
+/* WORK(IU) is N by N */
+
+ ldwrku = *n;
+ itau = iu + ldwrku * *n;
+ nwork = itau + *n;
+
+/*
+ Compute A=Q*R, copying result to U
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+/*
+ Generate Q in U
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+ i__2 = *lwork - nwork + 1;
+ sorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork],
+ &i__2, &ierr);
+
+/* Produce R in A, zeroing out other entries */
+
+ i__2 = *n - 1;
+ i__1 = *n - 1;
+ slaset_("L", &i__2, &i__1, &c_b320, &c_b320, &a[a_dim1 + 2],
+ lda);
+ ie = itau;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize R in A
+ (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in WORK(IU) and computing right
+ singular vectors of bidiagonal matrix in VT
+ (Workspace: need N+N*N+BDSPAC)
+*/
+
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite WORK(IU) by left singular vectors of R and VT
+ by right singular vectors of R
+ (Workspace: need N*N+3*N, prefer N*N+2*N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply Q in U by left singular vectors of R in
+ WORK(IU), storing result in A
+ (Workspace: need N*N)
+*/
+
+ sgemm_("N", "N", m, n, n, &c_b1011, &u[u_offset], ldu, &work[
+ iu], &ldwrku, &c_b320, &a[a_offset], lda);
+
+/* Copy left singular vectors of A from A to U */
+
+ slacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
+
+ }
+
+ } else {
+
+/*
+ M .LT. MNTHR
+
+ Path 5 (M at least N, but not much larger)
+ Reduce to bidiagonal form without QR decomposition
+*/
+
+ ie = 1;
+ itauq = ie + *n;
+ itaup = itauq + *n;
+ nwork = itaup + *n;
+
+/*
+ Bidiagonalize A
+ (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/*
+ Perform bidiagonal SVD, only computing singular values
+ (Workspace: need N+BDSPAC)
+*/
+
+ sbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+ } else if (wntqo) {
+ iu = nwork;
+ if (*lwork >= *m * *n + *n * 3 + bdspac) {
+
+/* WORK( IU ) is M by N */
+
+ ldwrku = *m;
+ nwork = iu + ldwrku * *n;
+ slaset_("F", m, n, &c_b320, &c_b320, &work[iu], &ldwrku);
+ } else {
+
+/* WORK( IU ) is N by N */
+
+ ldwrku = *n;
+ nwork = iu + ldwrku * *n;
+
+/* WORK(IR) is LDWRKR by N */
+
+ ir = nwork;
+ ldwrkr = (*lwork - *n * *n - *n * 3) / *n;
+ }
+ nwork = iu + ldwrku * *n;
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in WORK(IU) and computing right
+ singular vectors of bidiagonal matrix in VT
+ (Workspace: need N+N*N+BDSPAC)
+*/
+
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], &ldwrku, &
+ vt[vt_offset], ldvt, dum, idum, &work[nwork], &iwork[
+ 1], info);
+
+/*
+ Overwrite VT by right singular vectors of A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+ if (*lwork >= *m * *n + *n * 3 + bdspac) {
+
+/*
+ Overwrite WORK(IU) by left singular vectors of A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
+ ierr);
+
+/* Copy left singular vectors of A from WORK(IU) to A */
+
+ slacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda);
+ } else {
+
+/*
+ Generate Q in A
+ (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Multiply Q in A by left singular vectors of
+ bidiagonal matrix in WORK(IU), storing result in
+ WORK(IR) and copying to A
+ (Workspace: need 2*N*N, prefer N*N+M*N)
+*/
+
+ i__2 = *m;
+ i__1 = ldwrkr;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *m - i__ + 1;
+ chunk = min(i__3,ldwrkr);
+ sgemm_("N", "N", &chunk, n, n, &c_b1011, &a[i__ +
+ a_dim1], lda, &work[iu], &ldwrku, &c_b320, &
+ work[ir], &ldwrkr);
+ slacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
+ a_dim1], lda);
+/* L20: */
+ }
+ }
+
+ } else if (wntqs) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need N+BDSPAC)
+*/
+
+ slaset_("F", m, n, &c_b320, &c_b320, &u[u_offset], ldu);
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need 3*N, prefer 2*N+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else if (wntqa) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need N+BDSPAC)
+*/
+
+ slaset_("F", m, m, &c_b320, &c_b320, &u[u_offset], ldu);
+ sbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Set the right corner of U to the identity matrix */
+
+ i__1 = *m - *n;
+ i__2 = *m - *n;
+ slaset_("F", &i__1, &i__2, &c_b320, &c_b1011, &u[*n + 1 + (*n
+ + 1) * u_dim1], ldu);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need N*N+2*N+M, prefer N*N+2*N+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ } else {
+
+/*
+ A has more columns than rows. If A has sufficiently more
+ columns than rows, first reduce using the LQ decomposition (if
+ sufficient workspace available)
+*/
+
+ if (*n >= mnthr) {
+
+ if (wntqn) {
+
+/*
+ Path 1t (N much larger than M, JOBZ='N')
+ No singular vectors to be computed
+*/
+
+ itau = 1;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (Workspace: need 2*M, prefer M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Zero out above L */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ slaset_("U", &i__1, &i__2, &c_b320, &c_b320, &a[((a_dim1) << (
+ 1)) + 1], lda);
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in A
+ (Workspace: need 4*M, prefer 3*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+ nwork = ie + *m;
+
+/*
+ Perform bidiagonal SVD, computing singular values only
+ (Workspace: need M+BDSPAC)
+*/
+
+ sbdsdc_("U", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+
+ } else if (wntqo) {
+
+/*
+ Path 2t (N much larger than M, JOBZ='O')
+ M right singular vectors to be overwritten on A and
+ M left singular vectors to be computed in U
+*/
+
+ ivt = 1;
+
+/* IVT is M by M */
+
+ il = ivt + *m * *m;
+ if (*lwork >= *m * *n + *m * *m + *m * 3 + bdspac) {
+
+/* WORK(IL) is M by N */
+
+ ldwrkl = *m;
+ chunk = *n;
+ } else {
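+
+/* WORK(IL) is M by M */
+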
+ ldwrkl = *m;
+ chunk = (*lwork - *m * *m) / *m;
+ }
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__1, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ slaset_("U", &i__1, &i__2, &c_b320, &c_b320, &work[il +
+ ldwrkl], &ldwrkl);
+
+/*
+ Generate Q in A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__1, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IL)
+ (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__1, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U, and computing right singular
+ vectors of bidiagonal matrix in WORK(IVT)
+ (Workspace: need M+M*M+BDSPAC)
+*/
+
+ sbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], m, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of L and WORK(IVT)
+ by right singular vectors of L
+ (Workspace: need 2*M*M+3*M, prefer 2*M*M+2*M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &work[ivt], m, &work[nwork], &i__1, &ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IVT) by Q
+ in A, storing result in WORK(IL) and copying to A
+ (Workspace: need 2*M*M, prefer M*M+M*N)
+*/
+
+ i__1 = *n;
+ i__2 = chunk;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ sgemm_("N", "N", m, &blk, m, &c_b1011, &work[ivt], m, &a[
+ i__ * a_dim1 + 1], lda, &c_b320, &work[il], &
+ ldwrkl);
+ slacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1
+ + 1], lda);
+/* L30: */
+ }
+
+ } else if (wntqs) {
+
+/*
+ Path 3t (N much larger than M, JOBZ='S')
+ M right singular vectors to be computed in VT and
+ M left singular vectors to be computed in U
+*/
+
+ il = 1;
+
+/* WORK(IL) is M by M */
+
+ ldwrkl = *m;
+ itau = il + ldwrkl * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+
+/* Copy L to WORK(IL), zeroing out above it */
+
+ slacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ slaset_("U", &i__2, &i__1, &c_b320, &c_b320, &work[il +
+ ldwrkl], &ldwrkl);
+
+/*
+ Generate Q in A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
+ &i__2, &ierr);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in WORK(IL)
+ (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need M+BDSPAC)
+*/
+
+ sbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of L and VT
+ by right singular vectors of L
+ (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IL) by
+ Q in A, storing result in VT
+ (Workspace: need M*M)
+*/
+
+ slacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl);
+ sgemm_("N", "N", m, n, m, &c_b1011, &work[il], &ldwrkl, &a[
+ a_offset], lda, &c_b320, &vt[vt_offset], ldvt);
+
+ } else if (wntqa) {
+
+/*
+ Path 4t (N much larger than M, JOBZ='A')
+ N right singular vectors to be computed in VT and
+ M left singular vectors to be computed in U
+*/
+
+ ivt = 1;
+
+/* WORK(IVT) is M by M */
+
+ ldwkvt = *m;
+ itau = ivt + ldwkvt * *m;
+ nwork = itau + *m;
+
+/*
+ Compute A=L*Q, copying result to VT
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
+ i__2, &ierr);
+ slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+/*
+ Generate Q in VT
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[
+ nwork], &i__2, &ierr);
+
+/* Produce L in A, zeroing out other entries */
+
+ i__2 = *m - 1;
+ i__1 = *m - 1;
+ slaset_("U", &i__2, &i__1, &c_b320, &c_b320, &a[((a_dim1) << (
+ 1)) + 1], lda);
+ ie = itau;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize L in A
+ (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
+ itauq], &work[itaup], &work[nwork], &i__2, &ierr);
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in WORK(IVT)
+ (Workspace: need M+M*M+BDSPAC)
+*/
+
+ sbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
+ , info);
+
+/*
+ Overwrite U by left singular vectors of L and WORK(IVT)
+ by right singular vectors of L
+ (Workspace: need M*M+3*M, prefer M*M+2*M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", m, m, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &
+ ierr);
+
+/*
+ Multiply right singular vectors of L in WORK(IVT) by
+ Q in VT, storing result in A
+ (Workspace: need M*M)
+*/
+
+ sgemm_("N", "N", m, n, m, &c_b1011, &work[ivt], &ldwkvt, &vt[
+ vt_offset], ldvt, &c_b320, &a[a_offset], lda);
+
+/* Copy right singular vectors of A from A to VT */
+
+ slacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
+
+ }
+
+ } else {
+
+/*
+ N .LT. MNTHR
+
+ Path 5t (N greater than M, but not much larger)
+ Reduce to bidiagonal form without LQ decomposition
+*/
+
+ ie = 1;
+ itauq = ie + *m;
+ itaup = itauq + *m;
+ nwork = itaup + *m;
+
+/*
+ Bidiagonalize A
+ (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
+ work[itaup], &work[nwork], &i__2, &ierr);
+ if (wntqn) {
+
+/*
+ Perform bidiagonal SVD, only computing singular values
+ (Workspace: need M+BDSPAC)
+*/
+
+ sbdsdc_("L", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
+ dum, idum, &work[nwork], &iwork[1], info);
+ } else if (wntqo) {
+ ldwkvt = *m;
+ ivt = nwork;
+ if (*lwork >= *m * *n + *m * 3 + bdspac) {
+
+/* WORK( IVT ) is M by N */
+
+ slaset_("F", m, n, &c_b320, &c_b320, &work[ivt], &ldwkvt);
+ nwork = ivt + ldwkvt * *n;
+ } else {
+
+/* WORK( IVT ) is M by M */
+
+ nwork = ivt + ldwkvt * *m;
+ il = nwork;
+
+/* WORK(IL) is M by CHUNK */
+
+ chunk = (*lwork - *m * *m - *m * 3) / *m;
+ }
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in WORK(IVT)
+ (Workspace: need M*M+BDSPAC)
+*/
+
+ sbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
+ work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
+ , info);
+
+/*
+ Overwrite U by left singular vectors of A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
+
+ if (*lwork >= *m * *n + *m * 3 + bdspac) {
+
+/*
+ Overwrite WORK(IVT) by right singular vectors of A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
+ itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2,
+ &ierr);
+
+/* Copy right singular vectors of A from WORK(IVT) to A */
+
+ slacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda);
+ } else {
+
+/*
+ Generate P**T in A
+ (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
+*/
+
+ i__2 = *lwork - nwork + 1;
+ sorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
+ work[nwork], &i__2, &ierr);
+
+/*
+ Multiply Q in A by right singular vectors of
+ bidiagonal matrix in WORK(IVT), storing result in
+ WORK(IL) and copying to A
+ (Workspace: need 2*M*M, prefer M*M+M*N)
+*/
+
+ i__2 = *n;
+ i__1 = chunk;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
+ i__1) {
+/* Computing MIN */
+ i__3 = *n - i__ + 1;
+ blk = min(i__3,chunk);
+ sgemm_("N", "N", m, &blk, m, &c_b1011, &work[ivt], &
+ ldwkvt, &a[i__ * a_dim1 + 1], lda, &c_b320, &
+ work[il], m);
+ slacpy_("F", m, &blk, &work[il], m, &a[i__ * a_dim1 +
+ 1], lda);
+/* L40: */
+ }
+ }
+ } else if (wntqs) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need M+BDSPAC)
+*/
+
+ slaset_("F", m, n, &c_b320, &c_b320, &vt[vt_offset], ldvt);
+ sbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need 3*M, prefer 2*M+M*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ } else if (wntqa) {
+
+/*
+ Perform bidiagonal SVD, computing left singular vectors
+ of bidiagonal matrix in U and computing right singular
+ vectors of bidiagonal matrix in VT
+ (Workspace: need M+BDSPAC)
+*/
+
+ slaset_("F", n, n, &c_b320, &c_b320, &vt[vt_offset], ldvt);
+ sbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
+ vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
+ info);
+
+/* Set the right corner of VT to the identity matrix */
+
+ i__1 = *n - *m;
+ i__2 = *n - *m;
+ slaset_("F", &i__1, &i__2, &c_b320, &c_b1011, &vt[*m + 1 + (*
+ m + 1) * vt_dim1], ldvt);
+
+/*
+ Overwrite U by left singular vectors of A and VT
+ by right singular vectors of A
+ (Workspace: need 2*M+N, prefer 2*M+N*NB)
+*/
+
+ i__1 = *lwork - nwork + 1;
+ sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
+ itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
+ i__1 = *lwork - nwork + 1;
+ sormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
+ itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
+ ierr);
+ }
+
+ }
+
+ }
+
+/* Undo scaling if necessary */
+
+ if (iscl == 1) {
+ if (anrm > bignum) {
+ slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ if (anrm < smlnum) {
+ slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
+ minmn, &ierr);
+ }
+ }
+
+/* Return optimal workspace in WORK(1) */
+
+ work[1] = (real) maxwrk;
+
+ return 0;
+
+/* End of SGESDD */
+
+} /* sgesdd_ */
+
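+/*
+    Editor's note -- illustrative sketch only, not part of the f2c
+    output above.  It shows the usual two-call pattern for sgesdd_:
+    a workspace query with LWORK = -1 followed by the real call.
+    The function name and the fixed-size buffers are hypothetical;
+    column-major storage and the f2c "integer"/"real" typedefs used
+    throughout this file are assumed.  Guarded out of the build.
+*/
+#if 0
+static void example_sgesdd(void)
+{
+    /* A = [1 3; 2 4] stored by columns. */
+    real a[4] = {1.f, 2.f, 3.f, 4.f};
+    real s[2], u[4], vt[4], work[256];
+    integer m = 2, n = 2, lda = 2, ldu = 2, ldvt = 2;
+    integer iwork[16];            /* 8*min(M,N) */
+    integer lwork = -1, info;
+
+    /* Workspace query: the optimal LWORK comes back in work[0]. */
+    sgesdd_("A", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work,
+            &lwork, iwork, &info);
+    lwork = (integer) work[0];    /* <= 256 for this tiny case */
+
+    sgesdd_("A", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work,
+            &lwork, iwork, &info);
+    /* info == 0: s holds the singular values in decreasing order,
+       u and vt the left and right singular vectors. */
+}
+#endif
+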
+/* Subroutine */ int sgesv_(integer *n, integer *nrhs, real *a, integer *lda,
+ integer *ipiv, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int xerbla_(char *, integer *), sgetrf_(
+ integer *, integer *, real *, integer *, integer *, integer *),
+ sgetrs_(char *, integer *, integer *, real *, integer *, integer *
+ , real *, integer *, integer *);
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ SGESV computes the solution to a real system of linear equations
+ A * X = B,
+ where A is an N-by-N matrix and X and B are N-by-NRHS matrices.
+
+ The LU decomposition with partial pivoting and row interchanges is
+ used to factor A as
+ A = P * L * U,
+ where P is a permutation matrix, L is unit lower triangular, and U is
+ upper triangular. The factored form of A is then used to solve the
+ system of equations A * X = B.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of linear equations, i.e., the order of the
+ matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrix B. NRHS >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the N-by-N coefficient matrix A.
+ On exit, the factors L and U from the factorization
+ A = P*L*U; the unit diagonal elements of L are not stored.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ IPIV (output) INTEGER array, dimension (N)
+ The pivot indices that define the permutation matrix P;
+ row i of the matrix was interchanged with row IPIV(i).
+
+ B (input/output) REAL array, dimension (LDB,NRHS)
+ On entry, the N-by-NRHS right hand side matrix B.
+ On exit, if INFO = 0, the N-by-NRHS solution matrix X.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, U(i,i) is exactly zero. The factorization
+ has been completed, but the factor U is exactly
+ singular, so the solution could not be computed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -1;
+ } else if (*nrhs < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGESV ", &i__1);
+ return 0;
+ }
+
+/* Compute the LU factorization of A. */
+
+ sgetrf_(n, n, &a[a_offset], lda, &ipiv[1], info);
+ if (*info == 0) {
+
+/* Solve the system A*X = B, overwriting B with X. */
+
+ sgetrs_("No transpose", n, nrhs, &a[a_offset], lda, &ipiv[1], &b[
+ b_offset], ldb, info);
+ }
+ return 0;
+
+/* End of SGESV */
+
+} /* sgesv_ */
+
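+/*
+    Editor's note -- illustrative sketch only, not part of the f2c
+    output above.  A minimal call of the sgesv_ driver on a 2x2
+    system; the function name is hypothetical.  Arrays are
+    column-major and the pivot indices in IPIV are 1-based, matching
+    the Fortran origin of this code.  Guarded out of the build.
+*/
+#if 0
+static void example_sgesv(void)
+{
+    /* A = [3 1; 1 2] stored by columns, b = [9; 8]. */
+    real a[4] = {3.f, 1.f, 1.f, 2.f};
+    real b[2] = {9.f, 8.f};
+    integer n = 2, nrhs = 1, lda = 2, ldb = 2, ipiv[2], info;
+
+    sgesv_(&n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
+    /* info == 0: b now holds the solution x = [2; 3], a holds the
+       L and U factors, and ipiv records the row interchanges. */
+}
+#endif
+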
+/* Subroutine */ int sgetf2_(integer *m, integer *n, real *a, integer *lda,
+ integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Local variables */
+ static integer j, jp;
+ extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *), sscal_(integer *
+ , real *, real *, integer *), sswap_(integer *, real *, integer *,
+ real *, integer *), xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1992
+
+
+ Purpose
+ =======
+
+ SGETF2 computes an LU factorization of a general m-by-n matrix A
+ using partial pivoting with row interchanges.
+
+ The factorization has the form
+ A = P * L * U
+ where P is a permutation matrix, L is lower triangular with unit
+ diagonal elements (lower trapezoidal if m > n), and U is upper
+ triangular (upper trapezoidal if m < n).
+
+ This is the right-looking Level 2 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the m by n matrix to be factored.
+ On exit, the factors L and U from the factorization
+ A = P*L*U; the unit diagonal elements of L are not stored.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ IPIV (output) INTEGER array, dimension (min(M,N))
+ The pivot indices; for 1 <= i <= min(M,N), row i of the
+ matrix was interchanged with row IPIV(i).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+ > 0: if INFO = k, U(k,k) is exactly zero. The factorization
+ has been completed, but the factor U is exactly
+ singular, and division by zero will occur if it is used
+ to solve a system of equations.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGETF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+
+/* Find pivot and test for singularity. */
+
+ i__2 = *m - j + 1;
+ jp = j - 1 + isamax_(&i__2, &a[j + j * a_dim1], &c__1);
+ ipiv[j] = jp;
+ if (a[jp + j * a_dim1] != 0.f) {
+
+/* Apply the interchange to columns 1:N. */
+
+ if (jp != j) {
+ sswap_(n, &a[j + a_dim1], lda, &a[jp + a_dim1], lda);
+ }
+
+/* Compute elements J+1:M of J-th column. */
+
+ if (j < *m) {
+ i__2 = *m - j;
+ r__1 = 1.f / a[j + j * a_dim1];
+ sscal_(&i__2, &r__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+
+ } else if (*info == 0) {
+
+ *info = j;
+ }
+
+ if (j < min(*m,*n)) {
+
+/* Update trailing submatrix. */
+
+ i__2 = *m - j;
+ i__3 = *n - j;
+ sger_(&i__2, &i__3, &c_b1290, &a[j + 1 + j * a_dim1], &c__1, &a[j
+ + (j + 1) * a_dim1], lda, &a[j + 1 + (j + 1) * a_dim1],
+ lda);
+ }
+/* L10: */
+ }
+ return 0;
+
+/* End of SGETF2 */
+
+} /* sgetf2_ */
+
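+/*
+    Editor's note -- a worked instance of the elimination above, for
+    illustration only.  Take A = [1 3; 2 4].  In column 1, isamax_
+    picks the larger |a(i,1)|, so jp = 2, ipiv(1) = 2, and sswap_
+    exchanges rows 1 and 2, giving [2 4; 1 3].  sscal_ then divides
+    the subdiagonal entry by the pivot, storing the multiplier
+    l21 = 0.5 in a(2,1), and the sger_ rank-1 update leaves
+    u22 = 3 - 0.5*4 = 1.  The result packs L = [1 0; 0.5 1] and
+    U = [2 4; 0 1] into A, with P swapping rows 1 and 2 in A = P*L*U.
+*/
+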
+/* Subroutine */ int sgetrf_(integer *m, integer *n, real *a, integer *lda,
+ integer *ipiv, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+ /* Local variables */
+ static integer i__, j, jb, nb, iinfo;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *), strsm_(char *, char *, char *,
+ char *, integer *, integer *, real *, real *, integer *, real *,
+ integer *), sgetf2_(integer *,
+ integer *, real *, integer *, integer *, integer *), xerbla_(char
+ *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slaswp_(integer *, real *, integer *, integer
+ *, integer *, integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ SGETRF computes an LU factorization of a general M-by-N matrix A
+ using partial pivoting with row interchanges.
+
+ The factorization has the form
+ A = P * L * U
+ where P is a permutation matrix, L is lower triangular with unit
+ diagonal elements (lower trapezoidal if m > n), and U is upper
+ triangular (upper trapezoidal if m < n).
+
+ This is the right-looking Level 3 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the M-by-N matrix to be factored.
+ On exit, the factors L and U from the factorization
+ A = P*L*U; the unit diagonal elements of L are not stored.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ IPIV (output) INTEGER array, dimension (min(M,N))
+ The pivot indices; for 1 <= i <= min(M,N), row i of the
+ matrix was interchanged with row IPIV(i).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, U(i,i) is exactly zero. The factorization
+ has been completed, but the factor U is exactly
+ singular, and division by zero will occur if it is used
+ to solve a system of equations.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*m)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGETRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "SGETRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
+ 1);
+ if ((nb <= 1) || (nb >= min(*m,*n))) {
+
+/* Use unblocked code. */
+
+ sgetf2_(m, n, &a[a_offset], lda, &ipiv[1], info);
+ } else {
+
+/* Use blocked code. */
+
+ i__1 = min(*m,*n);
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__3 = min(*m,*n) - j + 1;
+ jb = min(i__3,nb);
+
+/*
+ Factor diagonal and subdiagonal blocks and test for exact
+ singularity.
+*/
+
+ i__3 = *m - j + 1;
+ sgetf2_(&i__3, &jb, &a[j + j * a_dim1], lda, &ipiv[j], &iinfo);
+
+/* Adjust INFO and the pivot indices. */
+
+ if (*info == 0 && iinfo > 0) {
+ *info = iinfo + j - 1;
+ }
+/* Computing MIN */
+ i__4 = *m, i__5 = j + jb - 1;
+ i__3 = min(i__4,i__5);
+ for (i__ = j; i__ <= i__3; ++i__) {
+ ipiv[i__] = j - 1 + ipiv[i__];
+/* L10: */
+ }
+
+/* Apply interchanges to columns 1:J-1. */
+
+ i__3 = j - 1;
+ i__4 = j + jb - 1;
+ slaswp_(&i__3, &a[a_offset], lda, &j, &i__4, &ipiv[1], &c__1);
+
+ if (j + jb <= *n) {
+
+/* Apply interchanges to columns J+JB:N. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j + jb - 1;
+ slaswp_(&i__3, &a[(j + jb) * a_dim1 + 1], lda, &j, &i__4, &
+ ipiv[1], &c__1);
+
+/* Compute block row of U. */
+
+ i__3 = *n - j - jb + 1;
+ strsm_("Left", "Lower", "No transpose", "Unit", &jb, &i__3, &
+ c_b1011, &a[j + j * a_dim1], lda, &a[j + (j + jb) *
+ a_dim1], lda);
+ if (j + jb <= *m) {
+
+/* Update trailing submatrix. */
+
+ i__3 = *m - j - jb + 1;
+ i__4 = *n - j - jb + 1;
+ sgemm_("No transpose", "No transpose", &i__3, &i__4, &jb,
+ &c_b1290, &a[j + jb + j * a_dim1], lda, &a[j + (j
+ + jb) * a_dim1], lda, &c_b1011, &a[j + jb + (j +
+ jb) * a_dim1], lda);
+ }
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of SGETRF */
+
+} /* sgetrf_ */
+
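+/*
+    Editor's note -- illustrative sketch only; the function name is
+    hypothetical.  Factor once with sgetrf_ so the L and U factors
+    (and the 1-based pivots in IPIV) can be reused for many solves,
+    e.g. via sgetrs_ below.  Guarded out of the build.
+*/
+#if 0
+static integer example_sgetrf(real *a, integer n, integer *ipiv)
+{
+    /* a: n-by-n, column-major, leading dimension n.  On return it
+       holds U and the strictly lower part of L (the unit diagonal
+       of L is not stored); ipiv needs min(m,n) = n entries. */
+    integer lda = n, info;
+
+    sgetrf_(&n, &n, a, &lda, ipiv, &info);
+    /* info > 0 means U(info,info) == 0: A is exactly singular and a
+       later solve would divide by zero. */
+    return info;
+}
+#endif
+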
+/* Subroutine */ int sgetrs_(char *trans, integer *n, integer *nrhs, real *a,
+ integer *lda, integer *ipiv, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+ ), xerbla_(char *, integer *);
+ static logical notran;
+ extern /* Subroutine */ int slaswp_(integer *, real *, integer *, integer
+ *, integer *, integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ SGETRS solves a system of linear equations
+ A * X = B or A' * X = B
+ with a general N-by-N matrix A using the LU factorization computed
+ by SGETRF.
+
+ Arguments
+ =========
+
+ TRANS (input) CHARACTER*1
+ Specifies the form of the system of equations:
+ = 'N': A * X = B (No transpose)
+ = 'T': A'* X = B (Transpose)
+ = 'C': A'* X = B (Conjugate transpose = Transpose)
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrix B. NRHS >= 0.
+
+ A (input) REAL array, dimension (LDA,N)
+ The factors L and U from the factorization A = P*L*U
+ as computed by SGETRF.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ IPIV (input) INTEGER array, dimension (N)
+ The pivot indices from SGETRF; for 1<=i<=N, row i of the
+ matrix was interchanged with row IPIV(i).
+
+ B (input/output) REAL array, dimension (LDB,NRHS)
+ On entry, the right hand side matrix B.
+ On exit, the solution matrix X.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ notran = lsame_(trans, "N");
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SGETRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*n == 0) || (*nrhs == 0)) {
+ return 0;
+ }
+
+ if (notran) {
+
+/*
+ Solve A * X = B.
+
+ Apply row interchanges to the right hand sides.
+*/
+
+ slaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c__1);
+
+/* Solve L*X = B, overwriting B with X. */
+
+ strsm_("Left", "Lower", "No transpose", "Unit", n, nrhs, &c_b1011, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ strsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b1011,
+ &a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/*
+ Solve A' * X = B.
+
+ Solve U'*X = B, overwriting B with X.
+*/
+
+ strsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b1011, &
+ a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ strsm_("Left", "Lower", "Transpose", "Unit", n, nrhs, &c_b1011, &a[
+ a_offset], lda, &b[b_offset], ldb);
+
+/* Apply row interchanges to the solution vectors. */
+
+ slaswp_(nrhs, &b[b_offset], ldb, &c__1, n, &ipiv[1], &c_n1);
+ }
+
+ return 0;
+
+/* End of SGETRS */
+
+} /* sgetrs_ */
+
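+/*
+    Editor's note -- illustrative sketch only; the function name is
+    hypothetical.  One sgetrf_ factorization (see the sketch above)
+    serves both the plain and the transposed system through the
+    TRANS argument of sgetrs_.  Guarded out of the build.
+*/
+#if 0
+static void example_sgetrs(real *a, integer n, integer *ipiv,
+                           real *b, integer nrhs)
+{
+    /* a/ipiv: factors and pivots from a prior sgetrf_ call;
+       b: n-by-nrhs right hand sides, overwritten with the solution. */
+    integer lda = n, ldb = n, info;
+
+    /* Solve A * X = B. */
+    sgetrs_("N", &n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
+
+    /* The same factors also solve A' * X = B. */
+    sgetrs_("T", &n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
+}
+#endif
+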
+/* Subroutine */ int shseqr_(char *job, char *compz, integer *n, integer *ilo,
+ integer *ihi, real *h__, integer *ldh, real *wr, real *wi, real *z__,
+ integer *ldz, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ real r__1, r__2;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__, j, k, l;
+ static real s[225] /* was [15][15] */, v[16];
+ static integer i1, i2, ii, nh, nr, ns, nv;
+ static real vv[16];
+ static integer itn;
+ static real tau;
+ static integer its;
+ static real ulp, tst1;
+ static integer maxb;
+ static real absw;
+ static integer ierr;
+ static real unfl, temp, ovfl;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ static integer itemp;
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+ static logical initz, wantt;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ static logical wantz;
+ extern doublereal slapy2_(real *, real *);
+ extern /* Subroutine */ int slabad_(real *, real *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slarfg_(integer *, real *, real *, integer *,
+ real *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern doublereal slanhs_(char *, integer *, real *, integer *, real *);
+ extern /* Subroutine */ int slahqr_(logical *, logical *, integer *,
+ integer *, integer *, real *, integer *, real *, real *, integer *
+ , integer *, real *, integer *, integer *), slacpy_(char *,
+ integer *, integer *, real *, integer *, real *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *), slarfx_(char *, integer *, integer *,
+ real *, real *, real *, integer *, real *);
+ static real smlnum;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SHSEQR computes the eigenvalues of a real upper Hessenberg matrix H
+ and, optionally, the matrices T and Z from the Schur decomposition
+ H = Z T Z**T, where T is an upper quasi-triangular matrix (the Schur
+ form), and Z is the orthogonal matrix of Schur vectors.
+
+ Optionally Z may be postmultiplied into an input orthogonal matrix Q,
+ so that this routine can give the Schur factorization of a matrix A
+ which has been reduced to the Hessenberg form H by the orthogonal
+ matrix Q: A = Q*H*Q**T = (QZ)*T*(QZ)**T.
+
+ Arguments
+ =========
+
+ JOB (input) CHARACTER*1
+ = 'E': compute eigenvalues only;
+ = 'S': compute eigenvalues and the Schur form T.
+
+ COMPZ (input) CHARACTER*1
+ = 'N': no Schur vectors are computed;
+ = 'I': Z is initialized to the unit matrix and the matrix Z
+ of Schur vectors of H is returned;
+ = 'V': Z must contain an orthogonal matrix Q on entry, and
+ the product Q*Z is returned.
+
+ N (input) INTEGER
+ The order of the matrix H. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that H is already upper triangular in rows
+ and columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally
+ set by a previous call to SGEBAL, and then passed to SGEHRD
+ when the matrix output by SGEBAL is reduced to Hessenberg
+ form. Otherwise ILO and IHI should be set to 1 and N
+ respectively.
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ H (input/output) REAL array, dimension (LDH,N)
+ On entry, the upper Hessenberg matrix H.
+ On exit, if JOB = 'S', H contains the upper quasi-triangular
+ matrix T from the Schur decomposition (the Schur form);
+ 2-by-2 diagonal blocks (corresponding to complex conjugate
+ pairs of eigenvalues) are returned in standard form, with
+ H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. If JOB = 'E',
+ the contents of H are unspecified on exit.
+
+ LDH (input) INTEGER
+ The leading dimension of the array H. LDH >= max(1,N).
+
+ WR (output) REAL array, dimension (N)
+ WI (output) REAL array, dimension (N)
+ The real and imaginary parts, respectively, of the computed
+ eigenvalues. If two eigenvalues are computed as a complex
+ conjugate pair, they are stored in consecutive elements of
+ WR and WI, say the i-th and (i+1)th, with WI(i) > 0 and
+ WI(i+1) < 0. If JOB = 'S', the eigenvalues are stored in the
+ same order as on the diagonal of the Schur form returned in
+ H, with WR(i) = H(i,i) and, if H(i:i+1,i:i+1) is a 2-by-2
+ diagonal block, WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and
+ WI(i+1) = -WI(i).
+
+ Z (input/output) REAL array, dimension (LDZ,N)
+ If COMPZ = 'N': Z is not referenced.
+ If COMPZ = 'I': on entry, Z need not be set, and on exit, Z
+ contains the orthogonal matrix Z of the Schur vectors of H.
+ If COMPZ = 'V': on entry Z must contain an N-by-N matrix Q,
+ which is assumed to be equal to the unit matrix except for
+ the submatrix Z(ILO:IHI,ILO:IHI); on exit Z contains Q*Z.
+ Normally Q is the orthogonal matrix generated by SORGHR after
+ the call to SGEHRD which formed the Hessenberg matrix H.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z.
+ LDZ >= max(1,N) if COMPZ = 'I' or 'V'; LDZ >= 1 otherwise.
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, SHSEQR failed to compute all of the
+ eigenvalues in a total of 30*(IHI-ILO+1) iterations;
+ elements 1:ilo-1 and i+1:n of WR and WI contain those
+ eigenvalues which have been successfully computed.
+
+ =====================================================================
+
+
+ Decode and test the input parameters
+*/
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ wantt = lsame_(job, "S");
+ initz = lsame_(compz, "I");
+ wantz = (initz) || (lsame_(compz, "V"));
+
+ *info = 0;
+ work[1] = (real) max(1,*n);
+ lquery = *lwork == -1;
+ if (! lsame_(job, "E") && ! wantt) {
+ *info = -1;
+ } else if (! lsame_(compz, "N") && ! wantz) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -4;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -5;
+ } else if (*ldh < max(1,*n)) {
+ *info = -7;
+ } else if ((*ldz < 1) || (wantz && *ldz < max(1,*n))) {
+ *info = -11;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -13;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SHSEQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Initialize Z, if necessary */
+
+ if (initz) {
+ slaset_("Full", n, n, &c_b320, &c_b1011, &z__[z_offset], ldz);
+ }
+
+/* Store the eigenvalues isolated by SGEBAL. */
+
+ i__1 = *ilo - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.f;
+/* L10: */
+ }
+ i__1 = *n;
+ for (i__ = *ihi + 1; i__ <= i__1; ++i__) {
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.f;
+/* L20: */
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ wr[*ilo] = h__[*ilo + *ilo * h_dim1];
+ wi[*ilo] = 0.f;
+ return 0;
+ }
+
+/*
+ Set rows and columns ILO to IHI to zero below the first
+ subdiagonal.
+*/
+
+ i__1 = *ihi - 2;
+ for (j = *ilo; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j + 2; i__ <= i__2; ++i__) {
+ h__[i__ + j * h_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ nh = *ihi - *ilo + 1;
+
+/*
+ Determine the order of the multi-shift QR algorithm to be used.
+
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = job;
+ i__3[1] = 1, a__1[1] = compz;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ ns = ilaenv_(&c__4, "SHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = job;
+ i__3[1] = 1, a__1[1] = compz;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ maxb = ilaenv_(&c__8, "SHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+ if (((ns <= 2) || (ns > nh)) || (maxb >= nh)) {
+
+/* Use the standard double-shift algorithm */
+
+ slahqr_(&wantt, &wantz, n, ilo, ihi, &h__[h_offset], ldh, &wr[1], &wi[
+ 1], ilo, ihi, &z__[z_offset], ldz, info);
+ return 0;
+ }
+ maxb = max(3,maxb);
+/* Computing MIN */
+ i__1 = min(ns,maxb);
+ ns = min(i__1,15);
+
+/*
+ Now 2 < NS <= MAXB < NH.
+
+ Set machine-dependent constants for the stopping criterion.
+ If norm(H) <= sqrt(OVFL), overflow should not occur.
+*/
+
+ unfl = slamch_("Safe minimum");
+ ovfl = 1.f / unfl;
+ slabad_(&unfl, &ovfl);
+ ulp = slamch_("Precision");
+ smlnum = unfl * (nh / ulp);
+
+/*
+ I1 and I2 are the indices of the first row and last column of H
+ to which transformations must be applied. If eigenvalues only are
+ being computed, I1 and I2 are set inside the main loop.
+*/
+
+ if (wantt) {
+ i1 = 1;
+ i2 = *n;
+ }
+
+/* ITN is the total number of multiple-shift QR iterations allowed. */
+
+ itn = nh * 30;
+
+/*
+ The main loop begins here. I is the loop index and decreases from
+ IHI to ILO in steps of at most MAXB. Each iteration of the loop
+ works with the active submatrix in rows and columns L to I.
+ Eigenvalues I+1 to IHI have already converged. Either L = ILO or
+ H(L,L-1) is negligible so that the matrix splits.
+*/
+
+ i__ = *ihi;
+L50:
+ l = *ilo;
+ if (i__ < *ilo) {
+ goto L170;
+ }
+
+/*
+ Perform multiple-shift QR iterations on rows and columns ILO to I
+ until a submatrix of order at most MAXB splits off at the bottom
+ because a subdiagonal element has become negligible.
+*/
+
+ i__1 = itn;
+ for (its = 0; its <= i__1; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__2 = l + 1;
+ for (k = i__; k >= i__2; --k) {
+ tst1 = (r__1 = h__[k - 1 + (k - 1) * h_dim1], dabs(r__1)) + (r__2
+ = h__[k + k * h_dim1], dabs(r__2));
+ if (tst1 == 0.f) {
+ i__4 = i__ - l + 1;
+ tst1 = slanhs_("1", &i__4, &h__[l + l * h_dim1], ldh, &work[1]
+ );
+ }
+/* Computing MAX */
+ r__2 = ulp * tst1;
+ if ((r__1 = h__[k + (k - 1) * h_dim1], dabs(r__1)) <= dmax(r__2,
+ smlnum)) {
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible. */
+
+ h__[l + (l - 1) * h_dim1] = 0.f;
+ }
+
+/* Exit from loop if a submatrix of order <= MAXB has split off. */
+
+ if (l >= i__ - maxb + 1) {
+ goto L160;
+ }
+
+/*
+ Now the active submatrix is in rows and columns L to I. If
+ eigenvalues only are being computed, only the active submatrix
+ need be transformed.
+*/
+
+ if (! wantt) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if ((its == 20) || (its == 30)) {
+
+/* Exceptional shifts. */
+
+ i__2 = i__;
+ for (ii = i__ - ns + 1; ii <= i__2; ++ii) {
+ wr[ii] = ((r__1 = h__[ii + (ii - 1) * h_dim1], dabs(r__1)) + (
+ r__2 = h__[ii + ii * h_dim1], dabs(r__2))) * 1.5f;
+ wi[ii] = 0.f;
+/* L80: */
+ }
+ } else {
+
+/* Use eigenvalues of trailing submatrix of order NS as shifts. */
+
+ slacpy_("Full", &ns, &ns, &h__[i__ - ns + 1 + (i__ - ns + 1) *
+ h_dim1], ldh, s, &c__15);
+ slahqr_(&c_false, &c_false, &ns, &c__1, &ns, s, &c__15, &wr[i__ -
+ ns + 1], &wi[i__ - ns + 1], &c__1, &ns, &z__[z_offset],
+ ldz, &ierr);
+ if (ierr > 0) {
+
+/*
+ If SLAHQR failed to compute all NS eigenvalues, use the
+ unconverged diagonal elements as the remaining shifts.
+*/
+
+ i__2 = ierr;
+ for (ii = 1; ii <= i__2; ++ii) {
+ wr[i__ - ns + ii] = s[ii + ii * 15 - 16];
+ wi[i__ - ns + ii] = 0.f;
+/* L90: */
+ }
+ }
+ }
+
+/*
+ Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns))
+ where G is the Hessenberg submatrix H(L:I,L:I) and w is
+ the vector of shifts (stored in WR and WI). The result is
+ stored in the local array V.
+*/
+
+ v[0] = 1.f;
+ i__2 = ns + 1;
+ for (ii = 2; ii <= i__2; ++ii) {
+ v[ii - 1] = 0.f;
+/* L100: */
+ }
+ nv = 1;
+ i__2 = i__;
+ for (j = i__ - ns + 1; j <= i__2; ++j) {
+ if (wi[j] >= 0.f) {
+ if (wi[j] == 0.f) {
+
+/* real shift */
+
+ i__4 = nv + 1;
+ scopy_(&i__4, v, &c__1, vv, &c__1);
+ i__4 = nv + 1;
+ r__1 = -wr[j];
+ sgemv_("No transpose", &i__4, &nv, &c_b1011, &h__[l + l *
+ h_dim1], ldh, vv, &c__1, &r__1, v, &c__1);
+ ++nv;
+ } else if (wi[j] > 0.f) {
+
+/* complex conjugate pair of shifts */
+
+ i__4 = nv + 1;
+ scopy_(&i__4, v, &c__1, vv, &c__1);
+ i__4 = nv + 1;
+ r__1 = wr[j] * -2.f;
+ sgemv_("No transpose", &i__4, &nv, &c_b1011, &h__[l + l *
+ h_dim1], ldh, v, &c__1, &r__1, vv, &c__1);
+ i__4 = nv + 1;
+ itemp = isamax_(&i__4, vv, &c__1);
+/* Computing MAX */
+ r__2 = (r__1 = vv[itemp - 1], dabs(r__1));
+ temp = 1.f / dmax(r__2,smlnum);
+ i__4 = nv + 1;
+ sscal_(&i__4, &temp, vv, &c__1);
+ absw = slapy2_(&wr[j], &wi[j]);
+ temp = temp * absw * absw;
+ i__4 = nv + 2;
+ i__5 = nv + 1;
+ sgemv_("No transpose", &i__4, &i__5, &c_b1011, &h__[l + l
+ * h_dim1], ldh, vv, &c__1, &temp, v, &c__1);
+ nv += 2;
+ }
+
+/*
+ Scale V(1:NV) so that max(abs(V(i))) = 1. If V is zero,
+ reset it to the unit vector.
+*/
+
+ itemp = isamax_(&nv, v, &c__1);
+ temp = (r__1 = v[itemp - 1], dabs(r__1));
+ if (temp == 0.f) {
+ v[0] = 1.f;
+ i__4 = nv;
+ for (ii = 2; ii <= i__4; ++ii) {
+ v[ii - 1] = 0.f;
+/* L110: */
+ }
+ } else {
+ temp = dmax(temp,smlnum);
+ r__1 = 1.f / temp;
+ sscal_(&nv, &r__1, v, &c__1);
+ }
+ }
+/* L120: */
+ }
+
+/* Multiple-shift QR step */
+
+ i__2 = i__ - 1;
+ for (k = l; k <= i__2; ++k) {
+
+/*
+ The first iteration of this loop determines a reflection G
+ from the vector V and applies it from left and right to H,
+ thus creating a nonzero bulge below the subdiagonal.
+
+ Each subsequent iteration determines a reflection G to
+ restore the Hessenberg form in the (K-1)th column, and thus
+ chases the bulge one step toward the bottom of the active
+ submatrix. NR is the order of G.
+
+ Computing MIN
+*/
+ i__4 = ns + 1, i__5 = i__ - k + 1;
+ nr = min(i__4,i__5);
+ if (k > l) {
+ scopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ slarfg_(&nr, v, &v[1], &c__1, &tau);
+ if (k > l) {
+ h__[k + (k - 1) * h_dim1] = v[0];
+ i__4 = i__;
+ for (ii = k + 1; ii <= i__4; ++ii) {
+ h__[ii + (k - 1) * h_dim1] = 0.f;
+/* L130: */
+ }
+ }
+ v[0] = 1.f;
+
+/*
+ Apply G from the left to transform the rows of the matrix in
+ columns K to I2.
+*/
+
+ i__4 = i2 - k + 1;
+ slarfx_("Left", &nr, &i__4, v, &tau, &h__[k + k * h_dim1], ldh, &
+ work[1]);
+
+/*
+ Apply G from the right to transform the columns of the
+ matrix in rows I1 to min(K+NR,I).
+
+ Computing MIN
+*/
+ i__5 = k + nr;
+ i__4 = min(i__5,i__) - i1 + 1;
+ slarfx_("Right", &i__4, &nr, v, &tau, &h__[i1 + k * h_dim1], ldh,
+ &work[1]);
+
+ if (wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ slarfx_("Right", &nh, &nr, v, &tau, &z__[*ilo + k * z_dim1],
+ ldz, &work[1]);
+ }
+/* L140: */
+ }
+
+/* L150: */
+ }
+
+/* Failure to converge in remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L160:
+
+/*
+ A submatrix of order <= MAXB in rows and columns L to I has split
+ off. Use the double-shift QR algorithm to handle it.
+*/
+
+ slahqr_(&wantt, &wantz, n, &l, &i__, &h__[h_offset], ldh, &wr[1], &wi[1],
+ ilo, ihi, &z__[z_offset], ldz, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+/*
+ Decrement number of remaining iterations, and return to start of
+ the main loop with a new value of I.
+*/
+
+ itn -= its;
+ i__ = l - 1;
+ goto L50;
+
+L170:
+ work[1] = (real) max(1,*n);
+ return 0;
+
+/* End of SHSEQR */
+
+} /* shseqr_ */
+
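+/*
+    Editor's note -- illustrative sketch only; the function name is
+    hypothetical.  Eigenvalues of a small upper Hessenberg matrix via
+    shseqr_ with JOB = 'E', COMPZ = 'N'.  In the full pipeline H and
+    Q would come from sgehrd_/sorghr_, after which JOB = 'S' and
+    COMPZ = 'V' yield the Schur factorization.  Guarded out of the
+    build.
+*/
+#if 0
+static void example_shseqr(void)
+{
+    /* H = [4 1; 2 3] (trivially Hessenberg), column-major; its
+       eigenvalues are 5 and 2. */
+    real h[4] = {4.f, 2.f, 1.f, 3.f};
+    real wr[2], wi[2], zdum[1], work[8];
+    integer n = 2, ilo = 1, ihi = 2, ldh = 2, ldz = 1;
+    integer lwork = 8, info;
+
+    shseqr_("E", "N", &n, &ilo, &ihi, h, &ldh, wr, wi, zdum, &ldz,
+            work, &lwork, &info);
+    /* info == 0: wr = {5, 2} (order not specified), wi = {0, 0}. */
+}
+#endif
+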
+/* Subroutine */ int slabad_(real *small, real *large)
+{
+ /* Builtin functions */
+ double r_lg10(real *), sqrt(doublereal);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLABAD takes as input the values computed by SLAMCH for underflow and
+ overflow, and returns the square root of each of these values if the
+ log of LARGE is sufficiently large. This subroutine is intended to
+ identify machines with a large exponent range, such as the Crays, and
+ redefine the underflow and overflow limits to be the square roots of
+ the values computed by SLAMCH. This subroutine is needed because
+ SLAMCH does not compensate for poor arithmetic in the upper half of
+ the exponent range, as is found on a Cray.
+
+ Arguments
+ =========
+
+ SMALL (input/output) REAL
+ On entry, the underflow threshold as computed by SLAMCH.
+ On exit, if LOG10(LARGE) is sufficiently large, the square
+ root of SMALL, otherwise unchanged.
+
+ LARGE (input/output) REAL
+ On entry, the overflow threshold as computed by SLAMCH.
+ On exit, if LOG10(LARGE) is sufficiently large, the square
+ root of LARGE, otherwise unchanged.
+
+ =====================================================================
+
+
+ If it looks like we're on a Cray, take the square root of
+ SMALL and LARGE to avoid overflow and underflow problems.
+*/
+
+ if (r_lg10(large) > 2e3f) {
+ *small = sqrt(*small);
+ *large = sqrt(*large);
+ }
+
+ return 0;
+
+/* End of SLABAD */
+
+} /* slabad_ */
+
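+/*
+    Editor's note -- illustrative sketch only.  The canonical pairing
+    of slamch_ and slabad_, as used by shseqr_ above: compute the
+    underflow/overflow thresholds, then let slabad_ shrink them on
+    machines with an unreliable upper exponent range.  Guarded out of
+    the build.
+*/
+#if 0
+static void example_slabad(void)
+{
+    real unfl = slamch_("Safe minimum");
+    real ovfl = 1.f / unfl;
+
+    slabad_(&unfl, &ovfl);
+    /* unfl and ovfl are now safe limits for scaling decisions. */
+}
+#endif
+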
+/* Subroutine */ int slabrd_(integer *m, integer *n, integer *nb, real *a,
+ integer *lda, real *d__, real *e, real *tauq, real *taup, real *x,
+ integer *ldx, real *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, x_dim1, x_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+
+ /* Local variables */
+ static integer i__;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *), slarfg_(
+ integer *, real *, real *, integer *, real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SLABRD reduces the first NB rows and columns of a real general
+ m by n matrix A to upper or lower bidiagonal form by an orthogonal
+ transformation Q' * A * P, and returns the matrices X and Y which
+ are needed to apply the transformation to the unreduced part of A.
+
+ If m >= n, A is reduced to upper bidiagonal form; if m < n, to lower
+ bidiagonal form.
+
+ This is an auxiliary routine called by SGEBRD.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows in the matrix A.
+
+ N (input) INTEGER
+ The number of columns in the matrix A.
+
+ NB (input) INTEGER
+ The number of leading rows and columns of A to be reduced.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the m by n general matrix to be reduced.
+ On exit, the first NB rows and columns of the matrix are
+ overwritten; the rest of the array is unchanged.
+ If m >= n, elements on and below the diagonal in the first NB
+ columns, with the array TAUQ, represent the orthogonal
+ matrix Q as a product of elementary reflectors; and
+ elements above the diagonal in the first NB rows, with the
+ array TAUP, represent the orthogonal matrix P as a product
+ of elementary reflectors.
+ If m < n, elements below the diagonal in the first NB
+ columns, with the array TAUQ, represent the orthogonal
+ matrix Q as a product of elementary reflectors, and
+ elements on and above the diagonal in the first NB rows,
+ with the array TAUP, represent the orthogonal matrix P as
+ a product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ D (output) REAL array, dimension (NB)
+ The diagonal elements of the first NB rows and columns of
+ the reduced matrix. D(i) = A(i,i).
+
+ E (output) REAL array, dimension (NB)
+ The off-diagonal elements of the first NB rows and columns of
+ the reduced matrix.
+
+   TAUQ   (output) REAL array, dimension (NB)
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix Q. See Further Details.
+
+ TAUP (output) REAL array, dimension (NB)
+ The scalar factors of the elementary reflectors which
+ represent the orthogonal matrix P. See Further Details.
+
+ X (output) REAL array, dimension (LDX,NB)
+ The m-by-nb matrix X required to update the unreduced part
+ of A.
+
+ LDX (input) INTEGER
+ The leading dimension of the array X. LDX >= M.
+
+ Y (output) REAL array, dimension (LDY,NB)
+ The n-by-nb matrix Y required to update the unreduced part
+ of A.
+
+   LDY    (input) INTEGER
+ The leading dimension of the array Y. LDY >= N.
+
+ Further Details
+ ===============
+
+ The matrices Q and P are represented as products of elementary
+ reflectors:
+
+ Q = H(1) H(2) . . . H(nb) and P = G(1) G(2) . . . G(nb)
+
+ Each H(i) and G(i) has the form:
+
+ H(i) = I - tauq * v * v' and G(i) = I - taup * u * u'
+
+ where tauq and taup are real scalars, and v and u are real vectors.
+
+ If m >= n, v(1:i-1) = 0, v(i) = 1, and v(i:m) is stored on exit in
+ A(i:m,i); u(1:i) = 0, u(i+1) = 1, and u(i+1:n) is stored on exit in
+ A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ If m < n, v(1:i) = 0, v(i+1) = 1, and v(i+1:m) is stored on exit in
+ A(i+2:m,i); u(1:i-1) = 0, u(i) = 1, and u(i:n) is stored on exit in
+ A(i,i+1:n); tauq is stored in TAUQ(i) and taup in TAUP(i).
+
+ The elements of the vectors v and u together form the m-by-nb matrix
+ V and the nb-by-n matrix U' which are needed, with X and Y, to apply
+ the transformation to the unreduced part of the matrix, using a block
+ update of the form: A := A - V*Y' - X*U'.
+
+ The contents of A on exit are illustrated by the following examples
+ with nb = 2:
+
+ m = 6 and n = 5 (m > n): m = 5 and n = 6 (m < n):
+
+ ( 1 1 u1 u1 u1 ) ( 1 u1 u1 u1 u1 u1 )
+ ( v1 1 1 u2 u2 ) ( 1 1 u2 u2 u2 u2 )
+ ( v1 v2 a a a ) ( v1 1 a a a a )
+ ( v1 v2 a a a ) ( v1 v2 a a a a )
+ ( v1 v2 a a a ) ( v1 v2 a a a a )
+ ( v1 v2 a a a )
+
+ where a denotes an element of the original matrix which is unchanged,
+ vi denotes an element of the vector defining H(i), and ui an element
+ of the vector defining G(i).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tauq;
+ --taup;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
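+
+/*
+   Editor's note on the f2c idiom above (an observation about this
+   translation, not LAPACK text): after the offset adjustments, the
+   1-based Fortran reference A(i,j) becomes a[i__ + j * a_dim1] in C,
+   and likewise for X and Y; this is the indexing used in all of the
+   loops below.
+*/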
+
+ /* Function Body */
+ if ((*m <= 0) || (*n <= 0)) {
+ return 0;
+ }
+
+ if (*m >= *n) {
+
+/* Reduce to upper bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:m,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &a[i__ + a_dim1],
+ lda, &y[i__ + y_dim1], ldy, &c_b1011, &a[i__ + i__ *
+ a_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &x[i__ + x_dim1],
+ ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b1011, &a[i__ + i__ *
+ a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+1:m,i) */
+
+ i__2 = *m - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[min(i__3,*m) + i__ *
+ a_dim1], &c__1, &tauq[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ if (i__ < *n) {
+ a[i__ + i__ * a_dim1] = 1.f;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &a[i__ + (i__ + 1)
+ * a_dim1], lda, &a[i__ + i__ * a_dim1], &c__1, &
+ c_b320, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &a[i__ + a_dim1],
+ lda, &a[i__ + i__ * a_dim1], &c__1, &c_b320, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b1011, &
+ y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &x[i__ + x_dim1],
+ ldx, &a[i__ + i__ * a_dim1], &c__1, &c_b320, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1290, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+                   c_b1011, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ sscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+
+/* Update A(i,i+1:n) */
+
+ i__2 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__, &c_b1290, &y[i__ + 1 +
+ y_dim1], ldy, &a[i__ + a_dim1], lda, &c_b1011, &a[i__
+ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1290, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &x[i__ + x_dim1], ldx, &c_b1011, &a[
+ i__ + (i__ + 1) * a_dim1], lda);
+
+/* Generate reflection P(i) to annihilate A(i,i+2:n) */
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ slarfg_(&i__2, &a[i__ + (i__ + 1) * a_dim1], &a[i__ + min(
+ i__3,*n) * a_dim1], lda, &taup[i__]);
+ e[i__] = a[i__ + (i__ + 1) * a_dim1];
+ a[i__ + (i__ + 1) * a_dim1] = 1.f;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1011, &a[i__ + 1 + (
+ i__ + 1) * a_dim1], lda, &a[i__ + (i__ + 1) * a_dim1],
+ lda, &c_b320, &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__, &c_b1011, &y[i__ + 1 +
+ y_dim1], ldy, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b320, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ sgemv_("No transpose", &i__2, &i__, &c_b1290, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b1011, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1011, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b320, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b1011, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ sscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Reduce to lower bidiagonal form */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i,i:n) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &y[i__ + y_dim1],
+ ldy, &a[i__ + a_dim1], lda, &c_b1011, &a[i__ + i__ *
+ a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1290, &a[i__ * a_dim1 + 1],
+ lda, &x[i__ + x_dim1], ldx, &c_b1011, &a[i__ + i__ *
+ a_dim1], lda);
+
+/* Generate reflection P(i) to annihilate A(i,i+1:n) */
+
+ i__2 = *n - i__ + 1;
+/* Computing MIN */
+ i__3 = i__ + 1;
+ slarfg_(&i__2, &a[i__ + i__ * a_dim1], &a[i__ + min(i__3,*n) *
+ a_dim1], lda, &taup[i__]);
+ d__[i__] = a[i__ + i__ * a_dim1];
+ if (i__ < *m) {
+ a[i__ + i__ * a_dim1] = 1.f;
+
+/* Compute X(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__ + 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1011, &a[i__ + 1 +
+ i__ * a_dim1], lda, &a[i__ + i__ * a_dim1], lda, &
+                   c_b320, &x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &y[i__ + y_dim1],
+ ldy, &a[i__ + i__ * a_dim1], lda, &c_b320, &x[i__ *
+ x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &a[i__ + 1 +
+ a_dim1], lda, &x[i__ * x_dim1 + 1], &c__1, &c_b1011, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1011, &a[i__ *
+ a_dim1 + 1], lda, &a[i__ + i__ * a_dim1], lda, &
+ c_b320, &x[i__ * x_dim1 + 1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &x[i__ + 1 +
+ x_dim1], ldx, &x[i__ * x_dim1 + 1], &c__1, &c_b1011, &
+ x[i__ + 1 + i__ * x_dim1], &c__1);
+ i__2 = *m - i__;
+ sscal_(&i__2, &taup[i__], &x[i__ + 1 + i__ * x_dim1], &c__1);
+
+/* Update A(i+1:m,i) */
+
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &a[i__ + 1 +
+ a_dim1], lda, &y[i__ + y_dim1], ldy, &c_b1011, &a[i__
+ + 1 + i__ * a_dim1], &c__1);
+ i__2 = *m - i__;
+ sgemv_("No transpose", &i__2, &i__, &c_b1290, &x[i__ + 1 +
+ x_dim1], ldx, &a[i__ * a_dim1 + 1], &c__1, &c_b1011, &
+ a[i__ + 1 + i__ * a_dim1], &c__1);
+
+/* Generate reflection Q(i) to annihilate A(i+2:m,i) */
+
+ i__2 = *m - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ slarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*m) +
+ i__ * a_dim1], &c__1, &tauq[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.f;
+
+/* Compute Y(i+1:n,i) */
+
+ i__2 = *m - i__;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &a[i__ + 1 + (i__
+ + 1) * a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &
+ c__1, &c_b320, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &a[i__ + 1 +
+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b320, &y[i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &y[i__ + 1 +
+ y_dim1], ldy, &y[i__ * y_dim1 + 1], &c__1, &c_b1011, &
+ y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *m - i__;
+ sgemv_("Transpose", &i__2, &i__, &c_b1011, &x[i__ + 1 +
+ x_dim1], ldx, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b320, &y[i__ * y_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ sgemv_("Transpose", &i__, &i__2, &c_b1290, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &y[i__ * y_dim1 + 1], &c__1, &
+                   c_b1011, &y[i__ + 1 + i__ * y_dim1], &c__1);
+ i__2 = *n - i__;
+ sscal_(&i__2, &tauq[i__], &y[i__ + 1 + i__ * y_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ return 0;
+
+/* End of SLABRD */
+
+} /* slabrd_ */
+
+/* Subroutine */ int slacpy_(char *uplo, integer *m, integer *n, real *a,
+ integer *lda, real *b, integer *ldb)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SLACPY copies all or part of a two-dimensional matrix A to another
+ matrix B.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies the part of the matrix A to be copied to B.
+ = 'U': Upper triangular part
+ = 'L': Lower triangular part
+ Otherwise: All of the matrix A
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input) REAL array, dimension (LDA,N)
+ The m by n matrix A. If UPLO = 'U', only the upper triangle
+ or trapezoid is accessed; if UPLO = 'L', only the lower
+ triangle or trapezoid is accessed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ B (output) REAL array, dimension (LDB,N)
+ On exit, B = A in the locations specified by UPLO.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,M).
+
+ =====================================================================
+*/
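+
+/*
+   Usage sketch (editor's illustration under the f2c calling
+   convention used throughout this file; the variable names are
+   hypothetical): copying the upper triangle of a into b,
+
+       integer m = 3, n = 3, lda = 3, ldb = 3;
+       slacpy_("U", &m, &n, a, &lda, b, &ldb);
+
+   leaves the strictly lower triangle of b untouched.
+*/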
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if (lsame_(uplo, "L")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ b[i__ + j * b_dim1] = a[i__ + j * a_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+ return 0;
+
+/* End of SLACPY */
+
+} /* slacpy_ */
+
+/* Subroutine */ int sladiv_(real *a, real *b, real *c__, real *d__, real *p,
+ real *q)
+{
+ static real e, f;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLADIV performs complex division in real arithmetic
+
+ a + i*b
+ p + i*q = ---------
+ c + i*d
+
+ The algorithm is due to Robert L. Smith and can be found
+   in D. Knuth, The Art of Computer Programming, Vol. 2, p. 195.
+
+ Arguments
+ =========
+
+ A (input) REAL
+ B (input) REAL
+ C (input) REAL
+ D (input) REAL
+ The scalars a, b, c, and d in the above expression.
+
+ P (output) REAL
+ Q (output) REAL
+ The scalars p and q in the above expression.
+
+ =====================================================================
+*/
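+
+/*
+   Worked example (editor's illustration, not LAPACK text):
+   (1 + 2i) / (3 + 4i) = 0.44 + 0.08i.  Here |d| = 4 > |c| = 3, so
+   the second branch runs: e = c/d = 0.75, f = d + c*e = 6.25,
+   p = (b + a*e)/f = 2.75/6.25 = 0.44, and
+   q = (-a + b*e)/f = 0.5/6.25 = 0.08.  Scaling by the larger of
+   |c| and |d| keeps the intermediate products from overflowing or
+   underflowing.
+*/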
+
+
+ if (dabs(*d__) < dabs(*c__)) {
+ e = *d__ / *c__;
+ f = *c__ + *d__ * e;
+ *p = (*a + *b * e) / f;
+ *q = (*b - *a * e) / f;
+ } else {
+ e = *c__ / *d__;
+ f = *d__ + *c__ * e;
+ *p = (*b + *a * e) / f;
+ *q = (-(*a) + *b * e) / f;
+ }
+
+ return 0;
+
+/* End of SLADIV */
+
+} /* sladiv_ */
+
+/* Subroutine */ int slae2_(real *a, real *b, real *c__, real *rt1, real *rt2)
+{
+ /* System generated locals */
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real ab, df, tb, sm, rt, adf, acmn, acmx;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLAE2 computes the eigenvalues of a 2-by-2 symmetric matrix
+ [ A B ]
+ [ B C ].
+ On return, RT1 is the eigenvalue of larger absolute value, and RT2
+ is the eigenvalue of smaller absolute value.
+
+ Arguments
+ =========
+
+ A (input) REAL
+ The (1,1) element of the 2-by-2 matrix.
+
+ B (input) REAL
+ The (1,2) and (2,1) elements of the 2-by-2 matrix.
+
+ C (input) REAL
+ The (2,2) element of the 2-by-2 matrix.
+
+ RT1 (output) REAL
+ The eigenvalue of larger absolute value.
+
+ RT2 (output) REAL
+ The eigenvalue of smaller absolute value.
+
+ Further Details
+ ===============
+
+ RT1 is accurate to a few ulps barring over/underflow.
+
+ RT2 may be inaccurate if there is massive cancellation in the
+ determinant A*C-B*B; higher precision or correctly rounded or
+ correctly truncated arithmetic would be needed to compute RT2
+ accurately in all cases.
+
+ Overflow is possible only if RT1 is within a factor of 5 of overflow.
+ Underflow is harmless if the input data is 0 or exceeds
+ underflow_threshold / macheps.
+
+ =====================================================================
+
+
+ Compute the eigenvalues
+*/
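+
+/*
+   Worked example (editor's illustration, not LAPACK text): for
+   A = 3, B = 1, C = 3 the matrix [3 1; 1 3] has eigenvalues 4 and 2.
+   Here SM = 6, ADF = 0, AB = 2, so RT = AB = 2 and, since SM > 0,
+   RT1 = (6 + 2)/2 = 4 and
+   RT2 = ACMX/RT1*ACMN - B/RT1*B = 9/4 - 1/4 = 2.
+*/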
+
+ sm = *a + *c__;
+ df = *a - *c__;
+ adf = dabs(df);
+ tb = *b + *b;
+ ab = dabs(tb);
+ if (dabs(*a) > dabs(*c__)) {
+ acmx = *a;
+ acmn = *c__;
+ } else {
+ acmx = *c__;
+ acmn = *a;
+ }
+ if (adf > ab) {
+/* Computing 2nd power */
+ r__1 = ab / adf;
+ rt = adf * sqrt(r__1 * r__1 + 1.f);
+ } else if (adf < ab) {
+/* Computing 2nd power */
+ r__1 = adf / ab;
+ rt = ab * sqrt(r__1 * r__1 + 1.f);
+ } else {
+
+/* Includes case AB=ADF=0 */
+
+ rt = ab * sqrt(2.f);
+ }
+ if (sm < 0.f) {
+ *rt1 = (sm - rt) * .5f;
+
+/*
+ Order of execution important.
+ To get fully accurate smaller eigenvalue,
+ next line needs to be executed in higher precision.
+*/
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else if (sm > 0.f) {
+ *rt1 = (sm + rt) * .5f;
+
+/*
+ Order of execution important.
+ To get fully accurate smaller eigenvalue,
+ next line needs to be executed in higher precision.
+*/
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else {
+
+/* Includes case RT1 = RT2 = 0 */
+
+ *rt1 = rt * .5f;
+ *rt2 = rt * -.5f;
+ }
+ return 0;
+
+/* End of SLAE2 */
+
+} /* slae2_ */
+
+/* Subroutine */ int slaed0_(integer *icompq, integer *qsiz, integer *n, real
+ *d__, real *e, real *q, integer *ldq, real *qstore, integer *ldqs,
+ real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, j, k, iq, lgn, msd2, smm1, spm1, spm2;
+ static real temp;
+ static integer curr;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer iperm, indxq, iwrem;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ static integer iqptr, tlvls;
+ extern /* Subroutine */ int slaed1_(integer *, real *, real *, integer *,
+ integer *, real *, integer *, real *, integer *, integer *),
+ slaed7_(integer *, integer *, integer *, integer *, integer *,
+ integer *, real *, real *, integer *, integer *, real *, integer *
+ , real *, integer *, integer *, integer *, integer *, integer *,
+ real *, real *, integer *, integer *);
+ static integer igivcl;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static integer igivnm, submat;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ static integer curprb, subpbs, igivpt, curlvl, matsiz, iprmpt, smlsiz;
+ extern /* Subroutine */ int ssteqr_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLAED0 computes all eigenvalues and corresponding eigenvectors of a
+ symmetric tridiagonal matrix using the divide and conquer method.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ = 0: Compute eigenvalues only.
+ = 1: Compute eigenvectors of original dense symmetric matrix
+ also. On entry, Q contains the orthogonal matrix used
+ to reduce the original matrix to tridiagonal form.
+ = 2: Compute eigenvalues and eigenvectors of tridiagonal
+ matrix.
+
+ QSIZ (input) INTEGER
+ The dimension of the orthogonal matrix used to reduce
+ the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the main diagonal of the tridiagonal matrix.
+ On exit, its eigenvalues.
+
+ E (input) REAL array, dimension (N-1)
+ The off-diagonal elements of the tridiagonal matrix.
+ On exit, E has been destroyed.
+
+ Q (input/output) REAL array, dimension (LDQ, N)
+ On entry, Q must contain an N-by-N orthogonal matrix.
+ If ICOMPQ = 0 Q is not referenced.
+ If ICOMPQ = 1 On entry, Q is a subset of the columns of the
+ orthogonal matrix used to reduce the full
+ matrix to tridiagonal form corresponding to
+ the subset of the full matrix which is being
+ decomposed at this time.
+ If ICOMPQ = 2 On entry, Q will be the identity matrix.
+ On exit, Q contains the eigenvectors of the
+ tridiagonal matrix.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. If eigenvectors are
+ desired, then LDQ >= max(1,N). In any case, LDQ >= 1.
+
+ QSTORE (workspace) REAL array, dimension (LDQS, N)
+ Referenced only when ICOMPQ = 1. Used to store parts of
+ the eigenvector matrix when the updating matrix multiplies
+ take place.
+
+ LDQS (input) INTEGER
+ The leading dimension of the array QSTORE. If ICOMPQ = 1,
+ then LDQS >= max(1,N). In any case, LDQS >= 1.
+
+ WORK (workspace) REAL array,
+ If ICOMPQ = 0 or 1, the dimension of WORK must be at least
+ 1 + 3*N + 2*N*lg N + 2*N**2
+ ( lg( N ) = smallest integer k
+ such that 2^k >= N )
+ If ICOMPQ = 2, the dimension of WORK must be at least
+ 4*N + N**2.
+
+ IWORK (workspace) INTEGER array,
+ If ICOMPQ = 0 or 1, the dimension of IWORK must be at least
+ 6 + 6*N + 5*N*lg N.
+ ( lg( N ) = smallest integer k
+ such that 2^k >= N )
+ If ICOMPQ = 2, the dimension of IWORK must be at least
+ 3 + 5*N.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute an eigenvalue while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through mod(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ qstore_dim1 = *ldqs;
+ qstore_offset = 1 + qstore_dim1;
+ qstore -= qstore_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 2)) {
+ *info = -1;
+ } else if (*icompq == 1 && *qsiz < max(0,*n)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ldq < max(1,*n)) {
+ *info = -7;
+ } else if (*ldqs < max(1,*n)) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAED0", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ smlsiz = ilaenv_(&c__9, "SLAED0", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+
+/*
+ Determine the size and placement of the submatrices, and save in
+ the leading elements of IWORK.
+*/
+
+ iwork[1] = *n;
+ subpbs = 1;
+ tlvls = 0;
+L10:
+ if (iwork[subpbs] > smlsiz) {
+ for (j = subpbs; j >= 1; --j) {
+ iwork[j * 2] = (iwork[j] + 1) / 2;
+ iwork[((j) << (1)) - 1] = iwork[j] / 2;
+/* L20: */
+ }
+ ++tlvls;
+ subpbs <<= 1;
+ goto L10;
+ }
+ i__1 = subpbs;
+ for (j = 2; j <= i__1; ++j) {
+ iwork[j] += iwork[j - 1];
+/* L30: */
+ }
+
+/*
+ Divide the matrix into SUBPBS submatrices of size at most SMLSIZ+1
+ using rank-1 modifications (cuts).
+*/
+
+ spm1 = subpbs - 1;
+ i__1 = spm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ submat = iwork[i__] + 1;
+ smm1 = submat - 1;
+ d__[smm1] -= (r__1 = e[smm1], dabs(r__1));
+ d__[submat] -= (r__1 = e[smm1], dabs(r__1));
+/* L40: */
+ }
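+
+/*
+   Editor's note (the standard divide-and-conquer tearing, stated
+   here since the code does not spell it out): subtracting |E(SMM1)|
+   from the two diagonal entries adjacent to each cut rewrites T as
+   a block-diagonal matrix plus a rank-one correction
+   |E(SMM1)| * v * v', where v is nonzero only in positions SMM1 and
+   SUBMAT; the merge phase below undoes these corrections one level
+   at a time.
+*/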
+
+ indxq = ((*n) << (2)) + 3;
+ if (*icompq != 2) {
+
+/*
+ Set up workspaces for eigenvalues only/accumulate new vectors
+ routine
+*/
+
+ temp = log((real) (*n)) / log(2.f);
+ lgn = (integer) temp;
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ iprmpt = indxq + *n + 1;
+ iperm = iprmpt + *n * lgn;
+ iqptr = iperm + *n * lgn;
+ igivpt = iqptr + *n + 2;
+ igivcl = igivpt + *n * lgn;
+
+ igivnm = 1;
+ iq = igivnm + ((*n) << (1)) * lgn;
+/* Computing 2nd power */
+ i__1 = *n;
+ iwrem = iq + i__1 * i__1 + 1;
+
+/* Initialize pointers */
+
+ i__1 = subpbs;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ iwork[iprmpt + i__] = 1;
+ iwork[igivpt + i__] = 1;
+/* L50: */
+ }
+ iwork[iqptr] = 1;
+ }
+
+/*
+ Solve each submatrix eigenproblem at the bottom of the divide and
+ conquer tree.
+*/
+
+ curr = 0;
+ i__1 = spm1;
+ for (i__ = 0; i__ <= i__1; ++i__) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[1];
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 1] - iwork[i__];
+ }
+ if (*icompq == 2) {
+ ssteqr_("I", &matsiz, &d__[submat], &e[submat], &q[submat +
+ submat * q_dim1], ldq, &work[1], info);
+ if (*info != 0) {
+ goto L130;
+ }
+ } else {
+ ssteqr_("I", &matsiz, &d__[submat], &e[submat], &work[iq - 1 +
+ iwork[iqptr + curr]], &matsiz, &work[1], info);
+ if (*info != 0) {
+ goto L130;
+ }
+ if (*icompq == 1) {
+ sgemm_("N", "N", qsiz, &matsiz, &matsiz, &c_b1011, &q[submat *
+ q_dim1 + 1], ldq, &work[iq - 1 + iwork[iqptr + curr]]
+ , &matsiz, &c_b320, &qstore[submat * qstore_dim1 + 1],
+ ldqs);
+ }
+/* Computing 2nd power */
+ i__2 = matsiz;
+ iwork[iqptr + curr + 1] = iwork[iqptr + curr] + i__2 * i__2;
+ ++curr;
+ }
+ k = 1;
+ i__2 = iwork[i__ + 1];
+ for (j = submat; j <= i__2; ++j) {
+ iwork[indxq + j] = k;
+ ++k;
+/* L60: */
+ }
+/* L70: */
+ }
+
+/*
+ Successively merge eigensystems of adjacent submatrices
+ into eigensystem for the corresponding larger matrix.
+
+ while ( SUBPBS > 1 )
+*/
+
+ curlvl = 1;
+L80:
+ if (subpbs > 1) {
+ spm2 = subpbs - 2;
+ i__1 = spm2;
+ for (i__ = 0; i__ <= i__1; i__ += 2) {
+ if (i__ == 0) {
+ submat = 1;
+ matsiz = iwork[2];
+ msd2 = iwork[1];
+ curprb = 0;
+ } else {
+ submat = iwork[i__] + 1;
+ matsiz = iwork[i__ + 2] - iwork[i__];
+ msd2 = matsiz / 2;
+ ++curprb;
+ }
+
+/*
+ Merge lower order eigensystems (of size MSD2 and MATSIZ - MSD2)
+ into an eigensystem of size MATSIZ.
+ SLAED1 is used only for the full eigensystem of a tridiagonal
+ matrix.
+ SLAED7 handles the cases in which eigenvalues only or eigenvalues
+ and eigenvectors of a full symmetric matrix (which was reduced to
+ tridiagonal form) are desired.
+*/
+
+ if (*icompq == 2) {
+ slaed1_(&matsiz, &d__[submat], &q[submat + submat * q_dim1],
+ ldq, &iwork[indxq + submat], &e[submat + msd2 - 1], &
+ msd2, &work[1], &iwork[subpbs + 1], info);
+ } else {
+ slaed7_(icompq, &matsiz, qsiz, &tlvls, &curlvl, &curprb, &d__[
+ submat], &qstore[submat * qstore_dim1 + 1], ldqs, &
+ iwork[indxq + submat], &e[submat + msd2 - 1], &msd2, &
+ work[iq], &iwork[iqptr], &iwork[iprmpt], &iwork[iperm]
+ , &iwork[igivpt], &iwork[igivcl], &work[igivnm], &
+ work[iwrem], &iwork[subpbs + 1], info);
+ }
+ if (*info != 0) {
+ goto L130;
+ }
+ iwork[i__ / 2 + 1] = iwork[i__ + 2];
+/* L90: */
+ }
+ subpbs /= 2;
+ ++curlvl;
+ goto L80;
+ }
+
+/*
+ end while
+
+ Re-merge the eigenvalues/vectors which were deflated at the final
+ merge step.
+*/
+
+ if (*icompq == 1) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ work[i__] = d__[j];
+ scopy_(qsiz, &qstore[j * qstore_dim1 + 1], &c__1, &q[i__ * q_dim1
+ + 1], &c__1);
+/* L100: */
+ }
+ scopy_(n, &work[1], &c__1, &d__[1], &c__1);
+ } else if (*icompq == 2) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ work[i__] = d__[j];
+ scopy_(n, &q[j * q_dim1 + 1], &c__1, &work[*n * i__ + 1], &c__1);
+/* L110: */
+ }
+ scopy_(n, &work[1], &c__1, &d__[1], &c__1);
+ slacpy_("A", n, n, &work[*n + 1], n, &q[q_offset], ldq);
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ j = iwork[indxq + i__];
+ work[i__] = d__[j];
+/* L120: */
+ }
+ scopy_(n, &work[1], &c__1, &d__[1], &c__1);
+ }
+ goto L140;
+
+L130:
+ *info = submat * (*n + 1) + submat + matsiz - 1;
+
+L140:
+ return 0;
+
+/* End of SLAED0 */
+
+} /* slaed0_ */
+
+/* Subroutine */ int slaed1_(integer *n, real *d__, real *q, integer *ldq,
+ integer *indxq, real *rho, integer *cutpnt, real *work, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, k, n1, n2, is, iw, iz, iq2, cpp1, indx, indxc, indxp;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), slaed2_(integer *, integer *, integer *, real *, real
+ *, integer *, integer *, real *, real *, real *, real *, real *,
+ integer *, integer *, integer *, integer *, integer *), slaed3_(
+ integer *, integer *, integer *, real *, real *, integer *, real *
+	    , real *, real *, integer *, integer *, real *, real *, integer *);
+ static integer idlmda;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slamrg_(
+ integer *, integer *, real *, integer *, integer *, integer *);
+ static integer coltyp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLAED1 computes the updated eigensystem of a diagonal
+ matrix after modification by a rank-one symmetric matrix. This
+ routine is used only for the eigenproblem which requires all
+ eigenvalues and eigenvectors of a tridiagonal matrix. SLAED7 handles
+ the case in which eigenvalues only or eigenvalues and eigenvectors
+ of a full symmetric matrix (which was reduced to tridiagonal form)
+ are desired.
+
+ T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out)
+
+ where Z = Q'u, u is a vector of length N with ones in the
+   CUTPNT-th and (CUTPNT + 1)-th elements and zeros elsewhere.
+
+ The eigenvectors of the original matrix are stored in Q, and the
+ eigenvalues are in D. The algorithm consists of three stages:
+
+ The first stage consists of deflating the size of the problem
+ when there are multiple eigenvalues or if there is a zero in
+      the Z vector.  For each such occurrence the dimension of the
+ secular equation problem is reduced by one. This stage is
+ performed by the routine SLAED2.
+
+ The second stage consists of calculating the updated
+ eigenvalues. This is done by finding the roots of the secular
+ equation via the routine SLAED4 (as called by SLAED3).
+ This routine also calculates the eigenvectors of the current
+ problem.
+
+ The final stage consists of computing the updated eigenvectors
+ directly using the updated eigenvalues. The eigenvectors for
+ the current problem are multiplied with the eigenvectors from
+ the overall problem.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the eigenvalues of the rank-1-perturbed matrix.
+ On exit, the eigenvalues of the repaired matrix.
+
+ Q (input/output) REAL array, dimension (LDQ,N)
+ On entry, the eigenvectors of the rank-1-perturbed matrix.
+ On exit, the eigenvectors of the repaired tridiagonal matrix.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ INDXQ (input/output) INTEGER array, dimension (N)
+ On entry, the permutation which separately sorts the two
+ subproblems in D into ascending order.
+ On exit, the permutation which will reintegrate the
+ subproblems back into sorted order,
+ i.e. D( INDXQ( I = 1, N ) ) will be in ascending order.
+
+ RHO (input) REAL
+ The subdiagonal entry used to create the rank-1 modification.
+
+ CUTPNT (input) INTEGER
+ The location of the last eigenvalue in the leading sub-matrix.
+ min(1,N) <= CUTPNT <= N/2.
+
+ WORK (workspace) REAL array, dimension (4*N + N**2)
+
+ IWORK (workspace) INTEGER array, dimension (4*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, an eigenvalue did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+ Modified by Francoise Tisseur, University of Tennessee.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -1;
+ } else if (*ldq < max(1,*n)) {
+ *info = -4;
+ } else /* if(complicated condition) */ {
+/* Computing MIN */
+ i__1 = 1, i__2 = *n / 2;
+ if ((min(i__1,i__2) > *cutpnt) || (*n / 2 < *cutpnt)) {
+ *info = -7;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAED1", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/*
+ The following values are integer pointers which indicate
+ the portion of the workspace
+ used by a particular array in SLAED2 and SLAED3.
+*/
+
+ iz = 1;
+ idlmda = iz + *n;
+ iw = idlmda + *n;
+ iq2 = iw + *n;
+
+ indx = 1;
+ indxc = indx + *n;
+ coltyp = indxc + *n;
+ indxp = coltyp + *n;
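+
+/*
+   Editor's note (layout implied by the documented sizes): WORK is
+   carved into Z (length N), DLAMDA (N), W (N) and Q2 (the rest, up
+   to N**2 + N), and IWORK into INDX, INDXC, COLTYP and INDXP, each
+   of length N, matching the documented 4*N + N**2 real and 4*N
+   integer workspace requirements.
+*/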
+
+
+/*
+ Form the z-vector which consists of the last row of Q_1 and the
+ first row of Q_2.
+*/
+
+ scopy_(cutpnt, &q[*cutpnt + q_dim1], ldq, &work[iz], &c__1);
+ cpp1 = *cutpnt + 1;
+ i__1 = *n - *cutpnt;
+ scopy_(&i__1, &q[cpp1 + cpp1 * q_dim1], ldq, &work[iz + *cutpnt], &c__1);
+
+/* Deflate eigenvalues. */
+
+ slaed2_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, &indxq[1], rho, &work[
+ iz], &work[idlmda], &work[iw], &work[iq2], &iwork[indx], &iwork[
+ indxc], &iwork[indxp], &iwork[coltyp], info);
+
+ if (*info != 0) {
+ goto L20;
+ }
+
+/* Solve Secular Equation. */
+
+ if (k != 0) {
+ is = (iwork[coltyp] + iwork[coltyp + 1]) * *cutpnt + (iwork[coltyp +
+ 1] + iwork[coltyp + 2]) * (*n - *cutpnt) + iq2;
+ slaed3_(&k, n, cutpnt, &d__[1], &q[q_offset], ldq, rho, &work[idlmda],
+ &work[iq2], &iwork[indxc], &iwork[coltyp], &work[iw], &work[
+ is], info);
+ if (*info != 0) {
+ goto L20;
+ }
+
+/* Prepare the INDXQ sorting permutation. */
+
+ n1 = k;
+ n2 = *n - k;
+ slamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indxq[i__] = i__;
+/* L10: */
+ }
+ }
+
+L20:
+ return 0;
+
+/* End of SLAED1 */
+
+} /* slaed1_ */
+
+/* Subroutine */ int slaed2_(integer *k, integer *n, integer *n1, real *d__,
+ real *q, integer *ldq, integer *indxq, real *rho, real *z__, real *
+ dlamda, real *w, real *q2, integer *indx, integer *indxc, integer *
+ indxp, integer *coltyp, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real c__;
+ static integer i__, j;
+ static real s, t;
+ static integer k2, n2, ct, nj, pj, js, iq1, iq2, n1p1;
+ static real eps, tau, tol;
+ static integer psm[4], imax, jmax, ctot[4];
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *), sscal_(integer *, real *, real *,
+	    integer *), scopy_(integer *, real *, integer *, real *, integer *);
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slamrg_(integer *, integer *, real *, integer
+ *, integer *, integer *), slacpy_(char *, integer *, integer *,
+ real *, integer *, real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SLAED2 merges the two sets of eigenvalues together into a single
+ sorted set. Then it tries to deflate the size of the problem.
+ There are two ways in which deflation can occur: when two or more
+ eigenvalues are close together or if there is a tiny entry in the
+ Z vector. For each such occurrence the order of the related secular
+ equation problem is reduced by one.
+
+ Arguments
+ =========
+
+ K (output) INTEGER
+ The number of non-deflated eigenvalues, and the order of the
+           related secular equation.  0 <= K <= N.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ N1 (input) INTEGER
+ The location of the last eigenvalue in the leading sub-matrix.
+ min(1,N) <= N1 <= N/2.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, D contains the eigenvalues of the two submatrices to
+ be combined.
+ On exit, D contains the trailing (N-K) updated eigenvalues
+ (those which were deflated) sorted into increasing order.
+
+ Q (input/output) REAL array, dimension (LDQ, N)
+ On entry, Q contains the eigenvectors of two submatrices in
+ the two square blocks with corners at (1,1), (N1,N1)
+ and (N1+1, N1+1), (N,N).
+ On exit, Q contains the trailing (N-K) updated eigenvectors
+ (those which were deflated) in its last N-K columns.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ INDXQ (input/output) INTEGER array, dimension (N)
+ The permutation which separately sorts the two sub-problems
+ in D into ascending order. Note that elements in the second
+ half of this permutation must first have N1 added to their
+ values. Destroyed on exit.
+
+ RHO (input/output) REAL
+ On entry, the off-diagonal element associated with the rank-1
+ cut which originally split the two submatrices which are now
+ being recombined.
+ On exit, RHO has been modified to the value required by
+ SLAED3.
+
+ Z (input) REAL array, dimension (N)
+ On entry, Z contains the updating vector (the last
+ row of the first sub-eigenvector matrix and the first row of
+ the second sub-eigenvector matrix).
+ On exit, the contents of Z have been destroyed by the updating
+ process.
+
+ DLAMDA (output) REAL array, dimension (N)
+ A copy of the first K eigenvalues which will be used by
+ SLAED3 to form the secular equation.
+
+ W (output) REAL array, dimension (N)
+ The first k values of the final deflation-altered z-vector
+ which will be passed to SLAED3.
+
+ Q2 (output) REAL array, dimension (N1**2+(N-N1)**2)
+ A copy of the first K eigenvectors which will be used by
+ SLAED3 in a matrix multiply (SGEMM) to solve for the new
+ eigenvectors.
+
+ INDX (workspace) INTEGER array, dimension (N)
+ The permutation used to sort the contents of DLAMDA into
+ ascending order.
+
+ INDXC (output) INTEGER array, dimension (N)
+ The permutation used to arrange the columns of the deflated
+ Q matrix into three groups: the first group contains non-zero
+ elements only at and above N1, the second contains
+ non-zero elements only below N1, and the third is dense.
+
+ INDXP (workspace) INTEGER array, dimension (N)
+ The permutation used to place deflated values of D at the end
+ of the array. INDXP(1:K) points to the nondeflated D-values
+ and INDXP(K+1:N) points to the deflated eigenvalues.
+
+ COLTYP (workspace/output) INTEGER array, dimension (N)
+ During execution, a label which will indicate which of the
+ following types a column in the Q2 matrix is:
+ 1 : non-zero in the upper half only;
+ 2 : dense;
+ 3 : non-zero in the lower half only;
+ 4 : deflated.
+ On exit, COLTYP(i) is the number of columns of type i,
+ for i=1 to 4 only.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+ Modified by Francoise Tisseur, University of Tennessee.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --z__;
+ --dlamda;
+ --w;
+ --q2;
+ --indx;
+ --indxc;
+ --indxp;
+ --coltyp;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MIN */
+ i__1 = 1, i__2 = *n / 2;
+ if ((min(i__1,i__2) > *n1) || (*n / 2 < *n1)) {
+ *info = -3;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAED2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ n2 = *n - *n1;
+ n1p1 = *n1 + 1;
+
+ if (*rho < 0.f) {
+ sscal_(&n2, &c_b1290, &z__[n1p1], &c__1);
+ }
+
+/*
+ Normalize z so that norm(z) = 1. Since z is the concatenation of
+ two normalized vectors, norm2(z) = sqrt(2).
+*/
+
+ t = 1.f / sqrt(2.f);
+ sscal_(n, &t, &z__[1], &c__1);
+
+/* RHO = ABS( norm(z)**2 * RHO ) */
+
+ *rho = (r__1 = *rho * 2.f, dabs(r__1));
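+
+/*
+   Editor's note: before the scaling above, norm(z)**2 = 2, so the
+   update RHO * z*z' equals (2*RHO) * (z/sqrt(2)) * (z/sqrt(2))';
+   hence the effective RHO for the unit-norm z is |2*RHO| (the sign
+   was already absorbed by negating the second half of z when
+   RHO < 0).
+*/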
+
+/* Sort the eigenvalues into increasing order */
+
+ i__1 = *n;
+ for (i__ = n1p1; i__ <= i__1; ++i__) {
+ indxq[i__] += *n1;
+/* L10: */
+ }
+
+/* re-integrate the deflated parts from the last pass */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = d__[indxq[i__]];
+/* L20: */
+ }
+ slamrg_(n1, &n2, &dlamda[1], &c__1, &c__1, &indxc[1]);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indx[i__] = indxq[indxc[i__]];
+/* L30: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ imax = isamax_(n, &z__[1], &c__1);
+ jmax = isamax_(n, &d__[1], &c__1);
+ eps = slamch_("Epsilon");
+/* Computing MAX */
+ r__3 = (r__1 = d__[jmax], dabs(r__1)), r__4 = (r__2 = z__[imax], dabs(
+ r__2));
+ tol = eps * 8.f * dmax(r__3,r__4);
+
+/*
+ If the rank-1 modifier is small enough, no more needs to be done
+ except to reorganize Q so that its columns correspond with the
+ elements in D.
+*/
+
+ if (*rho * (r__1 = z__[imax], dabs(r__1)) <= tol) {
+ *k = 0;
+ iq2 = 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__ = indx[j];
+ scopy_(n, &q[i__ * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
+ dlamda[j] = d__[i__];
+ iq2 += *n;
+/* L40: */
+ }
+ slacpy_("A", n, n, &q2[1], n, &q[q_offset], ldq);
+ scopy_(n, &dlamda[1], &c__1, &d__[1], &c__1);
+ goto L190;
+ }
+
+/*
+ If there are multiple eigenvalues then the problem deflates. Here
+ the number of equal eigenvalues are found. As each equal
+ eigenvalue is found, an elementary reflector is computed to rotate
+ the corresponding eigensubspace so that the corresponding
+ components of Z are zero in this new basis.
+*/
+
+ i__1 = *n1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ coltyp[i__] = 1;
+/* L50: */
+ }
+ i__1 = *n;
+ for (i__ = n1p1; i__ <= i__1; ++i__) {
+ coltyp[i__] = 3;
+/* L60: */
+ }
+
+
+ *k = 0;
+ k2 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ nj = indx[j];
+ if (*rho * (r__1 = z__[nj], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ coltyp[nj] = 4;
+ indxp[k2] = nj;
+ if (j == *n) {
+ goto L100;
+ }
+ } else {
+ pj = nj;
+ goto L80;
+ }
+/* L70: */
+ }
+L80:
+ ++j;
+ nj = indx[j];
+ if (j > *n) {
+ goto L100;
+ }
+ if (*rho * (r__1 = z__[nj], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ coltyp[nj] = 4;
+ indxp[k2] = nj;
+ } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+ s = z__[pj];
+ c__ = z__[nj];
+
+/*
+ Find sqrt(a**2+b**2) without overflow or
+ destructive underflow.
+*/
+
+ tau = slapy2_(&c__, &s);
+ t = d__[nj] - d__[pj];
+ c__ /= tau;
+ s = -s / tau;
+ if ((r__1 = t * c__ * s, dabs(r__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ z__[nj] = tau;
+ z__[pj] = 0.f;
+ if (coltyp[nj] != coltyp[pj]) {
+ coltyp[nj] = 2;
+ }
+ coltyp[pj] = 4;
+ srot_(n, &q[pj * q_dim1 + 1], &c__1, &q[nj * q_dim1 + 1], &c__1, &
+ c__, &s);
+/* Computing 2nd power */
+ r__1 = c__;
+/* Computing 2nd power */
+ r__2 = s;
+ t = d__[pj] * (r__1 * r__1) + d__[nj] * (r__2 * r__2);
+/* Computing 2nd power */
+ r__1 = s;
+/* Computing 2nd power */
+ r__2 = c__;
+ d__[nj] = d__[pj] * (r__1 * r__1) + d__[nj] * (r__2 * r__2);
+ d__[pj] = t;
+ --k2;
+ i__ = 1;
+L90:
+ if (k2 + i__ <= *n) {
+ if (d__[pj] < d__[indxp[k2 + i__]]) {
+ indxp[k2 + i__ - 1] = indxp[k2 + i__];
+ indxp[k2 + i__] = pj;
+ ++i__;
+ goto L90;
+ } else {
+ indxp[k2 + i__ - 1] = pj;
+ }
+ } else {
+ indxp[k2 + i__ - 1] = pj;
+ }
+ pj = nj;
+ } else {
+ ++(*k);
+ dlamda[*k] = d__[pj];
+ w[*k] = z__[pj];
+ indxp[*k] = pj;
+ pj = nj;
+ }
+ }
+ goto L80;
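+
+/*
+   Editor's summary of the branch above (an observation, not LAPACK
+   text): when two candidate eigenvalues are close enough, a Givens
+   rotation with c = z(nj)/tau, s = -z(pj)/tau is applied to columns
+   pj and nj of Q; it zeros z(pj), replaces the two diagonal entries
+   by c**2*d(pj) + s**2*d(nj) and s**2*d(pj) + c**2*d(nj), and the
+   rotated column pj is inserted into the deflated list in sorted
+   order.
+*/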
+L100:
+
+/* Record the last eigenvalue. */
+
+ ++(*k);
+ dlamda[*k] = d__[pj];
+ w[*k] = z__[pj];
+ indxp[*k] = pj;
+
+/*
+ Count up the total number of the various types of columns, then
+ form a permutation which positions the four column types into
+ four uniform groups (although one or more of these groups may be
+ empty).
+*/
+
+ for (j = 1; j <= 4; ++j) {
+ ctot[j - 1] = 0;
+/* L110: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ ct = coltyp[j];
+ ++ctot[ct - 1];
+/* L120: */
+ }
+
+/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
+
+ psm[0] = 1;
+ psm[1] = ctot[0] + 1;
+ psm[2] = psm[1] + ctot[1];
+ psm[3] = psm[2] + ctot[2];
+ *k = *n - ctot[3];
+
+/*
+ Fill out the INDXC array so that the permutation which it induces
+ will place all type-1 columns first, all type-2 columns next,
+ then all type-3's, and finally all type-4's.
+*/
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ js = indxp[j];
+ ct = coltyp[js];
+ indx[psm[ct - 1]] = js;
+ indxc[psm[ct - 1]] = j;
+ ++psm[ct - 1];
+/* L130: */
+ }
+
+/*
+ Sort the eigenvalues and corresponding eigenvectors into DLAMDA
+ and Q2 respectively. The eigenvalues/vectors which were not
+ deflated go into the first K slots of DLAMDA and Q2 respectively,
+ while those which were deflated go into the last N - K slots.
+*/
+
+ i__ = 1;
+ iq1 = 1;
+ iq2 = (ctot[0] + ctot[1]) * *n1 + 1;
+ i__1 = ctot[0];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ scopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
+ z__[i__] = d__[js];
+ ++i__;
+ iq1 += *n1;
+/* L140: */
+ }
+
+ i__1 = ctot[1];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ scopy_(n1, &q[js * q_dim1 + 1], &c__1, &q2[iq1], &c__1);
+ scopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
+ z__[i__] = d__[js];
+ ++i__;
+ iq1 += *n1;
+ iq2 += n2;
+/* L150: */
+ }
+
+ i__1 = ctot[2];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ scopy_(&n2, &q[*n1 + 1 + js * q_dim1], &c__1, &q2[iq2], &c__1);
+ z__[i__] = d__[js];
+ ++i__;
+ iq2 += n2;
+/* L160: */
+ }
+
+ iq1 = iq2;
+ i__1 = ctot[3];
+ for (j = 1; j <= i__1; ++j) {
+ js = indx[i__];
+ scopy_(n, &q[js * q_dim1 + 1], &c__1, &q2[iq2], &c__1);
+ iq2 += *n;
+ z__[i__] = d__[js];
+ ++i__;
+/* L170: */
+ }
+
+/*
+ The deflated eigenvalues and their corresponding vectors go back
+ into the last N - K slots of D and Q respectively.
+*/
+
+ slacpy_("A", n, &ctot[3], &q2[iq1], n, &q[(*k + 1) * q_dim1 + 1], ldq);
+ i__1 = *n - *k;
+ scopy_(&i__1, &z__[*k + 1], &c__1, &d__[*k + 1], &c__1);
+
+/* Copy CTOT into COLTYP for referencing in SLAED3. */
+
+ for (j = 1; j <= 4; ++j) {
+ coltyp[j] = ctot[j - 1];
+/* L180: */
+ }
+
+L190:
+ return 0;
+
+/* End of SLAED2 */
+
+} /* slaed2_ */
+
+/* Subroutine */ int slaed3_(integer *k, integer *n, integer *n1, real *d__,
+ real *q, integer *ldq, real *rho, real *dlamda, real *q2, integer *
+ indx, integer *ctot, real *w, real *s, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ static integer i__, j, n2, n12, ii, n23, iq2;
+ static real temp;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *), scopy_(integer *, real *,
+ integer *, real *, integer *), slaed4_(integer *, integer *, real
+ *, real *, real *, real *, real *, integer *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), slacpy_(
+ char *, integer *, integer *, real *, integer *, real *, integer *
+ ), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLAED3 finds the roots of the secular equation, as defined by the
+ values in D, W, and RHO, between 1 and K. It makes the
+ appropriate calls to SLAED4 and then updates the eigenvectors by
+ multiplying the matrix of eigenvectors of the pair of eigensystems
+ being combined by the matrix of eigenvectors of the K-by-K system
+ which is solved here.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ K (input) INTEGER
+ The number of terms in the rational function to be solved by
+ SLAED4. K >= 0.
+
+ N (input) INTEGER
+ The number of rows and columns in the Q matrix.
+ N >= K (deflation may result in N>K).
+
+ N1 (input) INTEGER
+ The location of the last eigenvalue in the leading submatrix.
+ min(1,N) <= N1 <= N/2.
+
+ D (output) REAL array, dimension (N)
+ D(I) contains the updated eigenvalues for
+ 1 <= I <= K.
+
+ Q (output) REAL array, dimension (LDQ,N)
+ Initially the first K columns are used as workspace.
+ On output the columns 1 to K contain
+ the updated eigenvectors.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ RHO (input) REAL
+ The value of the parameter in the rank one update equation.
+ RHO >= 0 required.
+
+ DLAMDA (input/output) REAL array, dimension (K)
+ The first K elements of this array contain the old roots
+ of the deflated updating problem. These are the poles
+ of the secular equation. May be changed on output by
+ having lowest order bit set to zero on Cray X-MP, Cray Y-MP,
+ Cray-2, or Cray C-90, as described above.
+
+ Q2 (input) REAL array, dimension (LDQ2, N)
+ The first K columns of this matrix contain the non-deflated
+ eigenvectors for the split problem.
+
+ INDX (input) INTEGER array, dimension (N)
+ The permutation used to arrange the columns of the deflated
+ Q matrix into three groups (see SLAED2).
+ The rows of the eigenvectors found by SLAED4 must be likewise
+ permuted before the matrix multiply can take place.
+
+ CTOT (input) INTEGER array, dimension (4)
+ A count of the total number of the various types of columns
+ in Q, as described in INDX. The fourth column type is any
+ column which has been deflated.
+
+ W (input/output) REAL array, dimension (K)
+ The first K elements of this array contain the components
+ of the deflation-adjusted updating vector. Destroyed on
+ output.
+
+ S (workspace) REAL array, dimension (N1 + 1)*K
+ Will contain the eigenvectors of the repaired matrix which
+ will be multiplied by the previously accumulated eigenvectors
+ to update the system.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, an eigenvalue did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+ Modified by Francoise Tisseur, University of Tennessee.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --dlamda;
+ --q2;
+ --indx;
+ --ctot;
+ --w;
+ --s;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*k < 0) {
+ *info = -1;
+ } else if (*n < *k) {
+ *info = -2;
+ } else if (*ldq < max(1,*n)) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAED3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 0) {
+ return 0;
+ }
+
+/*
+ Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can
+ be computed with high relative accuracy (barring over/underflow).
+ This is a problem on machines without a guard digit in
+ add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+ The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I),
+ which on any of these machines zeros out the bottommost
+ bit of DLAMDA(I) if it is 1; this makes the subsequent
+ subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation
+ occurs. On binary machines with a guard digit (almost all
+ machines) it does not change DLAMDA(I) at all. On hexadecimal
+ and decimal machines with a guard digit, it slightly
+ changes the bottommost bits of DLAMDA(I). It does not account
+ for hexadecimal or decimal machines without guard digits
+ (we know of none). We use a subroutine call to compute
+   2*DLAMDA(I) to prevent optimizing compilers from eliminating
+ this code.
+*/
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = slamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
+/* L10: */
+ }
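+
+/*
+   Editor's note (based on the standard LAPACK SLAMC3): the call
+   computes DLAMDA(I) + DLAMDA(I) through an external function so
+   the sum is forced into a stored REAL, preventing the compiler
+   from folding 2*DLAMDA(I) - DLAMDA(I) back to DLAMDA(I) at
+   extended register precision.
+*/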
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ slaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
+ info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ goto L120;
+ }
+/* L20: */
+ }
+
+ if (*k == 1) {
+ goto L110;
+ }
+ if (*k == 2) {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ w[1] = q[j * q_dim1 + 1];
+ w[2] = q[j * q_dim1 + 2];
+ ii = indx[1];
+ q[j * q_dim1 + 1] = w[ii];
+ ii = indx[2];
+ q[j * q_dim1 + 2] = w[ii];
+/* L30: */
+ }
+ goto L110;
+ }
+
+/* Compute updated W. */
+
+ scopy_(k, &w[1], &c__1, &s[1], &c__1);
+
+/* Initialize W(I) = Q(I,I) */
+
+ i__1 = *ldq + 1;
+ scopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L40: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L50: */
+ }
+/* L60: */
+ }
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__1 = sqrt(-w[i__]);
+ w[i__] = r_sign(&r__1, &s[i__]);
+/* L70: */
+ }
+
+/* Compute eigenvectors of the modified rank-1 modification. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ s[i__] = w[i__] / q[i__ + j * q_dim1];
+/* L80: */
+ }
+ temp = snrm2_(k, &s[1], &c__1);
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ ii = indx[i__];
+ q[i__ + j * q_dim1] = s[ii] / temp;
+/* L90: */
+ }
+/* L100: */
+ }
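+
+/*
+   Editor's note (hedged reading of the loops above): on entry to
+   this block, column j of Q holds the differences DLAMDA(i) - D(j)
+   produced by SLAED4, so S(i) = W(i)/Q(i,j) is the classical
+   eigenvector component z_i / (d_i - lambda_j); each column is then
+   normalized and its rows permuted by INDX to match the column
+   grouping of Q2.
+*/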
+
+/* Compute the updated eigenvectors. */
+
+L110:
+
+ n2 = *n - *n1;
+ n12 = ctot[1] + ctot[2];
+ n23 = ctot[2] + ctot[3];
+
+ slacpy_("A", &n23, k, &q[ctot[1] + 1 + q_dim1], ldq, &s[1], &n23);
+ iq2 = *n1 * n12 + 1;
+ if (n23 != 0) {
+ sgemm_("N", "N", &n2, k, &n23, &c_b1011, &q2[iq2], &n2, &s[1], &n23, &
+ c_b320, &q[*n1 + 1 + q_dim1], ldq);
+ } else {
+ slaset_("A", &n2, k, &c_b320, &c_b320, &q[*n1 + 1 + q_dim1], ldq);
+ }
+
+ slacpy_("A", &n12, k, &q[q_offset], ldq, &s[1], &n12);
+ if (n12 != 0) {
+ sgemm_("N", "N", n1, k, &n12, &c_b1011, &q2[1], n1, &s[1], &n12, &
+ c_b320, &q[q_offset], ldq);
+ } else {
+ slaset_("A", n1, k, &c_b320, &c_b320, &q[q_dim1 + 1], ldq);
+ }
+
+
+L120:
+ return 0;
+
+/* End of SLAED3 */
+
+} /* slaed3_ */
+
+/* Subroutine */ int slaed4_(integer *n, integer *i__, real *d__, real *z__,
+ real *delta, real *rho, real *dlam, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real a, b, c__;
+ static integer j;
+ static real w;
+ static integer ii;
+ static real dw, zz[3];
+ static integer ip1;
+ static real del, eta, phi, eps, tau, psi;
+ static integer iim1, iip1;
+ static real dphi, dpsi;
+ static integer iter;
+ static real temp, prew, temp1, dltlb, dltub, midpt;
+ static integer niter;
+ static logical swtch;
+ extern /* Subroutine */ int slaed5_(integer *, real *, real *, real *,
+ real *, real *), slaed6_(integer *, logical *, real *, real *,
+ real *, real *, real *, integer *);
+ static logical swtch3;
+ extern doublereal slamch_(char *);
+ static logical orgati;
+ static real erretm, rhoinv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ December 23, 1999
+
+
+ Purpose
+ =======
+
+ This subroutine computes the I-th updated eigenvalue of a symmetric
+ rank-one modification to a diagonal matrix whose elements are
+   given in the array d, under the assumptions that
+
+ D(i) < D(j) for i < j
+
+ and that RHO > 0. This is arranged by the calling routine, and
+ entails no loss of generality. The rank-one modified system is thus
+
+ diag( D ) + RHO * Z * Z_transpose,
+
+ where we assume the Euclidean norm of Z is 1.
+
+ The method consists of approximating the rational functions in the
+ secular equation by simpler interpolating rational functions.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The length of all arrays.
+
+ I (input) INTEGER
+ The index of the eigenvalue to be computed. 1 <= I <= N.
+
+ D (input) REAL array, dimension (N)
+ The original eigenvalues. It is assumed that they are in
+ order, D(I) < D(J) for I < J.
+
+ Z (input) REAL array, dimension (N)
+ The components of the updating vector.
+
+ DELTA (output) REAL array, dimension (N)
+ If N .ne. 1, DELTA contains (D(j) - lambda_I) in its j-th
+ component. If N = 1, then DELTA(1) = 1. The vector DELTA
+ contains the information necessary to construct the
+ eigenvectors.
+
+ RHO (input) REAL
+ The scalar in the symmetric updating formula.
+
+ DLAM (output) REAL
+ The computed lambda_I, the I-th updated eigenvalue.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ > 0: if INFO = 1, the updating process failed.
+
+ Internal Parameters
+ ===================
+
+ Logical variable ORGATI (origin-at-i?) is used for distinguishing
+ whether D(i) or D(i+1) is treated as the origin.
+
+ ORGATI = .true. origin at i
+ ORGATI = .false. origin at i+1
+
+ Logical variable SWTCH3 (switch-for-3-poles?) is for noting
+ if we are working with THREE poles!
+
+ MAXIT is the maximum number of iterations allowed for each
+ eigenvalue.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ren-Cang Li, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Since this routine is called in an inner loop, we do no argument
+ checking.
+
+ Quick return for N=1 and 2.
+*/
+
+ /* Parameter adjustments */
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n == 1) {
+
+/* Presumably, I=1 upon entry */
+
+ *dlam = d__[1] + *rho * z__[1] * z__[1];
+ delta[1] = 1.f;
+ return 0;
+ }
+ if (*n == 2) {
+ slaed5_(i__, &d__[1], &z__[1], &delta[1], rho, dlam);
+ return 0;
+ }
+
+/* Compute machine epsilon */
+
+ eps = slamch_("Epsilon");
+ rhoinv = 1.f / *rho;
+
+/* The case I = N */
+
+ if (*i__ == *n) {
+
+/* Initialize some basic variables */
+
+ ii = *n - 1;
+ niter = 1;
+
+/* Calculate initial guess */
+
+ midpt = *rho / 2.f;
+
+/*
+ If ||Z||_2 is not one, then TEMP should be set to
+ RHO * ||Z||_2^2 / TWO
+*/
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - midpt;
+/* L10: */
+ }
+
+ psi = 0.f;
+ i__1 = *n - 2;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / delta[j];
+/* L20: */
+ }
+
+ c__ = rhoinv + psi;
+ w = c__ + z__[ii] * z__[ii] / delta[ii] + z__[*n] * z__[*n] / delta[*
+ n];
+
+ if (w <= 0.f) {
+ temp = z__[*n - 1] * z__[*n - 1] / (d__[*n] - d__[*n - 1] + *rho)
+ + z__[*n] * z__[*n] / *rho;
+ if (c__ <= temp) {
+ tau = *rho;
+ } else {
+ del = d__[*n] - d__[*n - 1];
+ a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n]
+ ;
+ b = z__[*n] * z__[*n] * del;
+ if (a < 0.f) {
+ tau = b * 2.f / (sqrt(a * a + b * 4.f * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4.f * c__)) / (c__ * 2.f);
+ }
+ }
+
+/*
+ It can be proved that
+ D(N)+RHO/2 <= LAMBDA(N) < D(N)+TAU <= D(N)+RHO
+*/
+
+ dltlb = midpt;
+ dltub = *rho;
+ } else {
+ del = d__[*n] - d__[*n - 1];
+ a = -c__ * del + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
+ b = z__[*n] * z__[*n] * del;
+ if (a < 0.f) {
+ tau = b * 2.f / (sqrt(a * a + b * 4.f * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4.f * c__)) / (c__ * 2.f);
+ }
+
+/*
+ It can be proved that
+ D(N) < D(N)+TAU < LAMBDA(N) < D(N)+RHO/2
+*/
+
+ dltlb = 0.f;
+ dltub = midpt;
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - tau;
+/* L30: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L40: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) * (
+ dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ *dlam = d__[*i__] + tau;
+ goto L250;
+ }
+
+ if (w <= 0.f) {
+ dltlb = dmax(dltlb,tau);
+ } else {
+ dltub = dmin(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
+ a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] * (
+ dpsi + dphi);
+ b = delta[*n - 1] * delta[*n] * w;
+ if (c__ < 0.f) {
+ c__ = dabs(c__);
+ }
+ if (c__ == 0.f) {
+/*
+ ETA = B/A
+ ETA = RHO - TAU
+*/
+ eta = dltub - tau;
+ } else if (a >= 0.f) {
+ eta = (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) / (
+ c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ negative otherwise. If for some reason roundoff
+ nevertheless makes eta*w > 0, we fall back to a single
+ Newton step, which guarantees eta*w < 0.
+*/
+
+ if (w * eta > 0.f) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = tau + eta;
+ if ((temp > dltub) || (temp < dltlb)) {
+ if (w < 0.f) {
+ eta = (dltub - tau) / 2.f;
+ } else {
+ eta = (dltlb - tau) / 2.f;
+ }
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L50: */
+ }
+
+ tau += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L60: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) * (
+ dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Main loop to update the values of the array DELTA */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 30; ++niter) {
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ *dlam = d__[*i__] + tau;
+ goto L250;
+ }
+
+ if (w <= 0.f) {
+ dltlb = dmax(dltlb,tau);
+ } else {
+ dltub = dmin(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ c__ = w - delta[*n - 1] * dpsi - delta[*n] * dphi;
+ a = (delta[*n - 1] + delta[*n]) * w - delta[*n - 1] * delta[*n] *
+ (dpsi + dphi);
+ b = delta[*n - 1] * delta[*n] * w;
+ if (a >= 0.f) {
+ eta = (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ negative otherwise. If for some reason roundoff
+ nevertheless makes eta*w > 0, we fall back to a single
+ Newton step, which guarantees eta*w < 0.
+*/
+
+ if (w * eta > 0.f) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = tau + eta;
+ if ((temp > dltub) || (temp < dltlb)) {
+ if (w < 0.f) {
+ eta = (dltub - tau) / 2.f;
+ } else {
+ eta = (dltlb - tau) / 2.f;
+ }
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L70: */
+ }
+
+ tau += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L80: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / delta[*n];
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) *
+ (dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+/* L90: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+ *dlam = d__[*i__] + tau;
+ goto L250;
+
+/* End for the case I = N */
+
+ } else {
+
+/* The case for I < N */
+
+ niter = 1;
+ ip1 = *i__ + 1;
+
+/* Calculate initial guess */
+
+ del = d__[ip1] - d__[*i__];
+ midpt = del / 2.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - midpt;
+/* L100: */
+ }
+
+ psi = 0.f;
+ i__1 = *i__ - 1;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / delta[j];
+/* L110: */
+ }
+
+ phi = 0.f;
+ i__1 = *i__ + 2;
+ for (j = *n; j >= i__1; --j) {
+ phi += z__[j] * z__[j] / delta[j];
+/* L120: */
+ }
+ c__ = rhoinv + psi + phi;
+ w = c__ + z__[*i__] * z__[*i__] / delta[*i__] + z__[ip1] * z__[ip1] /
+ delta[ip1];
+
+ if (w > 0.f) {
+
+/*
+ d(i)< the ith eigenvalue < (d(i)+d(i+1))/2
+
+ We choose d(i) as origin.
+*/
+
+ orgati = TRUE_;
+ a = c__ * del + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
+ b = z__[*i__] * z__[*i__] * del;
+ if (a > 0.f) {
+ tau = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ } else {
+ tau = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ }
+ dltlb = 0.f;
+ dltub = midpt;
+ } else {
+
+/*
+ (d(i)+d(i+1))/2 <= the ith eigenvalue < d(i+1)
+
+ We choose d(i+1) as origin.
+*/
+
+ orgati = FALSE_;
+ a = c__ * del - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
+ b = z__[ip1] * z__[ip1] * del;
+ if (a < 0.f) {
+ tau = b * 2.f / (a - sqrt((r__1 = a * a + b * 4.f * c__, dabs(
+ r__1))));
+ } else {
+ tau = -(a + sqrt((r__1 = a * a + b * 4.f * c__, dabs(r__1))))
+ / (c__ * 2.f);
+ }
+ dltlb = -midpt;
+ dltub = 0.f;
+ }
+
+ if (orgati) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - tau;
+/* L130: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[ip1] - tau;
+/* L140: */
+ }
+ }
+ if (orgati) {
+ ii = *i__;
+ } else {
+ ii = *i__ + 1;
+ }
+ iim1 = ii - 1;
+ iip1 = ii + 1;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L150: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L160: */
+ }
+
+ w = rhoinv + phi + psi;
+
+/*
+ W is the value of the secular function with
+ its ii-th element removed.
+*/
+
+ swtch3 = FALSE_;
+ if (orgati) {
+ if (w < 0.f) {
+ swtch3 = TRUE_;
+ }
+ } else {
+ if (w > 0.f) {
+ swtch3 = TRUE_;
+ }
+ }
+ if ((ii == 1) || (ii == *n)) {
+ swtch3 = FALSE_;
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w += temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) * 3.f
+ + dabs(tau) * dw;
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+ goto L250;
+ }
+
+ if (w <= 0.f) {
+ dltlb = dmax(dltlb,tau);
+ } else {
+ dltub = dmin(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ if (! swtch3) {
+ if (orgati) {
+/* Computing 2nd power */
+ r__1 = z__[*i__] / delta[*i__];
+ c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (r__1 *
+ r__1);
+ } else {
+/* Computing 2nd power */
+ r__1 = z__[ip1] / delta[ip1];
+ c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) * (r__1 *
+ r__1);
+ }
+ a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1] *
+ dw;
+ b = delta[*i__] * delta[ip1] * w;
+ if (c__ == 0.f) {
+ if (a == 0.f) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + delta[ip1] * delta[ip1] *
+ (dpsi + dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + delta[*i__] * delta[*i__] *
+ (dpsi + dphi);
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.f) {
+ eta = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ temp = rhoinv + psi + phi;
+ if (orgati) {
+ temp1 = z__[iim1] / delta[iim1];
+ temp1 *= temp1;
+ c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1] - d__[
+ iip1]) * temp1;
+ zz[0] = z__[iim1] * z__[iim1];
+ zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 + dphi);
+ } else {
+ temp1 = z__[iip1] / delta[iip1];
+ temp1 *= temp1;
+ c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1] - d__[
+ iim1]) * temp1;
+ zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi - temp1));
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ zz[1] = z__[ii] * z__[ii];
+ slaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta, info);
+ if (*info != 0) {
+ goto L250;
+ }
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ negative otherwise. If for some reason roundoff
+ nevertheless makes eta*w > 0, we fall back to a single
+ Newton step, which guarantees eta*w < 0.
+*/
+
+ if (w * eta >= 0.f) {
+ eta = -w / dw;
+ }
+ temp = tau + eta;
+ if ((temp > dltub) || (temp < dltlb)) {
+ if (w < 0.f) {
+ eta = (dltub - tau) / 2.f;
+ } else {
+ eta = (dltlb - tau) / 2.f;
+ }
+ }
+
+ prew = w;
+
+/* L170: */
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L180: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L190: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L200: */
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) * 3.f
+ + (r__1 = tau + eta, dabs(r__1)) * dw;
+
+ swtch = FALSE_;
+ if (orgati) {
+ if (-w > dabs(prew) / 10.f) {
+ swtch = TRUE_;
+ }
+ } else {
+ if (w > dabs(prew) / 10.f) {
+ swtch = TRUE_;
+ }
+ }
+
+ tau += eta;
+
+/* Main loop to update the values of the array DELTA */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 30; ++niter) {
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+ goto L250;
+ }
+
+ if (w <= 0.f) {
+ dltlb = dmax(dltlb,tau);
+ } else {
+ dltub = dmin(dltub,tau);
+ }
+
+/* Calculate the new step */
+
+ if (! swtch3) {
+ if (! swtch) {
+ if (orgati) {
+/* Computing 2nd power */
+ r__1 = z__[*i__] / delta[*i__];
+ c__ = w - delta[ip1] * dw - (d__[*i__] - d__[ip1]) * (
+ r__1 * r__1);
+ } else {
+/* Computing 2nd power */
+ r__1 = z__[ip1] / delta[ip1];
+ c__ = w - delta[*i__] * dw - (d__[ip1] - d__[*i__]) *
+ (r__1 * r__1);
+ }
+ } else {
+ temp = z__[ii] / delta[ii];
+ if (orgati) {
+ dpsi += temp * temp;
+ } else {
+ dphi += temp * temp;
+ }
+ c__ = w - delta[*i__] * dpsi - delta[ip1] * dphi;
+ }
+ a = (delta[*i__] + delta[ip1]) * w - delta[*i__] * delta[ip1]
+ * dw;
+ b = delta[*i__] * delta[ip1] * w;
+ if (c__ == 0.f) {
+ if (a == 0.f) {
+ if (! swtch) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + delta[ip1] *
+ delta[ip1] * (dpsi + dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + delta[*i__] * delta[
+ *i__] * (dpsi + dphi);
+ }
+ } else {
+ a = delta[*i__] * delta[*i__] * dpsi + delta[ip1]
+ * delta[ip1] * dphi;
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.f) {
+ eta = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1))
+ )) / (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__,
+ dabs(r__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ temp = rhoinv + psi + phi;
+ if (swtch) {
+ c__ = temp - delta[iim1] * dpsi - delta[iip1] * dphi;
+ zz[0] = delta[iim1] * delta[iim1] * dpsi;
+ zz[2] = delta[iip1] * delta[iip1] * dphi;
+ } else {
+ if (orgati) {
+ temp1 = z__[iim1] / delta[iim1];
+ temp1 *= temp1;
+ c__ = temp - delta[iip1] * (dpsi + dphi) - (d__[iim1]
+ - d__[iip1]) * temp1;
+ zz[0] = z__[iim1] * z__[iim1];
+ zz[2] = delta[iip1] * delta[iip1] * (dpsi - temp1 +
+ dphi);
+ } else {
+ temp1 = z__[iip1] / delta[iip1];
+ temp1 *= temp1;
+ c__ = temp - delta[iim1] * (dpsi + dphi) - (d__[iip1]
+ - d__[iim1]) * temp1;
+ zz[0] = delta[iim1] * delta[iim1] * (dpsi + (dphi -
+ temp1));
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ }
+ slaed6_(&niter, &orgati, &c__, &delta[iim1], zz, &w, &eta,
+ info);
+ if (*info != 0) {
+ goto L250;
+ }
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ negative otherwise. If for some reason roundoff
+ nevertheless makes eta*w > 0, we fall back to a single
+ Newton step, which guarantees eta*w < 0.
+*/
+
+ if (w * eta >= 0.f) {
+ eta = -w / dw;
+ }
+ temp = tau + eta;
+ if ((temp > dltub) || (temp < dltlb)) {
+ if (w < 0.f) {
+ eta = (dltub - tau) / 2.f;
+ } else {
+ eta = (dltlb - tau) / 2.f;
+ }
+ }
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+/* L210: */
+ }
+
+ tau += eta;
+ prew = w;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / delta[j];
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L220: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / delta[j];
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L230: */
+ }
+
+ temp = z__[ii] / delta[ii];
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) *
+ 3.f + dabs(tau) * dw;
+ if (w * prew > 0.f && dabs(w) > dabs(prew) / 10.f) {
+ swtch = ! swtch;
+ }
+
+/* L240: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+ if (orgati) {
+ *dlam = d__[*i__] + tau;
+ } else {
+ *dlam = d__[ip1] + tau;
+ }
+
+ }
+
+L250:
+
+ return 0;
+
+/* End of SLAED4 */
+
+} /* slaed4_ */
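+
+/*
+   Editor's sketch, not part of LAPACK: the secular function whose root
+   SLAED4 locates.  With ||Z||_2 = 1 and RHO > 0, the I-th updated
+   eigenvalue of diag(D) + RHO*Z*Z' is a root of
+
+       f(lambda) = 1/RHO + sum_j Z(j)**2 / ( D(j) - lambda ).
+
+   The hypothetical helper below evaluates f naively (0-based arrays).
+   The routine above instead keeps the shifted differences
+   DELTA(j) = D(j) - lambda up to date, which avoids cancellation when
+   lambda approaches a pole.
+*/
+static real secular_f(integer n, real *d, real *z, real rho, real lam)
+{
+    integer j;
+    real w = 1.f / rho;                    /* the RHOINV term         */
+    for (j = 0; j < n; ++j) {
+        w += z[j] * z[j] / (d[j] - lam);   /* one pole per entry of D */
+    }
+    return w;                              /* a root of f is an updated
+                                              eigenvalue              */
+}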
+
+/* Subroutine */ int slaed5_(integer *i__, real *d__, real *z__, real *delta,
+ real *rho, real *dlam)
+{
+ /* System generated locals */
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real b, c__, w, del, tau, temp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ This subroutine computes the I-th eigenvalue of a symmetric rank-one
+ modification of a 2-by-2 diagonal matrix
+
+ diag( D ) + RHO * Z * transpose(Z) .
+
+ The diagonal elements in the array D are assumed to satisfy
+
+ D(i) < D(j) for i < j .
+
+ We also assume RHO > 0 and that the Euclidean norm of the vector
+ Z is one.
+
+ Arguments
+ =========
+
+ I (input) INTEGER
+ The index of the eigenvalue to be computed. I = 1 or I = 2.
+
+ D (input) REAL array, dimension (2)
+ The original eigenvalues. We assume D(1) < D(2).
+
+ Z (input) REAL array, dimension (2)
+ The components of the updating vector.
+
+ DELTA (output) REAL array, dimension (2)
+ The vector DELTA contains the information necessary
+ to construct the eigenvectors.
+
+ RHO (input) REAL
+ The scalar in the symmetric updating formula.
+
+ DLAM (output) REAL
+ The computed lambda_I, the I-th updated eigenvalue.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ren-Cang Li, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ del = d__[2] - d__[1];
+ if (*i__ == 1) {
+ w = *rho * 2.f * (z__[2] * z__[2] - z__[1] * z__[1]) / del + 1.f;
+ if (w > 0.f) {
+ b = del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[1] * z__[1] * del;
+
+/* B > ZERO, always */
+
+ tau = c__ * 2.f / (b + sqrt((r__1 = b * b - c__ * 4.f, dabs(r__1))
+ ));
+ *dlam = d__[1] + tau;
+ delta[1] = -z__[1] / tau;
+ delta[2] = z__[2] / (del - tau);
+ } else {
+ b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[2] * z__[2] * del;
+ if (b > 0.f) {
+ tau = c__ * -2.f / (b + sqrt(b * b + c__ * 4.f));
+ } else {
+ tau = (b - sqrt(b * b + c__ * 4.f)) / 2.f;
+ }
+ *dlam = d__[2] + tau;
+ delta[1] = -z__[1] / (del + tau);
+ delta[2] = -z__[2] / tau;
+ }
+ temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
+ delta[1] /= temp;
+ delta[2] /= temp;
+ } else {
+
+/* Now I=2 */
+
+ b = -del + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[2] * z__[2] * del;
+ if (b > 0.f) {
+ tau = (b + sqrt(b * b + c__ * 4.f)) / 2.f;
+ } else {
+ tau = c__ * 2.f / (-b + sqrt(b * b + c__ * 4.f));
+ }
+ *dlam = d__[2] + tau;
+ delta[1] = -z__[1] / (del + tau);
+ delta[2] = -z__[2] / tau;
+ temp = sqrt(delta[1] * delta[1] + delta[2] * delta[2]);
+ delta[1] /= temp;
+ delta[2] /= temp;
+ }
+ return 0;
+
+/* End of SLAED5 */
+
+} /* slaed5_ */
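+
+/*
+   Editor's sketch, not part of LAPACK: how a caller might invoke SLAED5
+   for the 2-by-2 case.  Values are illustrative only; D must satisfy
+   D(1) < D(2) and Z must have unit Euclidean norm.
+*/
+static void slaed5_example(void)
+{
+    integer i = 1;                 /* ask for the first eigenvalue  */
+    real d[2] = { 1.f, 3.f };      /* D(1) < D(2)                   */
+    real z[2] = { .6f, .8f };      /* .36 + .64 = 1, so ||Z||_2 = 1 */
+    real delta[2], dlam;
+    real rho = .5f;                /* RHO > 0                       */
+
+    slaed5_(&i, d, z, delta, &rho, &dlam);
+    /* dlam now holds lambda_1 of diag(d) + rho * z * z'; delta holds
+       the normalized differences used for eigenvector construction. */
+}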
+
+/* Subroutine */ int slaed6_(integer *kniter, logical *orgati, real *rho,
+ real *d__, real *z__, real *finit, real *tau, integer *info)
+{
+ /* Initialized data */
+
+ static logical first = TRUE_;
+
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal), log(doublereal), pow_ri(real *, integer *);
+
+ /* Local variables */
+ static real a, b, c__, f;
+ static integer i__;
+ static real fc, df, ddf, eta, eps, base;
+ static integer iter;
+ static real temp, temp1, temp2, temp3, temp4;
+ static logical scale;
+ static integer niter;
+ static real small1, small2, sminv1, sminv2, dscale[3], sclfac;
+ extern doublereal slamch_(char *);
+ static real zscale[3], erretm, sclinv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLAED6 computes the positive or negative root (closest to the origin)
+ of
+ f(x) = rho + z(1)/(d(1)-x) + z(2)/(d(2)-x) + z(3)/(d(3)-x)
+
+ It is assumed that
+
+ if ORGATI = .true. the root is between d(2) and d(3);
+ otherwise it is between d(1) and d(2)
+
+ This routine will be called by SLAED4 when necessary. In most cases,
+ the root sought is the smallest in magnitude, though in some
+ extremely rare situations it might not be.
+
+ Arguments
+ =========
+
+ KNITER (input) INTEGER
+ Refer to SLAED4 for its significance.
+
+ ORGATI (input) LOGICAL
+ If ORGATI is true, the needed root is between d(2) and
+ d(3); otherwise it is between d(1) and d(2). See
+ SLAED4 for further details.
+
+ RHO (input) REAL
+ Refer to the equation f(x) above.
+
+ D (input) REAL array, dimension (3)
+ D satisfies d(1) < d(2) < d(3).
+
+ Z (input) REAL array, dimension (3)
+ Each of the elements in z must be positive.
+
+ FINIT (input) REAL
+ The value of f at 0, supplied by the caller because it can
+ be computed more accurately there than inside this routine.
+
+ TAU (output) REAL
+ The root of the equation f(x).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ > 0: if INFO = 1, failure to converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ren-Cang Li, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+*/
+
+ /* Parameter adjustments */
+ --z__;
+ --d__;
+
+ /* Function Body */
+
+ *info = 0;
+
+ niter = 1;
+ *tau = 0.f;
+ if (*kniter == 2) {
+ if (*orgati) {
+ temp = (d__[3] - d__[2]) / 2.f;
+ c__ = *rho + z__[1] / (d__[1] - d__[2] - temp);
+ a = c__ * (d__[2] + d__[3]) + z__[2] + z__[3];
+ b = c__ * d__[2] * d__[3] + z__[2] * d__[3] + z__[3] * d__[2];
+ } else {
+ temp = (d__[1] - d__[2]) / 2.f;
+ c__ = *rho + z__[3] / (d__[3] - d__[2] - temp);
+ a = c__ * (d__[1] + d__[2]) + z__[1] + z__[2];
+ b = c__ * d__[1] * d__[2] + z__[1] * d__[2] + z__[2] * d__[1];
+ }
+/* Computing MAX */
+ r__1 = dabs(a), r__2 = dabs(b), r__1 = max(r__1,r__2), r__2 = dabs(
+ c__);
+ temp = dmax(r__1,r__2);
+ a /= temp;
+ b /= temp;
+ c__ /= temp;
+ if (c__ == 0.f) {
+ *tau = b / a;
+ } else if (a <= 0.f) {
+ *tau = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) / (
+ c__ * 2.f);
+ } else {
+ *tau = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+ temp = *rho + z__[1] / (d__[1] - *tau) + z__[2] / (d__[2] - *tau) +
+ z__[3] / (d__[3] - *tau);
+ if (dabs(*finit) <= dabs(temp)) {
+ *tau = 0.f;
+ }
+ }
+
+/*
+ On first call to routine, get machine parameters for
+ possible scaling to avoid overflow
+*/
+
+ if (first) {
+ eps = slamch_("Epsilon");
+ base = slamch_("Base");
+ i__1 = (integer) (log(slamch_("SafMin")) / log(base) / 3.f)
+ ;
+ small1 = pow_ri(&base, &i__1);
+ sminv1 = 1.f / small1;
+ small2 = small1 * small1;
+ sminv2 = sminv1 * sminv1;
+ first = FALSE_;
+ }
+
+/*
+ Determine if scaling of inputs necessary to avoid overflow
+ when computing 1/TEMP**3
+*/
+
+ if (*orgati) {
+/* Computing MIN */
+ r__3 = (r__1 = d__[2] - *tau, dabs(r__1)), r__4 = (r__2 = d__[3] - *
+ tau, dabs(r__2));
+ temp = dmin(r__3,r__4);
+ } else {
+/* Computing MIN */
+ r__3 = (r__1 = d__[1] - *tau, dabs(r__1)), r__4 = (r__2 = d__[2] - *
+ tau, dabs(r__2));
+ temp = dmin(r__3,r__4);
+ }
+ scale = FALSE_;
+ if (temp <= small1) {
+ scale = TRUE_;
+ if (temp <= small2) {
+
+/* Scale up by power of radix nearest 1/SAFMIN**(2/3) */
+
+ sclfac = sminv2;
+ sclinv = small2;
+ } else {
+
+/* Scale up by power of radix nearest 1/SAFMIN**(1/3) */
+
+ sclfac = sminv1;
+ sclinv = small1;
+ }
+
+/* Scaling up safe because D, Z, TAU scaled elsewhere to be O(1) */
+
+ for (i__ = 1; i__ <= 3; ++i__) {
+ dscale[i__ - 1] = d__[i__] * sclfac;
+ zscale[i__ - 1] = z__[i__] * sclfac;
+/* L10: */
+ }
+ *tau *= sclfac;
+ } else {
+
+/* Copy D and Z to DSCALE and ZSCALE */
+
+ for (i__ = 1; i__ <= 3; ++i__) {
+ dscale[i__ - 1] = d__[i__];
+ zscale[i__ - 1] = z__[i__];
+/* L20: */
+ }
+ }
+
+ fc = 0.f;
+ df = 0.f;
+ ddf = 0.f;
+ for (i__ = 1; i__ <= 3; ++i__) {
+ temp = 1.f / (dscale[i__ - 1] - *tau);
+ temp1 = zscale[i__ - 1] * temp;
+ temp2 = temp1 * temp;
+ temp3 = temp2 * temp;
+ fc += temp1 / dscale[i__ - 1];
+ df += temp2;
+ ddf += temp3;
+/* L30: */
+ }
+ f = *finit + *tau * fc;
+
+ if (dabs(f) <= 0.f) {
+ goto L60;
+ }
+
+/*
+ Iteration begins
+
+ It is not hard to see that
+
+ 1) the iterates increase monotonically if FINIT < 0;
+
+ 2) the iterates decrease monotonically if FINIT > 0.
+*/
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 20; ++niter) {
+
+ if (*orgati) {
+ temp1 = dscale[1] - *tau;
+ temp2 = dscale[2] - *tau;
+ } else {
+ temp1 = dscale[0] - *tau;
+ temp2 = dscale[1] - *tau;
+ }
+ a = (temp1 + temp2) * f - temp1 * temp2 * df;
+ b = temp1 * temp2 * f;
+ c__ = f - (temp1 + temp2) * df + temp1 * temp2 * ddf;
+/* Computing MAX */
+ r__1 = dabs(a), r__2 = dabs(b), r__1 = max(r__1,r__2), r__2 = dabs(
+ c__);
+ temp = dmax(r__1,r__2);
+ a /= temp;
+ b /= temp;
+ c__ /= temp;
+ if (c__ == 0.f) {
+ eta = b / a;
+ } else if (a <= 0.f) {
+ eta = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) / (
+ c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+ if (f * eta >= 0.f) {
+ eta = -f / df;
+ }
+
+ temp = eta + *tau;
+ if (*orgati) {
+ if (eta > 0.f && temp >= dscale[2]) {
+ eta = (dscale[2] - *tau) / 2.f;
+ }
+ if (eta < 0.f && temp <= dscale[1]) {
+ eta = (dscale[1] - *tau) / 2.f;
+ }
+ } else {
+ if (eta > 0.f && temp >= dscale[1]) {
+ eta = (dscale[1] - *tau) / 2.f;
+ }
+ if (eta < 0.f && temp <= dscale[0]) {
+ eta = (dscale[0] - *tau) / 2.f;
+ }
+ }
+ *tau += eta;
+
+ fc = 0.f;
+ erretm = 0.f;
+ df = 0.f;
+ ddf = 0.f;
+ for (i__ = 1; i__ <= 3; ++i__) {
+ temp = 1.f / (dscale[i__ - 1] - *tau);
+ temp1 = zscale[i__ - 1] * temp;
+ temp2 = temp1 * temp;
+ temp3 = temp2 * temp;
+ temp4 = temp1 / dscale[i__ - 1];
+ fc += temp4;
+ erretm += dabs(temp4);
+ df += temp2;
+ ddf += temp3;
+/* L40: */
+ }
+ f = *finit + *tau * fc;
+ erretm = (dabs(*finit) + dabs(*tau) * erretm) * 8.f + dabs(*tau) * df;
+ if (dabs(f) <= eps * erretm) {
+ goto L60;
+ }
+/* L50: */
+ }
+ *info = 1;
+L60:
+
+/* Undo scaling */
+
+ if (scale) {
+ *tau *= sclinv;
+ }
+ return 0;
+
+/* End of SLAED6 */
+
+} /* slaed6_ */
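+
+/*
+   Editor's sketch, not part of LAPACK: a direct evaluation of the
+   three-pole function f(x) that SLAED6 roots.  The routine above works
+   on scaled copies of D and Z and tracks the derivatives DF and DDF
+   for its modified Newton step; this hypothetical helper only shows
+   the function itself (0-based arrays).
+*/
+static real three_pole_f(real rho, real *d, real *z, real x)
+{
+    /* f(x) = rho + z(1)/(d(1)-x) + z(2)/(d(2)-x) + z(3)/(d(3)-x) */
+    return rho + z[0] / (d[0] - x)
+               + z[1] / (d[1] - x)
+               + z[2] / (d[2] - x);
+}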
+
+/* Subroutine */ int slaed7_(integer *icompq, integer *n, integer *qsiz,
+ integer *tlvls, integer *curlvl, integer *curpbm, real *d__, real *q,
+ integer *ldq, integer *indxq, real *rho, integer *cutpnt, real *
+ qstore, integer *qptr, integer *prmptr, integer *perm, integer *
+ givptr, integer *givcol, real *givnum, real *work, integer *iwork,
+ integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, k, n1, n2, is, iw, iz, iq2, ptr, ldq2, indx, curr,
+ indxc;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer indxp;
+ extern /* Subroutine */ int slaed8_(integer *, integer *, integer *,
+ integer *, real *, real *, integer *, integer *, real *, integer *
+ , real *, real *, real *, integer *, real *, integer *, integer *,
+ integer *, real *, integer *, integer *, integer *), slaed9_(
+ integer *, integer *, integer *, integer *, real *, real *,
+ integer *, real *, real *, real *, real *, integer *, integer *),
+ slaeda_(integer *, integer *, integer *, integer *, integer *,
+ integer *, integer *, integer *, real *, real *, integer *, real *
+ , real *, integer *);
+ static integer idlmda;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slamrg_(
+ integer *, integer *, real *, integer *, integer *, integer *);
+ static integer coltyp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ SLAED7 computes the updated eigensystem of a diagonal
+ matrix after modification by a rank-one symmetric matrix. This
+ routine is used only for the eigenproblem which requires all
+ eigenvalues and optionally eigenvectors of a dense symmetric matrix
+ that has been reduced to tridiagonal form. SLAED1 handles
+ the case in which all eigenvalues and eigenvectors of a symmetric
+ tridiagonal matrix are desired.
+
+ T = Q(in) ( D(in) + RHO * Z*Z' ) Q'(in) = Q(out) * D(out) * Q'(out)
+
+ where Z = Q'u, u is a vector of length N with ones in the
+ CUTPNT-th and (CUTPNT+1)-th elements and zeros elsewhere.
+
+ The eigenvectors of the original matrix are stored in Q, and the
+ eigenvalues are in D. The algorithm consists of three stages:
+
+ The first stage consists of deflating the size of the problem
+ when there are multiple eigenvalues or when there is a zero in
+ the Z vector. For each such occurrence the dimension of the
+ secular equation problem is reduced by one. This stage is
+ performed by the routine SLAED8.
+
+ The second stage consists of calculating the updated
+ eigenvalues. This is done by finding the roots of the secular
+ equation via the routine SLAED4 (as called by SLAED9).
+ This routine also calculates the eigenvectors of the current
+ problem.
+
+ The final stage consists of computing the updated eigenvectors
+ directly using the updated eigenvalues. The eigenvectors for
+ the current problem are multiplied with the eigenvectors from
+ the overall problem.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ = 0: Compute eigenvalues only.
+ = 1: Compute eigenvectors of original dense symmetric matrix
+ also. On entry, Q contains the orthogonal matrix used
+ to reduce the original matrix to tridiagonal form.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ QSIZ (input) INTEGER
+ The dimension of the orthogonal matrix used to reduce
+ the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1.
+
+ TLVLS (input) INTEGER
+ The total number of merging levels in the overall divide and
+ conquer tree.
+
+ CURLVL (input) INTEGER
+ The current level in the overall merge routine,
+ 0 <= CURLVL <= TLVLS.
+
+ CURPBM (input) INTEGER
+ The current problem in the current level in the overall
+ merge routine (counting from upper left to lower right).
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the eigenvalues of the rank-1-perturbed matrix.
+ On exit, the eigenvalues of the repaired matrix.
+
+ Q (input/output) REAL array, dimension (LDQ, N)
+ On entry, the eigenvectors of the rank-1-perturbed matrix.
+ On exit, the eigenvectors of the repaired tridiagonal matrix.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ INDXQ (output) INTEGER array, dimension (N)
+ The permutation which will reintegrate the subproblem just
+ solved back into sorted order, i.e., D( INDXQ( I = 1, N ) )
+ will be in ascending order.
+
+ RHO (input) REAL
+ The subdiagonal element used to create the rank-1
+ modification.
+
+ CUTPNT (input) INTEGER
+ Contains the location of the last eigenvalue in the leading
+ sub-matrix. min(1,N) <= CUTPNT <= N.
+
+ QSTORE (input/output) REAL array, dimension (N**2+1)
+ Stores eigenvectors of submatrices encountered during
+ divide and conquer, packed together. QPTR points to
+ beginning of the submatrices.
+
+ QPTR (input/output) INTEGER array, dimension (N+2)
+ List of indices pointing to beginning of submatrices stored
+ in QSTORE. The submatrices are numbered starting at the
+ bottom left of the divide and conquer tree, from left to
+ right and bottom to top.
+
+ PRMPTR (input) INTEGER array, dimension (N lg N)
+ Contains a list of pointers which indicate where in PERM a
+ level's permutation is stored. PRMPTR(i+1) - PRMPTR(i)
+ indicates the size of the permutation and also the size of
+ the full, non-deflated problem.
+
+ PERM (input) INTEGER array, dimension (N lg N)
+ Contains the permutations (from deflation and sorting) to be
+ applied to each eigenblock.
+
+ GIVPTR (input) INTEGER array, dimension (N lg N)
+ Contains a list of pointers which indicate where in GIVCOL a
+ level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i)
+ indicates the number of Givens rotations.
+
+ GIVCOL (input) INTEGER array, dimension (2, N lg N)
+ Each pair of numbers indicates a pair of columns that take
+ part in a Givens rotation.
+
+ GIVNUM (input) REAL array, dimension (2, N lg N)
+ Each number indicates the S value to be used in the
+ corresponding Givens rotation.
+
+ WORK (workspace) REAL array, dimension (3*N+QSIZ*N)
+
+ IWORK (workspace) INTEGER array, dimension (4*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, an eigenvalue did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --qstore;
+ --qptr;
+ --prmptr;
+ --perm;
+ --givptr;
+ givcol -= 3;
+ givnum -= 3;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*icompq == 1 && *qsiz < *n) {
+ *info = -4;
+ } else if (*ldq < max(1,*n)) {
+ *info = -9;
+ } else if ((min(1,*n) > *cutpnt) || (*n < *cutpnt)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAED7", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/*
+ The following values are for bookkeeping purposes only. They are
+ integer pointers which indicate the portion of the workspace
+ used by a particular array in SLAED8 and SLAED9.
+*/
+
+ if (*icompq == 1) {
+ ldq2 = *qsiz;
+ } else {
+ ldq2 = *n;
+ }
+
+ iz = 1;
+ idlmda = iz + *n;
+ iw = idlmda + *n;
+ iq2 = iw + *n;
+ is = iq2 + *n * ldq2;
+
+ indx = 1;
+ indxc = indx + *n;
+ coltyp = indxc + *n;
+ indxp = coltyp + *n;
+
+/*
+ Form the z-vector which consists of the last row of Q_1 and the
+ first row of Q_2.
+*/
+
+ ptr = pow_ii(&c__2, tlvls) + 1;
+ i__1 = *curlvl - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *tlvls - i__;
+ ptr += pow_ii(&c__2, &i__2);
+/* L10: */
+ }
+ curr = ptr + *curpbm;
+ slaeda_(n, tlvls, curlvl, curpbm, &prmptr[1], &perm[1], &givptr[1], &
+ givcol[3], &givnum[3], &qstore[1], &qptr[1], &work[iz], &work[iz
+ + *n], info);
+
+/*
+ When solving the final problem, we no longer need the stored data,
+ so we will overwrite the data from this level onto the previously
+ used storage space.
+*/
+
+ if (*curlvl == *tlvls) {
+ qptr[curr] = 1;
+ prmptr[curr] = 1;
+ givptr[curr] = 1;
+ }
+
+/* Sort and Deflate eigenvalues. */
+
+ slaed8_(icompq, &k, n, qsiz, &d__[1], &q[q_offset], ldq, &indxq[1], rho,
+ cutpnt, &work[iz], &work[idlmda], &work[iq2], &ldq2, &work[iw], &
+ perm[prmptr[curr]], &givptr[curr + 1], &givcol[((givptr[curr]) <<
+ (1)) + 1], &givnum[((givptr[curr]) << (1)) + 1], &iwork[indxp], &
+ iwork[indx], info);
+ prmptr[curr + 1] = prmptr[curr] + *n;
+ givptr[curr + 1] += givptr[curr];
+
+/* Solve Secular Equation. */
+
+ if (k != 0) {
+ slaed9_(&k, &c__1, &k, n, &d__[1], &work[is], &k, rho, &work[idlmda],
+ &work[iw], &qstore[qptr[curr]], &k, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (*icompq == 1) {
+ sgemm_("N", "N", qsiz, &k, &k, &c_b1011, &work[iq2], &ldq2, &
+ qstore[qptr[curr]], &k, &c_b320, &q[q_offset], ldq);
+ }
+/* Computing 2nd power */
+ i__1 = k;
+ qptr[curr + 1] = qptr[curr] + i__1 * i__1;
+
+/* Prepare the INDXQ sorting permutation. */
+
+ n1 = k;
+ n2 = *n - k;
+ slamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &indxq[1]);
+ } else {
+ qptr[curr + 1] = qptr[curr];
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ indxq[i__] = i__;
+/* L20: */
+ }
+ }
+
+L30:
+ return 0;
+
+/* End of SLAED7 */
+
+} /* slaed7_ */
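+
+/*
+   Editor's sketch, not part of LAPACK: the pointer arithmetic SLAED7
+   uses to locate the current problem in the divide and conquer tree,
+   restated as a hypothetical stand-alone helper.  It mirrors the PTR
+   and CURR computation in the body above: start just past the
+   2**TLVLS leaf slots, skip the levels already merged, then offset by
+   CURPBM.
+*/
+static integer merge_tree_curr(integer tlvls, integer curlvl,
+                               integer curpbm)
+{
+    integer pow_ii(integer *, integer *);   /* f2c builtin, as above */
+    integer i, e, ptr;
+    integer two = 2;
+
+    ptr = pow_ii(&two, &tlvls) + 1;
+    for (i = 1; i <= curlvl - 1; ++i) {
+        e = tlvls - i;
+        ptr += pow_ii(&two, &e);            /* one finished level    */
+    }
+    return ptr + curpbm;                    /* CURR for this problem */
+}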
+
+/* Subroutine */ int slaed8_(integer *icompq, integer *k, integer *n, integer
+ *qsiz, real *d__, real *q, integer *ldq, integer *indxq, real *rho,
+ integer *cutpnt, real *z__, real *dlamda, real *q2, integer *ldq2,
+ real *w, integer *perm, integer *givptr, integer *givcol, real *
+ givnum, integer *indxp, integer *indx, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real c__;
+ static integer i__, j;
+ static real s, t;
+ static integer k2, n1, n2, jp, n1p1;
+ static real eps, tau, tol;
+ static integer jlam, imax, jmax;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *), sscal_(integer *, real *, real *,
+ integer *), scopy_(integer *, real *, integer *, real *, integer *
+ );
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slamrg_(integer *, integer *, real *, integer
+ *, integer *, integer *), slacpy_(char *, integer *, integer *,
+ real *, integer *, real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ SLAED8 merges the two sets of eigenvalues together into a single
+ sorted set. Then it tries to deflate the size of the problem.
+ There are two ways in which deflation can occur: when two or more
+ eigenvalues are close together or if there is a tiny element in the
+ Z vector. For each such occurrence the order of the related secular
+ equation problem is reduced by one.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ = 0: Compute eigenvalues only.
+ = 1: Compute eigenvectors of original dense symmetric matrix
+ also. On entry, Q contains the orthogonal matrix used
+ to reduce the original matrix to tridiagonal form.
+
+ K (output) INTEGER
+ The number of non-deflated eigenvalues, and the order of the
+ related secular equation.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ QSIZ (input) INTEGER
+ The dimension of the orthogonal matrix used to reduce
+ the full matrix to tridiagonal form. QSIZ >= N if ICOMPQ = 1.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the eigenvalues of the two submatrices to be
+ combined. On exit, the trailing (N-K) updated eigenvalues
+ (those which were deflated) sorted into increasing order.
+
+ Q (input/output) REAL array, dimension (LDQ,N)
+ If ICOMPQ = 0, Q is not referenced. Otherwise,
+ on entry, Q contains the eigenvectors of the partially solved
+ system which has been previously updated in matrix
+ multiplies with other partially solved eigensystems.
+ On exit, Q contains the trailing (N-K) updated eigenvectors
+ (those which were deflated) in its last N-K columns.
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max(1,N).
+
+ INDXQ (input) INTEGER array, dimension (N)
+ The permutation which separately sorts the two sub-problems
+ in D into ascending order. Note that elements in the second
+ half of this permutation must first have CUTPNT added to
+ their values in order to be accurate.
+
+ RHO (input/output) REAL
+ On entry, the off-diagonal element associated with the rank-1
+ cut which originally split the two submatrices which are now
+ being recombined.
+ On exit, RHO has been modified to the value required by
+ SLAED3.
+
+ CUTPNT (input) INTEGER
+ The location of the last eigenvalue in the leading
+ sub-matrix. min(1,N) <= CUTPNT <= N.
+
+ Z (input) REAL array, dimension (N)
+ On entry, Z contains the updating vector (the last row of
+ the first sub-eigenvector matrix and the first row of the
+ second sub-eigenvector matrix).
+ On exit, the contents of Z are destroyed by the updating
+ process.
+
+ DLAMDA (output) REAL array, dimension (N)
+ A copy of the first K eigenvalues which will be used by
+ SLAED3 to form the secular equation.
+
+ Q2 (output) REAL array, dimension (LDQ2,N)
+ If ICOMPQ = 0, Q2 is not referenced. Otherwise,
+ a copy of the first K eigenvectors which will be used by
+ SLAED7 in a matrix multiply (SGEMM) to update the new
+ eigenvectors.
+
+ LDQ2 (input) INTEGER
+ The leading dimension of the array Q2. LDQ2 >= max(1,N).
+
+ W (output) REAL array, dimension (N)
+ The first K values of the final deflation-altered z-vector,
+ which will be passed to SLAED3.
+
+ PERM (output) INTEGER array, dimension (N)
+ The permutations (from deflation and sorting) to be applied
+ to each eigenblock.
+
+ GIVPTR (output) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem.
+
+ GIVCOL (output) INTEGER array, dimension (2, N)
+ Each pair of numbers indicates a pair of columns that take
+ part in a Givens rotation.
+
+ GIVNUM (output) REAL array, dimension (2, N)
+ Each number indicates the S value to be used in the
+ corresponding Givens rotation.
+
+ INDXP (workspace) INTEGER array, dimension (N)
+ The permutation used to place deflated values of D at the end
+ of the array. INDXP(1:K) points to the nondeflated D-values
+ and INDXP(K+1:N) points to the deflated eigenvalues.
+
+ INDX (workspace) INTEGER array, dimension (N)
+ The permutation used to sort the contents of D into ascending
+ order.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --indxq;
+ --z__;
+ --dlamda;
+ q2_dim1 = *ldq2;
+ q2_offset = 1 + q2_dim1;
+ q2 -= q2_offset;
+ --w;
+ --perm;
+ givcol -= 3;
+ givnum -= 3;
+ --indxp;
+ --indx;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*icompq == 1 && *qsiz < *n) {
+ *info = -4;
+ } else if (*ldq < max(1,*n)) {
+ *info = -7;
+ } else if ((*cutpnt < min(1,*n)) || (*cutpnt > *n)) {
+ *info = -10;
+ } else if (*ldq2 < max(1,*n)) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAED8", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ n1 = *cutpnt;
+ n2 = *n - n1;
+ n1p1 = n1 + 1;
+
+ if (*rho < 0.f) {
+ sscal_(&n2, &c_b1290, &z__[n1p1], &c__1);
+ }
+
+/* Normalize z so that norm(z) = 1 */
+
+ t = 1.f / sqrt(2.f);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ indx[j] = j;
+/* L10: */
+ }
+ sscal_(n, &t, &z__[1], &c__1);
+ *rho = (r__1 = *rho * 2.f, dabs(r__1));
+
+/* Sort the eigenvalues into increasing order */
+
+ i__1 = *n;
+ for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) {
+ indxq[i__] += *cutpnt;
+/* L20: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = d__[indxq[i__]];
+ w[i__] = z__[indxq[i__]];
+/* L30: */
+ }
+ i__ = 1;
+ j = *cutpnt + 1;
+ slamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]);
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = dlamda[indx[i__]];
+ z__[i__] = w[indx[i__]];
+/* L40: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ imax = isamax_(n, &z__[1], &c__1);
+ jmax = isamax_(n, &d__[1], &c__1);
+ eps = slamch_("Epsilon");
+ tol = eps * 8.f * (r__1 = d__[jmax], dabs(r__1));
+
+/*
+ If the rank-1 modifier is small enough, no more needs to be done
+ except to reorganize Q so that its columns correspond with the
+ elements in D.
+*/
+
+ if (*rho * (r__1 = z__[imax], dabs(r__1)) <= tol) {
+ *k = 0;
+ if (*icompq == 0) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ perm[j] = indxq[indx[j]];
+/* L50: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ perm[j] = indxq[indx[j]];
+ scopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1
+ + 1], &c__1);
+/* L60: */
+ }
+ slacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq);
+ }
+ return 0;
+ }
+
+/*
+ If there are multiple eigenvalues then the problem deflates. Here
+ the number of equal eigenvalues is found. As each equal
+ eigenvalue is found, an elementary reflector is computed to rotate
+ the corresponding eigensubspace so that the corresponding
+ components of Z are zero in this new basis.
+*/
+
+ *k = 0;
+ *givptr = 0;
+ k2 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ if (j == *n) {
+ goto L110;
+ }
+ } else {
+ jlam = j;
+ goto L80;
+ }
+/* L70: */
+ }
+L80:
+ ++j;
+ if (j > *n) {
+ goto L100;
+ }
+ if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ indxp[k2] = j;
+ } else {
+
+/* Check if eigenvalues are close enough to allow deflation. */
+
+ s = z__[jlam];
+ c__ = z__[j];
+
+/*
+ Find sqrt(a**2+b**2) without overflow or
+ destructive underflow.
+*/
+
+ tau = slapy2_(&c__, &s);
+ t = d__[j] - d__[jlam];
+ c__ /= tau;
+ s = -s / tau;
+ if ((r__1 = t * c__ * s, dabs(r__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ z__[j] = tau;
+ z__[jlam] = 0.f;
+
+/* Record the appropriate Givens rotation */
+
+ ++(*givptr);
+ givcol[((*givptr) << (1)) + 1] = indxq[indx[jlam]];
+ givcol[((*givptr) << (1)) + 2] = indxq[indx[j]];
+ givnum[((*givptr) << (1)) + 1] = c__;
+ givnum[((*givptr) << (1)) + 2] = s;
+ if (*icompq == 1) {
+ srot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[
+ indxq[indx[j]] * q_dim1 + 1], &c__1, &c__, &s);
+ }
+ t = d__[jlam] * c__ * c__ + d__[j] * s * s;
+ d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__;
+ d__[jlam] = t;
+ --k2;
+ i__ = 1;
+L90:
+ if (k2 + i__ <= *n) {
+ if (d__[jlam] < d__[indxp[k2 + i__]]) {
+ indxp[k2 + i__ - 1] = indxp[k2 + i__];
+ indxp[k2 + i__] = jlam;
+ ++i__;
+ goto L90;
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ } else {
+ indxp[k2 + i__ - 1] = jlam;
+ }
+ jlam = j;
+ } else {
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+ jlam = j;
+ }
+ }
+ goto L80;
+L100:
+
+/* Record the last eigenvalue. */
+
+ ++(*k);
+ w[*k] = z__[jlam];
+ dlamda[*k] = d__[jlam];
+ indxp[*k] = jlam;
+
+L110:
+
+/*
+ Sort the eigenvalues and corresponding eigenvectors into DLAMDA
+ and Q2 respectively. The eigenvalues/vectors which were not
+ deflated go into the first K slots of DLAMDA and Q2 respectively,
+ while those which were deflated go into the last N - K slots.
+*/
+
+ if (*icompq == 0) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jp = indxp[j];
+ dlamda[j] = d__[jp];
+ perm[j] = indxq[indx[jp]];
+/* L120: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ jp = indxp[j];
+ dlamda[j] = d__[jp];
+ perm[j] = indxq[indx[jp]];
+ scopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1]
+ , &c__1);
+/* L130: */
+ }
+ }
+
+/*
+ The deflated eigenvalues and their corresponding vectors go back
+ into the last N - K slots of D and Q respectively.
+*/
+
+ if (*k < *n) {
+ if (*icompq == 0) {
+ i__1 = *n - *k;
+ scopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ } else {
+ i__1 = *n - *k;
+ scopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ i__1 = *n - *k;
+ slacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*
+ k + 1) * q_dim1 + 1], ldq);
+ }
+ }
+
+ return 0;
+
+/* End of SLAED8 */
+
+} /* slaed8_ */
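+
+/*
+   Editor's sketch, not part of LAPACK: the two deflation tests SLAED8
+   applies, isolated as a hypothetical predicate.  TOL corresponds to
+   the 8*EPS*max|D(j)| threshold computed in the routine above.  Test 1
+   drops an eigenvalue whose z-component is negligible; test 2 checks
+   whether a Givens rotation can zero z(jlam) against z(j) without
+   perturbing the eigenvalues by more than TOL.
+*/
+static logical can_deflate(real rho, real zj, real zjlam,
+                           real dj, real djlam, real tol)
+{
+    extern doublereal slapy2_(real *, real *);
+    real c, s, tau, t;
+
+    if (rho * dabs(zj) <= tol) {
+        return TRUE_;                  /* tiny z component             */
+    }
+    tau = slapy2_(&zj, &zjlam);        /* sqrt(zj**2+zjlam**2), safely */
+    c = zj / tau;
+    s = -zjlam / tau;
+    t = dj - djlam;
+    return (dabs(t * c * s) <= tol) ? TRUE_ : FALSE_;
+}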
+
+/* Subroutine */ int slaed9_(integer *k, integer *kstart, integer *kstop,
+ integer *n, real *d__, real *q, integer *ldq, real *rho, real *dlamda,
+ real *w, real *s, integer *lds, integer *info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, s_dim1, s_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ static integer i__, j;
+ static real temp;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), slaed4_(integer *, integer *, real *, real *, real *,
+ real *, real *, integer *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ SLAED9 finds the roots of the secular equation, as defined by the
+ values in D, Z, and RHO, between KSTART and KSTOP. It makes the
+ appropriate calls to SLAED4 and then stores the new matrix of
+ eigenvectors for use in calculating the next level of Z vectors.
+
+ Arguments
+ =========
+
+ K (input) INTEGER
+ The number of terms in the rational function to be solved by
+ SLAED4. K >= 0.
+
+ KSTART (input) INTEGER
+ KSTOP (input) INTEGER
+ The updated eigenvalues Lambda(I), KSTART <= I <= KSTOP
+ are to be computed. 1 <= KSTART <= KSTOP <= K.
+
+ N (input) INTEGER
+ The number of rows and columns in the Q matrix.
+ N >= K (deflation may result in N > K).
+
+ D (output) REAL array, dimension (N)
+ D(I) contains the updated eigenvalues
+ for KSTART <= I <= KSTOP.
+
+ Q (workspace) REAL array, dimension (LDQ,N)
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= max( 1, N ).
+
+ RHO (input) REAL
+ The value of the parameter in the rank one update equation.
+ RHO >= 0 required.
+
+ DLAMDA (input) REAL array, dimension (K)
+ The first K elements of this array contain the old roots
+ of the deflated updating problem. These are the poles
+ of the secular equation.
+
+ W (input) REAL array, dimension (K)
+ The first K elements of this array contain the components
+ of the deflation-adjusted updating vector.
+
+ S (output) REAL array, dimension (LDS, K)
+ Will contain the eigenvectors of the repaired matrix which
+ will be stored for subsequent Z vector calculation and
+ multiplied by the previously accumulated eigenvectors
+ to update the system.
+
+ LDS (input) INTEGER
+ The leading dimension of S. LDS >= max( 1, K ).
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, an eigenvalue did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --dlamda;
+ --w;
+ s_dim1 = *lds;
+ s_offset = 1 + s_dim1;
+ s -= s_offset;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*k < 0) {
+ *info = -1;
+ } else if ((*kstart < 1) || (*kstart > max(1,*k))) {
+ *info = -2;
+ } else if ((max(1,*kstop) < *kstart) || (*kstop > max(1,*k))) {
+ *info = -3;
+ } else if (*n < *k) {
+ *info = -4;
+ } else if (*ldq < max(1,*k)) {
+ *info = -7;
+ } else if (*lds < max(1,*k)) {
+ *info = -12;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAED9", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 0) {
+ return 0;
+ }
+
+/*
+ Modify values DLAMDA(i) to make sure all DLAMDA(i)-DLAMDA(j) can
+ be computed with high relative accuracy (barring over/underflow).
+ This is a problem on machines without a guard digit in
+ add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+ The following code replaces DLAMDA(I) by 2*DLAMDA(I)-DLAMDA(I),
+ which on any of these machines zeros out the bottommost
+ bit of DLAMDA(I) if it is 1; this makes the subsequent
+ subtractions DLAMDA(I)-DLAMDA(J) unproblematic when cancellation
+ occurs. On binary machines with a guard digit (almost all
+ machines) it does not change DLAMDA(I) at all. On hexadecimal
+ and decimal machines with a guard digit, it slightly
+ changes the bottommost bits of DLAMDA(I). It does not account
+ for hexadecimal or decimal machines without guard digits
+ (we know of none). We use a subroutine call to compute
+ 2*DLAMDA(I) to prevent optimizing compilers from eliminating
+ this code.
+*/
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dlamda[i__] = slamc3_(&dlamda[i__], &dlamda[i__]) - dlamda[i__];
+/* L10: */
+ }
+
+ i__1 = *kstop;
+ for (j = *kstart; j <= i__1; ++j) {
+ slaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho, &d__[j],
+ info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ goto L120;
+ }
+/* L20: */
+ }
+
+ if ((*k == 1) || (*k == 2)) {
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *k;
+ for (j = 1; j <= i__2; ++j) {
+ s[j + i__ * s_dim1] = q[j + i__ * q_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+ goto L120;
+ }
+
+/* Compute updated W. */
+
+ scopy_(k, &w[1], &c__1, &s[s_offset], &c__1);
+
+/* Initialize W(I) = Q(I,I) */
+
+ i__1 = *ldq + 1;
+ scopy_(k, &q[q_offset], &i__1, &w[1], &c__1);
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L50: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ w[i__] *= q[i__ + j * q_dim1] / (dlamda[i__] - dlamda[j]);
+/* L60: */
+ }
+/* L70: */
+ }
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__1 = sqrt(-w[i__]);
+ w[i__] = r_sign(&r__1, &s[i__ + s_dim1]);
+/* L80: */
+ }
+
+/* Compute eigenvectors of the modified rank-1 modification. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ q[i__ + j * q_dim1] = w[i__] / q[i__ + j * q_dim1];
+/* L90: */
+ }
+ temp = snrm2_(k, &q[j * q_dim1 + 1], &c__1);
+ i__2 = *k;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ s[i__ + j * s_dim1] = q[i__ + j * q_dim1] / temp;
+/* L100: */
+ }
+/* L110: */
+ }
+
+L120:
+ return 0;
+
+/* End of SLAED9 */
+
+} /* slaed9_ */
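+
+/*
+   Editor's sketch, not part of LAPACK: the core of SLAED9 is the loop
+   handing each requested root to SLAED4; the remainder of the routine
+   rebuilds the updating vector W and normalizes the eigenvectors.
+   This hypothetical helper repeats that loop with the 1-based,
+   f2c-style adjusted pointers used above (q, d, dlamda, w point one
+   element before their data).
+*/
+static int secular_roots(integer *k, integer kstart, integer kstop,
+                         real *dlamda, real *w, real *q, integer q_dim1,
+                         real *rho, real *d, integer *info)
+{
+    integer j;
+    for (j = kstart; j <= kstop; ++j) {
+        /* column j of Q receives the DELTA vector; D(j) gets the root */
+        slaed4_(k, &j, &dlamda[1], &w[1], &q[j * q_dim1 + 1], rho,
+                &d[j], info);
+        if (*info != 0) {
+            return 0;   /* zero finder failed; caller inspects INFO */
+        }
+    }
+    return 0;
+}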
+
+/* Subroutine */ int slaeda_(integer *n, integer *tlvls, integer *curlvl,
+ integer *curpbm, integer *prmptr, integer *perm, integer *givptr,
+ integer *givcol, real *givnum, real *q, integer *qptr, real *z__,
+ real *ztemp, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, k, mid, ptr, curr;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ static integer bsiz1, bsiz2, psiz1, psiz2, zptr1;
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), scopy_(integer *, real *, integer *, real *, integer *),
+ xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ SLAEDA computes the Z vector corresponding to the merge step in the
+ CURLVLth step of the merge process with TLVLS steps for the CURPBMth
+ problem.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ TLVLS (input) INTEGER
+ The total number of merging levels in the overall divide and
+ conquer tree.
+
+ CURLVL (input) INTEGER
+ The current level in the overall merge routine,
+ 0 <= curlvl <= tlvls.
+
+ CURPBM (input) INTEGER
+ The current problem in the current level in the overall
+ merge routine (counting from upper left to lower right).
+
+ PRMPTR (input) INTEGER array, dimension (N lg N)
+ Contains a list of pointers which indicate where in PERM a
+ level's permutation is stored. PRMPTR(i+1) - PRMPTR(i)
+ indicates the size of the permutation and incidentally the
+ size of the full, non-deflated problem.
+
+ PERM (input) INTEGER array, dimension (N lg N)
+ Contains the permutations (from deflation and sorting) to be
+ applied to each eigenblock.
+
+ GIVPTR (input) INTEGER array, dimension (N lg N)
+ Contains a list of pointers which indicate where in GIVCOL a
+ level's Givens rotations are stored. GIVPTR(i+1) - GIVPTR(i)
+ indicates the number of Givens rotations.
+
+ GIVCOL (input) INTEGER array, dimension (2, N lg N)
+ Each pair of numbers indicates a pair of columns involved
+ in a Givens rotation.
+
+ GIVNUM (input) REAL array, dimension (2, N lg N)
+ Each number indicates the S value to be used in the
+ corresponding Givens rotation.
+
+ Q (input) REAL array, dimension (N**2)
+ Contains the square eigenblocks from previous levels, the
+ starting positions for blocks are given by QPTR.
+
+ QPTR (input) INTEGER array, dimension (N+2)
+ Contains a list of pointers which indicate where in Q an
+ eigenblock is stored. SQRT( QPTR(i+1) - QPTR(i) ) indicates
+ the size of the block.
+
+ Z (output) REAL array, dimension (N)
+ On output this vector contains the updating vector (the last
+ row of the first sub-eigenvector matrix and the first row of
+ the second sub-eigenvector matrix).
+
+ ZTEMP (workspace) REAL array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --ztemp;
+ --z__;
+ --qptr;
+ --q;
+ givnum -= 3;
+ givcol -= 3;
+ --givptr;
+ --perm;
+ --prmptr;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -1;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAEDA", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine location of first number in second half. */
+
+ mid = *n / 2 + 1;
+
+/* Gather last/first rows of appropriate eigenblocks into center of Z */
+
+ ptr = 1;
+
+/*
+ Determine location of lowest level subproblem in the full storage
+ scheme
+*/
+
+ i__1 = *curlvl - 1;
+ curr = ptr + *curpbm * pow_ii(&c__2, curlvl) + pow_ii(&c__2, &i__1) - 1;
+
+/*
+ Determine size of these matrices. We add HALF to the value of
+ the SQRT in case the machine underestimates one of these square
+ roots.
+*/
+
+ bsiz1 = (integer) (sqrt((real) (qptr[curr + 1] - qptr[curr])) + .5f);
+ bsiz2 = (integer) (sqrt((real) (qptr[curr + 2] - qptr[curr + 1])) + .5f);
+ i__1 = mid - bsiz1 - 1;
+ for (k = 1; k <= i__1; ++k) {
+ z__[k] = 0.f;
+/* L10: */
+ }
+ scopy_(&bsiz1, &q[qptr[curr] + bsiz1 - 1], &bsiz1, &z__[mid - bsiz1], &
+ c__1);
+ scopy_(&bsiz2, &q[qptr[curr + 1]], &bsiz2, &z__[mid], &c__1);
+ i__1 = *n;
+ for (k = mid + bsiz2; k <= i__1; ++k) {
+ z__[k] = 0.f;
+/* L20: */
+ }
+
+/*
+ Loop through the remaining levels 1 -> CURLVL, applying the Givens
+ rotations and permutation and then multiplying the center matrices
+ against the current Z.
+*/
+
+ ptr = pow_ii(&c__2, tlvls) + 1;
+ i__1 = *curlvl - 1;
+ for (k = 1; k <= i__1; ++k) {
+ i__2 = *curlvl - k;
+ i__3 = *curlvl - k - 1;
+ curr = ptr + *curpbm * pow_ii(&c__2, &i__2) + pow_ii(&c__2, &i__3) -
+ 1;
+ psiz1 = prmptr[curr + 1] - prmptr[curr];
+ psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
+ zptr1 = mid - psiz1;
+
+/* Apply Givens at CURR and CURR+1 */
+
+ i__2 = givptr[curr + 1] - 1;
+ for (i__ = givptr[curr]; i__ <= i__2; ++i__) {
+ srot_(&c__1, &z__[zptr1 + givcol[((i__) << (1)) + 1] - 1], &c__1,
+ &z__[zptr1 + givcol[((i__) << (1)) + 2] - 1], &c__1, &
+ givnum[((i__) << (1)) + 1], &givnum[((i__) << (1)) + 2]);
+/* L30: */
+ }
+ i__2 = givptr[curr + 2] - 1;
+ for (i__ = givptr[curr + 1]; i__ <= i__2; ++i__) {
+ srot_(&c__1, &z__[mid - 1 + givcol[((i__) << (1)) + 1]], &c__1, &
+ z__[mid - 1 + givcol[((i__) << (1)) + 2]], &c__1, &givnum[
+ ((i__) << (1)) + 1], &givnum[((i__) << (1)) + 2]);
+/* L40: */
+ }
+ psiz1 = prmptr[curr + 1] - prmptr[curr];
+ psiz2 = prmptr[curr + 2] - prmptr[curr + 1];
+ i__2 = psiz1 - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ ztemp[i__ + 1] = z__[zptr1 + perm[prmptr[curr] + i__] - 1];
+/* L50: */
+ }
+ i__2 = psiz2 - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ ztemp[psiz1 + i__ + 1] = z__[mid + perm[prmptr[curr + 1] + i__] -
+ 1];
+/* L60: */
+ }
+
+/*
+ Multiply Blocks at CURR and CURR+1
+
+ Determine size of these matrices. We add HALF to the value of
+ the SQRT in case the machine underestimates one of these
+ square roots.
+*/
+
+ bsiz1 = (integer) (sqrt((real) (qptr[curr + 1] - qptr[curr])) + .5f);
+ bsiz2 = (integer) (sqrt((real) (qptr[curr + 2] - qptr[curr + 1])) +
+ .5f);
+ if (bsiz1 > 0) {
+ sgemv_("T", &bsiz1, &bsiz1, &c_b1011, &q[qptr[curr]], &bsiz1, &
+ ztemp[1], &c__1, &c_b320, &z__[zptr1], &c__1);
+ }
+ i__2 = psiz1 - bsiz1;
+ scopy_(&i__2, &ztemp[bsiz1 + 1], &c__1, &z__[zptr1 + bsiz1], &c__1);
+ if (bsiz2 > 0) {
+ sgemv_("T", &bsiz2, &bsiz2, &c_b1011, &q[qptr[curr + 1]], &bsiz2,
+ &ztemp[psiz1 + 1], &c__1, &c_b320, &z__[mid], &c__1);
+ }
+ i__2 = psiz2 - bsiz2;
+ scopy_(&i__2, &ztemp[psiz1 + bsiz2 + 1], &c__1, &z__[mid + bsiz2], &
+ c__1);
+
+ i__2 = *tlvls - k;
+ ptr += pow_ii(&c__2, &i__2);
+/* L70: */
+ }
+
+ return 0;
+
+/* End of SLAEDA */
+
+} /* slaeda_ */
+
+/* Subroutine */ int slaev2_(real *a, real *b, real *c__, real *rt1, real *
+ rt2, real *cs1, real *sn1)
+{
+ /* System generated locals */
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real ab, df, cs, ct, tb, sm, tn, rt, adf, acs;
+ static integer sgn1, sgn2;
+ static real acmn, acmx;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLAEV2 computes the eigendecomposition of a 2-by-2 symmetric matrix
+ [ A B ]
+ [ B C ].
+ On return, RT1 is the eigenvalue of larger absolute value, RT2 is the
+ eigenvalue of smaller absolute value, and (CS1,SN1) is the unit right
+ eigenvector for RT1, giving the decomposition
+
+ [ CS1 SN1 ] [ A B ] [ CS1 -SN1 ] = [ RT1 0 ]
+ [-SN1 CS1 ] [ B C ] [ SN1 CS1 ] [ 0 RT2 ].
+
+ Arguments
+ =========
+
+ A (input) REAL
+ The (1,1) element of the 2-by-2 matrix.
+
+ B (input) REAL
+ The (1,2) element and the conjugate of the (2,1) element of
+ the 2-by-2 matrix.
+
+ C (input) REAL
+ The (2,2) element of the 2-by-2 matrix.
+
+ RT1 (output) REAL
+ The eigenvalue of larger absolute value.
+
+ RT2 (output) REAL
+ The eigenvalue of smaller absolute value.
+
+ CS1 (output) REAL
+ SN1 (output) REAL
+ The vector (CS1, SN1) is a unit right eigenvector for RT1.
+
+ Further Details
+ ===============
+
+ RT1 is accurate to a few ulps barring over/underflow.
+
+ RT2 may be inaccurate if there is massive cancellation in the
+ determinant A*C-B*B; higher precision or correctly rounded or
+ correctly truncated arithmetic would be needed to compute RT2
+ accurately in all cases.
+
+ CS1 and SN1 are accurate to a few ulps barring over/underflow.
+
+ Overflow is possible only if RT1 is within a factor of 5 of overflow.
+ Underflow is harmless if the input data is 0 or exceeds
+ underflow_threshold / macheps.
+
+ =====================================================================
+
+
+ Compute the eigenvalues
+*/
+
+ sm = *a + *c__;
+ df = *a - *c__;
+ adf = dabs(df);
+ tb = *b + *b;
+ ab = dabs(tb);
+ if (dabs(*a) > dabs(*c__)) {
+ acmx = *a;
+ acmn = *c__;
+ } else {
+ acmx = *c__;
+ acmn = *a;
+ }
+ if (adf > ab) {
+/* Computing 2nd power */
+ r__1 = ab / adf;
+ rt = adf * sqrt(r__1 * r__1 + 1.f);
+ } else if (adf < ab) {
+/* Computing 2nd power */
+ r__1 = adf / ab;
+ rt = ab * sqrt(r__1 * r__1 + 1.f);
+ } else {
+
+/* Includes case AB=ADF=0 */
+
+ rt = ab * sqrt(2.f);
+ }
+ if (sm < 0.f) {
+ *rt1 = (sm - rt) * .5f;
+ sgn1 = -1;
+
+/*
+ Order of execution important.
+ To get fully accurate smaller eigenvalue,
+ next line needs to be executed in higher precision.
+*/
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else if (sm > 0.f) {
+ *rt1 = (sm + rt) * .5f;
+ sgn1 = 1;
+
+/*
+ Order of execution important.
+ To get fully accurate smaller eigenvalue,
+ next line needs to be executed in higher precision.
+*/
+
+ *rt2 = acmx / *rt1 * acmn - *b / *rt1 * *b;
+ } else {
+
+/* Includes case RT1 = RT2 = 0 */
+
+ *rt1 = rt * .5f;
+ *rt2 = rt * -.5f;
+ sgn1 = 1;
+ }
+
+/* Compute the eigenvector */
+
+ if (df >= 0.f) {
+ cs = df + rt;
+ sgn2 = 1;
+ } else {
+ cs = df - rt;
+ sgn2 = -1;
+ }
+ acs = dabs(cs);
+ if (acs > ab) {
+ ct = -tb / cs;
+ *sn1 = 1.f / sqrt(ct * ct + 1.f);
+ *cs1 = ct * *sn1;
+ } else {
+ if (ab == 0.f) {
+ *cs1 = 1.f;
+ *sn1 = 0.f;
+ } else {
+ tn = -cs / tb;
+ *cs1 = 1.f / sqrt(tn * tn + 1.f);
+ *sn1 = tn * *cs1;
+ }
+ }
+ if (sgn1 == sgn2) {
+ tn = *cs1;
+ *cs1 = -(*sn1);
+ *sn1 = tn;
+ }
+ return 0;
+
+/* End of SLAEV2 */
+
+} /* slaev2_ */
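+
+/*
+   Illustrative sketch (assumed driver code, not part of the f2c
+   translation): a minimal call to slaev2_ on the symmetric 2-by-2
+   matrix [ 2 1; 1 2 ], assuming the f2c typedefs from f2c.h
+   ("real" is float). The eigenvalues are 3 and 1, and the unit
+   right eigenvector for 3 is (1/sqrt(2), 1/sqrt(2)):
+
+       real a = 2.f, b = 1.f, c = 2.f;
+       real rt1, rt2, cs1, sn1;
+       slaev2_(&a, &b, &c, &rt1, &rt2, &cs1, &sn1);
+       // rt1 == 3.f, rt2 == 1.f, (cs1, sn1) ~= (0.7071f, 0.7071f)
+*/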
+
+/* Subroutine */ int slahqr_(logical *wantt, logical *wantz, integer *n,
+ integer *ilo, integer *ihi, real *h__, integer *ldh, real *wr, real *
+ wi, integer *iloz, integer *ihiz, real *z__, integer *ldz, integer *
+ info)
+{
+ /* System generated locals */
+ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ static integer i__, j, k, l, m;
+ static real s, v[3];
+ static integer i1, i2;
+ static real t1, t2, t3, v1, v2, v3, h00, h10, h11, h12, h21, h22, h33,
+ h44;
+ static integer nh;
+ static real cs;
+ static integer nr;
+ static real sn;
+ static integer nz;
+ static real ave, h33s, h44s;
+ static integer itn, its;
+ static real ulp, sum, tst1, h43h34, disc, unfl, ovfl, work[1];
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *), scopy_(integer *, real *, integer *,
+ real *, integer *), slanv2_(real *, real *, real *, real *, real *
+ , real *, real *, real *, real *, real *), slabad_(real *, real *)
+ ;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int slarfg_(integer *, real *, real *, integer *,
+ real *);
+ extern doublereal slanhs_(char *, integer *, real *, integer *, real *);
+ static real smlnum;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLAHQR is an auxiliary routine called by SHSEQR to update the
+ eigenvalues and Schur decomposition already computed by SHSEQR, by
+ dealing with the Hessenberg submatrix in rows and columns ILO to IHI.
+
+ Arguments
+ =========
+
+ WANTT (input) LOGICAL
+ = .TRUE. : the full Schur form T is required;
+ = .FALSE.: only eigenvalues are required.
+
+ WANTZ (input) LOGICAL
+ = .TRUE. : the matrix of Schur vectors Z is required;
+ = .FALSE.: Schur vectors are not required.
+
+ N (input) INTEGER
+ The order of the matrix H. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ It is assumed that H is already upper quasi-triangular in
+ rows and columns IHI+1:N, and that H(ILO,ILO-1) = 0 (unless
+ ILO = 1). SLAHQR works primarily with the Hessenberg
+ submatrix in rows and columns ILO to IHI, but applies
+ transformations to all of H if WANTT is .TRUE..
+ 1 <= ILO <= max(1,IHI); IHI <= N.
+
+ H (input/output) REAL array, dimension (LDH,N)
+ On entry, the upper Hessenberg matrix H.
+ On exit, if WANTT is .TRUE., H is upper quasi-triangular in
+ rows and columns ILO:IHI, with any 2-by-2 diagonal blocks in
+ standard form. If WANTT is .FALSE., the contents of H are
+ unspecified on exit.
+
+ LDH (input) INTEGER
+ The leading dimension of the array H. LDH >= max(1,N).
+
+ WR (output) REAL array, dimension (N)
+ WI (output) REAL array, dimension (N)
+ The real and imaginary parts, respectively, of the computed
+ eigenvalues ILO to IHI are stored in the corresponding
+ elements of WR and WI. If two eigenvalues are computed as a
+ complex conjugate pair, they are stored in consecutive
+ elements of WR and WI, say the i-th and (i+1)th, with
+ WI(i) > 0 and WI(i+1) < 0. If WANTT is .TRUE., the
+ eigenvalues are stored in the same order as on the diagonal
+ of the Schur form returned in H, with WR(i) = H(i,i), and, if
+ H(i:i+1,i:i+1) is a 2-by-2 diagonal block,
+ WI(i) = sqrt(H(i+1,i)*H(i,i+1)) and WI(i+1) = -WI(i).
+
+ ILOZ (input) INTEGER
+ IHIZ (input) INTEGER
+ Specify the rows of Z to which transformations must be
+ applied if WANTZ is .TRUE..
+ 1 <= ILOZ <= ILO; IHI <= IHIZ <= N.
+
+ Z (input/output) REAL array, dimension (LDZ,N)
+ If WANTZ is .TRUE., on entry Z must contain the current
+ matrix Z of transformations accumulated by SHSEQR, and on
+ exit Z has been updated; transformations are applied only to
+ the submatrix Z(ILOZ:IHIZ,ILO:IHI).
+ If WANTZ is .FALSE., Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ > 0: SLAHQR failed to compute all the eigenvalues ILO to IHI
+ in a total of 30*(IHI-ILO+1) iterations; if INFO = i,
+ elements i+1:ihi of WR and WI contain those eigenvalues
+ which have been successfully computed.
+
+ Further Details
+ ===============
+
+ 2-96 Based on modifications by
+ David Day, Sandia National Laboratory, USA
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ h_dim1 = *ldh;
+ h_offset = 1 + h_dim1;
+ h__ -= h_offset;
+ --wr;
+ --wi;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*ilo == *ihi) {
+ wr[*ilo] = h__[*ilo + *ilo * h_dim1];
+ wi[*ilo] = 0.f;
+ return 0;
+ }
+
+ nh = *ihi - *ilo + 1;
+ nz = *ihiz - *iloz + 1;
+
+/*
+ Set machine-dependent constants for the stopping criterion.
+ If norm(H) <= sqrt(OVFL), overflow should not occur.
+*/
+
+ unfl = slamch_("Safe minimum");
+ ovfl = 1.f / unfl;
+ slabad_(&unfl, &ovfl);
+ ulp = slamch_("Precision");
+ smlnum = unfl * (nh / ulp);
+
+/*
+ I1 and I2 are the indices of the first row and last column of H
+ to which transformations must be applied. If eigenvalues only are
+ being computed, I1 and I2 are set inside the main loop.
+*/
+
+ if (*wantt) {
+ i1 = 1;
+ i2 = *n;
+ }
+
+/* ITN is the total number of QR iterations allowed. */
+
+ itn = nh * 30;
+
+/*
+ The main loop begins here. I is the loop index and decreases from
+ IHI to ILO in steps of 1 or 2. Each iteration of the loop works
+ with the active submatrix in rows and columns L to I.
+ Eigenvalues I+1 to IHI have already converged. Either L = ILO or
+ H(L,L-1) is negligible so that the matrix splits.
+*/
+
+ i__ = *ihi;
+L10:
+ l = *ilo;
+ if (i__ < *ilo) {
+ goto L150;
+ }
+
+/*
+ Perform QR iterations on rows and columns ILO to I until a
+ submatrix of order 1 or 2 splits off at the bottom because a
+ subdiagonal element has become negligible.
+*/
+
+ i__1 = itn;
+ for (its = 0; its <= i__1; ++its) {
+
+/* Look for a single small subdiagonal element. */
+
+ i__2 = l + 1;
+ for (k = i__; k >= i__2; --k) {
+ tst1 = (r__1 = h__[k - 1 + (k - 1) * h_dim1], dabs(r__1)) + (r__2
+ = h__[k + k * h_dim1], dabs(r__2));
+ if (tst1 == 0.f) {
+ i__3 = i__ - l + 1;
+ tst1 = slanhs_("1", &i__3, &h__[l + l * h_dim1], ldh, work);
+ }
+/* Computing MAX */
+ r__2 = ulp * tst1;
+ if ((r__1 = h__[k + (k - 1) * h_dim1], dabs(r__1)) <= dmax(r__2,
+ smlnum)) {
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ l = k;
+ if (l > *ilo) {
+
+/* H(L,L-1) is negligible */
+
+ h__[l + (l - 1) * h_dim1] = 0.f;
+ }
+
+/* Exit from loop if a submatrix of order 1 or 2 has split off. */
+
+ if (l >= i__ - 1) {
+ goto L140;
+ }
+
+/*
+ Now the active submatrix is in rows and columns L to I. If
+ eigenvalues only are being computed, only the active submatrix
+ need be transformed.
+*/
+
+ if (! (*wantt)) {
+ i1 = l;
+ i2 = i__;
+ }
+
+ if ((its == 10) || (its == 20)) {
+
+/* Exceptional shift. */
+
+ s = (r__1 = h__[i__ + (i__ - 1) * h_dim1], dabs(r__1)) + (r__2 =
+ h__[i__ - 1 + (i__ - 2) * h_dim1], dabs(r__2));
+ h44 = s * .75f + h__[i__ + i__ * h_dim1];
+ h33 = h44;
+ h43h34 = s * -.4375f * s;
+ } else {
+
+/*
+ Prepare to use Francis' double shift
+ (i.e. 2nd degree generalized Rayleigh quotient)
+*/
+
+ h44 = h__[i__ + i__ * h_dim1];
+ h33 = h__[i__ - 1 + (i__ - 1) * h_dim1];
+ h43h34 = h__[i__ + (i__ - 1) * h_dim1] * h__[i__ - 1 + i__ *
+ h_dim1];
+ s = h__[i__ - 1 + (i__ - 2) * h_dim1] * h__[i__ - 1 + (i__ - 2) *
+ h_dim1];
+ disc = (h33 - h44) * .5f;
+ disc = disc * disc + h43h34;
+ if (disc > 0.f) {
+
+/* Real roots: use Wilkinson's shift twice */
+
+ disc = sqrt(disc);
+ ave = (h33 + h44) * .5f;
+ if (dabs(h33) - dabs(h44) > 0.f) {
+ h33 = h33 * h44 - h43h34;
+ h44 = h33 / (r_sign(&disc, &ave) + ave);
+ } else {
+ h44 = r_sign(&disc, &ave) + ave;
+ }
+ h33 = h44;
+ h43h34 = 0.f;
+ }
+ }
+
+/* Look for two consecutive small subdiagonal elements. */
+
+ i__2 = l;
+ for (m = i__ - 2; m >= i__2; --m) {
+/*
+ Determine the effect of starting the double-shift QR
+ iteration at row M, and see if this would make H(M,M-1)
+ negligible.
+*/
+
+ h11 = h__[m + m * h_dim1];
+ h22 = h__[m + 1 + (m + 1) * h_dim1];
+ h21 = h__[m + 1 + m * h_dim1];
+ h12 = h__[m + (m + 1) * h_dim1];
+ h44s = h44 - h11;
+ h33s = h33 - h11;
+ v1 = (h33s * h44s - h43h34) / h21 + h12;
+ v2 = h22 - h11 - h33s - h44s;
+ v3 = h__[m + 2 + (m + 1) * h_dim1];
+ s = dabs(v1) + dabs(v2) + dabs(v3);
+ v1 /= s;
+ v2 /= s;
+ v3 /= s;
+ v[0] = v1;
+ v[1] = v2;
+ v[2] = v3;
+ if (m == l) {
+ goto L50;
+ }
+ h00 = h__[m - 1 + (m - 1) * h_dim1];
+ h10 = h__[m + (m - 1) * h_dim1];
+ tst1 = dabs(v1) * (dabs(h00) + dabs(h11) + dabs(h22));
+ if (dabs(h10) * (dabs(v2) + dabs(v3)) <= ulp * tst1) {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+
+/* Double-shift QR step */
+
+ i__2 = i__ - 1;
+ for (k = m; k <= i__2; ++k) {
+
+/*
+ The first iteration of this loop determines a reflection G
+ from the vector V and applies it from left and right to H,
+ thus creating a nonzero bulge below the subdiagonal.
+
+ Each subsequent iteration determines a reflection G to
+ restore the Hessenberg form in the (K-1)th column, and thus
+ chases the bulge one step toward the bottom of the active
+ submatrix. NR is the order of G.
+
+ Computing MIN
+*/
+ i__3 = 3, i__4 = i__ - k + 1;
+ nr = min(i__3,i__4);
+ if (k > m) {
+ scopy_(&nr, &h__[k + (k - 1) * h_dim1], &c__1, v, &c__1);
+ }
+ slarfg_(&nr, v, &v[1], &c__1, &t1);
+ if (k > m) {
+ h__[k + (k - 1) * h_dim1] = v[0];
+ h__[k + 1 + (k - 1) * h_dim1] = 0.f;
+ if (k < i__ - 1) {
+ h__[k + 2 + (k - 1) * h_dim1] = 0.f;
+ }
+ } else if (m > l) {
+ h__[k + (k - 1) * h_dim1] = -h__[k + (k - 1) * h_dim1];
+ }
+ v2 = v[1];
+ t2 = t1 * v2;
+ if (nr == 3) {
+ v3 = v[2];
+ t3 = t1 * v3;
+
+/*
+ Apply G from the left to transform the rows of the matrix
+ in columns K to I2.
+*/
+
+ i__3 = i2;
+ for (j = k; j <= i__3; ++j) {
+ sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1]
+ + v3 * h__[k + 2 + j * h_dim1];
+ h__[k + j * h_dim1] -= sum * t1;
+ h__[k + 1 + j * h_dim1] -= sum * t2;
+ h__[k + 2 + j * h_dim1] -= sum * t3;
+/* L60: */
+ }
+
+/*
+ Apply G from the right to transform the columns of the
+ matrix in rows I1 to min(K+3,I).
+
+ Computing MIN
+*/
+ i__4 = k + 3;
+ i__3 = min(i__4,i__);
+ for (j = i1; j <= i__3; ++j) {
+ sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
+ + v3 * h__[j + (k + 2) * h_dim1];
+ h__[j + k * h_dim1] -= sum * t1;
+ h__[j + (k + 1) * h_dim1] -= sum * t2;
+ h__[j + (k + 2) * h_dim1] -= sum * t3;
+/* L70: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__3 = *ihiz;
+ for (j = *iloz; j <= i__3; ++j) {
+ sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
+ z_dim1] + v3 * z__[j + (k + 2) * z_dim1];
+ z__[j + k * z_dim1] -= sum * t1;
+ z__[j + (k + 1) * z_dim1] -= sum * t2;
+ z__[j + (k + 2) * z_dim1] -= sum * t3;
+/* L80: */
+ }
+ }
+ } else if (nr == 2) {
+
+/*
+ Apply G from the left to transform the rows of the matrix
+ in columns K to I2.
+*/
+
+ i__3 = i2;
+ for (j = k; j <= i__3; ++j) {
+ sum = h__[k + j * h_dim1] + v2 * h__[k + 1 + j * h_dim1];
+ h__[k + j * h_dim1] -= sum * t1;
+ h__[k + 1 + j * h_dim1] -= sum * t2;
+/* L90: */
+ }
+
+/*
+ Apply G from the right to transform the columns of the
+ matrix in rows I1 to min(K+3,I).
+*/
+
+ i__3 = i__;
+ for (j = i1; j <= i__3; ++j) {
+ sum = h__[j + k * h_dim1] + v2 * h__[j + (k + 1) * h_dim1]
+ ;
+ h__[j + k * h_dim1] -= sum * t1;
+ h__[j + (k + 1) * h_dim1] -= sum * t2;
+/* L100: */
+ }
+
+ if (*wantz) {
+
+/* Accumulate transformations in the matrix Z */
+
+ i__3 = *ihiz;
+ for (j = *iloz; j <= i__3; ++j) {
+ sum = z__[j + k * z_dim1] + v2 * z__[j + (k + 1) *
+ z_dim1];
+ z__[j + k * z_dim1] -= sum * t1;
+ z__[j + (k + 1) * z_dim1] -= sum * t2;
+/* L110: */
+ }
+ }
+ }
+/* L120: */
+ }
+
+/* L130: */
+ }
+
+/* Failure to converge in remaining number of iterations */
+
+ *info = i__;
+ return 0;
+
+L140:
+
+ if (l == i__) {
+
+/* H(I,I-1) is negligible: one eigenvalue has converged. */
+
+ wr[i__] = h__[i__ + i__ * h_dim1];
+ wi[i__] = 0.f;
+ } else if (l == i__ - 1) {
+
+/*
+ H(I-1,I-2) is negligible: a pair of eigenvalues have converged.
+
+ Transform the 2-by-2 submatrix to standard Schur form,
+ and compute and store the eigenvalues.
+*/
+
+ slanv2_(&h__[i__ - 1 + (i__ - 1) * h_dim1], &h__[i__ - 1 + i__ *
+ h_dim1], &h__[i__ + (i__ - 1) * h_dim1], &h__[i__ + i__ *
+ h_dim1], &wr[i__ - 1], &wi[i__ - 1], &wr[i__], &wi[i__], &cs,
+ &sn);
+
+ if (*wantt) {
+
+/* Apply the transformation to the rest of H. */
+
+ if (i2 > i__) {
+ i__1 = i2 - i__;
+ srot_(&i__1, &h__[i__ - 1 + (i__ + 1) * h_dim1], ldh, &h__[
+ i__ + (i__ + 1) * h_dim1], ldh, &cs, &sn);
+ }
+ i__1 = i__ - i1 - 1;
+ srot_(&i__1, &h__[i1 + (i__ - 1) * h_dim1], &c__1, &h__[i1 + i__ *
+ h_dim1], &c__1, &cs, &sn);
+ }
+ if (*wantz) {
+
+/* Apply the transformation to Z. */
+
+ srot_(&nz, &z__[*iloz + (i__ - 1) * z_dim1], &c__1, &z__[*iloz +
+ i__ * z_dim1], &c__1, &cs, &sn);
+ }
+ }
+
+/*
+ Decrement number of remaining iterations, and return to start of
+ the main loop with new value of I.
+*/
+
+ itn -= its;
+ i__ = l - 1;
+ goto L10;
+
+L150:
+ return 0;
+
+/* End of SLAHQR */
+
+} /* slahqr_ */
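+
+/*
+   Illustrative sketch (assumed driver code, not part of the f2c
+   translation): computing the eigenvalues of the 2-by-2 upper
+   Hessenberg matrix [ 1 2; 3 2 ] stored column-major, with the
+   f2c typedefs from f2c.h. Only eigenvalues are requested, so
+   WANTT = WANTZ = .FALSE. and Z is an unreferenced dummy:
+
+       real h[4] = { 1.f, 3.f, 2.f, 2.f };  // columns (1,3) and (2,2)
+       real wr[2], wi[2], zdum[4];
+       integer n = 2, ilo = 1, ihi = 2, ldh = 2, ldz = 2, info;
+       logical wantt = FALSE_, wantz = FALSE_;
+       slahqr_(&wantt, &wantz, &n, &ilo, &ihi, h, &ldh, wr, wi,
+               &ilo, &ihi, zdum, &ldz, &info);
+       // info == 0; wi[0] == wi[1] == 0.f, {wr[0], wr[1]} == {4, -1}
+*/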
+
+/* Subroutine */ int slahrd_(integer *n, integer *k, integer *nb, real *a,
+ integer *lda, real *tau, real *t, integer *ldt, real *y, integer *ldy)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, t_dim1, t_offset, y_dim1, y_offset, i__1, i__2,
+ i__3;
+ real r__1;
+
+ /* Local variables */
+ static integer i__;
+ static real ei;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *), scopy_(
+ integer *, real *, integer *, real *, integer *), saxpy_(integer *
+ , real *, real *, integer *, real *, integer *), strmv_(char *,
+ char *, char *, integer *, real *, integer *, real *, integer *), slarfg_(integer *, real *, real *,
+ integer *, real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLAHRD reduces the first NB columns of a real general n-by-(n-k+1)
+ matrix A so that elements below the k-th subdiagonal are zero. The
+ reduction is performed by an orthogonal similarity transformation
+ Q' * A * Q. The routine returns the matrices V and T which determine
+ Q as a block reflector I - V*T*V', and also the matrix Y = A * V * T.
+
+ This is an auxiliary routine called by SGEHRD.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix A.
+
+ K (input) INTEGER
+ The offset for the reduction. Elements below the k-th
+ subdiagonal in the first NB columns are reduced to zero.
+
+ NB (input) INTEGER
+ The number of columns to be reduced.
+
+ A (input/output) REAL array, dimension (LDA,N-K+1)
+ On entry, the n-by-(n-k+1) general matrix A.
+ On exit, the elements on and above the k-th subdiagonal in
+ the first NB columns are overwritten with the corresponding
+ elements of the reduced matrix; the elements below the k-th
+ subdiagonal, with the array TAU, represent the matrix Q as a
+ product of elementary reflectors. The other columns of A are
+ unchanged. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (output) REAL array, dimension (NB)
+ The scalar factors of the elementary reflectors. See Further
+ Details.
+
+ T (output) REAL array, dimension (LDT,NB)
+ The upper triangular matrix T.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= NB.
+
+ Y (output) REAL array, dimension (LDY,NB)
+ The n-by-nb matrix Y.
+
+ LDY (input) INTEGER
+ The leading dimension of the array Y. LDY >= N.
+
+ Further Details
+ ===============
+
+ The matrix Q is represented as a product of nb elementary reflectors
+
+ Q = H(1) H(2) . . . H(nb).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i+k-1) = 0, v(i+k) = 1; v(i+k+1:n) is stored on exit in
+ A(i+k+1:n,i), and tau in TAU(i).
+
+ The elements of the vectors v together form the (n-k+1)-by-nb matrix
+ V which is needed, with T and Y, to apply the transformation to the
+ unreduced part of the matrix, using an update of the form:
+ A := (I - V*T*V') * (A - Y*V').
+
+ The contents of A on exit are illustrated by the following example
+ with n = 7, k = 3 and nb = 2:
+
+ ( a h a a a )
+ ( a h a a a )
+ ( a h a a a )
+ ( h h a a a )
+ ( v1 h a a a )
+ ( v1 v2 a a a )
+ ( v1 v2 a a a )
+
+ where a denotes an element of the original matrix A, h denotes a
+ modified element of the upper Hessenberg matrix H, and vi denotes an
+ element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ --tau;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ y_dim1 = *ldy;
+ y_offset = 1 + y_dim1;
+ y -= y_offset;
+
+ /* Function Body */
+ if (*n <= 1) {
+ return 0;
+ }
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (i__ > 1) {
+
+/*
+ Update A(1:n,i)
+
+ Compute i-th column of A - Y * V'
+*/
+
+ i__2 = i__ - 1;
+ sgemv_("No transpose", n, &i__2, &c_b1290, &y[y_offset], ldy, &a[*
+ k + i__ - 1 + a_dim1], lda, &c_b1011, &a[i__ * a_dim1 + 1]
+ , &c__1);
+
+/*
+ Apply I - V * T' * V' to this column (call it b) from the
+ left, using the last column of T as workspace
+
+ Let V = ( V1 ) and b = ( b1 ) (first I-1 rows)
+ ( V2 ) ( b2 )
+
+ where V1 is unit lower triangular
+
+ w := V1' * b1
+*/
+
+ i__2 = i__ - 1;
+ scopy_(&i__2, &a[*k + 1 + i__ * a_dim1], &c__1, &t[*nb * t_dim1 +
+ 1], &c__1);
+ i__2 = i__ - 1;
+ strmv_("Lower", "Transpose", "Unit", &i__2, &a[*k + 1 + a_dim1],
+ lda, &t[*nb * t_dim1 + 1], &c__1);
+
+/* w := w + V2'*b2 */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &a[*k + i__ + a_dim1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b1011, &t[*
+ nb * t_dim1 + 1], &c__1);
+
+/* w := T'*w */
+
+ i__2 = i__ - 1;
+ strmv_("Upper", "Transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+ &t[*nb * t_dim1 + 1], &c__1);
+
+/* b2 := b2 - V2*w */
+
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &a[*k + i__ +
+ a_dim1], lda, &t[*nb * t_dim1 + 1], &c__1, &c_b1011, &a[*
+ k + i__ + i__ * a_dim1], &c__1);
+
+/* b1 := b1 - V1*w */
+
+ i__2 = i__ - 1;
+ strmv_("Lower", "No transpose", "Unit", &i__2, &a[*k + 1 + a_dim1]
+ , lda, &t[*nb * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ saxpy_(&i__2, &c_b1290, &t[*nb * t_dim1 + 1], &c__1, &a[*k + 1 +
+ i__ * a_dim1], &c__1);
+
+ a[*k + i__ - 1 + (i__ - 1) * a_dim1] = ei;
+ }
+
+/*
+ Generate the elementary reflector H(i) to annihilate
+ A(k+i+1:n,i)
+*/
+
+ i__2 = *n - *k - i__ + 1;
+/* Computing MIN */
+ i__3 = *k + i__ + 1;
+ slarfg_(&i__2, &a[*k + i__ + i__ * a_dim1], &a[min(i__3,*n) + i__ *
+ a_dim1], &c__1, &tau[i__]);
+ ei = a[*k + i__ + i__ * a_dim1];
+ a[*k + i__ + i__ * a_dim1] = 1.f;
+
+/* Compute Y(1:n,i) */
+
+ i__2 = *n - *k - i__ + 1;
+ sgemv_("No transpose", n, &i__2, &c_b1011, &a[(i__ + 1) * a_dim1 + 1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b320, &y[i__ *
+ y_dim1 + 1], &c__1);
+ i__2 = *n - *k - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &a[*k + i__ + a_dim1],
+ lda, &a[*k + i__ + i__ * a_dim1], &c__1, &c_b320, &t[i__ *
+ t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ sgemv_("No transpose", n, &i__2, &c_b1290, &y[y_offset], ldy, &t[i__ *
+ t_dim1 + 1], &c__1, &c_b1011, &y[i__ * y_dim1 + 1], &c__1);
+ sscal_(n, &tau[i__], &y[i__ * y_dim1 + 1], &c__1);
+
+/* Compute T(1:i,i) */
+
+ i__2 = i__ - 1;
+ r__1 = -tau[i__];
+ sscal_(&i__2, &r__1, &t[i__ * t_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ strmv_("Upper", "No transpose", "Non-unit", &i__2, &t[t_offset], ldt,
+ &t[i__ * t_dim1 + 1], &c__1)
+ ;
+ t[i__ + i__ * t_dim1] = tau[i__];
+
+/* L10: */
+ }
+ a[*k + *nb + *nb * a_dim1] = ei;
+
+ return 0;
+
+/* End of SLAHRD */
+
+} /* slahrd_ */
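+
+/*
+   Illustrative sketch (assumed driver code, not part of the f2c
+   translation): the call shape for one panel, matching the n = 7,
+   k = 3, nb = 2 picture above. A holds the n-by-(n-k+1) panel in
+   column-major order with leading dimension lda >= n:
+
+       integer n = 7, k = 3, nb = 2, lda = 7, ldt = 2, ldy = 7;
+       real a[7 * 5];      // the 7-by-5 matrix A, column-major
+       real tau[2], t[2 * 2], y[7 * 2];
+       // ... fill a with the panel to be reduced ...
+       slahrd_(&n, &k, &nb, a, &lda, tau, t, &ldt, y, &ldy);
+       // on exit: reflectors in a/tau, triangular T in t, Y = A*V*T
+*/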
+
+/* Subroutine */ int slaln2_(logical *ltrans, integer *na, integer *nw, real *
+ smin, real *ca, real *a, integer *lda, real *d1, real *d2, real *b,
+ integer *ldb, real *wr, real *wi, real *x, integer *ldx, real *scale,
+ real *xnorm, integer *info)
+{
+ /* Initialized data */
+
+ static logical cswap[4] = { FALSE_,FALSE_,TRUE_,TRUE_ };
+ static logical rswap[4] = { FALSE_,TRUE_,FALSE_,TRUE_ };
+ static integer ipivot[16] /* was [4][4] */ = { 1,2,3,4,2,1,4,3,3,4,1,2,
+ 4,3,2,1 };
+
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, x_dim1, x_offset;
+ real r__1, r__2, r__3, r__4, r__5, r__6;
+ static real equiv_0[4], equiv_1[4];
+
+ /* Local variables */
+ static integer j;
+#define ci (equiv_0)
+#define cr (equiv_1)
+ static real bi1, bi2, br1, br2, xi1, xi2, xr1, xr2, ci21, ci22, cr21,
+ cr22, li21, csi, ui11, lr21, ui12, ui22;
+#define civ (equiv_0)
+ static real csr, ur11, ur12, ur22;
+#define crv (equiv_1)
+ static real bbnd, cmax, ui11r, ui12s, temp, ur11r, ur12s, u22abs;
+ static integer icmax;
+ static real bnorm, cnorm, smini;
+ extern doublereal slamch_(char *);
+ static real bignum;
+ extern /* Subroutine */ int sladiv_(real *, real *, real *, real *, real *
+ , real *);
+ static real smlnum;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLALN2 solves a system of the form (ca A - w D ) X = s B
+ or (ca A' - w D) X = s B with possible scaling ("s") and
+ perturbation of A. (A' means A-transpose.)
+
+ A is an NA x NA real matrix, ca is a real scalar, D is an NA x NA
+ real diagonal matrix, w is a real or complex value, and X and B are
+ NA x 1 matrices -- real if w is real, complex if w is complex. NA
+ may be 1 or 2.
+
+ If w is complex, X and B are represented as NA x 2 matrices,
+ the first column of each being the real part and the second
+ being the imaginary part.
+
+ "s" is a scaling factor (.LE. 1), computed by SLALN2, which is
+ so chosen that X can be computed without overflow. X is further
+ scaled if necessary to assure that norm(ca A - w D)*norm(X) is less
+ than overflow.
+
+ If both singular values of (ca A - w D) are less than SMIN,
+ SMIN*identity will be used instead of (ca A - w D). If only one
+ singular value is less than SMIN, one element of (ca A - w D) will be
+ perturbed enough to make the smallest singular value roughly SMIN.
+ If both singular values are at least SMIN, (ca A - w D) will not be
+ perturbed. In any case, the perturbation will be at most some small
+ multiple of max( SMIN, ulp*norm(ca A - w D) ). The singular values
+ are computed by infinity-norm approximations, and thus will only be
+ correct to a factor of 2 or so.
+
+ Note: all input quantities are assumed to be smaller than overflow
+ by a reasonable factor. (See BIGNUM.)
+
+ Arguments
+ =========
+
+ LTRANS (input) LOGICAL
+ =.TRUE.: A-transpose will be used.
+ =.FALSE.: A will be used (not transposed.)
+
+ NA (input) INTEGER
+ The size of the matrix A. It may (only) be 1 or 2.
+
+ NW (input) INTEGER
+ 1 if "w" is real, 2 if "w" is complex. It may only be 1
+ or 2.
+
+ SMIN (input) REAL
+ The desired lower bound on the singular values of A. This
+ should be a safe distance away from underflow or overflow,
+ say, between (underflow/machine precision) and (machine
+ precision * overflow ). (See BIGNUM and ULP.)
+
+ CA (input) REAL
+ The coefficient c, which A is multiplied by.
+
+ A (input) REAL array, dimension (LDA,NA)
+ The NA x NA matrix A.
+
+ LDA (input) INTEGER
+ The leading dimension of A. It must be at least NA.
+
+ D1 (input) REAL
+ The 1,1 element in the diagonal matrix D.
+
+ D2 (input) REAL
+ The 2,2 element in the diagonal matrix D. Not used if NW=1.
+
+ B (input) REAL array, dimension (LDB,NW)
+ The NA x NW matrix B (right-hand side). If NW=2 ("w" is
+ complex), column 1 contains the real part of B and column 2
+ contains the imaginary part.
+
+ LDB (input) INTEGER
+ The leading dimension of B. It must be at least NA.
+
+ WR (input) REAL
+ The real part of the scalar "w".
+
+ WI (input) REAL
+ The imaginary part of the scalar "w". Not used if NW=1.
+
+ X (output) REAL array, dimension (LDX,NW)
+ The NA x NW matrix X (unknowns), as computed by SLALN2.
+ If NW=2 ("w" is complex), on exit, column 1 will contain
+ the real part of X and column 2 will contain the imaginary
+ part.
+
+ LDX (input) INTEGER
+ The leading dimension of X. It must be at least NA.
+
+ SCALE (output) REAL
+ The scale factor that B must be multiplied by to ensure
+ that overflow does not occur when computing X. Thus,
+ (ca A - w D) X will be SCALE*B, not B (ignoring
+ perturbations of A.) It will be at most 1.
+
+ XNORM (output) REAL
+ The infinity-norm of X, when X is regarded as an NA x NW
+ real matrix.
+
+ INFO (output) INTEGER
+ An error flag. It will be set to zero if no error occurs,
+ a negative number if an argument is in error, or a positive
+ number if ca A - w D had to be perturbed.
+ The possible values are:
+ = 0: No error occurred, and (ca A - w D) did not have to be
+ perturbed.
+ = 1: (ca A - w D) had to be perturbed to make its smallest
+ (or only) singular value greater than SMIN.
+ NOTE: In the interests of speed, this routine does not
+ check the inputs for errors.
+
+ =====================================================================
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ x_dim1 = *ldx;
+ x_offset = 1 + x_dim1;
+ x -= x_offset;
+
+ /* Function Body */
+
+/* Compute BIGNUM */
+
+ smlnum = 2.f * slamch_("Safe minimum");
+ bignum = 1.f / smlnum;
+ smini = dmax(*smin,smlnum);
+
+/* Don't check for input errors */
+
+ *info = 0;
+
+/* Standard Initializations */
+
+ *scale = 1.f;
+
+ if (*na == 1) {
+
+/* 1 x 1 (i.e., scalar) system C X = B */
+
+ if (*nw == 1) {
+
+/*
+ Real 1x1 system.
+
+ C = ca A - w D
+*/
+
+ csr = *ca * a[a_dim1 + 1] - *wr * *d1;
+ cnorm = dabs(csr);
+
+/* If | C | < SMINI, use C = SMINI */
+
+ if (cnorm < smini) {
+ csr = smini;
+ cnorm = smini;
+ *info = 1;
+ }
+
+/* Check scaling for X = B / C */
+
+ bnorm = (r__1 = b[b_dim1 + 1], dabs(r__1));
+ if (cnorm < 1.f && bnorm > 1.f) {
+ if (bnorm > bignum * cnorm) {
+ *scale = 1.f / bnorm;
+ }
+ }
+
+/* Compute X */
+
+ x[x_dim1 + 1] = b[b_dim1 + 1] * *scale / csr;
+ *xnorm = (r__1 = x[x_dim1 + 1], dabs(r__1));
+ } else {
+
+/*
+ Complex 1x1 system (w is complex)
+
+ C = ca A - w D
+*/
+
+ csr = *ca * a[a_dim1 + 1] - *wr * *d1;
+ csi = -(*wi) * *d1;
+ cnorm = dabs(csr) + dabs(csi);
+
+/* If | C | < SMINI, use C = SMINI */
+
+ if (cnorm < smini) {
+ csr = smini;
+ csi = 0.f;
+ cnorm = smini;
+ *info = 1;
+ }
+
+/* Check scaling for X = B / C */
+
+ bnorm = (r__1 = b[b_dim1 + 1], dabs(r__1)) + (r__2 = b[((b_dim1)
+ << (1)) + 1], dabs(r__2));
+ if (cnorm < 1.f && bnorm > 1.f) {
+ if (bnorm > bignum * cnorm) {
+ *scale = 1.f / bnorm;
+ }
+ }
+
+/* Compute X */
+
+ r__1 = *scale * b[b_dim1 + 1];
+ r__2 = *scale * b[((b_dim1) << (1)) + 1];
+ sladiv_(&r__1, &r__2, &csr, &csi, &x[x_dim1 + 1], &x[((x_dim1) <<
+ (1)) + 1]);
+ *xnorm = (r__1 = x[x_dim1 + 1], dabs(r__1)) + (r__2 = x[((x_dim1)
+ << (1)) + 1], dabs(r__2));
+ }
+
+ } else {
+
+/*
+ 2x2 System
+
+ Compute the real part of C = ca A - w D (or ca A' - w D )
+*/
+
+ cr[0] = *ca * a[a_dim1 + 1] - *wr * *d1;
+ cr[3] = *ca * a[((a_dim1) << (1)) + 2] - *wr * *d2;
+ if (*ltrans) {
+ cr[2] = *ca * a[a_dim1 + 2];
+ cr[1] = *ca * a[((a_dim1) << (1)) + 1];
+ } else {
+ cr[1] = *ca * a[a_dim1 + 2];
+ cr[2] = *ca * a[((a_dim1) << (1)) + 1];
+ }
+
+ if (*nw == 1) {
+
+/*
+ Real 2x2 system (w is real)
+
+ Find the largest element in C
+*/
+
+ cmax = 0.f;
+ icmax = 0;
+
+ for (j = 1; j <= 4; ++j) {
+ if ((r__1 = crv[j - 1], dabs(r__1)) > cmax) {
+ cmax = (r__1 = crv[j - 1], dabs(r__1));
+ icmax = j;
+ }
+/* L10: */
+ }
+
+/* If norm(C) < SMINI, use SMINI*identity. */
+
+ if (cmax < smini) {
+/* Computing MAX */
+ r__3 = (r__1 = b[b_dim1 + 1], dabs(r__1)), r__4 = (r__2 = b[
+ b_dim1 + 2], dabs(r__2));
+ bnorm = dmax(r__3,r__4);
+ if (smini < 1.f && bnorm > 1.f) {
+ if (bnorm > bignum * smini) {
+ *scale = 1.f / bnorm;
+ }
+ }
+ temp = *scale / smini;
+ x[x_dim1 + 1] = temp * b[b_dim1 + 1];
+ x[x_dim1 + 2] = temp * b[b_dim1 + 2];
+ *xnorm = temp * bnorm;
+ *info = 1;
+ return 0;
+ }
+
+/* Gaussian elimination with complete pivoting. */
+
+ ur11 = crv[icmax - 1];
+ cr21 = crv[ipivot[((icmax) << (2)) - 3] - 1];
+ ur12 = crv[ipivot[((icmax) << (2)) - 2] - 1];
+ cr22 = crv[ipivot[((icmax) << (2)) - 1] - 1];
+ ur11r = 1.f / ur11;
+ lr21 = ur11r * cr21;
+ ur22 = cr22 - ur12 * lr21;
+
+/* If smaller pivot < SMINI, use SMINI */
+
+ if (dabs(ur22) < smini) {
+ ur22 = smini;
+ *info = 1;
+ }
+ if (rswap[icmax - 1]) {
+ br1 = b[b_dim1 + 2];
+ br2 = b[b_dim1 + 1];
+ } else {
+ br1 = b[b_dim1 + 1];
+ br2 = b[b_dim1 + 2];
+ }
+ br2 -= lr21 * br1;
+/* Computing MAX */
+ r__2 = (r__1 = br1 * (ur22 * ur11r), dabs(r__1)), r__3 = dabs(br2)
+ ;
+ bbnd = dmax(r__2,r__3);
+ if (bbnd > 1.f && dabs(ur22) < 1.f) {
+ if (bbnd >= bignum * dabs(ur22)) {
+ *scale = 1.f / bbnd;
+ }
+ }
+
+ xr2 = br2 * *scale / ur22;
+ xr1 = *scale * br1 * ur11r - xr2 * (ur11r * ur12);
+ if (cswap[icmax - 1]) {
+ x[x_dim1 + 1] = xr2;
+ x[x_dim1 + 2] = xr1;
+ } else {
+ x[x_dim1 + 1] = xr1;
+ x[x_dim1 + 2] = xr2;
+ }
+/* Computing MAX */
+ r__1 = dabs(xr1), r__2 = dabs(xr2);
+ *xnorm = dmax(r__1,r__2);
+
+/* Further scaling if norm(A) norm(X) > overflow */
+
+ if (*xnorm > 1.f && cmax > 1.f) {
+ if (*xnorm > bignum / cmax) {
+ temp = cmax / bignum;
+ x[x_dim1 + 1] = temp * x[x_dim1 + 1];
+ x[x_dim1 + 2] = temp * x[x_dim1 + 2];
+ *xnorm = temp * *xnorm;
+ *scale = temp * *scale;
+ }
+ }
+ } else {
+
+/*
+ Complex 2x2 system (w is complex)
+
+ Find the largest element in C
+*/
+
+ ci[0] = -(*wi) * *d1;
+ ci[1] = 0.f;
+ ci[2] = 0.f;
+ ci[3] = -(*wi) * *d2;
+ cmax = 0.f;
+ icmax = 0;
+
+ for (j = 1; j <= 4; ++j) {
+ if ((r__1 = crv[j - 1], dabs(r__1)) + (r__2 = civ[j - 1],
+ dabs(r__2)) > cmax) {
+ cmax = (r__1 = crv[j - 1], dabs(r__1)) + (r__2 = civ[j -
+ 1], dabs(r__2));
+ icmax = j;
+ }
+/* L20: */
+ }
+
+/* If norm(C) < SMINI, use SMINI*identity. */
+
+ if (cmax < smini) {
+/* Computing MAX */
+ r__5 = (r__1 = b[b_dim1 + 1], dabs(r__1)) + (r__2 = b[((
+ b_dim1) << (1)) + 1], dabs(r__2)), r__6 = (r__3 = b[
+ b_dim1 + 2], dabs(r__3)) + (r__4 = b[((b_dim1) << (1))
+ + 2], dabs(r__4));
+ bnorm = dmax(r__5,r__6);
+ if (smini < 1.f && bnorm > 1.f) {
+ if (bnorm > bignum * smini) {
+ *scale = 1.f / bnorm;
+ }
+ }
+ temp = *scale / smini;
+ x[x_dim1 + 1] = temp * b[b_dim1 + 1];
+ x[x_dim1 + 2] = temp * b[b_dim1 + 2];
+ x[((x_dim1) << (1)) + 1] = temp * b[((b_dim1) << (1)) + 1];
+ x[((x_dim1) << (1)) + 2] = temp * b[((b_dim1) << (1)) + 2];
+ *xnorm = temp * bnorm;
+ *info = 1;
+ return 0;
+ }
+
+/* Gaussian elimination with complete pivoting. */
+
+ ur11 = crv[icmax - 1];
+ ui11 = civ[icmax - 1];
+ cr21 = crv[ipivot[((icmax) << (2)) - 3] - 1];
+ ci21 = civ[ipivot[((icmax) << (2)) - 3] - 1];
+ ur12 = crv[ipivot[((icmax) << (2)) - 2] - 1];
+ ui12 = civ[ipivot[((icmax) << (2)) - 2] - 1];
+ cr22 = crv[ipivot[((icmax) << (2)) - 1] - 1];
+ ci22 = civ[ipivot[((icmax) << (2)) - 1] - 1];
+ if ((icmax == 1) || (icmax == 4)) {
+
+/* Code when off-diagonals of pivoted C are real */
+
+ if (dabs(ur11) > dabs(ui11)) {
+ temp = ui11 / ur11;
+/* Computing 2nd power */
+ r__1 = temp;
+ ur11r = 1.f / (ur11 * (r__1 * r__1 + 1.f));
+ ui11r = -temp * ur11r;
+ } else {
+ temp = ur11 / ui11;
+/* Computing 2nd power */
+ r__1 = temp;
+ ui11r = -1.f / (ui11 * (r__1 * r__1 + 1.f));
+ ur11r = -temp * ui11r;
+ }
+ lr21 = cr21 * ur11r;
+ li21 = cr21 * ui11r;
+ ur12s = ur12 * ur11r;
+ ui12s = ur12 * ui11r;
+ ur22 = cr22 - ur12 * lr21;
+ ui22 = ci22 - ur12 * li21;
+ } else {
+
+/* Code when diagonals of pivoted C are real */
+
+ ur11r = 1.f / ur11;
+ ui11r = 0.f;
+ lr21 = cr21 * ur11r;
+ li21 = ci21 * ur11r;
+ ur12s = ur12 * ur11r;
+ ui12s = ui12 * ur11r;
+ ur22 = cr22 - ur12 * lr21 + ui12 * li21;
+ ui22 = -ur12 * li21 - ui12 * lr21;
+ }
+ u22abs = dabs(ur22) + dabs(ui22);
+
+/* If smaller pivot < SMINI, use SMINI */
+
+ if (u22abs < smini) {
+ ur22 = smini;
+ ui22 = 0.f;
+ *info = 1;
+ }
+ if (rswap[icmax - 1]) {
+ br2 = b[b_dim1 + 1];
+ br1 = b[b_dim1 + 2];
+ bi2 = b[((b_dim1) << (1)) + 1];
+ bi1 = b[((b_dim1) << (1)) + 2];
+ } else {
+ br1 = b[b_dim1 + 1];
+ br2 = b[b_dim1 + 2];
+ bi1 = b[((b_dim1) << (1)) + 1];
+ bi2 = b[((b_dim1) << (1)) + 2];
+ }
+ br2 = br2 - lr21 * br1 + li21 * bi1;
+ bi2 = bi2 - li21 * br1 - lr21 * bi1;
+/* Computing MAX */
+ r__1 = (dabs(br1) + dabs(bi1)) * (u22abs * (dabs(ur11r) + dabs(
+ ui11r))), r__2 = dabs(br2) + dabs(bi2);
+ bbnd = dmax(r__1,r__2);
+ if (bbnd > 1.f && u22abs < 1.f) {
+ if (bbnd >= bignum * u22abs) {
+ *scale = 1.f / bbnd;
+ br1 = *scale * br1;
+ bi1 = *scale * bi1;
+ br2 = *scale * br2;
+ bi2 = *scale * bi2;
+ }
+ }
+
+ sladiv_(&br2, &bi2, &ur22, &ui22, &xr2, &xi2);
+ xr1 = ur11r * br1 - ui11r * bi1 - ur12s * xr2 + ui12s * xi2;
+ xi1 = ui11r * br1 + ur11r * bi1 - ui12s * xr2 - ur12s * xi2;
+ if (cswap[icmax - 1]) {
+ x[x_dim1 + 1] = xr2;
+ x[x_dim1 + 2] = xr1;
+ x[((x_dim1) << (1)) + 1] = xi2;
+ x[((x_dim1) << (1)) + 2] = xi1;
+ } else {
+ x[x_dim1 + 1] = xr1;
+ x[x_dim1 + 2] = xr2;
+ x[((x_dim1) << (1)) + 1] = xi1;
+ x[((x_dim1) << (1)) + 2] = xi2;
+ }
+/* Computing MAX */
+ r__1 = dabs(xr1) + dabs(xi1), r__2 = dabs(xr2) + dabs(xi2);
+ *xnorm = dmax(r__1,r__2);
+
+/* Further scaling if norm(A) norm(X) > overflow */
+
+ if (*xnorm > 1.f && cmax > 1.f) {
+ if (*xnorm > bignum / cmax) {
+ temp = cmax / bignum;
+ x[x_dim1 + 1] = temp * x[x_dim1 + 1];
+ x[x_dim1 + 2] = temp * x[x_dim1 + 2];
+ x[((x_dim1) << (1)) + 1] = temp * x[((x_dim1) << (1)) + 1]
+ ;
+ x[((x_dim1) << (1)) + 2] = temp * x[((x_dim1) << (1)) + 2]
+ ;
+ *xnorm = temp * *xnorm;
+ *scale = temp * *scale;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SLALN2 */
+
+} /* slaln2_ */
+
+#undef crv
+#undef civ
+#undef cr
+#undef ci
+
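+/*
+   Illustrative sketch (assumed driver code, not part of the f2c
+   translation): the simplest SLALN2 case, a real 1-by-1 system
+   (ca*A - wr*D) X = SCALE*B with ca = 1, A = (4), D = (1), wr = 1
+   and B = (6), so X = 6 / (4 - 1) = 2 with no scaling needed:
+
+       real a = 4.f, d1 = 1.f, d2 = 0.f, b = 6.f;
+       real ca = 1.f, wr = 1.f, wi = 0.f, smin = 1e-20f;
+       real x, scale, xnorm;
+       integer na = 1, nw = 1, lda = 1, ldb = 1, ldx = 1, info;
+       logical ltrans = FALSE_;
+       slaln2_(&ltrans, &na, &nw, &smin, &ca, &a, &lda, &d1, &d2,
+               &b, &ldb, &wr, &wi, &x, &ldx, &scale, &xnorm, &info);
+       // info == 0, scale == 1.f, x == 2.f, xnorm == 2.f
+*/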
+
+/* Subroutine */ int slals0_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *nrhs, real *b, integer *ldb, real *bx,
+ integer *ldbx, integer *perm, integer *givptr, integer *givcol,
+ integer *ldgcol, real *givnum, integer *ldgnum, real *poles, real *
+ difl, real *difr, real *z__, integer *k, real *c__, real *s, real *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, b_dim1, b_offset, bx_dim1, bx_offset,
+ difr_dim1, difr_offset, givnum_dim1, givnum_offset, poles_dim1,
+ poles_offset, i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ static integer i__, j, m, n;
+ static real dj;
+ static integer nlp1;
+ static real temp;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ extern doublereal snrm2_(integer *, real *, integer *);
+ static real diflj, difrj, dsigj;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *), scopy_(
+ integer *, real *, integer *, real *, integer *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static real dsigjp;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), slacpy_(char *, integer *, integer *, real *, integer *,
+ real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ December 1, 1999
+
+
+ Purpose
+ =======
+
+ SLALS0 applies back the multiplying factors of either the left or the
+ right singular vector matrix of a diagonal matrix appended by a row
+ to the right hand side matrix B in solving the least squares problem
+ using the divide-and-conquer SVD approach.
+
+ For the left singular vector matrix, three types of orthogonal
+ matrices are involved:
+
+ (1L) Givens rotations: the number of such rotations is GIVPTR; the
+ pairs of columns/rows they were applied to are stored in GIVCOL;
+ and the C- and S-values of these rotations are stored in GIVNUM.
+
+ (2L) Permutation. The (NL+1)-st row of B is to be moved to the first
+ row, and for J=2:N, PERM(J)-th row of B is to be moved to the
+ J-th row.
+
+ (3L) The left singular vector matrix of the remaining matrix.
+
+ For the right singular vector matrix, four types of orthogonal
+ matrices are involved:
+
+ (1R) The right singular vector matrix of the remaining matrix.
+
+ (2R) If SQRE = 1, one extra Givens rotation to generate the right
+ null space.
+
+ (3R) The inverse transformation of (2L).
+
+ (4R) The inverse transformation of (1L).
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed in
+ factored form:
+ = 0: Left singular vector matrix.
+ = 1: Right singular vector matrix.
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has row dimension N = NL + NR + 1,
+ and column dimension M = N + SQRE.
+
+ NRHS (input) INTEGER
+ The number of columns of B and BX. NRHS must be at least 1.
+
+ B (input/output) REAL array, dimension ( LDB, NRHS )
+ On input, B contains the right hand sides of the least
+ squares problem in rows 1 through M. On output, B contains
+ the solution X in rows 1 through N.
+
+ LDB (input) INTEGER
+ The leading dimension of B. LDB must be at least
+ max(1,MAX( M, N ) ).
+
+ BX (workspace) REAL array, dimension ( LDBX, NRHS )
+
+ LDBX (input) INTEGER
+ The leading dimension of BX.
+
+ PERM (input) INTEGER array, dimension ( N )
+ The permutations (from deflation and sorting) applied
+ to the two blocks.
+
+ GIVPTR (input) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem.
+
+ GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 )
+ Each pair of numbers indicates a pair of rows/columns
+ involved in a Givens rotation.
+
+ LDGCOL (input) INTEGER
+ The leading dimension of GIVCOL, must be at least N.
+
+ GIVNUM (input) REAL array, dimension ( LDGNUM, 2 )
+ Each number indicates the C or S value used in the
+ corresponding Givens rotation.
+
+ LDGNUM (input) INTEGER
+ The leading dimension of arrays DIFR, POLES and
+ GIVNUM, must be at least K.
+
+ POLES (input) REAL array, dimension ( LDGNUM, 2 )
+ On entry, POLES(1:K, 1) contains the new singular
+ values obtained from solving the secular equation, and
+ POLES(1:K, 2) is an array containing the poles in the secular
+ equation.
+
+ DIFL (input) REAL array, dimension ( K ).
+ On entry, DIFL(I) is the distance between the I-th updated
+ (undeflated) singular value and the I-th (undeflated) old
+ singular value.
+
+ DIFR (input) REAL array, dimension ( LDGNUM, 2 ).
+ On entry, DIFR(I, 1) contains the distance between the I-th
+ updated (undeflated) singular value and the (I+1)-th
+ (undeflated) old singular value, and DIFR(I, 2) is the
+ normalizing factor for the I-th right singular vector.
+
+ Z (input) REAL array, dimension ( K )
+ Contains the components of the deflation-adjusted updating row
+ vector.
+
+ K (input) INTEGER
+ Contains the dimension of the non-deflated matrix.
+ This is the order of the related secular equation. 1 <= K <= N.
+
+ C (input) REAL
+ C contains garbage if SQRE = 0 and the C-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ S (input) REAL
+ S contains garbage if SQRE = 0 and the S-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ WORK (workspace) REAL array, dimension ( K )
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ difr_dim1 = *ldgnum;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ --difl;
+ --z__;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -4;
+ }
+
+ n = *nl + *nr + 1;
+
+ if (*nrhs < 1) {
+ *info = -5;
+ } else if (*ldb < n) {
+ *info = -7;
+ } else if (*ldbx < n) {
+ *info = -9;
+ } else if (*givptr < 0) {
+ *info = -11;
+ } else if (*ldgcol < n) {
+ *info = -13;
+ } else if (*ldgnum < n) {
+ *info = -15;
+ } else if (*k < 1) {
+ *info = -20;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLALS0", &i__1);
+ return 0;
+ }
+
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+
+ if (*icompq == 0) {
+
+/*
+ Apply back orthogonal transformations from the left.
+
+ Step (1L): apply back the Givens rotations performed.
+*/
+
+ i__1 = *givptr;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ srot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
+ ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
+ i__ + ((givnum_dim1) << (1))], &givnum[i__ + givnum_dim1])
+ ;
+/* L10: */
+ }
+
+/* Step (2L): permute rows of B. */
+
+ scopy_(nrhs, &b[nlp1 + b_dim1], ldb, &bx[bx_dim1 + 1], ldbx);
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ scopy_(nrhs, &b[perm[i__] + b_dim1], ldb, &bx[i__ + bx_dim1],
+ ldbx);
+/* L20: */
+ }
+
+/*
+ Step (3L): apply the inverse of the left singular vector
+ matrix to BX.
+*/
+
+ if (*k == 1) {
+ scopy_(nrhs, &bx[bx_offset], ldbx, &b[b_offset], ldb);
+ if (z__[1] < 0.f) {
+ sscal_(nrhs, &c_b1290, &b[b_offset], ldb);
+ }
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = poles[j + poles_dim1];
+ dsigj = -poles[j + ((poles_dim1) << (1))];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -poles[j + 1 + ((poles_dim1) << (1))];
+ }
+ if ((z__[j] == 0.f) || (poles[j + ((poles_dim1) << (1))] ==
+ 0.f)) {
+ work[j] = 0.f;
+ } else {
+ work[j] = -poles[j + ((poles_dim1) << (1))] * z__[j] /
+ diflj / (poles[j + ((poles_dim1) << (1))] + dj);
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if ((z__[i__] == 0.f) || (poles[i__ + ((poles_dim1) << (1)
+ )] == 0.f)) {
+ work[i__] = 0.f;
+ } else {
+ work[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
+ i__] / (slamc3_(&poles[i__ + ((poles_dim1) <<
+ (1))], &dsigj) - diflj) / (poles[i__ + ((
+ poles_dim1) << (1))] + dj);
+ }
+/* L30: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if ((z__[i__] == 0.f) || (poles[i__ + ((poles_dim1) << (1)
+ )] == 0.f)) {
+ work[i__] = 0.f;
+ } else {
+ work[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
+ i__] / (slamc3_(&poles[i__ + ((poles_dim1) <<
+ (1))], &dsigjp) + difrj) / (poles[i__ + ((
+ poles_dim1) << (1))] + dj);
+ }
+/* L40: */
+ }
+ work[1] = -1.f;
+ temp = snrm2_(k, &work[1], &c__1);
+ sgemv_("T", k, nrhs, &c_b1011, &bx[bx_offset], ldbx, &work[1],
+ &c__1, &c_b320, &b[j + b_dim1], ldb);
+ slascl_("G", &c__0, &c__0, &temp, &c_b1011, &c__1, nrhs, &b[j
+ + b_dim1], ldb, info);
+/* L50: */
+ }
+ }
+
+/* Move the deflated rows of BX to B also. */
+
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ slacpy_("A", &i__1, nrhs, &bx[*k + 1 + bx_dim1], ldbx, &b[*k + 1
+ + b_dim1], ldb);
+ }
+ } else {
+
+/*
+ Apply back the right orthogonal transformations.
+
+ Step (1R): apply back the new right singular vector matrix
+ to B.
+*/
+
+ if (*k == 1) {
+ scopy_(nrhs, &b[b_offset], ldb, &bx[bx_offset], ldbx);
+ } else {
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ dsigj = poles[j + ((poles_dim1) << (1))];
+ if (z__[j] == 0.f) {
+ work[j] = 0.f;
+ } else {
+ work[j] = -z__[j] / difl[j] / (dsigj + poles[j +
+ poles_dim1]) / difr[j + ((difr_dim1) << (1))];
+ }
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.f) {
+ work[i__] = 0.f;
+ } else {
+ r__1 = -poles[i__ + 1 + ((poles_dim1) << (1))];
+ work[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difr[
+ i__ + difr_dim1]) / (dsigj + poles[i__ +
+ poles_dim1]) / difr[i__ + ((difr_dim1) << (1))
+ ];
+ }
+/* L60: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ if (z__[j] == 0.f) {
+ work[i__] = 0.f;
+ } else {
+ r__1 = -poles[i__ + ((poles_dim1) << (1))];
+ work[i__] = z__[j] / (slamc3_(&dsigj, &r__1) - difl[
+ i__]) / (dsigj + poles[i__ + poles_dim1]) /
+ difr[i__ + ((difr_dim1) << (1))];
+ }
+/* L70: */
+ }
+ sgemv_("T", k, nrhs, &c_b1011, &b[b_offset], ldb, &work[1], &
+ c__1, &c_b320, &bx[j + bx_dim1], ldbx);
+/* L80: */
+ }
+ }
+
+/*
+ Step (2R): if SQRE = 1, apply back the rotation that is
+ related to the right null space of the subproblem.
+*/
+
+ if (*sqre == 1) {
+ scopy_(nrhs, &b[m + b_dim1], ldb, &bx[m + bx_dim1], ldbx);
+ srot_(nrhs, &bx[bx_dim1 + 1], ldbx, &bx[m + bx_dim1], ldbx, c__,
+ s);
+ }
+ if (*k < max(m,n)) {
+ i__1 = n - *k;
+ slacpy_("A", &i__1, nrhs, &b[*k + 1 + b_dim1], ldb, &bx[*k + 1 +
+ bx_dim1], ldbx);
+ }
+
+/* Step (3R): permute rows of B. */
+
+ scopy_(nrhs, &bx[bx_dim1 + 1], ldbx, &b[nlp1 + b_dim1], ldb);
+ if (*sqre == 1) {
+ scopy_(nrhs, &bx[m + bx_dim1], ldbx, &b[m + b_dim1], ldb);
+ }
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ scopy_(nrhs, &bx[i__ + bx_dim1], ldbx, &b[perm[i__] + b_dim1],
+ ldb);
+/* L90: */
+ }
+
+/* Step (4R): apply back the Givens rotations performed. */
+
+ for (i__ = *givptr; i__ >= 1; --i__) {
+ r__1 = -givnum[i__ + givnum_dim1];
+ srot_(nrhs, &b[givcol[i__ + ((givcol_dim1) << (1))] + b_dim1],
+ ldb, &b[givcol[i__ + givcol_dim1] + b_dim1], ldb, &givnum[
+ i__ + ((givnum_dim1) << (1))], &r__1);
+/* L100: */
+ }
+ }
+
+ return 0;
+
+/* End of SLALS0 */
+
+} /* slals0_ */
+
+/* Subroutine */ int slalsa_(integer *icompq, integer *smlsiz, integer *n,
+ integer *nrhs, real *b, integer *ldb, real *bx, integer *ldbx, real *
+ u, integer *ldu, real *vt, integer *k, real *difl, real *difr, real *
+ z__, real *poles, integer *givptr, integer *givcol, integer *ldgcol,
+ integer *perm, real *givnum, real *c__, real *s, real *work, integer *
+ iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, b_dim1,
+ b_offset, bx_dim1, bx_offset, difl_dim1, difl_offset, difr_dim1,
+ difr_offset, givnum_dim1, givnum_offset, poles_dim1, poles_offset,
+ u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1,
+ i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl,
+ ndb1, nlp1, lvl2, nrp1, nlvl, sqre, inode, ndiml;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer ndimr;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), slals0_(integer *, integer *, integer *, integer *,
+ integer *, real *, integer *, real *, integer *, integer *,
+ integer *, integer *, integer *, real *, integer *, real *, real *
+ , real *, real *, integer *, real *, real *, real *, integer *),
+ xerbla_(char *, integer *), slasdt_(integer *, integer *,
+ integer *, integer *, integer *, integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+   SLALSA is an intermediate step in solving the least squares problem
+   by computing the SVD of the coefficient matrix in compact form (the
+   singular vectors are computed as products of simple orthogonal
+   matrices).
+
+ If ICOMPQ = 0, SLALSA applies the inverse of the left singular vector
+ matrix of an upper bidiagonal matrix to the right hand side; and if
+ ICOMPQ = 1, SLALSA applies the right singular vector matrix to the
+ right hand side. The singular vector matrices were generated in
+   compact form by SLASDA.
+
+ Arguments
+ =========
+
+
+ ICOMPQ (input) INTEGER
+ Specifies whether the left or the right singular vector
+ matrix is involved.
+ = 0: Left singular vector matrix
+ = 1: Right singular vector matrix
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The row and column dimensions of the upper bidiagonal matrix.
+
+ NRHS (input) INTEGER
+ The number of columns of B and BX. NRHS must be at least 1.
+
+ B (input) REAL array, dimension ( LDB, NRHS )
+ On input, B contains the right hand sides of the least
+ squares problem in rows 1 through M. On output, B contains
+ the solution X in rows 1 through N.
+
+ LDB (input) INTEGER
+ The leading dimension of B in the calling subprogram.
+ LDB must be at least max(1,MAX( M, N ) ).
+
+ BX (output) REAL array, dimension ( LDBX, NRHS )
+ On exit, the result of applying the left or right singular
+ vector matrix to B.
+
+ LDBX (input) INTEGER
+ The leading dimension of BX.
+
+ U (input) REAL array, dimension ( LDU, SMLSIZ ).
+ On entry, U contains the left singular vector matrices of all
+ subproblems at the bottom level.
+
+    LDU    (input) INTEGER, LDU >= N.
+ The leading dimension of arrays U, VT, DIFL, DIFR,
+ POLES, GIVNUM, and Z.
+
+ VT (input) REAL array, dimension ( LDU, SMLSIZ+1 ).
+ On entry, VT' contains the right singular vector matrices of
+ all subproblems at the bottom level.
+
+ K (input) INTEGER array, dimension ( N ).
+
+ DIFL (input) REAL array, dimension ( LDU, NLVL ).
+ where NLVL = INT(log_2 (N/(SMLSIZ+1))) + 1.
+
+ DIFR (input) REAL array, dimension ( LDU, 2 * NLVL ).
+           On entry, DIFL(*, I) and DIFR(*, 2 * I -1) record
+           distances between singular values on the I-th level and
+           singular values on the (I -1)-th level, and DIFR(*, 2 * I)
+           records the normalizing factors of the right singular
+           vector matrices of subproblems on the I-th level.
+
+ Z (input) REAL array, dimension ( LDU, NLVL ).
+ On entry, Z(1, I) contains the components of the deflation-
+ adjusted updating row vector for subproblems on the I-th
+ level.
+
+ POLES (input) REAL array, dimension ( LDU, 2 * NLVL ).
+ On entry, POLES(*, 2 * I -1: 2 * I) contains the new and old
+ singular values involved in the secular equations on the I-th
+ level.
+
+ GIVPTR (input) INTEGER array, dimension ( N ).
+ On entry, GIVPTR( I ) records the number of Givens
+ rotations performed on the I-th problem on the computation
+ tree.
+
+ GIVCOL (input) INTEGER array, dimension ( LDGCOL, 2 * NLVL ).
+ On entry, for each I, GIVCOL(*, 2 * I - 1: 2 * I) records the
+ locations of Givens rotations performed on the I-th level on
+ the computation tree.
+
+    LDGCOL (input) INTEGER, LDGCOL >= N.
+ The leading dimension of arrays GIVCOL and PERM.
+
+ PERM (input) INTEGER array, dimension ( LDGCOL, NLVL ).
+ On entry, PERM(*, I) records permutations done on the I-th
+ level of the computation tree.
+
+ GIVNUM (input) REAL array, dimension ( LDU, 2 * NLVL ).
+ On entry, GIVNUM(*, 2 *I -1 : 2 * I) records the C- and S-
+ values of Givens rotations performed on the I-th level on the
+ computation tree.
+
+ C (input) REAL array, dimension ( N ).
+ On entry, if the I-th subproblem is not square,
+ C( I ) contains the C-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ S (input) REAL array, dimension ( N ).
+ On entry, if the I-th subproblem is not square,
+ S( I ) contains the S-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ WORK (workspace) REAL array.
+ The dimension must be at least N.
+
+ IWORK (workspace) INTEGER array.
+           The dimension must be at least 3 * N.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ bx_dim1 = *ldbx;
+ bx_offset = 1 + bx_dim1;
+ bx -= bx_offset;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < *smlsiz) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if (*ldb < *n) {
+ *info = -6;
+ } else if (*ldbx < *n) {
+ *info = -8;
+ } else if (*ldu < *n) {
+ *info = -10;
+ } else if (*ldgcol < *n) {
+ *info = -19;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLALSA", &i__1);
+ return 0;
+ }
+
+/* Book-keeping and setting up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+
+ slasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/*
+ The following code applies back the left singular vector factors.
+ For applying back the right singular vector factors, go to 50.
+*/
+
+ if (*icompq == 1) {
+ goto L50;
+ }
+
+/*
+ The nodes on the bottom level of the tree were solved
+ by SLASDQ. The corresponding left and right singular vector
+ matrices are in explicit form. First apply back the left
+ singular vector matrices.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+ IC : center row of each node
+ NL : number of rows of left subproblem
+ NR : number of rows of right subproblem
+ NLF: starting row of the left subproblem
+ NRF: starting row of the right subproblem
+*/
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ sgemm_("T", "N", &nl, nrhs, &nl, &c_b1011, &u[nlf + u_dim1], ldu, &b[
+ nlf + b_dim1], ldb, &c_b320, &bx[nlf + bx_dim1], ldbx);
+ sgemm_("T", "N", &nr, nrhs, &nr, &c_b1011, &u[nrf + u_dim1], ldu, &b[
+ nrf + b_dim1], ldb, &c_b320, &bx[nrf + bx_dim1], ldbx);
+/* L10: */
+ }
+
+/*
+ Next copy the rows of B that correspond to unchanged rows
+ in the bidiagonal matrix to BX.
+*/
+
+ i__1 = nd;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ ic = iwork[inode + i__ - 1];
+ scopy_(nrhs, &b[ic + b_dim1], ldb, &bx[ic + bx_dim1], ldbx);
+/* L20: */
+ }
+
+/*
+ Finally go through the left singular vector matrices of all
+ the other subproblems bottom-up on the tree.
+*/
+
+ j = pow_ii(&c__2, &nlvl);
+ sqre = 0;
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = ((lvl) << (1)) - 1;
+
+/*
+ find the first node LF and last node LL on
+ the current level LVL
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = ((lf) << (1)) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ --j;
+ slals0_(icompq, &nl, &nr, &sqre, nrhs, &bx[nlf + bx_dim1], ldbx, &
+ b[nlf + b_dim1], ldb, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &work[1], info);
+/* L30: */
+ }
+/* L40: */
+ }
+ goto L90;
+
+/* ICOMPQ = 1: applying back the right singular vector factors. */
+
+L50:
+
+/*
+    Now go through the right singular vector matrices of all
+ the tree nodes top-down.
+*/
+
+ j = 0;
+ i__1 = nlvl;
+ for (lvl = 1; lvl <= i__1; ++lvl) {
+ lvl2 = ((lvl) << (1)) - 1;
+
+/*
+ Find the first node LF and last node LL on
+ the current level LVL.
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__2 = lvl - 1;
+ lf = pow_ii(&c__2, &i__2);
+ ll = ((lf) << (1)) - 1;
+ }
+ i__2 = lf;
+ for (i__ = ll; i__ >= i__2; --i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqre = 0;
+ } else {
+ sqre = 1;
+ }
+ ++j;
+ slals0_(icompq, &nl, &nr, &sqre, nrhs, &b[nlf + b_dim1], ldb, &bx[
+ nlf + bx_dim1], ldbx, &perm[nlf + lvl * perm_dim1], &
+ givptr[j], &givcol[nlf + lvl2 * givcol_dim1], ldgcol, &
+ givnum[nlf + lvl2 * givnum_dim1], ldu, &poles[nlf + lvl2 *
+ poles_dim1], &difl[nlf + lvl * difl_dim1], &difr[nlf +
+ lvl2 * difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[
+ j], &s[j], &work[1], info);
+/* L60: */
+ }
+/* L70: */
+ }
+
+/*
+ The nodes on the bottom level of the tree were solved
+ by SLASDQ. The corresponding right singular vector
+ matrices are in explicit form. Apply them back.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nr = iwork[ndimr + i1];
+ nlp1 = nl + 1;
+ if (i__ == nd) {
+ nrp1 = nr;
+ } else {
+ nrp1 = nr + 1;
+ }
+ nlf = ic - nl;
+ nrf = ic + 1;
+ sgemm_("T", "N", &nlp1, nrhs, &nlp1, &c_b1011, &vt[nlf + vt_dim1],
+ ldu, &b[nlf + b_dim1], ldb, &c_b320, &bx[nlf + bx_dim1], ldbx);
+ sgemm_("T", "N", &nrp1, nrhs, &nrp1, &c_b1011, &vt[nrf + vt_dim1],
+ ldu, &b[nrf + b_dim1], ldb, &c_b320, &bx[nrf + bx_dim1], ldbx);
+/* L80: */
+ }
+
+L90:
+
+ return 0;
+
+/* End of SLALSA */
+
+} /* slalsa_ */
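+
+/*
+   Illustrative sketch (a hypothetical helper, not part of the
+   translated LAPACK source; kept out of compilation with #if 0):
+   the depth NLVL of the SLALSA computation tree follows the formula
+   NLVL = INT(log_2(N/(SMLSIZ+1))) + 1 quoted in the DIFL argument
+   description above, and matches the expression used by SLALSD below.
+*/
+#if 0
+#include <math.h>
+static int example_nlvl(int n, int smlsiz)
+{
+    /* e.g. n = 1000, smlsiz = 25 gives NLVL = 6 */
+    return (int) (log((double) n / (double) (smlsiz + 1)) / log(2.0)) + 1;
+}
+#endif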
+
+/* Subroutine */ int slalsd_(char *uplo, integer *smlsiz, integer *n, integer
+ *nrhs, real *d__, real *e, real *b, integer *ldb, real *rcond,
+ integer *rank, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer b_dim1, b_offset, i__1, i__2;
+ real r__1;
+
+ /* Builtin functions */
+ double log(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ static integer c__, i__, j, k;
+ static real r__;
+ static integer s, u, z__;
+ static real cs;
+ static integer bx;
+ static real sn;
+ static integer st, vt, nm1, st1;
+ static real eps;
+ static integer iwk;
+ static real tol;
+ static integer difl, difr, perm, nsub, nlvl, sqre, bxst;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *), sgemm_(char *, char *, integer *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+ , real *, real *, integer *);
+ static integer poles, sizei, nsize;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ static integer nwork, icmpq1, icmpq2;
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int slasda_(integer *, integer *, integer *,
+ integer *, real *, real *, real *, integer *, real *, integer *,
+ real *, real *, real *, real *, integer *, integer *, integer *,
+ integer *, real *, real *, real *, real *, integer *, integer *),
+ xerbla_(char *, integer *), slalsa_(integer *, integer *,
+ integer *, integer *, real *, integer *, real *, integer *, real *
+ , integer *, real *, integer *, real *, real *, real *, real *,
+ integer *, integer *, integer *, integer *, real *, real *, real *
+ , real *, integer *, integer *), slascl_(char *, integer *,
+ integer *, real *, real *, integer *, integer *, real *, integer *
+ , integer *);
+ static integer givcol;
+ extern integer isamax_(integer *, real *, integer *);
+ extern /* Subroutine */ int slasdq_(char *, integer *, integer *, integer
+ *, integer *, integer *, real *, real *, real *, integer *, real *
+ , integer *, real *, integer *, real *, integer *),
+ slacpy_(char *, integer *, integer *, real *, integer *, real *,
+ integer *), slartg_(real *, real *, real *, real *, real *
+ ), slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *);
+ static real orgnrm;
+ static integer givnum;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+ static integer givptr, smlszp;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SLALSD uses the singular value decomposition of A to solve the least
+ squares problem of finding X to minimize the Euclidean norm of each
+ column of A*X-B, where A is N-by-N upper bidiagonal, and X and B
+ are N-by-NRHS. The solution X overwrites B.
+
+ The singular values of A smaller than RCOND times the largest
+ singular value are treated as zero in solving the least squares
+ problem; in this case a minimum norm solution is returned.
+ The actual singular values are returned in D in ascending order.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': D and E define an upper bidiagonal matrix.
+ = 'L': D and E define a lower bidiagonal matrix.
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The dimension of the bidiagonal matrix. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of columns of B. NRHS must be at least 1.
+
+ D (input/output) REAL array, dimension (N)
+ On entry D contains the main diagonal of the bidiagonal
+ matrix. On exit, if INFO = 0, D contains its singular values.
+
+ E (input) REAL array, dimension (N-1)
+ Contains the super-diagonal entries of the bidiagonal matrix.
+ On exit, E has been destroyed.
+
+ B (input/output) REAL array, dimension (LDB,NRHS)
+ On input, B contains the right hand sides of the least
+ squares problem. On output, B contains the solution X.
+
+ LDB (input) INTEGER
+ The leading dimension of B in the calling subprogram.
+ LDB must be at least max(1,N).
+
+ RCOND (input) REAL
+ The singular values of A less than or equal to RCOND times
+ the largest singular value are treated as zero in solving
+ the least squares problem. If RCOND is negative,
+ machine precision is used instead.
+ For example, if diag(S)*X=B were the least squares problem,
+ where diag(S) is a diagonal matrix of singular values, the
+ solution would be X(i) = B(i) / S(i) if S(i) is greater than
+ RCOND*max(S), and X(i) = 0 if S(i) is less than or equal to
+ RCOND*max(S).
+
+ RANK (output) INTEGER
+ The number of singular values of A greater than RCOND times
+ the largest singular value.
+
+ WORK (workspace) REAL array, dimension at least
+ (9*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2),
+ where NLVL = max(0, INT(log_2 (N/(SMLSIZ+1))) + 1).
+
+ IWORK (workspace) INTEGER array, dimension at least
+ (3*N*NLVL + 11*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+           > 0:  The algorithm failed to compute a singular value while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through MOD(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Ren-Cang Li, Computer Science Division, University of
+ California at Berkeley, USA
+ Osni Marques, LBNL/NERSC, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -3;
+ } else if (*nrhs < 1) {
+ *info = -4;
+ } else if ((*ldb < 1) || (*ldb < *n)) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLALSD", &i__1);
+ return 0;
+ }
+
+ eps = slamch_("Epsilon");
+
+/* Set up the tolerance. */
+
+ if ((*rcond <= 0.f) || (*rcond >= 1.f)) {
+ *rcond = eps;
+ }
+
+ *rank = 0;
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+ if (d__[1] == 0.f) {
+ slaset_("A", &c__1, nrhs, &c_b320, &c_b320, &b[b_offset], ldb);
+ } else {
+ *rank = 1;
+ slascl_("G", &c__0, &c__0, &d__[1], &c_b1011, &c__1, nrhs, &b[
+ b_offset], ldb, info);
+ d__[1] = dabs(d__[1]);
+ }
+ return 0;
+ }
+
+/* Rotate the matrix if it is lower bidiagonal. */
+
+ if (*(unsigned char *)uplo == 'L') {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (*nrhs == 1) {
+ srot_(&c__1, &b[i__ + b_dim1], &c__1, &b[i__ + 1 + b_dim1], &
+ c__1, &cs, &sn);
+ } else {
+ work[((i__) << (1)) - 1] = cs;
+ work[i__ * 2] = sn;
+ }
+/* L10: */
+ }
+ if (*nrhs > 1) {
+ i__1 = *nrhs;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = *n - 1;
+ for (j = 1; j <= i__2; ++j) {
+ cs = work[((j) << (1)) - 1];
+ sn = work[j * 2];
+ srot_(&c__1, &b[j + i__ * b_dim1], &c__1, &b[j + 1 + i__ *
+ b_dim1], &c__1, &cs, &sn);
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ }
+
+/* Scale. */
+
+ nm1 = *n - 1;
+ orgnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.f) {
+ slaset_("A", n, nrhs, &c_b320, &c_b320, &b[b_offset], ldb);
+ return 0;
+ }
+
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, n, &c__1, &d__[1], n, info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, &nm1, &c__1, &e[1], &nm1,
+ info);
+
+/*
+ If N is smaller than the minimum divide size SMLSIZ, then solve
+ the problem with another solver.
+*/
+
+ if (*n <= *smlsiz) {
+ nwork = *n * *n + 1;
+ slaset_("A", n, n, &c_b320, &c_b1011, &work[1], n);
+ slasdq_("U", &c__0, n, n, &c__0, nrhs, &d__[1], &e[1], &work[1], n, &
+ work[1], n, &b[b_offset], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ tol = *rcond * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (d__[i__] <= tol) {
+ slaset_("A", &c__1, nrhs, &c_b320, &c_b320, &b[i__ + b_dim1],
+ ldb);
+ } else {
+ slascl_("G", &c__0, &c__0, &d__[i__], &c_b1011, &c__1, nrhs, &
+ b[i__ + b_dim1], ldb, info);
+ ++(*rank);
+ }
+/* L40: */
+ }
+ sgemm_("T", "N", n, nrhs, n, &c_b1011, &work[1], n, &b[b_offset], ldb,
+ &c_b320, &work[nwork], n);
+ slacpy_("A", n, nrhs, &work[nwork], n, &b[b_offset], ldb);
+
+/* Unscale. */
+
+ slascl_("G", &c__0, &c__0, &c_b1011, &orgnrm, n, &c__1, &d__[1], n,
+ info);
+ slasrt_("D", n, &d__[1], info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, n, nrhs, &b[b_offset],
+ ldb, info);
+
+ return 0;
+ }
+
+/* Book-keeping and setting up some constants. */
+
+ nlvl = (integer) (log((real) (*n) / (real) (*smlsiz + 1)) / log(2.f)) + 1;
+
+ smlszp = *smlsiz + 1;
+
+ u = 1;
+ vt = *smlsiz * *n + 1;
+ difl = vt + smlszp * *n;
+ difr = difl + nlvl * *n;
+ z__ = difr + ((nlvl * *n) << (1));
+ c__ = z__ + nlvl * *n;
+ s = c__ + *n;
+ poles = s + *n;
+ givnum = poles + ((nlvl) << (1)) * *n;
+ bx = givnum + ((nlvl) << (1)) * *n;
+ nwork = bx + *n * *nrhs;
+
+ sizei = *n + 1;
+ k = sizei + *n;
+ givptr = k + *n;
+ perm = givptr + *n;
+ givcol = perm + nlvl * *n;
+ iwk = givcol + ((nlvl * *n) << (1));
+
+ st = 1;
+ sqre = 0;
+ icmpq1 = 1;
+ icmpq2 = 0;
+ nsub = 0;
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) < eps) {
+ d__[i__] = r_sign(&eps, &d__[i__]);
+ }
+/* L50: */
+ }
+
+ i__1 = nm1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (((r__1 = e[i__], dabs(r__1)) < eps) || (i__ == nm1)) {
+ ++nsub;
+ iwork[nsub] = st;
+
+/*
+ Subproblem found. First determine its size and then
+ apply divide and conquer on it.
+*/
+
+ if (i__ < nm1) {
+
+/* A subproblem with E(I) small for I < NM1. */
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else if ((r__1 = e[i__], dabs(r__1)) >= eps) {
+
+/* A subproblem with E(NM1) not too small but I = NM1. */
+
+ nsize = *n - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ } else {
+
+/*
+                A subproblem with E(NM1) small. This implies a
+ 1-by-1 subproblem at D(N), which is not solved
+ explicitly.
+*/
+
+ nsize = i__ - st + 1;
+ iwork[sizei + nsub - 1] = nsize;
+ ++nsub;
+ iwork[nsub] = *n;
+ iwork[sizei + nsub - 1] = 1;
+ scopy_(nrhs, &b[*n + b_dim1], ldb, &work[bx + nm1], n);
+ }
+ st1 = st - 1;
+ if (nsize == 1) {
+
+/*
+ This is a 1-by-1 subproblem and is not solved
+ explicitly.
+*/
+
+ scopy_(nrhs, &b[st + b_dim1], ldb, &work[bx + st1], n);
+ } else if (nsize <= *smlsiz) {
+
+/* This is a small subproblem and is solved by SLASDQ. */
+
+ slaset_("A", &nsize, &nsize, &c_b320, &c_b1011, &work[vt +
+ st1], n);
+ slasdq_("U", &c__0, &nsize, &nsize, &c__0, nrhs, &d__[st], &e[
+ st], &work[vt + st1], n, &work[nwork], n, &b[st +
+ b_dim1], ldb, &work[nwork], info);
+ if (*info != 0) {
+ return 0;
+ }
+ slacpy_("A", &nsize, nrhs, &b[st + b_dim1], ldb, &work[bx +
+ st1], n);
+ } else {
+
+/* A large problem. Solve it using divide and conquer. */
+
+ slasda_(&icmpq1, smlsiz, &nsize, &sqre, &d__[st], &e[st], &
+ work[u + st1], n, &work[vt + st1], &iwork[k + st1], &
+ work[difl + st1], &work[difr + st1], &work[z__ + st1],
+ &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum +
+ st1], &work[c__ + st1], &work[s + st1], &work[nwork],
+ &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ bxst = bx + st1;
+ slalsa_(&icmpq2, smlsiz, &nsize, nrhs, &b[st + b_dim1], ldb, &
+ work[bxst], n, &work[u + st1], n, &work[vt + st1], &
+ iwork[k + st1], &work[difl + st1], &work[difr + st1],
+ &work[z__ + st1], &work[poles + st1], &iwork[givptr +
+ st1], &iwork[givcol + st1], n, &iwork[perm + st1], &
+ work[givnum + st1], &work[c__ + st1], &work[s + st1],
+ &work[nwork], &iwork[iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+ st = i__ + 1;
+ }
+/* L60: */
+ }
+
+/* Apply the singular values and treat the tiny ones as zero. */
+
+ tol = *rcond * (r__1 = d__[isamax_(n, &d__[1], &c__1)], dabs(r__1));
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+ Some of the elements in D can be negative because 1-by-1
+ subproblems were not solved explicitly.
+*/
+
+ if ((r__1 = d__[i__], dabs(r__1)) <= tol) {
+ slaset_("A", &c__1, nrhs, &c_b320, &c_b320, &work[bx + i__ - 1],
+ n);
+ } else {
+ ++(*rank);
+ slascl_("G", &c__0, &c__0, &d__[i__], &c_b1011, &c__1, nrhs, &
+ work[bx + i__ - 1], n, info);
+ }
+ d__[i__] = (r__1 = d__[i__], dabs(r__1));
+/* L70: */
+ }
+
+/* Now apply back the right singular vectors. */
+
+ icmpq2 = 1;
+ i__1 = nsub;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ st = iwork[i__];
+ st1 = st - 1;
+ nsize = iwork[sizei + i__ - 1];
+ bxst = bx + st1;
+ if (nsize == 1) {
+ scopy_(nrhs, &work[bxst], n, &b[st + b_dim1], ldb);
+ } else if (nsize <= *smlsiz) {
+ sgemm_("T", "N", &nsize, nrhs, &nsize, &c_b1011, &work[vt + st1],
+ n, &work[bxst], n, &c_b320, &b[st + b_dim1], ldb);
+ } else {
+ slalsa_(&icmpq2, smlsiz, &nsize, nrhs, &work[bxst], n, &b[st +
+ b_dim1], ldb, &work[u + st1], n, &work[vt + st1], &iwork[
+ k + st1], &work[difl + st1], &work[difr + st1], &work[z__
+ + st1], &work[poles + st1], &iwork[givptr + st1], &iwork[
+ givcol + st1], n, &iwork[perm + st1], &work[givnum + st1],
+ &work[c__ + st1], &work[s + st1], &work[nwork], &iwork[
+ iwk], info);
+ if (*info != 0) {
+ return 0;
+ }
+ }
+/* L80: */
+ }
+
+/* Unscale and sort the singular values. */
+
+ slascl_("G", &c__0, &c__0, &c_b1011, &orgnrm, n, &c__1, &d__[1], n, info);
+ slasrt_("D", n, &d__[1], info);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, n, nrhs, &b[b_offset], ldb,
+ info);
+
+ return 0;
+
+/* End of SLALSD */
+
+} /* slalsd_ */
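+
+/*
+   Illustrative sketch (a hypothetical helper, excluded from the build
+   with #if 0): minimum WORK/IWORK sizes for SLALSD, taken directly
+   from the workspace formulas in the argument list above.
+*/
+#if 0
+#include <math.h>
+static void example_slalsd_worksize(int n, int nrhs, int smlsiz,
+                                    int *lwork, int *liwork)
+{
+    int nlvl = (int) (log((double) n / (double) (smlsiz + 1)) / log(2.0)) + 1;
+    if (nlvl < 0) {
+        nlvl = 0;           /* NLVL = max(0, ...) per the documentation */
+    }
+    *lwork = 9 * n + 2 * n * smlsiz + 8 * n * nlvl + n * nrhs
+            + (smlsiz + 1) * (smlsiz + 1);
+    *liwork = 3 * n * nlvl + 11 * n;
+}
+#endif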
+
+doublereal slamch_(char *cmach)
+{
+ /* Initialized data */
+
+ static logical first = TRUE_;
+
+ /* System generated locals */
+ integer i__1;
+ real ret_val;
+
+ /* Builtin functions */
+ double pow_ri(real *, integer *);
+
+ /* Local variables */
+ static real t;
+ static integer it;
+ static real rnd, eps, base;
+ static integer beta;
+ static real emin, prec, emax;
+ static integer imin, imax;
+ static logical lrnd;
+ static real rmin, rmax, rmach;
+ extern logical lsame_(char *, char *);
+ static real small, sfmin;
+ extern /* Subroutine */ int slamc2_(integer *, integer *, logical *, real
+ *, integer *, real *, integer *, real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLAMCH determines single precision machine parameters.
+
+ Arguments
+ =========
+
+ CMACH (input) CHARACTER*1
+ Specifies the value to be returned by SLAMCH:
+ = 'E' or 'e', SLAMCH := eps
+          = 'S' or 's', SLAMCH := sfmin
+ = 'B' or 'b', SLAMCH := base
+ = 'P' or 'p', SLAMCH := eps*base
+ = 'N' or 'n', SLAMCH := t
+ = 'R' or 'r', SLAMCH := rnd
+ = 'M' or 'm', SLAMCH := emin
+ = 'U' or 'u', SLAMCH := rmin
+ = 'L' or 'l', SLAMCH := emax
+ = 'O' or 'o', SLAMCH := rmax
+
+ where
+
+ eps = relative machine precision
+ sfmin = safe minimum, such that 1/sfmin does not overflow
+ base = base of the machine
+ prec = eps*base
+ t = number of (base) digits in the mantissa
+ rnd = 1.0 when rounding occurs in addition, 0.0 otherwise
+ emin = minimum exponent before (gradual) underflow
+ rmin = underflow threshold - base**(emin-1)
+ emax = largest exponent before overflow
+ rmax = overflow threshold - (base**emax)*(1-eps)
+
+ =====================================================================
+*/
+
+
+ if (first) {
+ first = FALSE_;
+ slamc2_(&beta, &it, &lrnd, &eps, &imin, &rmin, &imax, &rmax);
+ base = (real) beta;
+ t = (real) it;
+ if (lrnd) {
+ rnd = 1.f;
+ i__1 = 1 - it;
+ eps = pow_ri(&base, &i__1) / 2;
+ } else {
+ rnd = 0.f;
+ i__1 = 1 - it;
+ eps = pow_ri(&base, &i__1);
+ }
+ prec = eps * base;
+ emin = (real) imin;
+ emax = (real) imax;
+ sfmin = rmin;
+ small = 1.f / rmax;
+ if (small >= sfmin) {
+
+/*
+ Use SMALL plus a bit, to avoid the possibility of rounding
+ causing overflow when computing 1/sfmin.
+*/
+
+ sfmin = small * (eps + 1.f);
+ }
+ }
+
+ if (lsame_(cmach, "E")) {
+ rmach = eps;
+ } else if (lsame_(cmach, "S")) {
+ rmach = sfmin;
+ } else if (lsame_(cmach, "B")) {
+ rmach = base;
+ } else if (lsame_(cmach, "P")) {
+ rmach = prec;
+ } else if (lsame_(cmach, "N")) {
+ rmach = t;
+ } else if (lsame_(cmach, "R")) {
+ rmach = rnd;
+ } else if (lsame_(cmach, "M")) {
+ rmach = emin;
+ } else if (lsame_(cmach, "U")) {
+ rmach = rmin;
+ } else if (lsame_(cmach, "L")) {
+ rmach = emax;
+ } else if (lsame_(cmach, "O")) {
+ rmach = rmax;
+ }
+
+ ret_val = rmach;
+ return ret_val;
+
+/* End of SLAMCH */
+
+} /* slamch_ */
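+
+/*
+   Illustrative usage sketch (excluded from the build with #if 0;
+   assumes the f2c typedefs used throughout this file): querying the
+   machine parameters documented above. On IEEE-754 single precision
+   hardware these agree with the corresponding <float.h> constants.
+*/
+#if 0
+#include <stdio.h>
+static void example_slamch(void)
+{
+    doublereal eps = slamch_("E");   /* relative precision, ~ FLT_EPSILON/2
+                                        when rounding occurs in addition */
+    doublereal sfmin = slamch_("S"); /* safe minimum, ~ FLT_MIN */
+    doublereal rmax = slamch_("O");  /* overflow threshold, ~ FLT_MAX */
+    printf("eps=%g sfmin=%g rmax=%g\n", eps, sfmin, rmax);
+}
+#endif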
+
+
+/* *********************************************************************** */
+
+/* Subroutine */ int slamc1_(integer *beta, integer *t, logical *rnd, logical
+ *ieee1)
+{
+ /* Initialized data */
+
+ static logical first = TRUE_;
+
+ /* System generated locals */
+ real r__1, r__2;
+
+ /* Local variables */
+ static real a, b, c__, f, t1, t2;
+ static integer lt;
+ static real one, qtr;
+ static logical lrnd;
+ static integer lbeta;
+ static real savec;
+ static logical lieee1;
+ extern doublereal slamc3_(real *, real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLAMC1 determines the machine parameters given by BETA, T, RND, and
+ IEEE1.
+
+ Arguments
+ =========
+
+ BETA (output) INTEGER
+ The base of the machine.
+
+ T (output) INTEGER
+ The number of ( BETA ) digits in the mantissa.
+
+ RND (output) LOGICAL
+ Specifies whether proper rounding ( RND = .TRUE. ) or
+ chopping ( RND = .FALSE. ) occurs in addition. This may not
+ be a reliable guide to the way in which the machine performs
+ its arithmetic.
+
+ IEEE1 (output) LOGICAL
+ Specifies whether rounding appears to be done in the IEEE
+ 'round to nearest' style.
+
+ Further Details
+ ===============
+
+ The routine is based on the routine ENVRON by Malcolm and
+ incorporates suggestions by Gentleman and Marovich. See
+
+ Malcolm M. A. (1972) Algorithms to reveal properties of
+ floating-point arithmetic. Comms. of the ACM, 15, 949-951.
+
+ Gentleman W. M. and Marovich S. B. (1974) More on algorithms
+ that reveal properties of floating point arithmetic units.
+ Comms. of the ACM, 17, 276-277.
+
+ =====================================================================
+*/
+
+
+ if (first) {
+ first = FALSE_;
+ one = 1.f;
+
+/*
+ LBETA, LIEEE1, LT and LRND are the local values of BETA,
+ IEEE1, T and RND.
+
+ Throughout this routine we use the function SLAMC3 to ensure
+ that relevant values are stored and not held in registers, or
+ are not affected by optimizers.
+
+ Compute a = 2.0**m with the smallest positive integer m such
+ that
+
+ fl( a + 1.0 ) = a.
+*/
+
+ a = 1.f;
+ c__ = 1.f;
+
+/* + WHILE( C.EQ.ONE )LOOP */
+L10:
+ if (c__ == one) {
+ a *= 2;
+ c__ = slamc3_(&a, &one);
+ r__1 = -a;
+ c__ = slamc3_(&c__, &r__1);
+ goto L10;
+ }
+/*
+ + END WHILE
+
+ Now compute b = 2.0**m with the smallest positive integer m
+ such that
+
+ fl( a + b ) .gt. a.
+*/
+
+ b = 1.f;
+ c__ = slamc3_(&a, &b);
+
+/* + WHILE( C.EQ.A )LOOP */
+L20:
+ if (c__ == a) {
+ b *= 2;
+ c__ = slamc3_(&a, &b);
+ goto L20;
+ }
+/*
+ + END WHILE
+
+ Now compute the base. a and c are neighbouring floating point
+ numbers in the interval ( beta**t, beta**( t + 1 ) ) and so
+ their difference is beta. Adding 0.25 to c is to ensure that it
+ is truncated to beta and not ( beta - 1 ).
+*/
+
+ qtr = one / 4;
+ savec = c__;
+ r__1 = -a;
+ c__ = slamc3_(&c__, &r__1);
+ lbeta = c__ + qtr;
+
+/*
+ Now determine whether rounding or chopping occurs, by adding a
+ bit less than beta/2 and a bit more than beta/2 to a.
+*/
+
+ b = (real) lbeta;
+ r__1 = b / 2;
+ r__2 = -b / 100;
+ f = slamc3_(&r__1, &r__2);
+ c__ = slamc3_(&f, &a);
+ if (c__ == a) {
+ lrnd = TRUE_;
+ } else {
+ lrnd = FALSE_;
+ }
+ r__1 = b / 2;
+ r__2 = b / 100;
+ f = slamc3_(&r__1, &r__2);
+ c__ = slamc3_(&f, &a);
+ if (lrnd && c__ == a) {
+ lrnd = FALSE_;
+ }
+
+/*
+ Try and decide whether rounding is done in the IEEE 'round to
+ nearest' style. B/2 is half a unit in the last place of the two
+ numbers A and SAVEC. Furthermore, A is even, i.e. has last bit
+ zero, and SAVEC is odd. Thus adding B/2 to A should not change
+ A, but adding B/2 to SAVEC should change SAVEC.
+*/
+
+ r__1 = b / 2;
+ t1 = slamc3_(&r__1, &a);
+ r__1 = b / 2;
+ t2 = slamc3_(&r__1, &savec);
+ lieee1 = t1 == a && t2 > savec && lrnd;
+
+/*
+ Now find the mantissa, t. It should be the integer part of
+ log to the base beta of a, however it is safer to determine t
+ by powering. So we find t as the smallest positive integer for
+ which
+
+            fl( beta**t + 1.0 ) = beta**t.
+*/
+
+ lt = 0;
+ a = 1.f;
+ c__ = 1.f;
+
+/* + WHILE( C.EQ.ONE )LOOP */
+L30:
+ if (c__ == one) {
+ ++lt;
+ a *= lbeta;
+ c__ = slamc3_(&a, &one);
+ r__1 = -a;
+ c__ = slamc3_(&c__, &r__1);
+ goto L30;
+ }
+/* + END WHILE */
+
+ }
+
+ *beta = lbeta;
+ *t = lt;
+ *rnd = lrnd;
+ *ieee1 = lieee1;
+ return 0;
+
+/* End of SLAMC1 */
+
+} /* slamc1_ */
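+
+/*
+   A C analogue of the Malcolm loop above (illustrative only, excluded
+   from the build with #if 0): grow a = 2**m until fl(a + 1.0) == a.
+   The volatile qualifier plays the role of SLAMC3 here, forcing each
+   intermediate through memory so wider registers cannot hide the
+   rounding.
+*/
+#if 0
+static float example_malcolm_a(void)
+{
+    volatile float a = 1.f, c = 1.f;
+    while (c == 1.f) {
+        a *= 2.f;
+        c = a + 1.f;
+        c = c - a;
+    }
+    return a;   /* 2**24 = 16777216.0f in IEEE single precision */
+}
+#endif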
+
+
+/* *********************************************************************** */
+
+/* Subroutine */ int slamc2_(integer *beta, integer *t, logical *rnd, real *
+ eps, integer *emin, real *rmin, integer *emax, real *rmax)
+{
+ /* Initialized data */
+
+ static logical first = TRUE_;
+ static logical iwarn = FALSE_;
+
+ /* Format strings */
+ static char fmt_9999[] = "(//\002 WARNING. The value EMIN may be incorre"
+ "ct:-\002,\002 EMIN = \002,i8,/\002 If, after inspection, the va"
+ "lue EMIN looks\002,\002 acceptable please comment out \002,/\002"
+ " the IF block as marked within the code of routine\002,\002 SLAM"
+ "C2,\002,/\002 otherwise supply EMIN explicitly.\002,/)";
+
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2, r__3, r__4, r__5;
+
+ /* Builtin functions */
+ double pow_ri(real *, integer *);
+ integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void);
+
+ /* Local variables */
+ static real a, b, c__;
+ static integer i__, lt;
+ static real one, two;
+ static logical ieee;
+ static real half;
+ static logical lrnd;
+ static real leps, zero;
+ static integer lbeta;
+ static real rbase;
+ static integer lemin, lemax, gnmin;
+ static real small;
+ static integer gpmin;
+ static real third, lrmin, lrmax, sixth;
+ static logical lieee1;
+ extern /* Subroutine */ int slamc1_(integer *, integer *, logical *,
+ logical *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int slamc4_(integer *, real *, integer *),
+ slamc5_(integer *, integer *, integer *, logical *, integer *,
+ real *);
+ static integer ngnmin, ngpmin;
+
+ /* Fortran I/O blocks */
+ static cilist io___3081 = { 0, 6, 0, fmt_9999, 0 };
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLAMC2 determines the machine parameters specified in its argument
+ list.
+
+ Arguments
+ =========
+
+ BETA (output) INTEGER
+ The base of the machine.
+
+ T (output) INTEGER
+ The number of ( BETA ) digits in the mantissa.
+
+ RND (output) LOGICAL
+ Specifies whether proper rounding ( RND = .TRUE. ) or
+ chopping ( RND = .FALSE. ) occurs in addition. This may not
+ be a reliable guide to the way in which the machine performs
+ its arithmetic.
+
+ EPS (output) REAL
+ The smallest positive number such that
+
+ fl( 1.0 - EPS ) .LT. 1.0,
+
+ where fl denotes the computed value.
+
+ EMIN (output) INTEGER
+ The minimum exponent before (gradual) underflow occurs.
+
+ RMIN (output) REAL
+ The smallest normalized number for the machine, given by
+ BASE**( EMIN - 1 ), where BASE is the floating point value
+ of BETA.
+
+ EMAX (output) INTEGER
+ The maximum exponent before overflow occurs.
+
+ RMAX (output) REAL
+ The largest positive number for the machine, given by
+ BASE**EMAX * ( 1 - EPS ), where BASE is the floating point
+ value of BETA.
+
+ Further Details
+ ===============
+
+ The computation of EPS is based on a routine PARANOIA by
+ W. Kahan of the University of California at Berkeley.
+
+ =====================================================================
+*/
+
+
+ if (first) {
+ first = FALSE_;
+ zero = 0.f;
+ one = 1.f;
+ two = 2.f;
+
+/*
+ LBETA, LT, LRND, LEPS, LEMIN and LRMIN are the local values of
+ BETA, T, RND, EPS, EMIN and RMIN.
+
+ Throughout this routine we use the function SLAMC3 to ensure
+ that relevant values are stored and not held in registers, or
+ are not affected by optimizers.
+
+ SLAMC1 returns the parameters LBETA, LT, LRND and LIEEE1.
+*/
+
+ slamc1_(&lbeta, &lt, &lrnd, &lieee1);
+
+/* Start to find EPS. */
+
+ b = (real) lbeta;
+ i__1 = -lt;
+ a = pow_ri(&b, &i__1);
+ leps = a;
+
+/* Try some tricks to see whether or not this is the correct EPS. */
+
+ b = two / 3;
+ half = one / 2;
+ r__1 = -half;
+ sixth = slamc3_(&b, &r__1);
+ third = slamc3_(&sixth, &sixth);
+ r__1 = -half;
+ b = slamc3_(&third, &r__1);
+ b = slamc3_(&b, &sixth);
+ b = dabs(b);
+ if (b < leps) {
+ b = leps;
+ }
+
+ leps = 1.f;
+
+/* + WHILE( ( LEPS.GT.B ).AND.( B.GT.ZERO ) )LOOP */
+L10:
+ if (leps > b && b > zero) {
+ leps = b;
+ r__1 = half * leps;
+/* Computing 5th power */
+ r__3 = two, r__4 = r__3, r__3 *= r__3;
+/* Computing 2nd power */
+ r__5 = leps;
+ r__2 = r__4 * (r__3 * r__3) * (r__5 * r__5);
+ c__ = slamc3_(&r__1, &r__2);
+ r__1 = -c__;
+ c__ = slamc3_(&half, &r__1);
+ b = slamc3_(&half, &c__);
+ r__1 = -b;
+ c__ = slamc3_(&half, &r__1);
+ b = slamc3_(&half, &c__);
+ goto L10;
+ }
+/* + END WHILE */
+
+ if (a < leps) {
+ leps = a;
+ }
+
+/*
+ Computation of EPS complete.
+
+ Now find EMIN. Let A = + or - 1, and + or - (1 + BASE**(-3)).
+ Keep dividing A by BETA until (gradual) underflow occurs. This
+ is detected when we cannot recover the previous A.
+*/
+
+ rbase = one / lbeta;
+ small = one;
+ for (i__ = 1; i__ <= 3; ++i__) {
+ r__1 = small * rbase;
+ small = slamc3_(&r__1, &zero);
+/* L20: */
+ }
+ a = slamc3_(&one, &small);
+ slamc4_(&ngpmin, &one, &lbeta);
+ r__1 = -one;
+ slamc4_(&ngnmin, &r__1, &lbeta);
+ slamc4_(&gpmin, &a, &lbeta);
+ r__1 = -a;
+ slamc4_(&gnmin, &r__1, &lbeta);
+ ieee = FALSE_;
+
+ if (ngpmin == ngnmin && gpmin == gnmin) {
+ if (ngpmin == gpmin) {
+ lemin = ngpmin;
+/*
+ ( Non twos-complement machines, no gradual underflow;
+ e.g., VAX )
+*/
+ } else if (gpmin - ngpmin == 3) {
+ lemin = ngpmin - 1 + lt;
+ ieee = TRUE_;
+/*
+ ( Non twos-complement machines, with gradual underflow;
+ e.g., IEEE standard followers )
+*/
+ } else {
+ lemin = min(ngpmin,gpmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+
+ } else if (ngpmin == gpmin && ngnmin == gnmin) {
+ if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1) {
+ lemin = max(ngpmin,ngnmin);
+/*
+ ( Twos-complement machines, no gradual underflow;
+ e.g., CYBER 205 )
+*/
+ } else {
+ lemin = min(ngpmin,ngnmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+
+ } else if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1 && gpmin == gnmin)
+ {
+ if (gpmin - min(ngpmin,ngnmin) == 3) {
+ lemin = max(ngpmin,ngnmin) - 1 + lt;
+/*
+ ( Twos-complement machines with gradual underflow;
+ no known machine )
+*/
+ } else {
+ lemin = min(ngpmin,ngnmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+
+ } else {
+/* Computing MIN */
+ i__1 = min(ngpmin,ngnmin), i__1 = min(i__1,gpmin);
+ lemin = min(i__1,gnmin);
+/* ( A guess; no known machine ) */
+ iwarn = TRUE_;
+ }
+/*
+ **
+ Comment out this if block if EMIN is ok
+*/
+ if (iwarn) {
+ first = TRUE_;
+ s_wsfe(&io___3081);
+ do_fio(&c__1, (char *)&lemin, (ftnlen)sizeof(integer));
+ e_wsfe();
+ }
+/*
+ **
+
+ Assume IEEE arithmetic if we found denormalised numbers above,
+ or if arithmetic seems to round in the IEEE style, determined
+ in routine SLAMC1. A true IEEE machine should have both things
+ true; however, faulty machines may have one or the other.
+*/
+
+ ieee = (ieee) || (lieee1);
+
+/*
+ Compute RMIN by successive division by BETA. We could compute
+ RMIN as BASE**( EMIN - 1 ), but some machines underflow during
+ this computation.
+*/
+
+ lrmin = 1.f;
+ i__1 = 1 - lemin;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__1 = lrmin * rbase;
+ lrmin = slamc3_(&r__1, &zero);
+/* L30: */
+ }
+
+/* Finally, call SLAMC5 to compute EMAX and RMAX. */
+
+ slamc5_(&lbeta, &lt, &lemin, &ieee, &lemax, &lrmax);
+ }
+
+ *beta = lbeta;
+ *t = lt;
+ *rnd = lrnd;
+ *eps = leps;
+ *emin = lemin;
+ *rmin = lrmin;
+ *emax = lemax;
+ *rmax = lrmax;
+
+ return 0;
+
+
+/* End of SLAMC2 */
+
+} /* slamc2_ */
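+
+/*
+   Illustrative usage sketch (excluded from the build with #if 0;
+   assumes the f2c typedefs): on an IEEE-754 single precision machine
+   the parameters determined above come out as BETA = 2, T = 24,
+   EMIN = -125 (so RMIN = 2**(-126) = FLT_MIN) and EMAX = 128
+   (so RMAX = FLT_MAX).
+*/
+#if 0
+#include <stdio.h>
+static void example_slamc2(void)
+{
+    integer beta, t, emin, emax;
+    logical rnd;
+    real eps, rmin, rmax;
+    slamc2_(&beta, &t, &rnd, &eps, &emin, &rmin, &emax, &rmax);
+    printf("beta=%d t=%d emin=%d emax=%d\n",
+           (int) beta, (int) t, (int) emin, (int) emax);
+}
+#endif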
+
+
+/* *********************************************************************** */
+
+doublereal slamc3_(real *a, real *b)
+{
+ /* System generated locals */
+ real ret_val;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLAMC3 is intended to force A and B to be stored prior to doing
+ the addition of A and B , for use in situations where optimizers
+ might hold one of these in a register.
+
+ Arguments
+ =========
+
+ A, B (input) REAL
+ The values A and B.
+
+ =====================================================================
+*/
+
+
+ ret_val = *a + *b;
+
+ return ret_val;
+
+/* End of SLAMC3 */
+
+} /* slamc3_ */
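+
+/*
+   A C analogue of the SLAMC3 trick (illustrative only, excluded from
+   the build with #if 0): writing the sum through a volatile forces it
+   to be rounded to storage precision instead of being carried in a
+   wider register by an optimizing compiler.
+*/
+#if 0
+static float example_force_store(float a, float b)
+{
+    volatile float sum = a + b;   /* rounded to single precision here */
+    return sum;
+}
+#endif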
+
+
+/* *********************************************************************** */
+
+/* Subroutine */ int slamc4_(integer *emin, real *start, integer *base)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Local variables */
+ static real a;
+ static integer i__;
+ static real b1, b2, c1, c2, d1, d2, one, zero, rbase;
+ extern doublereal slamc3_(real *, real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLAMC4 is a service routine for SLAMC2.
+
+ Arguments
+ =========
+
+    EMIN   (output) INTEGER
+           The minimum exponent before (gradual) underflow, computed by
+           setting A = START and dividing by BASE until the previous A
+           cannot be recovered.
+
+ START (input) REAL
+ The starting point for determining EMIN.
+
+ BASE (input) INTEGER
+ The base of the machine.
+
+ =====================================================================
+*/
+
+
+ a = *start;
+ one = 1.f;
+ rbase = one / *base;
+ zero = 0.f;
+ *emin = 1;
+ r__1 = a * rbase;
+ b1 = slamc3_(&r__1, &zero);
+ c1 = a;
+ c2 = a;
+ d1 = a;
+ d2 = a;
+/*
+ + WHILE( ( C1.EQ.A ).AND.( C2.EQ.A ).AND.
+ $ ( D1.EQ.A ).AND.( D2.EQ.A ) )LOOP
+*/
+L10:
+ if (c1 == a && c2 == a && d1 == a && d2 == a) {
+ --(*emin);
+ a = b1;
+ r__1 = a / *base;
+ b1 = slamc3_(&r__1, &zero);
+ r__1 = b1 * *base;
+ c1 = slamc3_(&r__1, &zero);
+ d1 = zero;
+ i__1 = *base;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d1 += b1;
+/* L20: */
+ }
+ r__1 = a * rbase;
+ b2 = slamc3_(&r__1, &zero);
+ r__1 = b2 / rbase;
+ c2 = slamc3_(&r__1, &zero);
+ d2 = zero;
+ i__1 = *base;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d2 += b2;
+/* L30: */
+ }
+ goto L10;
+ }
+/* + END WHILE */
+
+ return 0;
+
+/* End of SLAMC4 */
+
+} /* slamc4_ */
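+
+/*
+   Illustrative sketch of the probing idea above (excluded from the
+   build with #if 0): keep dividing by the base until multiplying back
+   no longer recovers the previous value, i.e. until (gradual)
+   underflow has destroyed information. In IEEE single precision the
+   loop runs well into the subnormal range before stopping.
+*/
+#if 0
+static int example_emin_probe(void)
+{
+    volatile float a = 1.f, b, c;
+    int emin = 1;
+    for (;;) {
+        b = a / 2.f;
+        c = b * 2.f;
+        if (c != a) {
+            break;      /* b lost bits of a: underflow reached */
+        }
+        --emin;
+        a = b;
+    }
+    return emin;
+}
+#endif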
+
+
+/* *********************************************************************** */
+
+/* Subroutine */ int slamc5_(integer *beta, integer *p, integer *emin,
+ logical *ieee, integer *emax, real *rmax)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Local variables */
+ static integer i__;
+ static real y, z__;
+ static integer try__, lexp;
+ static real oldy;
+ static integer uexp, nbits;
+ extern doublereal slamc3_(real *, real *);
+ static real recbas;
+ static integer exbits, expsum;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLAMC5 attempts to compute RMAX, the largest machine floating-point
+    number, without overflow.  It assumes that EMAX and abs(EMIN) sum
+ approximately to a power of 2. It will fail on machines where this
+ assumption does not hold, for example, the Cyber 205 (EMIN = -28625,
+ EMAX = 28718). It will also fail if the value supplied for EMIN is
+ too large (i.e. too close to zero), probably with overflow.
+
+ Arguments
+ =========
+
+ BETA (input) INTEGER
+ The base of floating-point arithmetic.
+
+ P (input) INTEGER
+ The number of base BETA digits in the mantissa of a
+ floating-point value.
+
+ EMIN (input) INTEGER
+ The minimum exponent before (gradual) underflow.
+
+ IEEE (input) LOGICAL
+ A logical flag specifying whether or not the arithmetic
+ system is thought to comply with the IEEE standard.
+
+ EMAX (output) INTEGER
+           The largest exponent before overflow.
+
+ RMAX (output) REAL
+ The largest machine floating-point number.
+
+ =====================================================================
+
+
+ First compute LEXP and UEXP, two powers of 2 that bound
+ abs(EMIN). We then assume that EMAX + abs(EMIN) will sum
+ approximately to the bound that is closest to abs(EMIN).
+ (EMAX is the exponent of the required number RMAX).
+*/
+
+ lexp = 1;
+ exbits = 1;
+L10:
+ try__ = (lexp) << (1);
+ if (try__ <= -(*emin)) {
+ lexp = try__;
+ ++exbits;
+ goto L10;
+ }
+ if (lexp == -(*emin)) {
+ uexp = lexp;
+ } else {
+ uexp = try__;
+ ++exbits;
+ }
+
+/*
+ Now -LEXP is less than or equal to EMIN, and -UEXP is greater
+ than or equal to EMIN. EXBITS is the number of bits needed to
+ store the exponent.
+*/
+
+ if (uexp + *emin > -lexp - *emin) {
+ expsum = (lexp) << (1);
+ } else {
+ expsum = (uexp) << (1);
+ }
+
+/*
+ EXPSUM is the exponent range, approximately equal to
+ EMAX - EMIN + 1 .
+*/
+
+ *emax = expsum + *emin - 1;
+ nbits = exbits + 1 + *p;
+
+/*
+ NBITS is the total number of bits needed to store a
+ floating-point number.
+*/
+
+ if (nbits % 2 == 1 && *beta == 2) {
+
+/*
+ Either there are an odd number of bits used to store a
+ floating-point number, which is unlikely, or some bits are
+ not used in the representation of numbers, which is possible,
+ (e.g. Cray machines) or the mantissa has an implicit bit,
+ (e.g. IEEE machines, Dec Vax machines), which is perhaps the
+ most likely. We have to assume the last alternative.
+ If this is true, then we need to reduce EMAX by one because
+ there must be some way of representing zero in an implicit-bit
+ system. On machines like Cray, we are reducing EMAX by one
+ unnecessarily.
+*/
+
+ --(*emax);
+ }
+
+ if (*ieee) {
+
+/*
+ Assume we are on an IEEE machine which reserves one exponent
+ for infinity and NaN.
+*/
+
+ --(*emax);
+ }
+
+/*
+ Now create RMAX, the largest machine number, which should
+ be equal to (1.0 - BETA**(-P)) * BETA**EMAX .
+
+ First compute 1.0 - BETA**(-P), being careful that the
+ result is less than 1.0 .
+*/
+
+ recbas = 1.f / *beta;
+ z__ = *beta - 1.f;
+ y = 0.f;
+ i__1 = *p;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ z__ *= recbas;
+ if (y < 1.f) {
+ oldy = y;
+ }
+ y = slamc3_(&y, &z__);
+/* L20: */
+ }
+ if (y >= 1.f) {
+ y = oldy;
+ }
+
+/* Now multiply by BETA**EMAX to get RMAX. */
+
+ i__1 = *emax;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__1 = y * *beta;
+ y = slamc3_(&r__1, &c_b320);
+/* L30: */
+ }
+
+ *rmax = y;
+ return 0;
+
+/* End of SLAMC5 */
+
+} /* slamc5_ */
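+
+/*
+   Worked instance of the RMAX formula above (illustrative, excluded
+   from the build with #if 0): with the IEEE single precision
+   parameters BETA = 2, P = 24 and EMAX = 128,
+       RMAX = (1 - 2**(-24)) * 2**128 = FLT_MAX ~ 3.4028e+38.
+*/
+#if 0
+#include <assert.h>
+#include <float.h>
+#include <math.h>
+static void example_rmax(void)
+{
+    /* exact in double precision, so the comparison below is exact */
+    double rmax = (1.0 - pow(2.0, -24.0)) * pow(2.0, 128.0);
+    assert((float) rmax == FLT_MAX);
+}
+#endif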
+
+/* Subroutine */ int slamrg_(integer *n1, integer *n2, real *a, integer *
+ strd1, integer *strd2, integer *index)
+{
+ /* System generated locals */
+ integer i__1;
+
+ /* Local variables */
+ static integer i__, ind1, ind2, n1sv, n2sv;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ SLAMRG will create a permutation list which will merge the elements
+ of A (which is composed of two independently sorted sets) into a
+ single set which is sorted in ascending order.
+
+ Arguments
+ =========
+
+ N1 (input) INTEGER
+ N2 (input) INTEGER
+           These arguments contain the respective lengths of the two
+ sorted lists to be merged.
+
+ A (input) REAL array, dimension (N1+N2)
+ The first N1 elements of A contain a list of numbers which
+ are sorted in either ascending or descending order. Likewise
+ for the final N2 elements.
+
+ STRD1 (input) INTEGER
+ STRD2 (input) INTEGER
+ These are the strides to be taken through the array A.
+ Allowable strides are 1 and -1. They indicate whether a
+ subset of A is sorted in ascending (STRDx = 1) or descending
+ (STRDx = -1) order.
+
+ INDEX (output) INTEGER array, dimension (N1+N2)
+ On exit this array will contain a permutation such that
+ if B( I ) = A( INDEX( I ) ) for I=1,N1+N2, then B will be
+ sorted in ascending order.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --index;
+ --a;
+
+ /* Function Body */
+ n1sv = *n1;
+ n2sv = *n2;
+ if (*strd1 > 0) {
+ ind1 = 1;
+ } else {
+ ind1 = *n1;
+ }
+ if (*strd2 > 0) {
+ ind2 = *n1 + 1;
+ } else {
+ ind2 = *n1 + *n2;
+ }
+ i__ = 1;
+/* while ( (N1SV > 0) & (N2SV > 0) ) */
+L10:
+ if (n1sv > 0 && n2sv > 0) {
+ if (a[ind1] <= a[ind2]) {
+ index[i__] = ind1;
+ ++i__;
+ ind1 += *strd1;
+ --n1sv;
+ } else {
+ index[i__] = ind2;
+ ++i__;
+ ind2 += *strd2;
+ --n2sv;
+ }
+ goto L10;
+ }
+/* end while */
+ if (n1sv == 0) {
+ i__1 = n2sv;
+ for (n1sv = 1; n1sv <= i__1; ++n1sv) {
+ index[i__] = ind2;
+ ++i__;
+ ind2 += *strd2;
+/* L20: */
+ }
+ } else {
+/* N2SV .EQ. 0 */
+ i__1 = n1sv;
+ for (n2sv = 1; n2sv <= i__1; ++n2sv) {
+ index[i__] = ind1;
+ ++i__;
+ ind1 += *strd1;
+/* L30: */
+ }
+ }
+
+ return 0;
+
+/* End of SLAMRG */
+
+} /* slamrg_ */
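+
+/*
+   Illustrative usage sketch for SLAMRG (excluded from the build with
+   #if 0; assumes the f2c typedefs): merge two ascending runs of A
+   into a single ascending permutation.
+*/
+#if 0
+static void example_slamrg(void)
+{
+    /* first run: 1, 4, 7; second run: 2, 3, 9 */
+    real a[6] = { 1.f, 4.f, 7.f, 2.f, 3.f, 9.f };
+    integer idx[6], n1 = 3, n2 = 3, one = 1;
+    slamrg_(&n1, &n2, a, &one, &one, idx);
+    /* idx (1-based) is now 1, 4, 5, 2, 3, 6, so that
+       a[idx[i] - 1] visits 1, 2, 3, 4, 7, 9 in ascending order */
+}
+#endif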
+
+doublereal slange_(char *norm, integer *m, integer *n, real *a, integer *lda,
+ real *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j;
+ static real sum, scale;
+ extern logical lsame_(char *, char *);
+ static real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLANGE returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ real matrix A.
+
+ Description
+ ===========
+
+ SLANGE returns the value
+
+ SLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in SLANGE as described
+ above.
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0. When M = 0,
+ SLANGE is set to zero.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0. When N = 0,
+ SLANGE is set to zero.
+
+ A (input) REAL array, dimension (LDA,N)
+ The m by n matrix A.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(M,1).
+
+ WORK (workspace) REAL array, dimension (LWORK),
+ where LWORK >= M when NORM = 'I'; otherwise, WORK is not
+ referenced.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (min(*m,*n) == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if ((lsame_(norm, "O")) || (*(unsigned char *
+ )norm == '1')) {
+
+/* Find norm1(A). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L30: */
+ }
+ value = dmax(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.f;
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L80: */
+ }
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ slassq_(m, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANGE */
+
+} /* slange_ */
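+
+/*
+   Illustrative usage sketch for SLANGE (excluded from the build with
+   #if 0; assumes the f2c typedefs): A is stored column-major, as in
+   Fortran, and WORK is only referenced for the infinity norm.
+*/
+#if 0
+static void example_slange(void)
+{
+    /* A = [ 1  3 ]   stored by columns: 1, 2, 3, 4
+           [ 2  4 ] */
+    real a[4] = { 1.f, 2.f, 3.f, 4.f };
+    real work[2];
+    integer m = 2, n = 2, lda = 2;
+    doublereal one_norm = slange_("1", &m, &n, a, &lda, work); /* 7 */
+    doublereal inf_norm = slange_("I", &m, &n, a, &lda, work); /* 6 */
+    doublereal max_abs  = slange_("M", &m, &n, a, &lda, work); /* 4 */
+}
+#endif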
+
+doublereal slanhs_(char *norm, integer *n, real *a, integer *lda, real *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j;
+ static real sum, scale;
+ extern logical lsame_(char *, char *);
+ static real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLANHS returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ Hessenberg matrix A.
+
+ Description
+ ===========
+
+ SLANHS returns the value
+
+ SLANHS = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in SLANHS as described
+ above.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0. When N = 0, SLANHS is
+ set to zero.
+
+ A (input) REAL array, dimension (LDA,N)
+ The n by n upper Hessenberg matrix A; the part of A below the
+ first sub-diagonal is not referenced.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(N,1).
+
+ WORK (workspace) REAL array, dimension (LWORK),
+ where LWORK >= N when NORM = 'I'; otherwise, WORK is not
+ referenced.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else if ((lsame_(norm, "O")) || (*(unsigned char *
+ )norm == '1')) {
+
+/* Find norm1(A). */
+
+ value = 0.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ sum += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L30: */
+ }
+ value = dmax(value,sum);
+/* L40: */
+ }
+ } else if (lsame_(norm, "I")) {
+
+/* Find normI(A). */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L50: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] += (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+/* L60: */
+ }
+/* L70: */
+ }
+ value = 0.f;
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L80: */
+ }
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = *n, i__4 = j + 1;
+ i__2 = min(i__3,i__4);
+ slassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L90: */
+ }
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANHS */
+
+} /* slanhs_ */
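+
+/*
+   Editor's note: another hypothetical sketch (same made-up guard).  For
+   an upper Hessenberg matrix the loops above stop at row min(n, j+1),
+   so anything stored below the first subdiagonal is ignored.
+*/
+#ifdef LAPACK_LITE_USAGE_SKETCHES
+static real slanhs_usage_sketch(void)
+{
+    /* H = [ 1 2 3 ; 4 5 6 ; x 7 8 ]; the (3,1) slot holds a sentinel
+       that slanhs_ must never read. */
+    static real a[9] = { 1.f, 4.f, -999.f, 2.f, 5.f, 7.f, 3.f, 6.f, 8.f };
+    integer n = 3, lda = 3;
+    real work[3];
+    return (real) slanhs_("M", &n, a, &lda, work);   /* = 8, not 999 */
+}
+#endif /* LAPACK_LITE_USAGE_SKETCHES */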
+
+doublereal slanst_(char *norm, integer *n, real *d__, real *e)
+{
+ /* System generated locals */
+ integer i__1;
+ real ret_val, r__1, r__2, r__3, r__4, r__5;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__;
+ static real sum, scale;
+ extern logical lsame_(char *, char *);
+ static real anorm;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SLANST returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ real symmetric tridiagonal matrix A.
+
+ Description
+ ===========
+
+ SLANST returns the value
+
+ SLANST = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in SLANST as described
+ above.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0. When N = 0, SLANST is
+ set to zero.
+
+ D (input) REAL array, dimension (N)
+ The diagonal elements of A.
+
+ E (input) REAL array, dimension (N-1)
+ The (n-1) sub-diagonal or super-diagonal elements of A.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --e;
+ --d__;
+
+ /* Function Body */
+ if (*n <= 0) {
+ anorm = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ anorm = (r__1 = d__[*n], dabs(r__1));
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__2 = anorm, r__3 = (r__1 = d__[i__], dabs(r__1));
+ anorm = dmax(r__2,r__3);
+/* Computing MAX */
+ r__2 = anorm, r__3 = (r__1 = e[i__], dabs(r__1));
+ anorm = dmax(r__2,r__3);
+/* L10: */
+ }
+ } else if (((lsame_(norm, "O")) || (*(unsigned char
+ *)norm == '1')) || (lsame_(norm, "I"))) {
+
+/* Find norm1(A). */
+
+ if (*n == 1) {
+ anorm = dabs(d__[1]);
+ } else {
+/* Computing MAX */
+ r__3 = dabs(d__[1]) + dabs(e[1]), r__4 = (r__1 = e[*n - 1], dabs(
+ r__1)) + (r__2 = d__[*n], dabs(r__2));
+ anorm = dmax(r__3,r__4);
+ i__1 = *n - 1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__4 = anorm, r__5 = (r__1 = d__[i__], dabs(r__1)) + (r__2 =
+ e[i__], dabs(r__2)) + (r__3 = e[i__ - 1], dabs(r__3));
+ anorm = dmax(r__4,r__5);
+/* L20: */
+ }
+ }
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ if (*n > 1) {
+ i__1 = *n - 1;
+ slassq_(&i__1, &e[1], &c__1, &scale, &sum);
+ sum *= 2;
+ }
+ slassq_(n, &d__[1], &c__1, &scale, &sum);
+ anorm = scale * sqrt(sum);
+ }
+
+ ret_val = anorm;
+ return ret_val;
+
+/* End of SLANST */
+
+} /* slanst_ */
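+
+/*
+   Editor's note: hypothetical sketch (same made-up guard).  A symmetric
+   tridiagonal matrix is passed as its diagonal D and off-diagonal E
+   only; no square array is formed.
+*/
+#ifdef LAPACK_LITE_USAGE_SKETCHES
+static real slanst_usage_sketch(void)
+{
+    /* T = [ 2 -1 0 ; -1 2 -1 ; 0 -1 2 ] */
+    static real d[3] = { 2.f, 2.f, 2.f };
+    static real e[2] = { -1.f, -1.f };
+    integer n = 3;
+    /* one norm = largest column sum = |-1| + |2| + |-1| = 4 */
+    return (real) slanst_("1", &n, d, e);
+}
+#endif /* LAPACK_LITE_USAGE_SKETCHES */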
+
+doublereal slansy_(char *norm, char *uplo, integer *n, real *a, integer *lda,
+ real *work)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j;
+ static real sum, absa, scale;
+ extern logical lsame_(char *, char *);
+ static real value;
+ extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
+ real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLANSY returns the value of the one norm, or the Frobenius norm, or
+ the infinity norm, or the element of largest absolute value of a
+ real symmetric matrix A.
+
+ Description
+ ===========
+
+ SLANSY returns the value
+
+ SLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm'
+ (
+ ( norm1(A), NORM = '1', 'O' or 'o'
+ (
+ ( normI(A), NORM = 'I' or 'i'
+ (
+ ( normF(A), NORM = 'F', 'f', 'E' or 'e'
+
+ where norm1 denotes the one norm of a matrix (maximum column sum),
+ normI denotes the infinity norm of a matrix (maximum row sum) and
+ normF denotes the Frobenius norm of a matrix (square root of sum of
+ squares). Note that max(abs(A(i,j))) is not a matrix norm.
+
+ Arguments
+ =========
+
+ NORM (input) CHARACTER*1
+ Specifies the value to be returned in SLANSY as described
+ above.
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the upper or lower triangular part of the
+ symmetric matrix A is to be referenced.
+ = 'U': Upper triangular part of A is referenced
+ = 'L': Lower triangular part of A is referenced
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0. When N = 0, SLANSY is
+ set to zero.
+
+ A (input) REAL array, dimension (LDA,N)
+ The symmetric matrix A. If UPLO = 'U', the leading n by n
+ upper triangular part of A contains the upper triangular part
+ of the matrix A, and the strictly lower triangular part of A
+ is not referenced. If UPLO = 'L', the leading n by n lower
+ triangular part of A contains the lower triangular part of
+ the matrix A, and the strictly upper triangular part of A is
+ not referenced.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(N,1).
+
+ WORK (workspace) REAL array, dimension (LWORK),
+ where LWORK >= N when NORM = 'I' or '1' or 'O'; otherwise,
+ WORK is not referenced.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --work;
+
+ /* Function Body */
+ if (*n == 0) {
+ value = 0.f;
+ } else if (lsame_(norm, "M")) {
+
+/* Find max(abs(A(i,j))). */
+
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = j;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1], dabs(
+ r__1));
+ value = dmax(r__2,r__3);
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = j; i__ <= i__2; ++i__) {
+/* Computing MAX */
+ r__2 = value, r__3 = (r__1 = a[i__ + j * a_dim1], dabs(
+ r__1));
+ value = dmax(r__2,r__3);
+/* L30: */
+ }
+/* L40: */
+ }
+ }
+ } else if (((lsame_(norm, "I")) || (lsame_(norm,
+ "O"))) || (*(unsigned char *)norm == '1')) {
+
+/* Find normI(A) ( = norm1(A), since A is symmetric). */
+
+ value = 0.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = 0.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ absa = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+ sum += absa;
+ work[i__] += absa;
+/* L50: */
+ }
+ work[j] = sum + (r__1 = a[j + j * a_dim1], dabs(r__1));
+/* L60: */
+ }
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = value, r__2 = work[i__];
+ value = dmax(r__1,r__2);
+/* L70: */
+ }
+ } else {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ work[i__] = 0.f;
+/* L80: */
+ }
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = work[j] + (r__1 = a[j + j * a_dim1], dabs(r__1));
+ i__2 = *n;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ absa = (r__1 = a[i__ + j * a_dim1], dabs(r__1));
+ sum += absa;
+ work[i__] += absa;
+/* L90: */
+ }
+ value = dmax(value,sum);
+/* L100: */
+ }
+ }
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
+
+/* Find normF(A). */
+
+ scale = 0.f;
+ sum = 1.f;
+ if (lsame_(uplo, "U")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ i__2 = j - 1;
+ slassq_(&i__2, &a[j * a_dim1 + 1], &c__1, &scale, &sum);
+/* L110: */
+ }
+ } else {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n - j;
+ slassq_(&i__2, &a[j + 1 + j * a_dim1], &c__1, &scale, &sum);
+/* L120: */
+ }
+ }
+ sum *= 2;
+ i__1 = *lda + 1;
+ slassq_(n, &a[a_offset], &i__1, &scale, &sum);
+ value = scale * sqrt(sum);
+ }
+
+ ret_val = value;
+ return ret_val;
+
+/* End of SLANSY */
+
+} /* slansy_ */
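+
+/*
+   Editor's note: hypothetical sketch (same made-up guard) of the UPLO
+   convention: with UPLO = "U" the strictly lower triangle is never read
+   and may hold anything.
+*/
+#ifdef LAPACK_LITE_USAGE_SKETCHES
+static real slansy_usage_sketch(void)
+{
+    /* symmetric A = [ 1 5 ; 5 3 ]; a[1], the strictly lower A(2,1) slot,
+       holds a sentinel that slansy_ must ignore for UPLO = "U". */
+    static real a[4] = { 1.f, -99.f, 5.f, 3.f };
+    integer n = 2, lda = 2;
+    real work[2];
+    return (real) slansy_("M", "U", &n, a, &lda, work);   /* = 5 */
+}
+#endif /* LAPACK_LITE_USAGE_SKETCHES */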
+
+/* Subroutine */ int slanv2_(real *a, real *b, real *c__, real *d__, real *
+ rt1r, real *rt1i, real *rt2r, real *rt2i, real *cs, real *sn)
+{
+ /* System generated locals */
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double r_sign(real *, real *), sqrt(doublereal);
+
+ /* Local variables */
+ static real p, z__, aa, bb, cc, dd, cs1, sn1, sab, sac, eps, tau, temp,
+ scale, bcmax, bcmis, sigma;
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLANV2 computes the Schur factorization of a real 2-by-2 nonsymmetric
+ matrix in standard form:
+
+ [ A B ] = [ CS -SN ] [ AA BB ] [ CS SN ]
+ [ C D ] [ SN CS ] [ CC DD ] [-SN CS ]
+
+ where either
+ 1) CC = 0 so that AA and DD are real eigenvalues of the matrix, or
+ 2) AA = DD and BB*CC < 0, so that AA +/- sqrt(BB*CC) are complex
+ conjugate eigenvalues.
+
+ Arguments
+ =========
+
+ A (input/output) REAL
+ B (input/output) REAL
+ C (input/output) REAL
+ D (input/output) REAL
+ On entry, the elements of the input matrix.
+ On exit, they are overwritten by the elements of the
+ standardised Schur form.
+
+ RT1R (output) REAL
+ RT1I (output) REAL
+ RT2R (output) REAL
+ RT2I (output) REAL
+ The real and imaginary parts of the eigenvalues. If the
+ eigenvalues are a complex conjugate pair, RT1I > 0.
+
+ CS (output) REAL
+ SN (output) REAL
+ Parameters of the rotation matrix.
+
+ Further Details
+ ===============
+
+ Modified by V. Sima, Research Institute for Informatics, Bucharest,
+ Romania, to reduce the risk of cancellation errors,
+ when computing real eigenvalues, and to ensure, if possible, that
+ abs(RT1R) >= abs(RT2R).
+
+ =====================================================================
+*/
+
+
+ eps = slamch_("P");
+ if (*c__ == 0.f) {
+ *cs = 1.f;
+ *sn = 0.f;
+ goto L10;
+
+ } else if (*b == 0.f) {
+
+/* Swap rows and columns */
+
+ *cs = 0.f;
+ *sn = 1.f;
+ temp = *d__;
+ *d__ = *a;
+ *a = temp;
+ *b = -(*c__);
+ *c__ = 0.f;
+ goto L10;
+ } else if (*a - *d__ == 0.f && r_sign(&c_b1011, b) != r_sign(&c_b1011,
+ c__)) {
+ *cs = 1.f;
+ *sn = 0.f;
+ goto L10;
+ } else {
+
+ temp = *a - *d__;
+ p = temp * .5f;
+/* Computing MAX */
+ r__1 = dabs(*b), r__2 = dabs(*c__);
+ bcmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = dabs(*b), r__2 = dabs(*c__);
+ bcmis = dmin(r__1,r__2) * r_sign(&c_b1011, b) * r_sign(&c_b1011, c__);
+/* Computing MAX */
+ r__1 = dabs(p);
+ scale = dmax(r__1,bcmax);
+ z__ = p / scale * p + bcmax / scale * bcmis;
+
+/*
+ If Z is of the order of the machine accuracy, postpone the
+ decision on the nature of eigenvalues
+*/
+
+ if (z__ >= eps * 4.f) {
+
+/* Real eigenvalues. Compute A and D. */
+
+ r__1 = sqrt(scale) * sqrt(z__);
+ z__ = p + r_sign(&r__1, &p);
+ *a = *d__ + z__;
+ *d__ -= bcmax / z__ * bcmis;
+
+/* Compute B and the rotation matrix */
+
+ tau = slapy2_(c__, &z__);
+ *cs = z__ / tau;
+ *sn = *c__ / tau;
+ *b -= *c__;
+ *c__ = 0.f;
+ } else {
+
+/*
+ Complex eigenvalues, or real (almost) equal eigenvalues.
+ Make diagonal elements equal.
+*/
+
+ sigma = *b + *c__;
+ tau = slapy2_(&sigma, &temp);
+ *cs = sqrt((dabs(sigma) / tau + 1.f) * .5f);
+ *sn = -(p / (tau * *cs)) * r_sign(&c_b1011, &sigma);
+
+/*
+ Compute [ AA BB ] = [ A B ] [ CS -SN ]
+ [ CC DD ] [ C D ] [ SN CS ]
+*/
+
+ aa = *a * *cs + *b * *sn;
+ bb = -(*a) * *sn + *b * *cs;
+ cc = *c__ * *cs + *d__ * *sn;
+ dd = -(*c__) * *sn + *d__ * *cs;
+
+/*
+ Compute [ A B ] = [ CS SN ] [ AA BB ]
+ [ C D ] [-SN CS ] [ CC DD ]
+*/
+
+ *a = aa * *cs + cc * *sn;
+ *b = bb * *cs + dd * *sn;
+ *c__ = -aa * *sn + cc * *cs;
+ *d__ = -bb * *sn + dd * *cs;
+
+ temp = (*a + *d__) * .5f;
+ *a = temp;
+ *d__ = temp;
+
+ if (*c__ != 0.f) {
+ if (*b != 0.f) {
+ if (r_sign(&c_b1011, b) == r_sign(&c_b1011, c__)) {
+
+/* Real eigenvalues: reduce to upper triangular form */
+
+ sab = sqrt((dabs(*b)));
+ sac = sqrt((dabs(*c__)));
+ r__1 = sab * sac;
+ p = r_sign(&r__1, c__);
+ tau = 1.f / sqrt((r__1 = *b + *c__, dabs(r__1)));
+ *a = temp + p;
+ *d__ = temp - p;
+ *b -= *c__;
+ *c__ = 0.f;
+ cs1 = sab * tau;
+ sn1 = sac * tau;
+ temp = *cs * cs1 - *sn * sn1;
+ *sn = *cs * sn1 + *sn * cs1;
+ *cs = temp;
+ }
+ } else {
+ *b = -(*c__);
+ *c__ = 0.f;
+ temp = *cs;
+ *cs = -(*sn);
+ *sn = temp;
+ }
+ }
+ }
+
+ }
+
+L10:
+
+/* Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). */
+
+ *rt1r = *a;
+ *rt2r = *d__;
+ if (*c__ == 0.f) {
+ *rt1i = 0.f;
+ *rt2i = 0.f;
+ } else {
+ *rt1i = sqrt((dabs(*b))) * sqrt((dabs(*c__)));
+ *rt2i = -(*rt1i);
+ }
+ return 0;
+
+/* End of SLANV2 */
+
+} /* slanv2_ */
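+
+/*
+   Editor's note: hypothetical sketch (same made-up guard).  The block
+   [ 3 -2 ; 4 -1 ] has eigenvalues 1 +/- 2i, so slanv2_ takes the
+   complex-conjugate branch and equalises the diagonal.
+*/
+#ifdef LAPACK_LITE_USAGE_SKETCHES
+static void slanv2_usage_sketch(void)
+{
+    real a = 3.f, b = -2.f, c = 4.f, d = -1.f;
+    real rt1r, rt1i, rt2r, rt2i, cs, sn;
+    slanv2_(&a, &b, &c, &d, &rt1r, &rt1i, &rt2r, &rt2i, &cs, &sn);
+    /* expect rt1r == rt2r == 1 and rt1i == -rt2i == 2 (to rounding),
+       a == d, and cs*cs + sn*sn == 1 for the standardising rotation. */
+    (void) rt1r; (void) rt1i; (void) rt2r; (void) rt2i;
+    (void) cs; (void) sn;
+}
+#endif /* LAPACK_LITE_USAGE_SKETCHES */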
+
+doublereal slapy2_(real *x, real *y)
+{
+ /* System generated locals */
+ real ret_val, r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real w, z__, xabs, yabs;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLAPY2 returns sqrt(x**2+y**2), taking care not to cause unnecessary
+ overflow.
+
+ Arguments
+ =========
+
+ X (input) REAL
+ Y (input) REAL
+ X and Y specify the values x and y.
+
+ =====================================================================
+*/
+
+
+ xabs = dabs(*x);
+ yabs = dabs(*y);
+ w = dmax(xabs,yabs);
+ z__ = dmin(xabs,yabs);
+ if (z__ == 0.f) {
+ ret_val = w;
+ } else {
+/* Computing 2nd power */
+ r__1 = z__ / w;
+ ret_val = w * sqrt(r__1 * r__1 + 1.f);
+ }
+ return ret_val;
+
+/* End of SLAPY2 */
+
+} /* slapy2_ */
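+
+/*
+   Editor's note: hypothetical sketch (same made-up guard) of the point
+   the Purpose section makes: x*x here is 4e76, far beyond single
+   precision, yet sqrt(x*x + y*y) ~ 2.83e38 is representable, and the
+   scaled form w*sqrt(1 + (z/w)**2) reaches it without overflowing.
+*/
+#ifdef LAPACK_LITE_USAGE_SKETCHES
+static real slapy2_usage_sketch(void)
+{
+    real x = 2e38f, y = 2e38f;
+    return (real) slapy2_(&x, &y);   /* ~2.8284e38, finite */
+}
+#endif /* LAPACK_LITE_USAGE_SKETCHES */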
+
+doublereal slapy3_(real *x, real *y, real *z__)
+{
+ /* System generated locals */
+ real ret_val, r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real w, xabs, yabs, zabs;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLAPY3 returns sqrt(x**2+y**2+z**2), taking care not to cause
+ unnecessary overflow.
+
+ Arguments
+ =========
+
+ X (input) REAL
+ Y (input) REAL
+ Z (input) REAL
+ X, Y and Z specify the values x, y and z.
+
+ =====================================================================
+*/
+
+
+ xabs = dabs(*x);
+ yabs = dabs(*y);
+ zabs = dabs(*z__);
+/* Computing MAX */
+ r__1 = max(xabs,yabs);
+ w = dmax(r__1,zabs);
+ if (w == 0.f) {
+ ret_val = 0.f;
+ } else {
+/* Computing 2nd power */
+ r__1 = xabs / w;
+/* Computing 2nd power */
+ r__2 = yabs / w;
+/* Computing 2nd power */
+ r__3 = zabs / w;
+ ret_val = w * sqrt(r__1 * r__1 + r__2 * r__2 + r__3 * r__3);
+ }
+ return ret_val;
+
+/* End of SLAPY3 */
+
+} /* slapy3_ */
+
+/* Subroutine */ int slarf_(char *side, integer *m, integer *n, real *v,
+ integer *incv, real *tau, real *c__, integer *ldc, real *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset;
+ real r__1;
+
+ /* Local variables */
+ extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SLARF applies a real elementary reflector H to a real m by n matrix
+ C, from either the left or the right. H is represented in the form
+
+ H = I - tau * v * v'
+
+ where tau is a real scalar and v is a real vector.
+
+ If tau = 0, then H is taken to be the unit matrix.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': form H * C
+ = 'R': form C * H
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ V (input) REAL array, dimension
+ (1 + (M-1)*abs(INCV)) if SIDE = 'L'
+ or (1 + (N-1)*abs(INCV)) if SIDE = 'R'
+ The vector v in the representation of H. V is not used if
+ TAU = 0.
+
+ INCV (input) INTEGER
+ The increment between elements of v. INCV <> 0.
+
+ TAU (input) REAL
+ The value tau in the representation of H.
+
+ C (input/output) REAL array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by the matrix H * C if SIDE = 'L',
+ or C * H if SIDE = 'R'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) REAL array, dimension
+ (N) if SIDE = 'L'
+ or (M) if SIDE = 'R'
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (lsame_(side, "L")) {
+
+/* Form H * C */
+
+ if (*tau != 0.f) {
+
+/* w := C' * v */
+
+ sgemv_("Transpose", m, n, &c_b1011, &c__[c_offset], ldc, &v[1],
+ incv, &c_b320, &work[1], &c__1);
+
+/* C := C - v * w' */
+
+ r__1 = -(*tau);
+ sger_(m, n, &r__1, &v[1], incv, &work[1], &c__1, &c__[c_offset],
+ ldc);
+ }
+ } else {
+
+/* Form C * H */
+
+ if (*tau != 0.f) {
+
+/* w := C * v */
+
+ sgemv_("No transpose", m, n, &c_b1011, &c__[c_offset], ldc, &v[1],
+ incv, &c_b320, &work[1], &c__1);
+
+/* C := C - w * v' */
+
+ r__1 = -(*tau);
+ sger_(m, n, &r__1, &work[1], &c__1, &v[1], incv, &c__[c_offset],
+ ldc);
+ }
+ }
+ return 0;
+
+/* End of SLARF */
+
+} /* slarf_ */
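+
+/*
+   Editor's note: hypothetical sketch (same made-up guard).  With
+   v = (1,1)' and tau = 2/(v'v) = 1, H = I - tau*v*v' = [ 0 -1 ; -1 0 ],
+   so applying H from the left swaps and negates the rows of C.
+*/
+#ifdef LAPACK_LITE_USAGE_SKETCHES
+static void slarf_usage_sketch(void)
+{
+    static real v[2] = { 1.f, 1.f };
+    static real c[4] = { 1.f, 2.f, 3.f, 4.f };   /* C = [ 1 3 ; 2 4 ] */
+    real tau = 1.f, work[2];
+    integer m = 2, n = 2, incv = 1, ldc = 2;
+    slarf_("L", &m, &n, v, &incv, &tau, c, &ldc, work);
+    /* c now holds H*C = [ -2 -4 ; -1 -3 ], i.e. { -2, -1, -4, -3 } */
+}
+#endif /* LAPACK_LITE_USAGE_SKETCHES */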
+
+/* Subroutine */ int slarfb_(char *side, char *trans, char *direct, char *
+ storev, integer *m, integer *n, integer *k, real *v, integer *ldv,
+ real *t, integer *ldt, real *c__, integer *ldc, real *work, integer *
+ ldwork)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, t_dim1, t_offset, v_dim1, v_offset, work_dim1,
+ work_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *), scopy_(integer *, real *,
+ integer *, real *, integer *), strmm_(char *, char *, char *,
+ char *, integer *, integer *, real *, real *, integer *, real *,
+ integer *);
+ static char transt[1];
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SLARFB applies a real block reflector H or its transpose H' to a
+ real m by n matrix C, from either the left or the right.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply H or H' from the Left
+ = 'R': apply H or H' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply H (No transpose)
+ = 'T': apply H' (Transpose)
+
+ DIRECT (input) CHARACTER*1
+ Indicates how H is formed from a product of elementary
+ reflectors
+ = 'F': H = H(1) H(2) . . . H(k) (Forward)
+ = 'B': H = H(k) . . . H(2) H(1) (Backward)
+
+ STOREV (input) CHARACTER*1
+ Indicates how the vectors which define the elementary
+ reflectors are stored:
+ = 'C': Columnwise
+ = 'R': Rowwise
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ K (input) INTEGER
+ The order of the matrix T (= the number of elementary
+ reflectors whose product defines the block reflector).
+
+ V (input) REAL array, dimension
+ (LDV,K) if STOREV = 'C'
+ (LDV,M) if STOREV = 'R' and SIDE = 'L'
+ (LDV,N) if STOREV = 'R' and SIDE = 'R'
+ The matrix V. See further details.
+
+ LDV (input) INTEGER
+ The leading dimension of the array V.
+ If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M);
+ if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N);
+ if STOREV = 'R', LDV >= K.
+
+ T (input) REAL array, dimension (LDT,K)
+ The triangular k by k matrix T in the representation of the
+ block reflector.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= K.
+
+ C (input/output) REAL array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by H*C or H'*C or C*H or C*H'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) REAL array, dimension (LDWORK,K)
+
+ LDWORK (input) INTEGER
+ The leading dimension of the array WORK.
+ If SIDE = 'L', LDWORK >= max(1,N);
+ if SIDE = 'R', LDWORK >= max(1,M).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ work_dim1 = *ldwork;
+ work_offset = 1 + work_dim1;
+ work -= work_offset;
+
+ /* Function Body */
+ if ((*m <= 0) || (*n <= 0)) {
+ return 0;
+ }
+
+ if (lsame_(trans, "N")) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ if (lsame_(storev, "C")) {
+
+ if (lsame_(direct, "F")) {
+
+/*
+ Let V = ( V1 ) (first K rows)
+ ( V2 )
+ where V1 is unit lower triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
+
+ W := C1'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ scopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
+ &c__1);
+/* L10: */
+ }
+
+/* W := W * V1 */
+
+ strmm_("Right", "Lower", "No transpose", "Unit", n, k, &
+ c_b1011, &v[v_offset], ldv, &work[work_offset],
+ ldwork);
+ if (*m > *k) {
+
+/* W := W + C2'*V2 */
+
+ i__1 = *m - *k;
+ sgemm_("Transpose", "No transpose", n, k, &i__1, &c_b1011,
+ &c__[*k + 1 + c_dim1], ldc, &v[*k + 1 + v_dim1],
+ ldv, &c_b1011, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ strmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b1011, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (*m > *k) {
+
+/* C2 := C2 - V2 * W' */
+
+ i__1 = *m - *k;
+ sgemm_("No transpose", "Transpose", &i__1, n, k, &c_b1290,
+ &v[*k + 1 + v_dim1], ldv, &work[work_offset],
+ ldwork, &c_b1011, &c__[*k + 1 + c_dim1], ldc);
+ }
+
+/* W := W * V1' */
+
+ strmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b1011,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V = (C1*V1 + C2*V2) (stored in WORK)
+
+ W := C1
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ scopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L40: */
+ }
+
+/* W := W * V1 */
+
+ strmm_("Right", "Lower", "No transpose", "Unit", m, k, &
+ c_b1011, &v[v_offset], ldv, &work[work_offset],
+ ldwork);
+ if (*n > *k) {
+
+/* W := W + C2 * V2 */
+
+ i__1 = *n - *k;
+ sgemm_("No transpose", "No transpose", m, k, &i__1, &
+ c_b1011, &c__[(*k + 1) * c_dim1 + 1], ldc, &v[*k
+ + 1 + v_dim1], ldv, &c_b1011, &work[work_offset],
+ ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ strmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b1011, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (*n > *k) {
+
+/* C2 := C2 - W * V2' */
+
+ i__1 = *n - *k;
+ sgemm_("No transpose", "Transpose", m, &i__1, k, &c_b1290,
+ &work[work_offset], ldwork, &v[*k + 1 + v_dim1],
+ ldv, &c_b1011, &c__[(*k + 1) * c_dim1 + 1], ldc);
+ }
+
+/* W := W * V1' */
+
+ strmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b1011,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+ } else {
+
+/*
+ Let V = ( V1 )
+ ( V2 ) (last K rows)
+ where V2 is unit upper triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V = (C1'*V1 + C2'*V2) (stored in WORK)
+
+ W := C2'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ scopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
+ work_dim1 + 1], &c__1);
+/* L70: */
+ }
+
+/* W := W * V2 */
+
+ strmm_("Right", "Upper", "No transpose", "Unit", n, k, &
+ c_b1011, &v[*m - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C1'*V1 */
+
+ i__1 = *m - *k;
+ sgemm_("Transpose", "No transpose", n, k, &i__1, &c_b1011,
+ &c__[c_offset], ldc, &v[v_offset], ldv, &c_b1011,
+ &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ strmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b1011, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V * W' */
+
+ if (*m > *k) {
+
+/* C1 := C1 - V1 * W' */
+
+ i__1 = *m - *k;
+ sgemm_("No transpose", "Transpose", &i__1, n, k, &c_b1290,
+ &v[v_offset], ldv, &work[work_offset], ldwork, &
+ c_b1011, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ strmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b1011,
+ &v[*m - *k + 1 + v_dim1], ldv, &work[work_offset],
+ ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V = (C1*V1 + C2*V2) (stored in WORK)
+
+ W := C2
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ scopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
+ j * work_dim1 + 1], &c__1);
+/* L100: */
+ }
+
+/* W := W * V2 */
+
+ strmm_("Right", "Upper", "No transpose", "Unit", m, k, &
+ c_b1011, &v[*n - *k + 1 + v_dim1], ldv, &work[
+ work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C1 * V1 */
+
+ i__1 = *n - *k;
+ sgemm_("No transpose", "No transpose", m, k, &i__1, &
+ c_b1011, &c__[c_offset], ldc, &v[v_offset], ldv, &
+ c_b1011, &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ strmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b1011, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V' */
+
+ if (*n > *k) {
+
+/* C1 := C1 - W * V1' */
+
+ i__1 = *n - *k;
+ sgemm_("No transpose", "Transpose", m, &i__1, k, &c_b1290,
+ &work[work_offset], ldwork, &v[v_offset], ldv, &
+ c_b1011, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2' */
+
+ strmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b1011,
+ &v[*n - *k + 1 + v_dim1], ldv, &work[work_offset],
+ ldwork);
+
+/* C2 := C2 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L110: */
+ }
+/* L120: */
+ }
+ }
+ }
+
+ } else if (lsame_(storev, "R")) {
+
+ if (lsame_(direct, "F")) {
+
+/*
+ Let V = ( V1 V2 ) (V1: first K columns)
+ where V1 is unit upper triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
+
+ W := C1'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ scopy_(n, &c__[j + c_dim1], ldc, &work[j * work_dim1 + 1],
+ &c__1);
+/* L130: */
+ }
+
+/* W := W * V1' */
+
+ strmm_("Right", "Upper", "Transpose", "Unit", n, k, &c_b1011,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C2'*V2' */
+
+ i__1 = *m - *k;
+ sgemm_("Transpose", "Transpose", n, k, &i__1, &c_b1011, &
+ c__[*k + 1 + c_dim1], ldc, &v[(*k + 1) * v_dim1 +
+ 1], ldv, &c_b1011, &work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ strmm_("Right", "Upper", transt, "Non-unit", n, k, &c_b1011, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (*m > *k) {
+
+/* C2 := C2 - V2' * W' */
+
+ i__1 = *m - *k;
+ sgemm_("Transpose", "Transpose", &i__1, n, k, &c_b1290, &
+ v[(*k + 1) * v_dim1 + 1], ldv, &work[work_offset],
+ ldwork, &c_b1011, &c__[*k + 1 + c_dim1], ldc);
+ }
+
+/* W := W * V1 */
+
+ strmm_("Right", "Upper", "No transpose", "Unit", n, k, &
+ c_b1011, &v[v_offset], ldv, &work[work_offset],
+ ldwork);
+
+/* C1 := C1 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[j + i__ * c_dim1] -= work[i__ + j * work_dim1];
+/* L140: */
+ }
+/* L150: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
+
+ W := C1
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ scopy_(m, &c__[j * c_dim1 + 1], &c__1, &work[j *
+ work_dim1 + 1], &c__1);
+/* L160: */
+ }
+
+/* W := W * V1' */
+
+ strmm_("Right", "Upper", "Transpose", "Unit", m, k, &c_b1011,
+ &v[v_offset], ldv, &work[work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C2 * V2' */
+
+ i__1 = *n - *k;
+ sgemm_("No transpose", "Transpose", m, k, &i__1, &c_b1011,
+ &c__[(*k + 1) * c_dim1 + 1], ldc, &v[(*k + 1) *
+ v_dim1 + 1], ldv, &c_b1011, &work[work_offset],
+ ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ strmm_("Right", "Upper", trans, "Non-unit", m, k, &c_b1011, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (*n > *k) {
+
+/* C2 := C2 - W * V2 */
+
+ i__1 = *n - *k;
+ sgemm_("No transpose", "No transpose", m, &i__1, k, &
+ c_b1290, &work[work_offset], ldwork, &v[(*k + 1) *
+ v_dim1 + 1], ldv, &c_b1011, &c__[(*k + 1) *
+ c_dim1 + 1], ldc);
+ }
+
+/* W := W * V1 */
+
+ strmm_("Right", "Upper", "No transpose", "Unit", m, k, &
+ c_b1011, &v[v_offset], ldv, &work[work_offset],
+ ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + j * c_dim1] -= work[i__ + j * work_dim1];
+/* L170: */
+ }
+/* L180: */
+ }
+
+ }
+
+ } else {
+
+/*
+ Let V = ( V1 V2 ) (V2: last K columns)
+ where V2 is unit lower triangular.
+*/
+
+ if (lsame_(side, "L")) {
+
+/*
+ Form H * C or H' * C where C = ( C1 )
+ ( C2 )
+
+ W := C' * V' = (C1'*V1' + C2'*V2') (stored in WORK)
+
+ W := C2'
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ scopy_(n, &c__[*m - *k + j + c_dim1], ldc, &work[j *
+ work_dim1 + 1], &c__1);
+/* L190: */
+ }
+
+/* W := W * V2' */
+
+ strmm_("Right", "Lower", "Transpose", "Unit", n, k, &c_b1011,
+ &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+ if (*m > *k) {
+
+/* W := W + C1'*V1' */
+
+ i__1 = *m - *k;
+ sgemm_("Transpose", "Transpose", n, k, &i__1, &c_b1011, &
+ c__[c_offset], ldc, &v[v_offset], ldv, &c_b1011, &
+ work[work_offset], ldwork);
+ }
+
+/* W := W * T' or W * T */
+
+ strmm_("Right", "Lower", transt, "Non-unit", n, k, &c_b1011, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - V' * W' */
+
+ if (*m > *k) {
+
+/* C1 := C1 - V1' * W' */
+
+ i__1 = *m - *k;
+ sgemm_("Transpose", "Transpose", &i__1, n, k, &c_b1290, &
+ v[v_offset], ldv, &work[work_offset], ldwork, &
+ c_b1011, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ strmm_("Right", "Lower", "No transpose", "Unit", n, k, &
+ c_b1011, &v[(*m - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C2 := C2 - W' */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[*m - *k + j + i__ * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L200: */
+ }
+/* L210: */
+ }
+
+ } else if (lsame_(side, "R")) {
+
+/*
+ Form C * H or C * H' where C = ( C1 C2 )
+
+ W := C * V' = (C1*V1' + C2*V2') (stored in WORK)
+
+ W := C2
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ scopy_(m, &c__[(*n - *k + j) * c_dim1 + 1], &c__1, &work[
+ j * work_dim1 + 1], &c__1);
+/* L220: */
+ }
+
+/* W := W * V2' */
+
+ strmm_("Right", "Lower", "Transpose", "Unit", m, k, &c_b1011,
+ &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+ if (*n > *k) {
+
+/* W := W + C1 * V1' */
+
+ i__1 = *n - *k;
+ sgemm_("No transpose", "Transpose", m, k, &i__1, &c_b1011,
+ &c__[c_offset], ldc, &v[v_offset], ldv, &c_b1011,
+ &work[work_offset], ldwork);
+ }
+
+/* W := W * T or W * T' */
+
+ strmm_("Right", "Lower", trans, "Non-unit", m, k, &c_b1011, &
+ t[t_offset], ldt, &work[work_offset], ldwork);
+
+/* C := C - W * V */
+
+ if (*n > *k) {
+
+/* C1 := C1 - W * V1 */
+
+ i__1 = *n - *k;
+ sgemm_("No transpose", "No transpose", m, &i__1, k, &
+ c_b1290, &work[work_offset], ldwork, &v[v_offset],
+ ldv, &c_b1011, &c__[c_offset], ldc);
+ }
+
+/* W := W * V2 */
+
+ strmm_("Right", "Lower", "No transpose", "Unit", m, k, &
+ c_b1011, &v[(*n - *k + 1) * v_dim1 + 1], ldv, &work[
+ work_offset], ldwork);
+
+/* C1 := C1 - W */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ c__[i__ + (*n - *k + j) * c_dim1] -= work[i__ + j *
+ work_dim1];
+/* L230: */
+ }
+/* L240: */
+ }
+
+ }
+
+ }
+ }
+
+ return 0;
+
+/* End of SLARFB */
+
+} /* slarfb_ */
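+
+/*
+   Editor's note: hypothetical sketch (same made-up guard).  For k = 1
+   the block reflector collapses to a single elementary reflector with
+   T = [ tau ], so this call matches slarf_ with v = (1, 0.5)' and
+   tau = 1.6 -- the pair produced by the slarfg_ sketch further down.
+*/
+#ifdef LAPACK_LITE_USAGE_SKETCHES
+static void slarfb_usage_sketch(void)
+{
+    static real v[2] = { 1.f, .5f };   /* STOREV = "C": one column */
+    static real t[1] = { 1.6f };
+    static real c[4] = { 1.f, 2.f, 3.f, 4.f };
+    real work[2];                      /* (LDWORK, K) workspace */
+    integer m = 2, n = 2, k = 1, ldv = 2, ldt = 1, ldc = 2, ldwork = 2;
+    slarfb_("L", "N", "F", "C", &m, &n, &k, v, &ldv, t, &ldt, c, &ldc,
+            work, &ldwork);
+}
+#endif /* LAPACK_LITE_USAGE_SKETCHES */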
+
+/* Subroutine */ int slarfg_(integer *n, real *alpha, real *x, integer *incx,
+ real *tau)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double r_sign(real *, real *);
+
+ /* Local variables */
+ static integer j, knt;
+ static real beta;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ static real xnorm;
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+ static real safmin, rsafmn;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ SLARFG generates a real elementary reflector H of order n, such
+ that
+
+ H * ( alpha ) = ( beta ), H' * H = I.
+ ( x ) ( 0 )
+
+ where alpha and beta are scalars, and x is an (n-1)-element real
+ vector. H is represented in the form
+
+ H = I - tau * ( 1 ) * ( 1 v' ) ,
+ ( v )
+
+ where tau is a real scalar and v is a real (n-1)-element
+ vector.
+
+ If the elements of x are all zero, then tau = 0 and H is taken to be
+ the unit matrix.
+
+ Otherwise 1 <= tau <= 2.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the elementary reflector.
+
+ ALPHA (input/output) REAL
+ On entry, the value alpha.
+ On exit, it is overwritten with the value beta.
+
+ X (input/output) REAL array, dimension
+ (1+(N-2)*abs(INCX))
+ On entry, the vector x.
+ On exit, it is overwritten with the vector v.
+
+ INCX (input) INTEGER
+ The increment between elements of X. INCX > 0.
+
+ TAU (output) REAL
+ The value tau.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n <= 1) {
+ *tau = 0.f;
+ return 0;
+ }
+
+ i__1 = *n - 1;
+ xnorm = snrm2_(&i__1, &x[1], incx);
+
+ if (xnorm == 0.f) {
+
+/* H = I */
+
+ *tau = 0.f;
+ } else {
+
+/* general case */
+
+ r__1 = slapy2_(alpha, &xnorm);
+ beta = -r_sign(&r__1, alpha);
+ safmin = slamch_("S") / slamch_("E");
+ if (dabs(beta) < safmin) {
+
+/* XNORM, BETA may be inaccurate; scale X and recompute them */
+
+ rsafmn = 1.f / safmin;
+ knt = 0;
+L10:
+ ++knt;
+ i__1 = *n - 1;
+ sscal_(&i__1, &rsafmn, &x[1], incx);
+ beta *= rsafmn;
+ *alpha *= rsafmn;
+ if (dabs(beta) < safmin) {
+ goto L10;
+ }
+
+/* New BETA is at most 1, at least SAFMIN */
+
+ i__1 = *n - 1;
+ xnorm = snrm2_(&i__1, &x[1], incx);
+ r__1 = slapy2_(alpha, &xnorm);
+ beta = -r_sign(&r__1, alpha);
+ *tau = (beta - *alpha) / beta;
+ i__1 = *n - 1;
+ r__1 = 1.f / (*alpha - beta);
+ sscal_(&i__1, &r__1, &x[1], incx);
+
+/* If ALPHA is subnormal, it may lose relative accuracy */
+
+ *alpha = beta;
+ i__1 = knt;
+ for (j = 1; j <= i__1; ++j) {
+ *alpha *= safmin;
+/* L20: */
+ }
+ } else {
+ *tau = (beta - *alpha) / beta;
+ i__1 = *n - 1;
+ r__1 = 1.f / (*alpha - beta);
+ sscal_(&i__1, &r__1, &x[1], incx);
+ *alpha = beta;
+ }
+ }
+
+ return 0;
+
+/* End of SLARFG */
+
+} /* slarfg_ */
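+
+/*
+   Editor's note: hypothetical sketch (same made-up guard) of the classic
+   (alpha, x) = (3, 4) case: beta = -sign(alpha)*sqrt(3**2 + 4**2) = -5,
+   tau = (beta - alpha)/beta = 1.6, and v(2) = x/(alpha - beta) = 0.5.
+*/
+#ifdef LAPACK_LITE_USAGE_SKETCHES
+static void slarfg_usage_sketch(void)
+{
+    real alpha = 3.f, tau;
+    static real x[1] = { 4.f };
+    integer n = 2, incx = 1;
+    slarfg_(&n, &alpha, x, &incx, &tau);
+    /* alpha now holds beta = -5, x[0] holds v(2) = 0.5, tau = 1.6; the
+       resulting H = I - tau*v*v' sends (3, 4)' to (-5, 0)'. */
+    (void) tau;
+}
+#endif /* LAPACK_LITE_USAGE_SKETCHES */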
+
+/* Subroutine */ int slarft_(char *direct, char *storev, integer *n, integer *
+ k, real *v, integer *ldv, real *tau, real *t, integer *ldt)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, v_dim1, v_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Local variables */
+ static integer i__, j;
+ static real vii;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *), strmv_(char *, char *, char *, integer *, real *,
+ integer *, real *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SLARFT forms the triangular factor T of a real block reflector H
+ of order n, which is defined as a product of k elementary reflectors.
+
+ If DIRECT = 'F', H = H(1) H(2) . . . H(k) and T is upper triangular;
+
+ If DIRECT = 'B', H = H(k) . . . H(2) H(1) and T is lower triangular.
+
+ If STOREV = 'C', the vector which defines the elementary reflector
+ H(i) is stored in the i-th column of the array V, and
+
+ H = I - V * T * V'
+
+ If STOREV = 'R', the vector which defines the elementary reflector
+ H(i) is stored in the i-th row of the array V, and
+
+ H = I - V' * T * V
+
+ Arguments
+ =========
+
+ DIRECT (input) CHARACTER*1
+ Specifies the order in which the elementary reflectors are
+ multiplied to form the block reflector:
+ = 'F': H = H(1) H(2) . . . H(k) (Forward)
+ = 'B': H = H(k) . . . H(2) H(1) (Backward)
+
+ STOREV (input) CHARACTER*1
+ Specifies how the vectors which define the elementary
+ reflectors are stored (see also Further Details):
+ = 'C': columnwise
+ = 'R': rowwise
+
+ N (input) INTEGER
+ The order of the block reflector H. N >= 0.
+
+ K (input) INTEGER
+ The order of the triangular factor T (= the number of
+ elementary reflectors). K >= 1.
+
+ V (input/output) REAL array, dimension
+ (LDV,K) if STOREV = 'C'
+ (LDV,N) if STOREV = 'R'
+ The matrix V. See further details.
+
+ LDV (input) INTEGER
+ The leading dimension of the array V.
+ If STOREV = 'C', LDV >= max(1,N); if STOREV = 'R', LDV >= K.
+
+ TAU (input) REAL array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i).
+
+ T (output) REAL array, dimension (LDT,K)
+ The k by k triangular factor T of the block reflector.
+ If DIRECT = 'F', T is upper triangular; if DIRECT = 'B', T is
+ lower triangular. The rest of the array is not used.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= K.
+
+ Further Details
+ ===============
+
+ The shape of the matrix V and the storage of the vectors which define
+ the H(i) is best illustrated by the following example with n = 5 and
+ k = 3. The elements equal to 1 are not stored; the corresponding
+ array elements are modified but restored on exit. The rest of the
+ array is not used.
+
+ DIRECT = 'F' and STOREV = 'C': DIRECT = 'F' and STOREV = 'R':
+
+ V = ( 1 ) V = ( 1 v1 v1 v1 v1 )
+ ( v1 1 ) ( 1 v2 v2 v2 )
+ ( v1 v2 1 ) ( 1 v3 v3 )
+ ( v1 v2 v3 )
+ ( v1 v2 v3 )
+
+ DIRECT = 'B' and STOREV = 'C': DIRECT = 'B' and STOREV = 'R':
+
+ V = ( v1 v2 v3 ) V = ( v1 v1 1 )
+ ( v1 v2 v3 ) ( v2 v2 v2 1 )
+ ( 1 v2 v3 ) ( v3 v3 v3 v3 1 )
+ ( 1 v3 )
+ ( 1 )
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ v_dim1 = *ldv;
+ v_offset = 1 + v_dim1;
+ v -= v_offset;
+ --tau;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+
+ /* Function Body */
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (lsame_(direct, "F")) {
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (tau[i__] == 0.f) {
+
+/* H(i) = I */
+
+ i__2 = i__;
+ for (j = 1; j <= i__2; ++j) {
+ t[j + i__ * t_dim1] = 0.f;
+/* L10: */
+ }
+ } else {
+
+/* general case */
+
+ vii = v[i__ + i__ * v_dim1];
+ v[i__ + i__ * v_dim1] = 1.f;
+ if (lsame_(storev, "C")) {
+
+/* T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ r__1 = -tau[i__];
+ sgemv_("Transpose", &i__2, &i__3, &r__1, &v[i__ + v_dim1],
+ ldv, &v[i__ + i__ * v_dim1], &c__1, &c_b320, &t[
+ i__ * t_dim1 + 1], &c__1);
+ } else {
+
+/* T(1:i-1,i) := - tau(i) * V(1:i-1,i:n) * V(i,i:n)' */
+
+ i__2 = i__ - 1;
+ i__3 = *n - i__ + 1;
+ r__1 = -tau[i__];
+ sgemv_("No transpose", &i__2, &i__3, &r__1, &v[i__ *
+ v_dim1 + 1], ldv, &v[i__ + i__ * v_dim1], ldv, &
+ c_b320, &t[i__ * t_dim1 + 1], &c__1);
+ }
+ v[i__ + i__ * v_dim1] = vii;
+
+/* T(1:i-1,i) := T(1:i-1,1:i-1) * T(1:i-1,i) */
+
+ i__2 = i__ - 1;
+ strmv_("Upper", "No transpose", "Non-unit", &i__2, &t[
+ t_offset], ldt, &t[i__ * t_dim1 + 1], &c__1);
+ t[i__ + i__ * t_dim1] = tau[i__];
+ }
+/* L20: */
+ }
+ } else {
+ for (i__ = *k; i__ >= 1; --i__) {
+ if (tau[i__] == 0.f) {
+
+/* H(i) = I */
+
+ i__1 = *k;
+ for (j = i__; j <= i__1; ++j) {
+ t[j + i__ * t_dim1] = 0.f;
+/* L30: */
+ }
+ } else {
+
+/* general case */
+
+ if (i__ < *k) {
+ if (lsame_(storev, "C")) {
+ vii = v[*n - *k + i__ + i__ * v_dim1];
+ v[*n - *k + i__ + i__ * v_dim1] = 1.f;
+
+/*
+ T(i+1:k,i) :=
+ - tau(i) * V(1:n-k+i,i+1:k)' * V(1:n-k+i,i)
+*/
+
+ i__1 = *n - *k + i__;
+ i__2 = *k - i__;
+ r__1 = -tau[i__];
+ sgemv_("Transpose", &i__1, &i__2, &r__1, &v[(i__ + 1)
+ * v_dim1 + 1], ldv, &v[i__ * v_dim1 + 1], &
+ c__1, &c_b320, &t[i__ + 1 + i__ * t_dim1], &
+ c__1);
+ v[*n - *k + i__ + i__ * v_dim1] = vii;
+ } else {
+ vii = v[i__ + (*n - *k + i__) * v_dim1];
+ v[i__ + (*n - *k + i__) * v_dim1] = 1.f;
+
+/*
+ T(i+1:k,i) :=
+ - tau(i) * V(i+1:k,1:n-k+i) * V(i,1:n-k+i)'
+*/
+
+ i__1 = *k - i__;
+ i__2 = *n - *k + i__;
+ r__1 = -tau[i__];
+ sgemv_("No transpose", &i__1, &i__2, &r__1, &v[i__ +
+ 1 + v_dim1], ldv, &v[i__ + v_dim1], ldv, &
+ c_b320, &t[i__ + 1 + i__ * t_dim1], &c__1);
+ v[i__ + (*n - *k + i__) * v_dim1] = vii;
+ }
+
+/* T(i+1:k,i) := T(i+1:k,i+1:k) * T(i+1:k,i) */
+
+ i__1 = *k - i__;
+ strmv_("Lower", "No transpose", "Non-unit", &i__1, &t[i__
+ + 1 + (i__ + 1) * t_dim1], ldt, &t[i__ + 1 + i__ *
+ t_dim1], &c__1);
+ }
+ t[i__ + i__ * t_dim1] = tau[i__];
+ }
+/* L40: */
+ }
+ }
+ return 0;
+
+/* End of SLARFT */
+
+} /* slarft_ */
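+
+/*
+   Editor's note: hypothetical sketch (same made-up guard).  In the
+   forward columnwise case the loop above fills T one column at a time:
+   T(1,1) = tau1 and T(1,2) = -tau1*tau2*(v1'v2), giving
+   H(1)*H(2) = I - V*T*V'.
+*/
+#ifdef LAPACK_LITE_USAGE_SKETCHES
+static void slarft_usage_sketch(void)
+{
+    /* two reflectors of order 3 stored columnwise; diagonal slots are
+       treated as 1 and v[3] (above the diagonal of column 2) is never
+       referenced. */
+    static real v[6] = { 1.f, .5f, .5f,     /* v1 */
+                         0.f, 1.f, .25f };  /* v2 */
+    static real tau[2] = { 1.2f, 1.5f };
+    real t[4];
+    integer n = 3, k = 2, ldv = 3, ldt = 2;
+    slarft_("F", "C", &n, &k, v, &ldv, tau, t, &ldt);
+    /* t[0] = 1.2, t[2] = -1.2 * 1.5 * (0.5*1 + 0.5*0.25) = -1.125,
+       t[3] = 1.5; the strictly lower t[1] is left untouched. */
+}
+#endif /* LAPACK_LITE_USAGE_SKETCHES */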
+
+/* Subroutine */ int slarfx_(char *side, integer *m, integer *n, real *v,
+ real *tau, real *c__, integer *ldc, real *work)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, i__1;
+ real r__1;
+
+ /* Local variables */
+ static integer j;
+ static real t1, t2, t3, t4, t5, t6, t7, t8, t9, v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, t10, v10, sum;
+ extern /* Subroutine */ int sger_(integer *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SLARFX applies a real elementary reflector H to a real m by n
+ matrix C, from either the left or the right. H is represented in the
+ form
+
+ H = I - tau * v * v'
+
+ where tau is a real scalar and v is a real vector.
+
+ If tau = 0, then H is taken to be the unit matrix.
+
+ This version uses inline code if H has order < 11.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': form H * C
+ = 'R': form C * H
+
+ M (input) INTEGER
+ The number of rows of the matrix C.
+
+ N (input) INTEGER
+ The number of columns of the matrix C.
+
+ V (input) REAL array, dimension (M) if SIDE = 'L'
+ or (N) if SIDE = 'R'
+ The vector v in the representation of H.
+
+ TAU (input) REAL
+ The value tau in the representation of H.
+
+ C (input/output) REAL array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by the matrix H * C if SIDE = 'L',
+ or C * H if SIDE = 'R'.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) REAL array, dimension
+ (N) if SIDE = 'L'
+ or (M) if SIDE = 'R'
+ WORK is not referenced if H has order < 11.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --v;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ if (*tau == 0.f) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form H * C, where H has order m. */
+
+ switch (*m) {
+ case 1: goto L10;
+ case 2: goto L30;
+ case 3: goto L50;
+ case 4: goto L70;
+ case 5: goto L90;
+ case 6: goto L110;
+ case 7: goto L130;
+ case 8: goto L150;
+ case 9: goto L170;
+ case 10: goto L190;
+ }
+
+/*
+ Code for general M
+
+ w := C'*v
+*/
+
+ sgemv_("Transpose", m, n, &c_b1011, &c__[c_offset], ldc, &v[1], &c__1,
+ &c_b320, &work[1], &c__1);
+
+/* C := C - tau * v * w' */
+
+ r__1 = -(*tau);
+ sger_(m, n, &r__1, &v[1], &c__1, &work[1], &c__1, &c__[c_offset], ldc);
+ goto L410;
+L10:
+
+/* Special code for 1 x 1 Householder */
+
+ t1 = 1.f - *tau * v[1] * v[1];
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j * c_dim1 + 1] = t1 * c__[j * c_dim1 + 1];
+/* L20: */
+ }
+ goto L410;
+L30:
+
+/* Special code for 2 x 2 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+/* L40: */
+ }
+ goto L410;
+L50:
+
+/* Special code for 3 x 3 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+/* L60: */
+ }
+ goto L410;
+L70:
+
+/* Special code for 4 x 4 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+/* L80: */
+ }
+ goto L410;
+L90:
+
+/* Special code for 5 x 5 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+/* L100: */
+ }
+ goto L410;
+L110:
+
+/* Special code for 6 x 6 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+/* L120: */
+ }
+ goto L410;
+L130:
+
+/* Special code for 7 x 7 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+/* L140: */
+ }
+ goto L410;
+L150:
+
+/* Special code for 8 x 8 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+/* L160: */
+ }
+ goto L410;
+L170:
+
+/* Special code for 9 x 9 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
+ c_dim1 + 9];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+ c__[j * c_dim1 + 9] -= sum * t9;
+/* L180: */
+ }
+ goto L410;
+L190:
+
+/* Special code for 10 x 10 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ v10 = v[10];
+ t10 = *tau * v10;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j * c_dim1 + 1] + v2 * c__[j * c_dim1 + 2] + v3 *
+ c__[j * c_dim1 + 3] + v4 * c__[j * c_dim1 + 4] + v5 * c__[
+ j * c_dim1 + 5] + v6 * c__[j * c_dim1 + 6] + v7 * c__[j *
+ c_dim1 + 7] + v8 * c__[j * c_dim1 + 8] + v9 * c__[j *
+ c_dim1 + 9] + v10 * c__[j * c_dim1 + 10];
+ c__[j * c_dim1 + 1] -= sum * t1;
+ c__[j * c_dim1 + 2] -= sum * t2;
+ c__[j * c_dim1 + 3] -= sum * t3;
+ c__[j * c_dim1 + 4] -= sum * t4;
+ c__[j * c_dim1 + 5] -= sum * t5;
+ c__[j * c_dim1 + 6] -= sum * t6;
+ c__[j * c_dim1 + 7] -= sum * t7;
+ c__[j * c_dim1 + 8] -= sum * t8;
+ c__[j * c_dim1 + 9] -= sum * t9;
+ c__[j * c_dim1 + 10] -= sum * t10;
+/* L200: */
+ }
+ goto L410;
+ } else {
+
+/* Form C * H, where H has order n. */
+
+ switch (*n) {
+ case 1: goto L210;
+ case 2: goto L230;
+ case 3: goto L250;
+ case 4: goto L270;
+ case 5: goto L290;
+ case 6: goto L310;
+ case 7: goto L330;
+ case 8: goto L350;
+ case 9: goto L370;
+ case 10: goto L390;
+ }
+
+/*
+ Code for general N
+
+ w := C * v
+*/
+
+ sgemv_("No transpose", m, n, &c_b1011, &c__[c_offset], ldc, &v[1], &
+ c__1, &c_b320, &work[1], &c__1);
+
+/* C := C - tau * w * v' */
+
+ r__1 = -(*tau);
+ sger_(m, n, &r__1, &work[1], &c__1, &v[1], &c__1, &c__[c_offset], ldc);
+ goto L410;
+L210:
+
+/* Special code for 1 x 1 Householder */
+
+ t1 = 1.f - *tau * v[1] * v[1];
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ c__[j + c_dim1] = t1 * c__[j + c_dim1];
+/* L220: */
+ }
+ goto L410;
+L230:
+
+/* Special code for 2 x 2 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+/* L240: */
+ }
+ goto L410;
+L250:
+
+/* Special code for 3 x 3 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+/* L260: */
+ }
+ goto L410;
+L270:
+
+/* Special code for 4 x 4 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+/* L280: */
+ }
+ goto L410;
+L290:
+
+/* Special code for 5 x 5 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+/* L300: */
+ }
+ goto L410;
+L310:
+
+/* Special code for 6 x 6 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+/* L320: */
+ }
+ goto L410;
+L330:
+
+/* Special code for 7 x 7 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+/* L340: */
+ }
+ goto L410;
+L350:
+
+/* Special code for 8 x 8 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + ((c_dim1) << (3))] -= sum * t8;
+/* L360: */
+ }
+ goto L410;
+L370:
+
+/* Special code for 9 x 9 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))] +
+ v9 * c__[j + c_dim1 * 9];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + ((c_dim1) << (3))] -= sum * t8;
+ c__[j + c_dim1 * 9] -= sum * t9;
+/* L380: */
+ }
+ goto L410;
+L390:
+
+/* Special code for 10 x 10 Householder */
+
+ v1 = v[1];
+ t1 = *tau * v1;
+ v2 = v[2];
+ t2 = *tau * v2;
+ v3 = v[3];
+ t3 = *tau * v3;
+ v4 = v[4];
+ t4 = *tau * v4;
+ v5 = v[5];
+ t5 = *tau * v5;
+ v6 = v[6];
+ t6 = *tau * v6;
+ v7 = v[7];
+ t7 = *tau * v7;
+ v8 = v[8];
+ t8 = *tau * v8;
+ v9 = v[9];
+ t9 = *tau * v9;
+ v10 = v[10];
+ t10 = *tau * v10;
+ i__1 = *m;
+ for (j = 1; j <= i__1; ++j) {
+ sum = v1 * c__[j + c_dim1] + v2 * c__[j + ((c_dim1) << (1))] + v3
+ * c__[j + c_dim1 * 3] + v4 * c__[j + ((c_dim1) << (2))] +
+ v5 * c__[j + c_dim1 * 5] + v6 * c__[j + c_dim1 * 6] + v7 *
+ c__[j + c_dim1 * 7] + v8 * c__[j + ((c_dim1) << (3))] +
+ v9 * c__[j + c_dim1 * 9] + v10 * c__[j + c_dim1 * 10];
+ c__[j + c_dim1] -= sum * t1;
+ c__[j + ((c_dim1) << (1))] -= sum * t2;
+ c__[j + c_dim1 * 3] -= sum * t3;
+ c__[j + ((c_dim1) << (2))] -= sum * t4;
+ c__[j + c_dim1 * 5] -= sum * t5;
+ c__[j + c_dim1 * 6] -= sum * t6;
+ c__[j + c_dim1 * 7] -= sum * t7;
+ c__[j + ((c_dim1) << (3))] -= sum * t8;
+ c__[j + c_dim1 * 9] -= sum * t9;
+ c__[j + c_dim1 * 10] -= sum * t10;
+/* L400: */
+ }
+ goto L410;
+ }
+L410:
+ return 0;
+
+/* End of SLARFX */
+
+} /* slarfx_ */
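+
+/*
+   Illustrative usage sketch (added for exposition; not part of LAPACK).
+   SLARFX applies the elementary reflector H = I - tau*v*v' to a
+   column-major matrix C, using the unrolled small-order paths above
+   when the reflector order is at most 10. The helper name below is
+   hypothetical and the block is kept out of the build with #if 0.
+*/
+#if 0
+static void slarfx_usage_sketch(void)
+{
+    extern /* Subroutine */ int slarfx_(char *, integer *, integer *,
+            real *, real *, real *, integer *, real *);
+    static real c[6] = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }; /* 3x2, column-major */
+    static real v[3] = { 1.f, .5f, .25f };  /* V(1) = 1 by convention */
+    real tau = 1.2f;
+    real work[2];                           /* N reals needed for SIDE = 'L' */
+    integer m = 3, n = 2, ldc = 3;
+    slarfx_("Left", &m, &n, v, &tau, c, &ldc, work);
+    /* c now holds (I - tau*v*v') * C. */
+}
+#endif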
+
+/* Subroutine */ int slartg_(real *f, real *g, real *cs, real *sn, real *r__)
+{
+ /* Initialized data */
+
+ static logical first = TRUE_;
+
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double log(doublereal), pow_ri(real *, integer *), sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__;
+ static real f1, g1, eps, scale;
+ static integer count;
+ static real safmn2, safmx2;
+ extern doublereal slamch_(char *);
+ static real safmin;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ SLARTG generates a plane rotation so that
+
+       [  CS  SN  ]  .  [ F ]  =  [ R ]   where CS**2 + SN**2 = 1.
+       [ -SN  CS  ]     [ G ]     [ 0 ]
+
+ This is a slower, more accurate version of the BLAS1 routine SROTG,
+ with the following other differences:
+ F and G are unchanged on return.
+ If G=0, then CS=1 and SN=0.
+ If F=0 and (G .ne. 0), then CS=0 and SN=1 without doing any
+ floating point operations (saves work in SBDSQR when
+ there are zeros on the diagonal).
+
+ If F exceeds G in magnitude, CS will be positive.
+
+ Arguments
+ =========
+
+ F (input) REAL
+ The first component of vector to be rotated.
+
+ G (input) REAL
+ The second component of vector to be rotated.
+
+ CS (output) REAL
+ The cosine of the rotation.
+
+ SN (output) REAL
+ The sine of the rotation.
+
+ R (output) REAL
+ The nonzero component of the rotated vector.
+
+ =====================================================================
+*/
+
+
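+/* (First call only: SAFMN2 is an exactly representable power of the
+    radix near sqrt(SAFMIN/EPS), and SAFMX2 = 1/SAFMN2; inputs whose
+    magnitude falls outside [SAFMN2, SAFMX2] are rescaled by the loops
+    below so that f1**2 + g1**2 can be formed safely.) */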
+ if (first) {
+ first = FALSE_;
+ safmin = slamch_("S");
+ eps = slamch_("E");
+ r__1 = slamch_("B");
+ i__1 = (integer) (log(safmin / eps) / log(slamch_("B")) /
+ 2.f);
+ safmn2 = pow_ri(&r__1, &i__1);
+ safmx2 = 1.f / safmn2;
+ }
+ if (*g == 0.f) {
+ *cs = 1.f;
+ *sn = 0.f;
+ *r__ = *f;
+ } else if (*f == 0.f) {
+ *cs = 0.f;
+ *sn = 1.f;
+ *r__ = *g;
+ } else {
+ f1 = *f;
+ g1 = *g;
+/* Computing MAX */
+ r__1 = dabs(f1), r__2 = dabs(g1);
+ scale = dmax(r__1,r__2);
+ if (scale >= safmx2) {
+ count = 0;
+L10:
+ ++count;
+ f1 *= safmn2;
+ g1 *= safmn2;
+/* Computing MAX */
+ r__1 = dabs(f1), r__2 = dabs(g1);
+ scale = dmax(r__1,r__2);
+ if (scale >= safmx2) {
+ goto L10;
+ }
+/* Computing 2nd power */
+ r__1 = f1;
+/* Computing 2nd power */
+ r__2 = g1;
+ *r__ = sqrt(r__1 * r__1 + r__2 * r__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ i__1 = count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ *r__ *= safmx2;
+/* L20: */
+ }
+ } else if (scale <= safmn2) {
+ count = 0;
+L30:
+ ++count;
+ f1 *= safmx2;
+ g1 *= safmx2;
+/* Computing MAX */
+ r__1 = dabs(f1), r__2 = dabs(g1);
+ scale = dmax(r__1,r__2);
+ if (scale <= safmn2) {
+ goto L30;
+ }
+/* Computing 2nd power */
+ r__1 = f1;
+/* Computing 2nd power */
+ r__2 = g1;
+ *r__ = sqrt(r__1 * r__1 + r__2 * r__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ i__1 = count;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ *r__ *= safmn2;
+/* L40: */
+ }
+ } else {
+/* Computing 2nd power */
+ r__1 = f1;
+/* Computing 2nd power */
+ r__2 = g1;
+ *r__ = sqrt(r__1 * r__1 + r__2 * r__2);
+ *cs = f1 / *r__;
+ *sn = g1 / *r__;
+ }
+ if (dabs(*f) > dabs(*g) && *cs < 0.f) {
+ *cs = -(*cs);
+ *sn = -(*sn);
+ *r__ = -(*r__);
+ }
+ }
+ return 0;
+
+/* End of SLARTG */
+
+} /* slartg_ */
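+
+/*
+   Illustrative usage sketch (added for exposition; not part of LAPACK).
+   The pair (CS, SN) returned by SLARTG satisfies
+       [  CS  SN  ]  .  [ F ]  =  [ R ],   CS**2 + SN**2 = 1.
+       [ -SN  CS  ]     [ G ]     [ 0 ]
+   The helper name is hypothetical; the block is excluded from the
+   build with #if 0.
+*/
+#if 0
+static void slartg_usage_sketch(void)
+{
+    extern /* Subroutine */ int slartg_(real *, real *, real *, real *,
+            real *);
+    real f = 3.f, g = 4.f, cs, sn, r;
+    slartg_(&f, &g, &cs, &sn, &r);
+    /* Here r == 5 (3-4-5 triangle); cs*f + sn*g == r and
+       -sn*f + cs*g == 0 up to rounding. */
+}
+#endif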
+
+/* Subroutine */ int slas2_(real *f, real *g, real *h__, real *ssmin, real *
+ ssmax)
+{
+ /* System generated locals */
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real c__, fa, ga, ha, as, at, au, fhmn, fhmx;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ SLAS2 computes the singular values of the 2-by-2 matrix
+ [ F G ]
+ [ 0 H ].
+ On return, SSMIN is the smaller singular value and SSMAX is the
+ larger singular value.
+
+ Arguments
+ =========
+
+ F (input) REAL
+ The (1,1) element of the 2-by-2 matrix.
+
+ G (input) REAL
+ The (1,2) element of the 2-by-2 matrix.
+
+ H (input) REAL
+ The (2,2) element of the 2-by-2 matrix.
+
+ SSMIN (output) REAL
+ The smaller singular value.
+
+ SSMAX (output) REAL
+ The larger singular value.
+
+ Further Details
+ ===============
+
+ Barring over/underflow, all output quantities are correct to within
+ a few units in the last place (ulps), even in the absence of a guard
+ digit in addition/subtraction.
+
+ In IEEE arithmetic, the code works correctly if one matrix element is
+ infinite.
+
+ Overflow will not occur unless the largest singular value itself
+ overflows, or is within a few ulps of overflow. (On machines with
+ partial overflow, like the Cray, overflow may occur if the largest
+ singular value is within a factor of 2 of overflow.)
+
+ Underflow is harmless if underflow is gradual. Otherwise, results
+ may correspond to a matrix modified by perturbations of size near
+ the underflow threshold.
+
+ ====================================================================
+*/
+
+
+ fa = dabs(*f);
+ ga = dabs(*g);
+ ha = dabs(*h__);
+ fhmn = dmin(fa,ha);
+ fhmx = dmax(fa,ha);
+ if (fhmn == 0.f) {
+ *ssmin = 0.f;
+ if (fhmx == 0.f) {
+ *ssmax = ga;
+ } else {
+/* Computing 2nd power */
+ r__1 = dmin(fhmx,ga) / dmax(fhmx,ga);
+ *ssmax = dmax(fhmx,ga) * sqrt(r__1 * r__1 + 1.f);
+ }
+ } else {
+ if (ga < fhmx) {
+ as = fhmn / fhmx + 1.f;
+ at = (fhmx - fhmn) / fhmx;
+/* Computing 2nd power */
+ r__1 = ga / fhmx;
+ au = r__1 * r__1;
+ c__ = 2.f / (sqrt(as * as + au) + sqrt(at * at + au));
+ *ssmin = fhmn * c__;
+ *ssmax = fhmx / c__;
+ } else {
+ au = fhmx / ga;
+ if (au == 0.f) {
+
+/*
+ Avoid possible harmful underflow if exponent range
+ asymmetric (true SSMIN may not underflow even if
+ AU underflows)
+*/
+
+ *ssmin = fhmn * fhmx / ga;
+ *ssmax = ga;
+ } else {
+ as = fhmn / fhmx + 1.f;
+ at = (fhmx - fhmn) / fhmx;
+/* Computing 2nd power */
+ r__1 = as * au;
+/* Computing 2nd power */
+ r__2 = at * au;
+ c__ = 1.f / (sqrt(r__1 * r__1 + 1.f) + sqrt(r__2 * r__2 + 1.f)
+ );
+ *ssmin = fhmn * c__ * au;
+ *ssmin += *ssmin;
+ *ssmax = ga / (c__ + c__);
+ }
+ }
+ }
+ return 0;
+
+/* End of SLAS2 */
+
+} /* slas2_ */
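+
+/*
+   Illustrative usage sketch (added for exposition; not part of LAPACK).
+   For the upper triangular 2-by-2 matrix [ F G ; 0 H ] the two values
+   returned by SLAS2 satisfy SSMIN*SSMAX = |F*H|, the absolute value
+   of the determinant. Hypothetical helper, excluded with #if 0.
+*/
+#if 0
+static void slas2_usage_sketch(void)
+{
+    extern /* Subroutine */ int slas2_(real *, real *, real *, real *,
+            real *);
+    real f = 4.f, g = 3.f, h = 2.f, ssmin, ssmax;
+    slas2_(&f, &g, &h, &ssmin, &ssmax);
+    /* ssmin <= ssmax, and ssmin*ssmax is |f*h| = 8 up to rounding. */
+}
+#endif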
+
+/* Subroutine */ int slascl_(char *type__, integer *kl, integer *ku, real *
+ cfrom, real *cto, integer *m, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
+
+ /* Local variables */
+ static integer i__, j, k1, k2, k3, k4;
+ static real mul, cto1;
+ static logical done;
+ static real ctoc;
+ extern logical lsame_(char *, char *);
+ static integer itype;
+ static real cfrom1;
+ extern doublereal slamch_(char *);
+ static real cfromc;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static real bignum, smlnum;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SLASCL multiplies the M by N real matrix A by the real scalar
+ CTO/CFROM. This is done without over/underflow as long as the final
+ result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
+ A may be full, upper triangular, lower triangular, upper Hessenberg,
+ or banded.
+
+ Arguments
+ =========
+
+ TYPE (input) CHARACTER*1
+ TYPE indicates the storage type of the input matrix.
+ = 'G': A is a full matrix.
+ = 'L': A is a lower triangular matrix.
+ = 'U': A is an upper triangular matrix.
+ = 'H': A is an upper Hessenberg matrix.
+ = 'B': A is a symmetric band matrix with lower bandwidth KL
+ and upper bandwidth KU and with only the lower
+ half stored.
+ = 'Q': A is a symmetric band matrix with lower bandwidth KL
+ and upper bandwidth KU and with only the upper
+ half stored.
+ = 'Z': A is a band matrix with lower bandwidth KL and upper
+ bandwidth KU.
+
+ KL (input) INTEGER
+ The lower bandwidth of A. Referenced only if TYPE = 'B',
+ 'Q' or 'Z'.
+
+ KU (input) INTEGER
+ The upper bandwidth of A. Referenced only if TYPE = 'B',
+ 'Q' or 'Z'.
+
+ CFROM (input) REAL
+ CTO (input) REAL
+ The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
+ without over/underflow if the final result CTO*A(I,J)/CFROM
+ can be represented without over/underflow. CFROM must be
+ nonzero.
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,M)
+ The matrix to be multiplied by CTO/CFROM. See TYPE for the
+ storage type.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ INFO (output) INTEGER
+ 0 - successful exit
+ <0 - if INFO = -i, the i-th argument had an illegal value.
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(type__, "G")) {
+ itype = 0;
+ } else if (lsame_(type__, "L")) {
+ itype = 1;
+ } else if (lsame_(type__, "U")) {
+ itype = 2;
+ } else if (lsame_(type__, "H")) {
+ itype = 3;
+ } else if (lsame_(type__, "B")) {
+ itype = 4;
+ } else if (lsame_(type__, "Q")) {
+ itype = 5;
+ } else if (lsame_(type__, "Z")) {
+ itype = 6;
+ } else {
+ itype = -1;
+ }
+
+ if (itype == -1) {
+ *info = -1;
+ } else if (*cfrom == 0.f) {
+ *info = -4;
+ } else if (*m < 0) {
+ *info = -6;
+ } else if (((*n < 0) || (itype == 4 && *n != *m)) || (itype == 5 && *n !=
+ *m)) {
+ *info = -7;
+ } else if (itype <= 3 && *lda < max(1,*m)) {
+ *info = -9;
+ } else if (itype >= 4) {
+/* Computing MAX */
+ i__1 = *m - 1;
+ if ((*kl < 0) || (*kl > max(i__1,0))) {
+ *info = -2;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = *n - 1;
+ if (((*ku < 0) || (*ku > max(i__1,0))) || (((itype == 4) || (
+ itype == 5)) && *kl != *ku)) {
+ *info = -3;
+ } else if (((itype == 4 && *lda < *kl + 1) || (itype == 5 && *lda
+ < *ku + 1)) || (itype == 6 && *lda < ((*kl) << (1)) + *ku
+ + 1)) {
+ *info = -9;
+ }
+ }
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASCL", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*n == 0) || (*m == 0)) {
+ return 0;
+ }
+
+/* Get machine parameters */
+
+ smlnum = slamch_("S");
+ bignum = 1.f / smlnum;
+
+ cfromc = *cfrom;
+ ctoc = *cto;
+
+L10:
+ cfrom1 = cfromc * smlnum;
+ cto1 = ctoc / bignum;
+ if (dabs(cfrom1) > dabs(ctoc) && ctoc != 0.f) {
+ mul = smlnum;
+ done = FALSE_;
+ cfromc = cfrom1;
+ } else if (dabs(cto1) > dabs(cfromc)) {
+ mul = bignum;
+ done = FALSE_;
+ ctoc = cto1;
+ } else {
+ mul = ctoc / cfromc;
+ done = TRUE_;
+ }
+
+ if (itype == 0) {
+
+/* Full matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L20: */
+ }
+/* L30: */
+ }
+
+ } else if (itype == 1) {
+
+/* Lower triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L40: */
+ }
+/* L50: */
+ }
+
+ } else if (itype == 2) {
+
+/* Upper triangular matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = min(j,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L60: */
+ }
+/* L70: */
+ }
+
+ } else if (itype == 3) {
+
+/* Upper Hessenberg matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j + 1;
+ i__2 = min(i__3,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L80: */
+ }
+/* L90: */
+ }
+
+ } else if (itype == 4) {
+
+/* Lower half of a symmetric band matrix */
+
+ k3 = *kl + 1;
+ k4 = *n + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = k3, i__4 = k4 - j;
+ i__2 = min(i__3,i__4);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L100: */
+ }
+/* L110: */
+ }
+
+ } else if (itype == 5) {
+
+/* Upper half of a symmetric band matrix */
+
+ k1 = *ku + 2;
+ k3 = *ku + 1;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__2 = k1 - j;
+ i__3 = k3;
+ for (i__ = max(i__2,1); i__ <= i__3; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L120: */
+ }
+/* L130: */
+ }
+
+ } else if (itype == 6) {
+
+/* Band matrix */
+
+ k1 = *kl + *ku + 2;
+ k2 = *kl + 1;
+ k3 = ((*kl) << (1)) + *ku + 1;
+ k4 = *kl + *ku + 1 + *m;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+/* Computing MAX */
+ i__3 = k1 - j;
+/* Computing MIN */
+ i__4 = k3, i__5 = k4 - j;
+ i__2 = min(i__4,i__5);
+ for (i__ = max(i__3,k2); i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] *= mul;
+/* L140: */
+ }
+/* L150: */
+ }
+
+ }
+
+ if (! done) {
+ goto L10;
+ }
+
+ return 0;
+
+/* End of SLASCL */
+
+} /* slascl_ */
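+
+/*
+   Illustrative usage sketch (added for exposition; not part of LAPACK).
+   SLASCL applies the factor CTO/CFROM in possibly several partial
+   steps so that no intermediate value over- or underflows even when
+   the quotient itself would. Hypothetical helper, excluded with #if 0.
+*/
+#if 0
+static void slascl_usage_sketch(void)
+{
+    extern /* Subroutine */ int slascl_(char *, integer *, integer *,
+            real *, real *, integer *, integer *, real *, integer *,
+            integer *);
+    static real a[4] = { 1.f, 2.f, 3.f, 4.f };  /* full 2x2, column-major */
+    real cfrom = 4.f, cto = 10.f;
+    integer kl = 0, ku = 0, m = 2, n = 2, lda = 2, info;
+    slascl_("G", &kl, &ku, &cfrom, &cto, &m, &n, a, &lda, &info);
+    /* info == 0; every entry of a has been multiplied by 2.5. */
+}
+#endif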
+
+/* Subroutine */ int slasd0_(integer *n, integer *sqre, real *d__, real *e,
+ real *u, integer *ldu, real *vt, integer *ldvt, integer *smlsiz,
+ integer *iwork, real *work, integer *info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, j, m, i1, ic, lf, nd, ll, nl, nr, im1, ncc, nlf, nrf,
+ iwk, lvl, ndb1, nlp1, nrp1;
+ static real beta;
+ static integer idxq, nlvl;
+ static real alpha;
+ static integer inode, ndiml, idxqc, ndimr, itemp, sqrei;
+ extern /* Subroutine */ int slasd1_(integer *, integer *, integer *, real
+ *, real *, real *, real *, integer *, real *, integer *, integer *
+ , integer *, real *, integer *), xerbla_(char *, integer *), slasdq_(char *, integer *, integer *, integer *, integer
+ *, integer *, real *, real *, real *, integer *, real *, integer *
+ , real *, integer *, real *, integer *), slasdt_(integer *
+ , integer *, integer *, integer *, integer *, integer *, integer *
+ );
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ Using a divide and conquer approach, SLASD0 computes the singular
+ value decomposition (SVD) of a real upper bidiagonal N-by-M
+ matrix B with diagonal D and offdiagonal E, where M = N + SQRE.
+ The algorithm computes orthogonal matrices U and VT such that
+ B = U * S * VT. The singular values S are overwritten on D.
+
+ A related subroutine, SLASDA, computes only the singular values,
+ and optionally, the singular vectors in compact form.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ On entry, the row dimension of the upper bidiagonal matrix.
+ This is also the dimension of the main diagonal array D.
+
+ SQRE (input) INTEGER
+ Specifies the column dimension of the bidiagonal matrix.
+ = 0: The bidiagonal matrix has column dimension M = N;
+ = 1: The bidiagonal matrix has column dimension M = N+1;
+
+ D (input/output) REAL array, dimension (N)
+ On entry D contains the main diagonal of the bidiagonal
+ matrix.
+ On exit D, if INFO = 0, contains its singular values.
+
+ E (input) REAL array, dimension (M-1)
+ Contains the subdiagonal entries of the bidiagonal matrix.
+ On exit, E has been destroyed.
+
+ U (output) REAL array, dimension at least (LDU, N)
+ On exit, U contains the left singular vectors.
+
+ LDU (input) INTEGER
+ On entry, leading dimension of U.
+
+ VT (output) REAL array, dimension at least (LDVT, M)
+ On exit, VT' contains the right singular vectors.
+
+ LDVT (input) INTEGER
+ On entry, leading dimension of VT.
+
+ SMLSIZ (input) INTEGER
+ On entry, maximum size of the subproblems at the
+ bottom of the computation tree.
+
+ IWORK INTEGER work array.
+ Dimension must be at least (8 * N)
+
+ WORK REAL work array.
+ Dimension must be at least (3 * M**2 + 2 * M)
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --iwork;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*n < 0) {
+ *info = -1;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -2;
+ }
+
+ m = *n + *sqre;
+
+ if (*ldu < *n) {
+ *info = -6;
+ } else if (*ldvt < m) {
+ *info = -8;
+ } else if (*smlsiz < 3) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD0", &i__1);
+ return 0;
+ }
+
+/* If the input matrix is too small, call SLASDQ to find the SVD. */
+
+ if (*n <= *smlsiz) {
+ slasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset],
+ ldvt, &u[u_offset], ldu, &u[u_offset], ldu, &work[1], info);
+ return 0;
+ }
+
+/* Set up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+ idxq = ndimr + *n;
+ iwk = idxq + *n;
+ slasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/*
+ For the nodes on bottom level of the tree, solve
+ their subproblems by SLASDQ.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ ncc = 0;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+ IC : center row of each node
+ NL : number of rows of left subproblem
+ NR : number of rows of right subproblem
+ NLF: starting row of the left subproblem
+ NRF: starting row of the right subproblem
+*/
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nlp1 = nl + 1;
+ nr = iwork[ndimr + i1];
+ nrp1 = nr + 1;
+ nlf = ic - nl;
+ nrf = ic + 1;
+ sqrei = 1;
+ slasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &vt[
+ nlf + nlf * vt_dim1], ldvt, &u[nlf + nlf * u_dim1], ldu, &u[
+ nlf + nlf * u_dim1], ldu, &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ itemp = idxq + nlf - 2;
+ i__2 = nl;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[itemp + j] = j;
+/* L10: */
+ }
+ if (i__ == nd) {
+ sqrei = *sqre;
+ } else {
+ sqrei = 1;
+ }
+ nrp1 = nr + sqrei;
+ slasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &vt[
+ nrf + nrf * vt_dim1], ldvt, &u[nrf + nrf * u_dim1], ldu, &u[
+ nrf + nrf * u_dim1], ldu, &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+ itemp = idxq + ic;
+ i__2 = nr;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[itemp + j - 1] = j;
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Now conquer each subproblem bottom-up. */
+
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+
+/*
+ Find the first node LF and last node LL on the
+ current level LVL.
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = ((lf) << (1)) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ if (*sqre == 0 && i__ == ll) {
+ sqrei = *sqre;
+ } else {
+ sqrei = 1;
+ }
+ idxqc = idxq + nlf - 1;
+ alpha = d__[ic];
+ beta = e[ic];
+ slasd1_(&nl, &nr, &sqrei, &d__[nlf], &alpha, &beta, &u[nlf + nlf *
+ u_dim1], ldu, &vt[nlf + nlf * vt_dim1], ldvt, &iwork[
+ idxqc], &iwork[iwk], &work[1], info);
+ if (*info != 0) {
+ return 0;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+
+ return 0;
+
+/* End of SLASD0 */
+
+} /* slasd0_ */
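+
+/*
+   Illustrative usage sketch (added for exposition; not part of LAPACK).
+   A call to SLASD0 needs U at least LDU-by-N, VT at least LDVT-by-M
+   with M = N + SQRE, IWORK of length 8*N, WORK of length
+   3*M**2 + 2*M, and SMLSIZ >= 3. Hypothetical helper for a square
+   (SQRE = 0) bidiagonal matrix with N = 4, excluded with #if 0.
+*/
+#if 0
+static void slasd0_usage_sketch(void)
+{
+    extern /* Subroutine */ int slasd0_(integer *, integer *, real *,
+            real *, real *, integer *, real *, integer *, integer *,
+            integer *, real *, integer *);
+    integer n = 4, sqre = 0, ldu = 4, ldvt = 4, smlsiz = 3, info;
+    static real d[4] = { 4.f, 3.f, 2.f, 1.f };  /* diagonal */
+    static real e[3] = { .5f, .5f, .5f };       /* offdiagonal */
+    static real u[16], vt[16];
+    static integer iwork[32];                   /* 8*N */
+    static real work[56];                       /* 3*M**2 + 2*M with M = 4 */
+    slasd0_(&n, &sqre, d, e, u, &ldu, vt, &ldvt, &smlsiz, iwork, work,
+            &info);
+    /* On success (info == 0) d holds the singular values and
+       U * diag(d) * VT reproduces the bidiagonal matrix. */
+}
+#endif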
+
+/* Subroutine */ int slasd1_(integer *nl, integer *nr, integer *sqre, real *
+ d__, real *alpha, real *beta, real *u, integer *ldu, real *vt,
+ integer *ldvt, integer *idxq, integer *iwork, real *work, integer *
+ info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ static integer i__, k, m, n, n1, n2, iq, iz, iu2, ldq, idx, ldu2, ivt2,
+ idxc, idxp, ldvt2;
+ extern /* Subroutine */ int slasd2_(integer *, integer *, integer *,
+ integer *, real *, real *, real *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *, real *, integer *,
+ integer *, integer *, integer *, integer *, integer *, integer *),
+ slasd3_(integer *, integer *, integer *, integer *, real *, real
+ *, integer *, real *, real *, integer *, real *, integer *, real *
+ , integer *, real *, integer *, integer *, integer *, real *,
+ integer *);
+ static integer isigma;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slascl_(
+ char *, integer *, integer *, real *, real *, integer *, integer *
+ , real *, integer *, integer *), slamrg_(integer *,
+ integer *, real *, integer *, integer *, integer *);
+ static real orgnrm;
+ static integer coltyp;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLASD1 computes the SVD of an upper bidiagonal N-by-M matrix B,
+ where N = NL + NR + 1 and M = N + SQRE. SLASD1 is called from SLASD0.
+
+ A related subroutine SLASD7 handles the case in which the singular
+ values (and the singular vectors in factored form) are desired.
+
+ SLASD1 computes the SVD as follows:
+
+                 ( D1(in)  0    0     0 )
+     B = U(in) * (   Z1'   a   Z2'    b ) * VT(in)
+                 (   0     0   D2(in) 0 )
+
+       = U(out) * ( D(out) 0 ) * VT(out)
+
+ where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M
+ with ALPHA and BETA in the (NL+1)-th and (NL+2)-th entries and zeros
+ elsewhere; and the entry b is empty if SQRE = 0.
+
+ The left singular vectors of the original matrix are stored in U, and
+ the transpose of the right singular vectors are stored in VT, and the
+ singular values are in D. The algorithm consists of three stages:
+
+ The first stage consists of deflating the size of the problem
+ when there are multiple singular values or when there are zeros in
+ the Z vector. For each such occurrence the dimension of the
+ secular equation problem is reduced by one. This stage is
+ performed by the routine SLASD2.
+
+ The second stage consists of calculating the updated
+ singular values. This is done by finding the square roots of the
+ roots of the secular equation via the routine SLASD4 (as called
+ by SLASD3). This routine also calculates the singular vectors of
+ the current problem.
+
+ The final stage consists of computing the updated singular vectors
+ directly using the updated singular values. The singular vectors
+ for the current problem are multiplied with the singular vectors
+ from the overall problem.
+
+ Arguments
+ =========
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has row dimension N = NL + NR + 1,
+ and column dimension M = N + SQRE.
+
+ D (input/output) REAL array,
+ dimension (N = NL+NR+1).
+ On entry D(1:NL) contains the singular values of the
+ upper block; and D(NL+2:N) contains the singular values of
+ the lower block. On exit D(1:N) contains the singular values
+ of the modified matrix.
+
+ ALPHA (input) REAL
+ Contains the diagonal element associated with the added row.
+
+ BETA (input) REAL
+ Contains the off-diagonal element associated with the added
+ row.
+
+ U (input/output) REAL array, dimension(LDU,N)
+ On entry U(1:NL, 1:NL) contains the left singular vectors of
+ the upper block; U(NL+2:N, NL+2:N) contains the left singular
+ vectors of the lower block. On exit U contains the left
+ singular vectors of the bidiagonal matrix.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= max( 1, N ).
+
+ VT (input/output) REAL array, dimension(LDVT,M)
+ where M = N + SQRE.
+ On entry VT(1:NL+1, 1:NL+1)' contains the right singular
+ vectors of the upper block; VT(NL+2:M, NL+2:M)' contains
+ the right singular vectors of the lower block. On exit
+ VT' contains the right singular vectors of the
+ bidiagonal matrix.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= max( 1, M ).
+
+ IDXQ (output) INTEGER array, dimension(N)
+ This contains the permutation which will reintegrate the
+ subproblem just solved back into sorted order, i.e.
+ D( IDXQ( I = 1, N ) ) will be in ascending order.
+
+ IWORK (workspace) INTEGER array, dimension( 4 * N )
+
+ WORK (workspace) REAL array, dimension( 3*M**2 + 2*M )
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --idxq;
+ --iwork;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -3;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD1", &i__1);
+ return 0;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+/*
+ The following values are for bookkeeping purposes only. They are
+ integer pointers which indicate the portion of the workspace
+ used by a particular array in SLASD2 and SLASD3.
+*/
+
+ ldu2 = n;
+ ldvt2 = m;
+
+ iz = 1;
+ isigma = iz + m;
+ iu2 = isigma + n;
+ ivt2 = iu2 + ldu2 * n;
+ iq = ivt2 + ldvt2 * m;
+
+ idx = 1;
+ idxc = idx + n;
+ coltyp = idxc + n;
+ idxp = coltyp + n;
+
+/*
+ Scale.
+
+ Computing MAX
+*/
+ r__1 = dabs(*alpha), r__2 = dabs(*beta);
+ orgnrm = dmax(r__1,r__2);
+ d__[*nl + 1] = 0.f;
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) > orgnrm) {
+ orgnrm = (r__1 = d__[i__], dabs(r__1));
+ }
+/* L10: */
+ }
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, &n, &c__1, &d__[1], &n,
+ info);
+ *alpha /= orgnrm;
+ *beta /= orgnrm;
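+
+/* (All of D, ALPHA and BETA are now bounded by one in magnitude; the
+    inverse scaling is applied to D after the secular equation solve
+    below, so SLASD2/SLASD3 work with well-scaled data.) */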
+
+/* Deflate singular values. */
+
+ slasd2_(nl, nr, sqre, &k, &d__[1], &work[iz], alpha, beta, &u[u_offset],
+ ldu, &vt[vt_offset], ldvt, &work[isigma], &work[iu2], &ldu2, &
+ work[ivt2], &ldvt2, &iwork[idxp], &iwork[idx], &iwork[idxc], &
+ idxq[1], &iwork[coltyp], info);
+
+/* Solve Secular Equation and update singular vectors. */
+
+ ldq = k;
+ slasd3_(nl, nr, sqre, &k, &d__[1], &work[iq], &ldq, &work[isigma], &u[
+ u_offset], ldu, &work[iu2], &ldu2, &vt[vt_offset], ldvt, &work[
+ ivt2], &ldvt2, &iwork[idxc], &iwork[coltyp], &work[iz], info);
+ if (*info != 0) {
+ return 0;
+ }
+
+/* Unscale. */
+
+ slascl_("G", &c__0, &c__0, &c_b1011, &orgnrm, &n, &c__1, &d__[1], &n,
+ info);
+
+/* Prepare the IDXQ sorting permutation. */
+
+ n1 = k;
+ n2 = n - k;
+ slamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
+
+ return 0;
+
+/* End of SLASD1 */
+
+} /* slasd1_ */
+
+/* Subroutine */ int slasd2_(integer *nl, integer *nr, integer *sqre, integer
+ *k, real *d__, real *z__, real *alpha, real *beta, real *u, integer *
+ ldu, real *vt, integer *ldvt, real *dsigma, real *u2, integer *ldu2,
+ real *vt2, integer *ldvt2, integer *idxp, integer *idx, integer *idxc,
+ integer *idxq, integer *coltyp, integer *info)
+{
+ /* System generated locals */
+ integer u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1, vt_offset,
+ vt2_dim1, vt2_offset, i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ static real c__;
+ static integer i__, j, m, n;
+ static real s;
+ static integer k2;
+ static real z1;
+ static integer ct, jp;
+ static real eps, tau, tol;
+ static integer psm[4], nlp1, nlp2, idxi, idxj, ctot[4];
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ static integer idxjp, jprev;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), slamrg_(
+ integer *, integer *, real *, integer *, integer *, integer *);
+ static real hlftol;
+ extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *), slaset_(char *, integer *,
+ integer *, real *, real *, real *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SLASD2 merges the two sets of singular values together into a single
+ sorted set. Then it tries to deflate the size of the problem.
+ There are two ways in which deflation can occur: when two or more
+ singular values are close together or if there is a tiny entry in the
+ Z vector. For each such occurrence the order of the related secular
+ equation problem is reduced by one.
+
+ SLASD2 is called from SLASD1.
+
+ Arguments
+ =========
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has N = NL + NR + 1 rows and
+ M = N + SQRE >= N columns.
+
+ K (output) INTEGER
+ Contains the dimension of the non-deflated matrix,
+ This is the order of the related secular equation. 1 <= K <= N.
+
+ D (input/output) REAL array, dimension(N)
+ On entry D contains the singular values of the two submatrices
+ to be combined. On exit D contains the trailing (N-K) updated
+ singular values (those which were deflated) sorted into
+ increasing order.
+
+ ALPHA (input) REAL
+ Contains the diagonal element associated with the added row.
+
+ BETA (input) REAL
+ Contains the off-diagonal element associated with the added
+ row.
+
+ U (input/output) REAL array, dimension(LDU,N)
+ On entry U contains the left singular vectors of two
+ submatrices in the two square blocks with corners at (1,1),
+ (NL, NL), and (NL+2, NL+2), (N,N).
+ On exit U contains the trailing (N-K) updated left singular
+ vectors (those which were deflated) in its last N-K columns.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= N.
+
+ Z (output) REAL array, dimension(N)
+ On exit Z contains the updating row vector in the secular
+ equation.
+
+ DSIGMA (output) REAL array, dimension (N)
+ Contains a copy of the diagonal elements (K-1 singular values
+ and one zero) in the secular equation.
+
+ U2 (output) REAL array, dimension(LDU2,N)
+ Contains a copy of the first K-1 left singular vectors which
+ will be used by SLASD3 in a matrix multiply (SGEMM) to solve
+ for the new left singular vectors. U2 is arranged into four
+ blocks. The first block contains a column with 1 at NL+1 and
+ zero everywhere else; the second block contains non-zero
+ entries only at and above NL; the third contains non-zero
+ entries only below NL+1; and the fourth is dense.
+
+ LDU2 (input) INTEGER
+ The leading dimension of the array U2. LDU2 >= N.
+
+ VT (input/output) REAL array, dimension(LDVT,M)
+ On entry VT' contains the right singular vectors of two
+ submatrices in the two square blocks with corners at (1,1),
+ (NL+1, NL+1), and (NL+2, NL+2), (M,M).
+ On exit VT' contains the trailing (N-K) updated right singular
+ vectors (those which were deflated) in its last N-K columns.
+ In case SQRE = 1, the last row of VT spans the right null
+ space.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= M.
+
+ VT2 (output) REAL array, dimension(LDVT2,N)
+ VT2' contains a copy of the first K right singular vectors
+ which will be used by SLASD3 in a matrix multiply (SGEMM) to
+ solve for the new right singular vectors. VT2 is arranged into
+ three blocks. The first block contains a row that corresponds
+ to the special 0 diagonal element in SIGMA; the second block
+ contains non-zeros only at and before NL +1; the third block
+ contains non-zeros only at and after NL +2.
+
+ LDVT2 (input) INTEGER
+ The leading dimension of the array VT2. LDVT2 >= M.
+
+ IDXP (workspace) INTEGER array, dimension(N)
+ This will contain the permutation used to place deflated
+ values of D at the end of the array. On output IDXP(2:K)
+ points to the nondeflated D-values and IDXP(K+1:N)
+ points to the deflated singular values.
+
+ IDX (workspace) INTEGER array, dimension(N)
+ This will contain the permutation used to sort the contents of
+ D into ascending order.
+
+ IDXC (output) INTEGER array, dimension(N)
+ This will contain the permutation used to arrange the columns
+ of the deflated U matrix into three groups: the first group
+ contains non-zero entries only at and above NL, the second
+ contains non-zero entries only below NL+2, and the third is
+ dense.
+
+ COLTYP (workspace/output) INTEGER array, dimension(N)
+ As workspace, this will contain a label which will indicate
+ which of the following types a column in the U2 matrix or a
+ row in the VT2 matrix is:
+ 1 : non-zero in the upper half only
+ 2 : non-zero in the lower half only
+ 3 : dense
+ 4 : deflated
+
+ On exit, it is an array of dimension 4, with COLTYP(I) being
+ the dimension of the I-th type columns.
+
+ IDXQ (input) INTEGER array, dimension(N)
+ This contains the permutation which separately sorts the two
+ sub-problems in D into ascending order. Note that entries in
+ the first half of this permutation must first be moved one
+ position backward; and entries in the second half
+ must first have NL+1 added to their values.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ --dsigma;
+ u2_dim1 = *ldu2;
+ u2_offset = 1 + u2_dim1;
+ u2 -= u2_offset;
+ vt2_dim1 = *ldvt2;
+ vt2_offset = 1 + vt2_dim1;
+ vt2 -= vt2_offset;
+ --idxp;
+ --idx;
+ --idxc;
+ --idxq;
+ --coltyp;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if (*sqre != 1 && *sqre != 0) {
+ *info = -3;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+ if (*ldu < n) {
+ *info = -10;
+ } else if (*ldvt < m) {
+ *info = -12;
+ } else if (*ldu2 < n) {
+ *info = -15;
+ } else if (*ldvt2 < m) {
+ *info = -17;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD2", &i__1);
+ return 0;
+ }
+
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+
+/*
+ Generate the first part of the vector Z; and move the singular
+ values in the first part of D one position backward.
+*/
+
+ z1 = *alpha * vt[nlp1 + nlp1 * vt_dim1];
+ z__[1] = z1;
+ for (i__ = *nl; i__ >= 1; --i__) {
+ z__[i__ + 1] = *alpha * vt[i__ + nlp1 * vt_dim1];
+ d__[i__ + 1] = d__[i__];
+ idxq[i__ + 1] = idxq[i__] + 1;
+/* L10: */
+ }
+
+/* Generate the second part of the vector Z. */
+
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ z__[i__] = *beta * vt[i__ + nlp2 * vt_dim1];
+/* L20: */
+ }
+
+/* Initialize some reference arrays. */
+
+ i__1 = nlp1;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ coltyp[i__] = 1;
+/* L30: */
+ }
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ coltyp[i__] = 2;
+/* L40: */
+ }
+
+/* Sort the singular values into increasing order */
+
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ idxq[i__] += nlp1;
+/* L50: */
+ }
+
+/*
+ DSIGMA, IDXC, and the first column of U2
+ are used as storage space.
+*/
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dsigma[i__] = d__[idxq[i__]];
+ u2[i__ + u2_dim1] = z__[idxq[i__]];
+ idxc[i__] = coltyp[idxq[i__]];
+/* L60: */
+ }
+
+ slamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ idxi = idx[i__] + 1;
+ d__[i__] = dsigma[idxi];
+ z__[i__] = u2[idxi + u2_dim1];
+ coltyp[i__] = idxc[idxi];
+/* L70: */
+ }
+
+/* Calculate the allowable deflation tolerance */
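+/* (TOL = 8 * EPS * MAX( |ALPHA|, |BETA|, |D(N)| ); Z entries and
+    gaps between singular values no larger than TOL are deflated
+    below.) */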
+
+ eps = slamch_("Epsilon");
+/* Computing MAX */
+ r__1 = dabs(*alpha), r__2 = dabs(*beta);
+ tol = dmax(r__1,r__2);
+/* Computing MAX */
+ r__2 = (r__1 = d__[n], dabs(r__1));
+ tol = eps * 8.f * dmax(r__2,tol);
+
+/*
+ There are 2 kinds of deflation -- first a value in the z-vector
+ is small, second two (or more) singular values are very close
+ together (their difference is small).
+
+ If the value in the z-vector is small, we simply permute the
+ array so that the corresponding singular value is moved to the
+ end.
+
+ If two values in the D-vector are close, we perform a two-sided
+ rotation designed to make one of the corresponding z-vector
+ entries zero, and then permute the array so that the deflated
+ singular value is moved to the end.
+
+ If there are multiple singular values then the problem deflates.
+ Here the number of equal singular values is found. As each equal
+ singular value is found, an elementary reflector is computed to
+ rotate the corresponding singular subspace so that the
+ corresponding components of Z are zero in this new basis.
+*/
+
+ *k = 1;
+ k2 = n + 1;
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ if ((r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ coltyp[j] = 4;
+ if (j == n) {
+ goto L120;
+ }
+ } else {
+ jprev = j;
+ goto L90;
+ }
+/* L80: */
+ }
+L90:
+ j = jprev;
+L100:
+ ++j;
+ if (j > n) {
+ goto L110;
+ }
+ if ((r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ coltyp[j] = 4;
+ } else {
+
+/* Check if singular values are close enough to allow deflation. */
+
+ if ((r__1 = d__[j] - d__[jprev], dabs(r__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ s = z__[jprev];
+ c__ = z__[j];
+
+/*
+ Find sqrt(a**2+b**2) without overflow or
+ destructive underflow.
+*/
+
+ tau = slapy2_(&c__, &s);
+ c__ /= tau;
+ s = -s / tau;
+ z__[j] = tau;
+ z__[jprev] = 0.f;
+
+/*
+ Apply back the Givens rotation to the left and right
+ singular vector matrices.
+*/
+
+ idxjp = idxq[idx[jprev] + 1];
+ idxj = idxq[idx[j] + 1];
+ if (idxjp <= nlp1) {
+ --idxjp;
+ }
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ srot_(&n, &u[idxjp * u_dim1 + 1], &c__1, &u[idxj * u_dim1 + 1], &
+ c__1, &c__, &s);
+ srot_(&m, &vt[idxjp + vt_dim1], ldvt, &vt[idxj + vt_dim1], ldvt, &
+ c__, &s);
+ if (coltyp[j] != coltyp[jprev]) {
+ coltyp[j] = 3;
+ }
+ coltyp[jprev] = 4;
+ --k2;
+ idxp[k2] = jprev;
+ jprev = j;
+ } else {
+ ++(*k);
+ u2[*k + u2_dim1] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+ jprev = j;
+ }
+ }
+ goto L100;
+L110:
+
+/* Record the last singular value. */
+
+ ++(*k);
+ u2[*k + u2_dim1] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+
+L120:
+
+/*
+ Count up the total number of the various types of columns, then
+ form a permutation which positions the four column types into
+ four groups of uniform structure (although one or more of these
+ groups may be empty).
+*/
+
+ for (j = 1; j <= 4; ++j) {
+ ctot[j - 1] = 0;
+/* L130: */
+ }
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ ct = coltyp[j];
+ ++ctot[ct - 1];
+/* L140: */
+ }
+
+/* PSM(*) = Position in SubMatrix (of types 1 through 4) */
+
+ psm[0] = 2;
+ psm[1] = ctot[0] + 2;
+ psm[2] = psm[1] + ctot[1];
+ psm[3] = psm[2] + ctot[2];
+
+/*
+ Fill out the IDXC array so that the permutation which it induces
+ will place all type-1 columns first, all type-2 columns next,
+ then all type-3's, and finally all type-4's, starting from the
+ second column. This applies similarly to the rows of VT.
+*/
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ ct = coltyp[jp];
+ idxc[psm[ct - 1]] = j;
+ ++psm[ct - 1];
+/* L150: */
+ }
+
+/*
+ Sort the singular values and corresponding singular vectors into
+ DSIGMA, U2, and VT2 respectively. The singular values/vectors
+ which were not deflated go into the first K slots of DSIGMA, U2,
+ and VT2 respectively, while those which were deflated go into the
+ last N - K slots, except that the first column/row will be treated
+ separately.
+*/
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ dsigma[j] = d__[jp];
+ idxj = idxq[idx[idxp[idxc[j]]] + 1];
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ scopy_(&n, &u[idxj * u_dim1 + 1], &c__1, &u2[j * u2_dim1 + 1], &c__1);
+ scopy_(&m, &vt[idxj + vt_dim1], ldvt, &vt2[j + vt2_dim1], ldvt2);
+/* L160: */
+ }
+
+/* Determine DSIGMA(1), DSIGMA(2) and Z(1) */
+
+ dsigma[1] = 0.f;
+ hlftol = tol / 2.f;
+ if (dabs(dsigma[2]) <= hlftol) {
+ dsigma[2] = hlftol;
+ }
+ if (m > n) {
+ z__[1] = slapy2_(&z1, &z__[m]);
+ if (z__[1] <= tol) {
+ c__ = 1.f;
+ s = 0.f;
+ z__[1] = tol;
+ } else {
+ c__ = z1 / z__[1];
+ s = z__[m] / z__[1];
+ }
+ } else {
+ if (dabs(z1) <= tol) {
+ z__[1] = tol;
+ } else {
+ z__[1] = z1;
+ }
+ }
+
+/* Move the rest of the updating row to Z. */
+
+ i__1 = *k - 1;
+ scopy_(&i__1, &u2[u2_dim1 + 2], &c__1, &z__[2], &c__1);
+
+/*
+ Determine the first column of U2, the first row of VT2 and the
+ last row of VT.
+*/
+
+ slaset_("A", &n, &c__1, &c_b320, &c_b320, &u2[u2_offset], ldu2)
+ ;
+ u2[nlp1 + u2_dim1] = 1.f;
+ if (m > n) {
+ i__1 = nlp1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vt[m + i__ * vt_dim1] = -s * vt[nlp1 + i__ * vt_dim1];
+ vt2[i__ * vt2_dim1 + 1] = c__ * vt[nlp1 + i__ * vt_dim1];
+/* L170: */
+ }
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ vt2[i__ * vt2_dim1 + 1] = s * vt[m + i__ * vt_dim1];
+ vt[m + i__ * vt_dim1] = c__ * vt[m + i__ * vt_dim1];
+/* L180: */
+ }
+ } else {
+ scopy_(&m, &vt[nlp1 + vt_dim1], ldvt, &vt2[vt2_dim1 + 1], ldvt2);
+ }
+ if (m > n) {
+ scopy_(&m, &vt[m + vt_dim1], ldvt, &vt2[m + vt2_dim1], ldvt2);
+ }
+
+/*
+ The deflated singular values and their corresponding vectors go
+ into the back of D, U, and V respectively.
+*/
+
+ if (n > *k) {
+ i__1 = n - *k;
+ scopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
+ i__1 = n - *k;
+ slacpy_("A", &n, &i__1, &u2[(*k + 1) * u2_dim1 + 1], ldu2, &u[(*k + 1)
+ * u_dim1 + 1], ldu);
+ i__1 = n - *k;
+ slacpy_("A", &i__1, &m, &vt2[*k + 1 + vt2_dim1], ldvt2, &vt[*k + 1 +
+ vt_dim1], ldvt);
+ }
+
+/* Copy CTOT into COLTYP for referencing in SLASD3. */
+
+ for (j = 1; j <= 4; ++j) {
+ coltyp[j] = ctot[j - 1];
+/* L190: */
+ }
+
+ return 0;
+
+/* End of SLASD2 */
+
+} /* slasd2_ */
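+
+/*
+   Illustrative sketch (added for exposition; not part of LAPACK).
+   The deflation step in SLASD2 zeroes one entry of Z with a Givens
+   rotation: given s = Z(jprev) and c = Z(j), SLAPY2 forms
+   tau = sqrt(c**2 + s**2) without overflow, and the rotation
+   (c/tau, -s/tau) maps the pair (s, c) to (0, tau). Hypothetical
+   helper, excluded with #if 0.
+*/
+#if 0
+static void slasd2_deflation_sketch(void)
+{
+    extern doublereal slapy2_(real *, real *);
+    real s = 3.f, c = 4.f;
+    real tau = slapy2_(&c, &s);          /* 5, computed safely */
+    real cs = c / tau, sn = -s / tau;
+    /* cs*c - sn*s == tau and cs*s + sn*c == 0 up to rounding,
+       matching the srot_ calls in SLASD2 above. */
+}
+#endif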
+
+/* Subroutine */ int slasd3_(integer *nl, integer *nr, integer *sqre, integer
+ *k, real *d__, real *q, integer *ldq, real *dsigma, real *u, integer *
+ ldu, real *u2, integer *ldu2, real *vt, integer *ldvt, real *vt2,
+ integer *ldvt2, integer *idxc, integer *ctot, real *z__, integer *
+ info)
+{
+ /* System generated locals */
+ integer q_dim1, q_offset, u_dim1, u_offset, u2_dim1, u2_offset, vt_dim1,
+ vt_offset, vt2_dim1, vt2_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ static integer i__, j, m, n, jc;
+ static real rho;
+ static integer nlp1, nlp2, nrp1;
+ static real temp;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ static integer ctemp;
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer ktemp;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int slasd4_(integer *, integer *, real *, real *,
+ real *, real *, real *, real *, integer *), xerbla_(char *,
+ integer *), slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), slacpy_(char *, integer *, integer *, real *, integer *,
+ real *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SLASD3 finds all the square roots of the roots of the secular
+ equation, as defined by the values in D and Z. It makes the
+ appropriate calls to SLASD4 and then updates the singular
+ vectors by matrix multiplication.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray XMP, Cray YMP, Cray C 90, or Cray 2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+ SLASD3 is called from SLASD1.
+
+ Arguments
+ =========
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has N = NL + NR + 1 rows and
+ M = N + SQRE >= N columns.
+
+ K (input) INTEGER
+ The size of the secular equation, 1 <= K <= N.
+
+ D (output) REAL array, dimension(K)
+ On exit the square roots of the roots of the secular equation,
+ in ascending order.
+
+ Q (workspace) REAL array,
+ dimension at least (LDQ,K).
+
+ LDQ (input) INTEGER
+ The leading dimension of the array Q. LDQ >= K.
+
+ DSIGMA (input) REAL array, dimension(K)
+ The first K elements of this array contain the old roots
+ of the deflated updating problem. These are the poles
+ of the secular equation.
+
+ U (input) REAL array, dimension (LDU, N)
+ The last N - K columns of this matrix contain the deflated
+ left singular vectors.
+
+ LDU (input) INTEGER
+ The leading dimension of the array U. LDU >= N.
+
+ U2 (input) REAL array, dimension (LDU2, N)
+ The first K columns of this matrix contain the non-deflated
+ left singular vectors for the split problem.
+
+ LDU2 (input) INTEGER
+ The leading dimension of the array U2. LDU2 >= N.
+
+ VT (input) REAL array, dimension (LDVT, M)
+ The last M - K columns of VT' contain the deflated
+ right singular vectors.
+
+ LDVT (input) INTEGER
+ The leading dimension of the array VT. LDVT >= N.
+
+ VT2 (input) REAL array, dimension (LDVT2, N)
+ The first K columns of VT2' contain the non-deflated
+ right singular vectors for the split problem.
+
+ LDVT2 (input) INTEGER
+ The leading dimension of the array VT2. LDVT2 >= N.
+
+ IDXC (input) INTEGER array, dimension ( N )
+ The permutation used to arrange the columns of U (and rows of
+ VT) into three groups: the first group contains non-zero
+ entries only at and above (or before) NL +1; the second
+ contains non-zero entries only at and below (or after) NL+2;
+ and the third is dense. The first column of U and the row of
+ VT are treated separately, however.
+
+ The rows of the singular vectors found by SLASD4
+ must be likewise permuted before the matrix multiplies can
+ take place.
+
+ CTOT (input) INTEGER array, dimension ( 4 )
+ A count of the total number of the various types of columns
+ in U (or rows in VT), as described in IDXC. The fourth column
+ type is any column which has been deflated.
+
+ Z (input) REAL array, dimension (K)
+ The first K elements of this array contain the components
+ of the deflation-adjusted updating row vector.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ q_dim1 = *ldq;
+ q_offset = 1 + q_dim1;
+ q -= q_offset;
+ --dsigma;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ u2_dim1 = *ldu2;
+ u2_offset = 1 + u2_dim1;
+ u2 -= u2_offset;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ vt2_dim1 = *ldvt2;
+ vt2_offset = 1 + vt2_dim1;
+ vt2 -= vt2_offset;
+ --idxc;
+ --ctot;
+ --z__;
+
+ /* Function Body */
+ *info = 0;
+
+ if (*nl < 1) {
+ *info = -1;
+ } else if (*nr < 1) {
+ *info = -2;
+ } else if (*sqre != 1 && *sqre != 0) {
+ *info = -3;
+ }
+
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+
+ if ((*k < 1) || (*k > n)) {
+ *info = -4;
+ } else if (*ldq < *k) {
+ *info = -7;
+ } else if (*ldu < n) {
+ *info = -10;
+ } else if (*ldu2 < n) {
+ *info = -12;
+ } else if (*ldvt < m) {
+ *info = -14;
+ } else if (*ldvt2 < m) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD3", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 1) {
+ d__[1] = dabs(z__[1]);
+ scopy_(&m, &vt2[vt2_dim1 + 1], ldvt2, &vt[vt_dim1 + 1], ldvt);
+ if (z__[1] > 0.f) {
+ scopy_(&n, &u2[u2_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
+ } else {
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ u[i__ + u_dim1] = -u2[i__ + u2_dim1];
+/* L10: */
+ }
+ }
+ return 0;
+ }
+
+/*
+ Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can
+ be computed with high relative accuracy (barring over/underflow).
+ This is a problem on machines without a guard digit in
+ add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+ The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I),
+ which on any of these machines zeros out the bottommost
+ bit of DSIGMA(I) if it is 1; this makes the subsequent
+ subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation
+ occurs. On binary machines with a guard digit (almost all
+ machines) it does not change DSIGMA(I) at all. On hexadecimal
+ and decimal machines with a guard digit, it slightly
+ changes the bottommost bits of DSIGMA(I). It does not account
+ for hexadecimal or decimal machines without guard digits
+ (we know of none). We use a subroutine call to compute
+ 2*DSIGMA(I) to prevent optimizing compilers from eliminating
+ this code.
+*/
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dsigma[i__] = slamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
+/* L20: */
+ }
+
+/* Keep a copy of Z. */
+
+ scopy_(k, &z__[1], &c__1, &q[q_offset], &c__1);
+
+/* Normalize Z. */
+
+ rho = snrm2_(k, &z__[1], &c__1);
+ slascl_("G", &c__0, &c__0, &rho, &c_b1011, k, &c__1, &z__[1], k, info);
+ rho *= rho;
+
+/* Find the new singular values. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ slasd4_(k, &j, &dsigma[1], &z__[1], &u[j * u_dim1 + 1], &rho, &d__[j],
+ &vt[j * vt_dim1 + 1], info);
+
+/* If the zero finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ return 0;
+ }
+/* L30: */
+ }
+
+/* Compute updated Z. */
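+/* (Gu-Eisenstat updating: each |Z(I)| is recomputed so that the
+    secular equation holds exactly at the computed roots, and the
+    original sign is restored from the copy saved in Q above.) */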
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ z__[i__] = u[i__ + *k * u_dim1] * vt[i__ + *k * vt_dim1];
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
+ i__] - dsigma[j]) / (dsigma[i__] + dsigma[j]);
+/* L40: */
+ }
+ i__2 = *k - 1;
+ for (j = i__; j <= i__2; ++j) {
+ z__[i__] *= u[i__ + j * u_dim1] * vt[i__ + j * vt_dim1] / (dsigma[
+ i__] - dsigma[j + 1]) / (dsigma[i__] + dsigma[j + 1]);
+/* L50: */
+ }
+ r__2 = sqrt((r__1 = z__[i__], dabs(r__1)));
+ z__[i__] = r_sign(&r__2, &q[i__ + q_dim1]);
+/* L60: */
+ }
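+
+/*
+ Sketch of what the loop above computes (assuming, per SLASD4's outputs,
+ that U(i,j)*VT(i,j) = DSIGMA(i)^2 - SIGMA(j)^2, where SIGMA(j) = D(j) is
+ the j-th updated singular value):
+
+ Z(i)^2 = (DSIGMA(i)^2 - SIGMA(k)^2)
+ * prod_{j<i} (DSIGMA(i)^2 - SIGMA(j)^2) / (DSIGMA(i)^2 - DSIGMA(j)^2)
+ * prod_{j>=i} (DSIGMA(i)^2 - SIGMA(j)^2) / (DSIGMA(i)^2 - DSIGMA(j+1)^2)
+
+ i.e. the Z (up to signs) for which the computed SIGMA are exact roots of
+ the secular equation; the signs are recovered from the copy of the
+ original Z saved in Q.
+*/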
+
+/*
+ Compute left singular vectors of the modified diagonal matrix,
+ and store related information for the right singular vectors.
+*/
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ vt[i__ * vt_dim1 + 1] = z__[1] / u[i__ * u_dim1 + 1] / vt[i__ *
+ vt_dim1 + 1];
+ u[i__ * u_dim1 + 1] = -1.f;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ vt[j + i__ * vt_dim1] = z__[j] / u[j + i__ * u_dim1] / vt[j + i__
+ * vt_dim1];
+ u[j + i__ * u_dim1] = dsigma[j] * vt[j + i__ * vt_dim1];
+/* L70: */
+ }
+ temp = snrm2_(k, &u[i__ * u_dim1 + 1], &c__1);
+ q[i__ * q_dim1 + 1] = u[i__ * u_dim1 + 1] / temp;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ jc = idxc[j];
+ q[j + i__ * q_dim1] = u[jc + i__ * u_dim1] / temp;
+/* L80: */
+ }
+/* L90: */
+ }
+
+/* Update the left singular vector matrix. */
+
+ if (*k == 2) {
+ sgemm_("N", "N", &n, k, k, &c_b1011, &u2[u2_offset], ldu2, &q[
+ q_offset], ldq, &c_b320, &u[u_offset], ldu);
+ goto L100;
+ }
+ if (ctot[1] > 0) {
+ sgemm_("N", "N", nl, k, &ctot[1], &c_b1011, &u2[((u2_dim1) << (1)) +
+ 1], ldu2, &q[q_dim1 + 2], ldq, &c_b320, &u[u_dim1 + 1], ldu);
+ if (ctot[3] > 0) {
+ ktemp = ctot[1] + 2 + ctot[2];
+ sgemm_("N", "N", nl, k, &ctot[3], &c_b1011, &u2[ktemp * u2_dim1 +
+ 1], ldu2, &q[ktemp + q_dim1], ldq, &c_b1011, &u[u_dim1 +
+ 1], ldu);
+ }
+ } else if (ctot[3] > 0) {
+ ktemp = ctot[1] + 2 + ctot[2];
+ sgemm_("N", "N", nl, k, &ctot[3], &c_b1011, &u2[ktemp * u2_dim1 + 1],
+ ldu2, &q[ktemp + q_dim1], ldq, &c_b320, &u[u_dim1 + 1], ldu);
+ } else {
+ slacpy_("F", nl, k, &u2[u2_offset], ldu2, &u[u_offset], ldu);
+ }
+ scopy_(k, &q[q_dim1 + 1], ldq, &u[nlp1 + u_dim1], ldu);
+ ktemp = ctot[1] + 2;
+ ctemp = ctot[2] + ctot[3];
+ sgemm_("N", "N", nr, k, &ctemp, &c_b1011, &u2[nlp2 + ktemp * u2_dim1],
+ ldu2, &q[ktemp + q_dim1], ldq, &c_b320, &u[nlp2 + u_dim1], ldu);
+
+/* Generate the right singular vectors. */
+
+L100:
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = snrm2_(k, &vt[i__ * vt_dim1 + 1], &c__1);
+ q[i__ + q_dim1] = vt[i__ * vt_dim1 + 1] / temp;
+ i__2 = *k;
+ for (j = 2; j <= i__2; ++j) {
+ jc = idxc[j];
+ q[i__ + j * q_dim1] = vt[jc + i__ * vt_dim1] / temp;
+/* L110: */
+ }
+/* L120: */
+ }
+
+/* Update the right singular vector matrix. */
+
+ if (*k == 2) {
+ sgemm_("N", "N", k, &m, k, &c_b1011, &q[q_offset], ldq, &vt2[
+ vt2_offset], ldvt2, &c_b320, &vt[vt_offset], ldvt);
+ return 0;
+ }
+ ktemp = ctot[1] + 1;
+ sgemm_("N", "N", k, &nlp1, &ktemp, &c_b1011, &q[q_dim1 + 1], ldq, &vt2[
+ vt2_dim1 + 1], ldvt2, &c_b320, &vt[vt_dim1 + 1], ldvt);
+ ktemp = ctot[1] + 2 + ctot[2];
+ if (ktemp <= *ldvt2) {
+ sgemm_("N", "N", k, &nlp1, &ctot[3], &c_b1011, &q[ktemp * q_dim1 + 1],
+ ldq, &vt2[ktemp + vt2_dim1], ldvt2, &c_b1011, &vt[vt_dim1 +
+ 1], ldvt);
+ }
+
+ ktemp = ctot[1] + 1;
+ nrp1 = *nr + *sqre;
+ if (ktemp > 1) {
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ q[i__ + ktemp * q_dim1] = q[i__ + q_dim1];
+/* L130: */
+ }
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ vt2[ktemp + i__ * vt2_dim1] = vt2[i__ * vt2_dim1 + 1];
+/* L140: */
+ }
+ }
+ ctemp = ctot[2] + 1 + ctot[3];
+ sgemm_("N", "N", k, &nrp1, &ctemp, &c_b1011, &q[ktemp * q_dim1 + 1], ldq,
+ &vt2[ktemp + nlp2 * vt2_dim1], ldvt2, &c_b320, &vt[nlp2 * vt_dim1
+ + 1], ldvt);
+
+ return 0;
+
+/* End of SLASD3 */
+
+} /* slasd3_ */
+
+/* Subroutine */ int slasd4_(integer *n, integer *i__, real *d__, real *z__,
+ real *delta, real *rho, real *sigma, real *work, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real a, b, c__;
+ static integer j;
+ static real w, dd[3];
+ static integer ii;
+ static real dw, zz[3];
+ static integer ip1;
+ static real eta, phi, eps, tau, psi;
+ static integer iim1, iip1;
+ static real dphi, dpsi;
+ static integer iter;
+ static real temp, prew, sg2lb, sg2ub, temp1, temp2, dtiim, delsq, dtiip;
+ static integer niter;
+ static real dtisq;
+ static logical swtch;
+ static real dtnsq;
+ extern /* Subroutine */ int slaed6_(integer *, logical *, real *, real *,
+ real *, real *, real *, integer *);
+ static real delsq2;
+ extern /* Subroutine */ int slasd5_(integer *, real *, real *, real *,
+ real *, real *, real *);
+ static real dtnsq1;
+ static logical swtch3;
+ extern doublereal slamch_(char *);
+ static logical orgati;
+ static real erretm, dtipsq, rhoinv;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ This subroutine computes the square root of the I-th updated
+ eigenvalue of a positive symmetric rank-one modification to
+ a positive diagonal matrix whose entries are given as the squares
+ of the corresponding entries in the array d, such that
+
+ 0 <= D(i) < D(j) for i < j
+
+ and RHO > 0. This is arranged by the calling routine, and is
+ no loss in generality. The rank-one modified system is thus
+
+ diag( D ) * diag( D ) + RHO * Z * Z_transpose.
+
+ where we assume the Euclidean norm of Z is 1.
+
+ The method consists of approximating the rational functions in the
+ secular equation by simpler interpolating rational functions.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The length of all arrays.
+
+ I (input) INTEGER
+ The index of the eigenvalue to be computed. 1 <= I <= N.
+
+ D (input) REAL array, dimension ( N )
+ The original eigenvalues. It is assumed that they are in
+ order, 0 <= D(I) < D(J) for I < J.
+
+ Z (input) REAL array, dimension ( N )
+ The components of the updating vector.
+
+ DELTA (output) REAL array, dimension ( N )
+ If N .ne. 1, DELTA contains (D(j) - sigma_I) in its j-th
+ component. If N = 1, then DELTA(1) = 1. The vector DELTA
+ contains the information necessary to construct the
+ (singular) eigenvectors.
+
+ RHO (input) REAL
+ The scalar in the symmetric updating formula.
+
+ SIGMA (output) REAL
+ The computed sigma_I, the I-th updated eigenvalue.
+
+ WORK (workspace) REAL array, dimension ( N )
+ If N .ne. 1, WORK contains (D(j) + sigma_I) in its j-th
+ component. If N = 1, then WORK( 1 ) = 1.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ > 0: if INFO = 1, the updating process failed.
+
+ Internal Parameters
+ ===================
+
+ Logical variable ORGATI (origin-at-i?) is used for distinguishing
+ whether D(i) or D(i+1) is treated as the origin.
+
+ ORGATI = .true. origin at i
+ ORGATI = .false. origin at i+1
+
+ Logical variable SWTCH3 (switch-for-3-poles?) is for noting
+ if we are working with THREE poles!
+
+ MAXIT is the maximum number of iterations allowed for each
+ eigenvalue.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ren-Cang Li, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+
+
+ Since this routine is called in an inner loop, we do no argument
+ checking.
+
+ Quick return for N=1 and 2.
+*/
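+
+/*
+ For reference, a minimal sketch (not LAPACK code) of the secular
+ function whose root this routine finds, in the same cancellation-free
+ factored form used below, where delta[j] = d[j] - sigma and
+ work[j] = d[j] + sigma so that d[j]^2 - sigma^2 = delta[j]*work[j]:
+
+ static float secular_w(int n, const float *d, const float *z,
+ float rho, float sigma)
+ {
+ int j;
+ float w = 1.f / rho;
+ for (j = 0; j < n; ++j)
+ w += z[j] * z[j] / ((d[j] - sigma) * (d[j] + sigma));
+ return w;
+ }
+
+ w is increasing in sigma^2 between consecutive poles d[j]^2, which is
+ what makes the bracketing and interpolation below safe.
+*/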
+
+ /* Parameter adjustments */
+ --work;
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n == 1) {
+
+/* Presumably, I=1 upon entry */
+
+ *sigma = sqrt(d__[1] * d__[1] + *rho * z__[1] * z__[1]);
+ delta[1] = 1.f;
+ work[1] = 1.f;
+ return 0;
+ }
+ if (*n == 2) {
+ slasd5_(i__, &d__[1], &z__[1], &delta[1], rho, sigma, &work[1]);
+ return 0;
+ }
+
+/* Compute machine epsilon */
+
+ eps = slamch_("Epsilon");
+ rhoinv = 1.f / *rho;
+
+/* The case I = N */
+
+ if (*i__ == *n) {
+
+/* Initialize some basic variables */
+
+ ii = *n - 1;
+ niter = 1;
+
+/* Calculate initial guess */
+
+ temp = *rho / 2.f;
+
+/*
+ If ||Z||_2 is not one, then TEMP should be set to
+ RHO * ||Z||_2^2 / TWO
+*/
+
+ temp1 = temp / (d__[*n] + sqrt(d__[*n] * d__[*n] + temp));
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[*n] + temp1;
+ delta[j] = d__[j] - d__[*n] - temp1;
+/* L10: */
+ }
+
+ psi = 0.f;
+ i__1 = *n - 2;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / (delta[j] * work[j]);
+/* L20: */
+ }
+
+ c__ = rhoinv + psi;
+ w = c__ + z__[ii] * z__[ii] / (delta[ii] * work[ii]) + z__[*n] * z__[*
+ n] / (delta[*n] * work[*n]);
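+
+/*
+ W is now the secular function evaluated at sigma^2 = D(N)^2 + RHO/2,
+ the midpoint of the bracketing interval [D(N)^2, D(N)^2 + RHO]
+ (TEMP1 is the stably computed sqrt(D(N)^2 + RHO/2) - D(N)). Its sign
+ decides which half-interval contains the root.
+*/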
+
+ if (w <= 0.f) {
+ temp1 = sqrt(d__[*n] * d__[*n] + *rho);
+ temp = z__[*n - 1] * z__[*n - 1] / ((d__[*n - 1] + temp1) * (d__[*
+ n] - d__[*n - 1] + *rho / (d__[*n] + temp1))) + z__[*n] *
+ z__[*n] / *rho;
+
+/*
+ The following TAU is to approximate
+ SIGMA_n^2 - D( N )*D( N )
+*/
+
+ if (c__ <= temp) {
+ tau = *rho;
+ } else {
+ delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
+ a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*
+ n];
+ b = z__[*n] * z__[*n] * delsq;
+ if (a < 0.f) {
+ tau = b * 2.f / (sqrt(a * a + b * 4.f * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4.f * c__)) / (c__ * 2.f);
+ }
+ }
+
+/*
+ It can be proved that
+ D(N)^2+RHO/2 <= SIGMA_n^2 < D(N)^2+TAU <= D(N)^2+RHO
+*/
+
+ } else {
+ delsq = (d__[*n] - d__[*n - 1]) * (d__[*n] + d__[*n - 1]);
+ a = -c__ * delsq + z__[*n - 1] * z__[*n - 1] + z__[*n] * z__[*n];
+ b = z__[*n] * z__[*n] * delsq;
+
+/*
+ The following TAU is to approximate
+ SIGMA_n^2 - D( N )*D( N )
+*/
+
+ if (a < 0.f) {
+ tau = b * 2.f / (sqrt(a * a + b * 4.f * c__) - a);
+ } else {
+ tau = (a + sqrt(a * a + b * 4.f * c__)) / (c__ * 2.f);
+ }
+
+/*
+ It can be proved that
+ D(N)^2 < D(N)^2+TAU < SIGMA(N)^2 < D(N)^2+RHO/2
+*/
+
+ }
+
+/* The following ETA is to approximate SIGMA_n - D( N ) */
+
+ eta = tau / (d__[*n] + sqrt(d__[*n] * d__[*n] + tau));
+
+ *sigma = d__[*n] + eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] = d__[j] - d__[*i__] - eta;
+ work[j] = d__[j] + d__[*i__] + eta;
+/* L30: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (delta[j] * work[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L40: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / (delta[*n] * work[*n]);
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) * (
+ dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ dtnsq1 = work[*n - 1] * delta[*n - 1];
+ dtnsq = work[*n] * delta[*n];
+ c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
+ a = (dtnsq + dtnsq1) * w - dtnsq * dtnsq1 * (dpsi + dphi);
+ b = dtnsq * dtnsq1 * w;
+ if (c__ < 0.f) {
+ c__ = dabs(c__);
+ }
+ if (c__ == 0.f) {
+ eta = *rho - *sigma * *sigma;
+ } else if (a >= 0.f) {
+ eta = (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) / (
+ c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta > 0.f) {
+ eta = -w / (dpsi + dphi);
+ }
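+
+/*
+ The fallback above is one Newton step in the variable t = sigma^2:
+ since dw/dt = DPSI + DPHI > 0, solving w(t) + (DPSI + DPHI)*eta = 0
+ gives eta = -w / (DPSI + DPHI), whose sign is always opposite to w.
+*/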
+ temp = eta - dtnsq;
+ if (temp > *rho) {
+ eta = *rho + dtnsq;
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(eta + *sigma * *sigma);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+ work[j] += eta;
+/* L50: */
+ }
+
+ *sigma += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L60: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / (work[*n] * delta[*n]);
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) * (
+ dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+
+/* Main loop to update the values of the array DELTA */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 20; ++niter) {
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+/* Calculate the new step */
+
+ dtnsq1 = work[*n - 1] * delta[*n - 1];
+ dtnsq = work[*n] * delta[*n];
+ c__ = w - dtnsq1 * dpsi - dtnsq * dphi;
+ a = (dtnsq + dtnsq1) * w - dtnsq1 * dtnsq * (dpsi + dphi);
+ b = dtnsq1 * dtnsq * w;
+ if (a >= 0.f) {
+ eta = (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta > 0.f) {
+ eta = -w / (dpsi + dphi);
+ }
+ temp = eta - dtnsq;
+ if (temp <= 0.f) {
+ eta /= 2.f;
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(eta + *sigma * *sigma);
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ delta[j] -= eta;
+ work[j] += eta;
+/* L70: */
+ }
+
+ *sigma += eta;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = ii;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L80: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ temp = z__[*n] / (work[*n] * delta[*n]);
+ phi = z__[*n] * temp;
+ dphi = temp * temp;
+ erretm = (-phi - psi) * 8.f + erretm - phi + rhoinv + dabs(tau) *
+ (dpsi + dphi);
+
+ w = rhoinv + phi + psi;
+/* L90: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+ goto L240;
+
+/* End for the case I = N */
+
+ } else {
+
+/* The case for I < N */
+
+ niter = 1;
+ ip1 = *i__ + 1;
+
+/* Calculate initial guess */
+
+ delsq = (d__[ip1] - d__[*i__]) * (d__[ip1] + d__[*i__]);
+ delsq2 = delsq / 2.f;
+ temp = delsq2 / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + delsq2));
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[*i__] + temp;
+ delta[j] = d__[j] - d__[*i__] - temp;
+/* L100: */
+ }
+
+ psi = 0.f;
+ i__1 = *i__ - 1;
+ for (j = 1; j <= i__1; ++j) {
+ psi += z__[j] * z__[j] / (work[j] * delta[j]);
+/* L110: */
+ }
+
+ phi = 0.f;
+ i__1 = *i__ + 2;
+ for (j = *n; j >= i__1; --j) {
+ phi += z__[j] * z__[j] / (work[j] * delta[j]);
+/* L120: */
+ }
+ c__ = rhoinv + psi + phi;
+ w = c__ + z__[*i__] * z__[*i__] / (work[*i__] * delta[*i__]) + z__[
+ ip1] * z__[ip1] / (work[ip1] * delta[ip1]);
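+
+/*
+ W is the secular function evaluated at the midpoint
+ sigma^2 = (D(I)^2 + D(I+1)^2)/2: TEMP is the stably computed
+ sqrt((D(I)^2 + D(I+1)^2)/2) - D(I), so work[j]*delta[j] = D(j)^2 - sigma^2
+ at that point. Since the secular function increases between its poles,
+ W > 0 places the root in the left half (origin D(I)) and W <= 0 in the
+ right half (origin D(I+1)).
+*/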
+
+ if (w > 0.f) {
+
+/*
+ d(i)^2 < the ith sigma^2 < (d(i)^2+d(i+1)^2)/2
+
+ We choose d(i) as origin.
+*/
+
+ orgati = TRUE_;
+ sg2lb = 0.f;
+ sg2ub = delsq2;
+ a = c__ * delsq + z__[*i__] * z__[*i__] + z__[ip1] * z__[ip1];
+ b = z__[*i__] * z__[*i__] * delsq;
+ if (a > 0.f) {
+ tau = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ } else {
+ tau = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ }
+
+/*
+ TAU now is an estimation of SIGMA^2 - D( I )^2. The
+ following, however, is the corresponding estimation of
+ SIGMA - D( I ).
+*/
+
+ eta = tau / (d__[*i__] + sqrt(d__[*i__] * d__[*i__] + tau));
+ } else {
+
+/*
+ (d(i)^2+d(i+1)^2)/2 <= the ith sigma^2 < d(i+1)^2
+
+ We choose d(i+1) as origin.
+*/
+
+ orgati = FALSE_;
+ sg2lb = -delsq2;
+ sg2ub = 0.f;
+ a = c__ * delsq - z__[*i__] * z__[*i__] - z__[ip1] * z__[ip1];
+ b = z__[ip1] * z__[ip1] * delsq;
+ if (a < 0.f) {
+ tau = b * 2.f / (a - sqrt((r__1 = a * a + b * 4.f * c__, dabs(
+ r__1))));
+ } else {
+ tau = -(a + sqrt((r__1 = a * a + b * 4.f * c__, dabs(r__1))))
+ / (c__ * 2.f);
+ }
+
+/*
+ TAU now is an estimation of SIGMA^2 - D( IP1 )^2. The
+ following, however, is the corresponding estimation of
+ SIGMA - D( IP1 ).
+*/
+
+ eta = tau / (d__[ip1] + sqrt((r__1 = d__[ip1] * d__[ip1] + tau,
+ dabs(r__1))));
+ }
+
+ if (orgati) {
+ ii = *i__;
+ *sigma = d__[*i__] + eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[*i__] + eta;
+ delta[j] = d__[j] - d__[*i__] - eta;
+/* L130: */
+ }
+ } else {
+ ii = *i__ + 1;
+ *sigma = d__[ip1] + eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] = d__[j] + d__[ip1] + eta;
+ delta[j] = d__[j] - d__[ip1] - eta;
+/* L140: */
+ }
+ }
+ iim1 = ii - 1;
+ iip1 = ii + 1;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L150: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L160: */
+ }
+
+ w = rhoinv + phi + psi;
+
+/*
+ W is the value of the secular function with
+ its ii-th element removed.
+*/
+
+ swtch3 = FALSE_;
+ if (orgati) {
+ if (w < 0.f) {
+ swtch3 = TRUE_;
+ }
+ } else {
+ if (w > 0.f) {
+ swtch3 = TRUE_;
+ }
+ }
+ if ((ii == 1) || (ii == *n)) {
+ swtch3 = FALSE_;
+ }
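+
+/*
+ Note (added for clarity): SWTCH3 marks the situation where the sign of W
+ indicates the root lies on the side of the current iterate away from the
+ chosen origin pole, where the fixed-weight two-pole interpolation can
+ converge slowly; the code then interpolates through the THREE poles
+ D(II-1), D(II), D(II+1) via SLAED6. At the boundaries II = 1 and II = N
+ there is no third pole, hence the reset.
+*/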
+
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w += temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) * 3.f
+ + dabs(tau) * dw;
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+ if (w <= 0.f) {
+ sg2lb = dmax(sg2lb,tau);
+ } else {
+ sg2ub = dmin(sg2ub,tau);
+ }
+
+/* Calculate the new step */
+
+ ++niter;
+ if (! swtch3) {
+ dtipsq = work[ip1] * delta[ip1];
+ dtisq = work[*i__] * delta[*i__];
+ if (orgati) {
+/* Computing 2nd power */
+ r__1 = z__[*i__] / dtisq;
+ c__ = w - dtipsq * dw + delsq * (r__1 * r__1);
+ } else {
+/* Computing 2nd power */
+ r__1 = z__[ip1] / dtipsq;
+ c__ = w - dtisq * dw - delsq * (r__1 * r__1);
+ }
+ a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
+ b = dtipsq * dtisq * w;
+ if (c__ == 0.f) {
+ if (a == 0.f) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + dtipsq * dtipsq * (dpsi +
+ dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + dtisq * dtisq * (dpsi +
+ dphi);
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.f) {
+ eta = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1)))) /
+ (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__, dabs(
+ r__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ dtiim = work[iim1] * delta[iim1];
+ dtiip = work[iip1] * delta[iip1];
+ temp = rhoinv + psi + phi;
+ if (orgati) {
+ temp1 = z__[iim1] / dtiim;
+ temp1 *= temp1;
+ c__ = temp - dtiip * (dpsi + dphi) - (d__[iim1] - d__[iip1]) *
+ (d__[iim1] + d__[iip1]) * temp1;
+ zz[0] = z__[iim1] * z__[iim1];
+ if (dpsi < temp1) {
+ zz[2] = dtiip * dtiip * dphi;
+ } else {
+ zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
+ }
+ } else {
+ temp1 = z__[iip1] / dtiip;
+ temp1 *= temp1;
+ c__ = temp - dtiim * (dpsi + dphi) - (d__[iip1] - d__[iim1]) *
+ (d__[iim1] + d__[iip1]) * temp1;
+ if (dphi < temp1) {
+ zz[0] = dtiim * dtiim * dpsi;
+ } else {
+ zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
+ }
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ zz[1] = z__[ii] * z__[ii];
+ dd[0] = dtiim;
+ dd[1] = delta[ii] * work[ii];
+ dd[2] = dtiip;
+ slaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
+ if (*info != 0) {
+ goto L240;
+ }
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta >= 0.f) {
+ eta = -w / dw;
+ }
+ if (orgati) {
+ temp1 = work[*i__] * delta[*i__];
+ temp = eta - temp1;
+ } else {
+ temp1 = work[ip1] * delta[ip1];
+ temp = eta - temp1;
+ }
+ if ((temp > sg2ub) || (temp < sg2lb)) {
+ if (w < 0.f) {
+ eta = (sg2ub - tau) / 2.f;
+ } else {
+ eta = (sg2lb - tau) / 2.f;
+ }
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(*sigma * *sigma + eta);
+
+ prew = w;
+
+ *sigma += eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] += eta;
+ delta[j] -= eta;
+/* L170: */
+ }
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L180: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L190: */
+ }
+
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) * 3.f
+ + dabs(tau) * dw;
+
+ if (w <= 0.f) {
+ sg2lb = dmax(sg2lb,tau);
+ } else {
+ sg2ub = dmin(sg2ub,tau);
+ }
+
+ swtch = FALSE_;
+ if (orgati) {
+ if (-w > dabs(prew) / 10.f) {
+ swtch = TRUE_;
+ }
+ } else {
+ if (w > dabs(prew) / 10.f) {
+ swtch = TRUE_;
+ }
+ }
+
+/* Main loop to update the values of the array DELTA and WORK */
+
+ iter = niter + 1;
+
+ for (niter = iter; niter <= 20; ++niter) {
+
+/* Test for convergence */
+
+ if (dabs(w) <= eps * erretm) {
+ goto L240;
+ }
+
+/* Calculate the new step */
+
+ if (! swtch3) {
+ dtipsq = work[ip1] * delta[ip1];
+ dtisq = work[*i__] * delta[*i__];
+ if (! swtch) {
+ if (orgati) {
+/* Computing 2nd power */
+ r__1 = z__[*i__] / dtisq;
+ c__ = w - dtipsq * dw + delsq * (r__1 * r__1);
+ } else {
+/* Computing 2nd power */
+ r__1 = z__[ip1] / dtipsq;
+ c__ = w - dtisq * dw - delsq * (r__1 * r__1);
+ }
+ } else {
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ if (orgati) {
+ dpsi += temp * temp;
+ } else {
+ dphi += temp * temp;
+ }
+ c__ = w - dtisq * dpsi - dtipsq * dphi;
+ }
+ a = (dtipsq + dtisq) * w - dtipsq * dtisq * dw;
+ b = dtipsq * dtisq * w;
+ if (c__ == 0.f) {
+ if (a == 0.f) {
+ if (! swtch) {
+ if (orgati) {
+ a = z__[*i__] * z__[*i__] + dtipsq * dtipsq *
+ (dpsi + dphi);
+ } else {
+ a = z__[ip1] * z__[ip1] + dtisq * dtisq * (
+ dpsi + dphi);
+ }
+ } else {
+ a = dtisq * dtisq * dpsi + dtipsq * dtipsq * dphi;
+ }
+ }
+ eta = b / a;
+ } else if (a <= 0.f) {
+ eta = (a - sqrt((r__1 = a * a - b * 4.f * c__, dabs(r__1))
+ )) / (c__ * 2.f);
+ } else {
+ eta = b * 2.f / (a + sqrt((r__1 = a * a - b * 4.f * c__,
+ dabs(r__1))));
+ }
+ } else {
+
+/* Interpolation using THREE most relevant poles */
+
+ dtiim = work[iim1] * delta[iim1];
+ dtiip = work[iip1] * delta[iip1];
+ temp = rhoinv + psi + phi;
+ if (swtch) {
+ c__ = temp - dtiim * dpsi - dtiip * dphi;
+ zz[0] = dtiim * dtiim * dpsi;
+ zz[2] = dtiip * dtiip * dphi;
+ } else {
+ if (orgati) {
+ temp1 = z__[iim1] / dtiim;
+ temp1 *= temp1;
+ temp2 = (d__[iim1] - d__[iip1]) * (d__[iim1] + d__[
+ iip1]) * temp1;
+ c__ = temp - dtiip * (dpsi + dphi) - temp2;
+ zz[0] = z__[iim1] * z__[iim1];
+ if (dpsi < temp1) {
+ zz[2] = dtiip * dtiip * dphi;
+ } else {
+ zz[2] = dtiip * dtiip * (dpsi - temp1 + dphi);
+ }
+ } else {
+ temp1 = z__[iip1] / dtiip;
+ temp1 *= temp1;
+ temp2 = (d__[iip1] - d__[iim1]) * (d__[iim1] + d__[
+ iip1]) * temp1;
+ c__ = temp - dtiim * (dpsi + dphi) - temp2;
+ if (dphi < temp1) {
+ zz[0] = dtiim * dtiim * dpsi;
+ } else {
+ zz[0] = dtiim * dtiim * (dpsi + (dphi - temp1));
+ }
+ zz[2] = z__[iip1] * z__[iip1];
+ }
+ }
+ dd[0] = dtiim;
+ dd[1] = delta[ii] * work[ii];
+ dd[2] = dtiip;
+ slaed6_(&niter, &orgati, &c__, dd, zz, &w, &eta, info);
+ if (*info != 0) {
+ goto L240;
+ }
+ }
+
+/*
+ Note, eta should be positive if w is negative, and
+ eta should be negative otherwise. However,
+ if for some reason caused by roundoff, eta*w > 0,
+ we simply use one Newton step instead. This way
+ will guarantee eta*w < 0.
+*/
+
+ if (w * eta >= 0.f) {
+ eta = -w / dw;
+ }
+ if (orgati) {
+ temp1 = work[*i__] * delta[*i__];
+ temp = eta - temp1;
+ } else {
+ temp1 = work[ip1] * delta[ip1];
+ temp = eta - temp1;
+ }
+ if ((temp > sg2ub) || (temp < sg2lb)) {
+ if (w < 0.f) {
+ eta = (sg2ub - tau) / 2.f;
+ } else {
+ eta = (sg2lb - tau) / 2.f;
+ }
+ }
+
+ tau += eta;
+ eta /= *sigma + sqrt(*sigma * *sigma + eta);
+
+ *sigma += eta;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ work[j] += eta;
+ delta[j] -= eta;
+/* L200: */
+ }
+
+ prew = w;
+
+/* Evaluate PSI and the derivative DPSI */
+
+ dpsi = 0.f;
+ psi = 0.f;
+ erretm = 0.f;
+ i__1 = iim1;
+ for (j = 1; j <= i__1; ++j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ psi += z__[j] * temp;
+ dpsi += temp * temp;
+ erretm += psi;
+/* L210: */
+ }
+ erretm = dabs(erretm);
+
+/* Evaluate PHI and the derivative DPHI */
+
+ dphi = 0.f;
+ phi = 0.f;
+ i__1 = iip1;
+ for (j = *n; j >= i__1; --j) {
+ temp = z__[j] / (work[j] * delta[j]);
+ phi += z__[j] * temp;
+ dphi += temp * temp;
+ erretm += phi;
+/* L220: */
+ }
+
+ temp = z__[ii] / (work[ii] * delta[ii]);
+ dw = dpsi + dphi + temp * temp;
+ temp = z__[ii] * temp;
+ w = rhoinv + phi + psi + temp;
+ erretm = (phi - psi) * 8.f + erretm + rhoinv * 2.f + dabs(temp) *
+ 3.f + dabs(tau) * dw;
+ if (w * prew > 0.f && dabs(w) > dabs(prew) / 10.f) {
+ swtch = ! swtch;
+ }
+
+ if (w <= 0.f) {
+ sg2lb = dmax(sg2lb,tau);
+ } else {
+ sg2ub = dmin(sg2ub,tau);
+ }
+
+/* L230: */
+ }
+
+/* Return with INFO = 1, NITER = MAXIT and not converged */
+
+ *info = 1;
+
+ }
+
+L240:
+ return 0;
+
+/* End of SLASD4 */
+
+} /* slasd4_ */
+
+/* Subroutine */ int slasd5_(integer *i__, real *d__, real *z__, real *delta,
+ real *rho, real *dsigma, real *work)
+{
+ /* System generated locals */
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real b, c__, w, del, tau, delsq;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ This subroutine computes the square root of the I-th eigenvalue
+ of a positive symmetric rank-one modification of a 2-by-2 diagonal
+ matrix
+
+ diag( D ) * diag( D ) + RHO * Z * transpose(Z) .
+
+ The diagonal entries in the array D are assumed to satisfy
+
+ 0 <= D(i) < D(j) for i < j .
+
+ We also assume RHO > 0 and that the Euclidean norm of the vector
+ Z is one.
+
+ Arguments
+ =========
+
+ I (input) INTEGER
+ The index of the eigenvalue to be computed. I = 1 or I = 2.
+
+ D (input) REAL array, dimension ( 2 )
+ The original eigenvalues. We assume 0 <= D(1) < D(2).
+
+ Z (input) REAL array, dimension ( 2 )
+ The components of the updating vector.
+
+ DELTA (output) REAL array, dimension ( 2 )
+ Contains (D(j) - sigma_I) in its j-th component.
+ The vector DELTA contains the information necessary
+ to construct the eigenvectors.
+
+ RHO (input) REAL
+ The scalar in the symmetric updating formula.
+
+ DSIGMA (output) REAL
+ The computed sigma_I, the I-th updated eigenvalue.
+
+ WORK (workspace) REAL array, dimension ( 2 )
+ WORK contains (D(j) + sigma_I) in its j-th component.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ren-Cang Li, Computer Science Division, University of California
+ at Berkeley, USA
+
+ =====================================================================
+*/
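+
+/*
+ Sketch (added for clarity) of the stable quadratic solve used below.
+ For I = 1 the two-pole secular equation reduces to
+
+ tau^2 - b*tau + c = 0, tau = DSIGMA^2 - D(1)^2,
+
+ whose smaller root is taken as
+
+ tau = 2*c / (b + sqrt(b*b - 4*c))
+
+ instead of (b - sqrt(b*b - 4*c))/2; the two forms are algebraically
+ equal (the roots multiply to c), but the first avoids cancellation when
+ 4*c << b*b.
+*/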
+
+
+ /* Parameter adjustments */
+ --work;
+ --delta;
+ --z__;
+ --d__;
+
+ /* Function Body */
+ del = d__[2] - d__[1];
+ delsq = del * (d__[2] + d__[1]);
+ if (*i__ == 1) {
+ w = *rho * 4.f * (z__[2] * z__[2] / (d__[1] + d__[2] * 3.f) - z__[1] *
+ z__[1] / (d__[1] * 3.f + d__[2])) / del + 1.f;
+ if (w > 0.f) {
+ b = delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[1] * z__[1] * delsq;
+
+/*
+ B > ZERO, always
+
+ The following TAU is DSIGMA * DSIGMA - D( 1 ) * D( 1 )
+*/
+
+ tau = c__ * 2.f / (b + sqrt((r__1 = b * b - c__ * 4.f, dabs(r__1))
+ ));
+
+/* The following TAU is DSIGMA - D( 1 ) */
+
+ tau /= d__[1] + sqrt(d__[1] * d__[1] + tau);
+ *dsigma = d__[1] + tau;
+ delta[1] = -tau;
+ delta[2] = del - tau;
+ work[1] = d__[1] * 2.f + tau;
+ work[2] = d__[1] + tau + d__[2];
+/*
+ DELTA( 1 ) = -Z( 1 ) / TAU
+ DELTA( 2 ) = Z( 2 ) / ( DEL-TAU )
+*/
+ } else {
+ b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[2] * z__[2] * delsq;
+
+/* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
+
+ if (b > 0.f) {
+ tau = c__ * -2.f / (b + sqrt(b * b + c__ * 4.f));
+ } else {
+ tau = (b - sqrt(b * b + c__ * 4.f)) / 2.f;
+ }
+
+/* The following TAU is DSIGMA - D( 2 ) */
+
+ tau /= d__[2] + sqrt((r__1 = d__[2] * d__[2] + tau, dabs(r__1)));
+ *dsigma = d__[2] + tau;
+ delta[1] = -(del + tau);
+ delta[2] = -tau;
+ work[1] = d__[1] + tau + d__[2];
+ work[2] = d__[2] * 2.f + tau;
+/*
+ DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU )
+ DELTA( 2 ) = -Z( 2 ) / TAU
+*/
+ }
+/*
+ TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) )
+ DELTA( 1 ) = DELTA( 1 ) / TEMP
+ DELTA( 2 ) = DELTA( 2 ) / TEMP
+*/
+ } else {
+
+/* Now I=2 */
+
+ b = -delsq + *rho * (z__[1] * z__[1] + z__[2] * z__[2]);
+ c__ = *rho * z__[2] * z__[2] * delsq;
+
+/* The following TAU is DSIGMA * DSIGMA - D( 2 ) * D( 2 ) */
+
+ if (b > 0.f) {
+ tau = (b + sqrt(b * b + c__ * 4.f)) / 2.f;
+ } else {
+ tau = c__ * 2.f / (-b + sqrt(b * b + c__ * 4.f));
+ }
+
+/* The following TAU is DSIGMA - D( 2 ) */
+
+ tau /= d__[2] + sqrt(d__[2] * d__[2] + tau);
+ *dsigma = d__[2] + tau;
+ delta[1] = -(del + tau);
+ delta[2] = -tau;
+ work[1] = d__[1] + tau + d__[2];
+ work[2] = d__[2] * 2.f + tau;
+/*
+ DELTA( 1 ) = -Z( 1 ) / ( DEL+TAU )
+ DELTA( 2 ) = -Z( 2 ) / TAU
+ TEMP = SQRT( DELTA( 1 )*DELTA( 1 )+DELTA( 2 )*DELTA( 2 ) )
+ DELTA( 1 ) = DELTA( 1 ) / TEMP
+ DELTA( 2 ) = DELTA( 2 ) / TEMP
+*/
+ }
+ return 0;
+
+/* End of SLASD5 */
+
+} /* slasd5_ */
+
+/* Subroutine */ int slasd6_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, real *d__, real *vf, real *vl, real *alpha, real *beta,
+ integer *idxq, integer *perm, integer *givptr, integer *givcol,
+ integer *ldgcol, real *givnum, integer *ldgnum, real *poles, real *
+ difl, real *difr, real *z__, integer *k, real *c__, real *s, real *
+ work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset,
+ poles_dim1, poles_offset, i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ static integer i__, m, n, n1, n2, iw, idx, idxc, idxp, ivfw, ivlw;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), slasd7_(integer *, integer *, integer *, integer *,
+ integer *, real *, real *, real *, real *, real *, real *, real *,
+ real *, real *, real *, integer *, integer *, integer *, integer
+ *, integer *, integer *, integer *, real *, integer *, real *,
+ real *, integer *), slasd8_(integer *, integer *, real *, real *,
+ real *, real *, real *, real *, integer *, real *, real *,
+ integer *);
+ static integer isigma;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slascl_(
+ char *, integer *, integer *, real *, real *, integer *, integer *
+ , real *, integer *, integer *), slamrg_(integer *,
+ integer *, real *, integer *, integer *, integer *);
+ static real orgnrm;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLASD6 computes the SVD of an updated upper bidiagonal matrix B
+ obtained by merging two smaller ones by appending a row. This
+ routine is used only for the problem which requires all singular
+ values and optionally singular vector matrices in factored form.
+ B is an N-by-M matrix with N = NL + NR + 1 and M = N + SQRE.
+ A related subroutine, SLASD1, handles the case in which all singular
+ values and singular vectors of the bidiagonal matrix are desired.
+
+ SLASD6 computes the SVD as follows:
+
+ ( D1(in) 0 0 0 )
+ B = U(in) * ( Z1' a Z2' b ) * VT(in)
+ ( 0 0 D2(in) 0 )
+
+ = U(out) * ( D(out) 0) * VT(out)
+
+ where Z' = (Z1' a Z2' b) = u' VT', and u is a vector of dimension M
+ with ALPHA and BETA in the (NL+1)-th and (NL+2)-th entries and zeros
+ elsewhere; and the entry b is empty if SQRE = 0.
+
+ The singular values of B can be computed using D1, D2, the first
+ components of all the right singular vectors of the lower block, and
+ the last components of all the right singular vectors of the upper
+ block. These components are stored and updated in VF and VL,
+ respectively, in SLASD6. Hence U and VT are not explicitly
+ referenced.
+
+ The singular values are stored in D. The algorithm consists of two
+ stages:
+
+ The first stage consists of deflating the size of the problem
+ when there are multiple singular values or if there is a zero
+ in the Z vector. For each such occurrence the dimension of the
+ secular equation problem is reduced by one. This stage is
+ performed by the routine SLASD7.
+
+ The second stage consists of calculating the updated
+ singular values. This is done by finding the roots of the
+ secular equation via the routine SLASD4 (as called by SLASD8).
+ This routine also updates VF and VL and computes the distances
+ between the updated singular values and the old singular
+ values.
+
+ SLASD6 is called from SLASDA.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed in
+ factored form:
+ = 0: Compute singular values only.
+ = 1: Compute singular vectors in factored form as well.
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has row dimension N = NL + NR + 1,
+ and column dimension M = N + SQRE.
+
+ D (input/output) REAL array, dimension ( NL+NR+1 ).
+ On entry D(1:NL) contains the singular values of the
+ upper block, and D(NL+2:N) contains the singular values
+ of the lower block. On exit D(1:N) contains the singular
+ values of the modified matrix.
+
+ VF (input/output) REAL array, dimension ( M )
+ On entry, VF(1:NL+1) contains the first components of all
+ right singular vectors of the upper block; and VF(NL+2:M)
+ contains the first components of all right singular vectors
+ of the lower block. On exit, VF contains the first components
+ of all right singular vectors of the bidiagonal matrix.
+
+ VL (input/output) REAL array, dimension ( M )
+ On entry, VL(1:NL+1) contains the last components of all
+ right singular vectors of the upper block; and VL(NL+2:M)
+ contains the last components of all right singular vectors of
+ the lower block. On exit, VL contains the last components of
+ all right singular vectors of the bidiagonal matrix.
+
+ ALPHA (input) REAL
+ Contains the diagonal element associated with the added row.
+
+ BETA (input) REAL
+ Contains the off-diagonal element associated with the added
+ row.
+
+ IDXQ (output) INTEGER array, dimension ( N )
+ This contains the permutation which will reintegrate the
+ subproblem just solved back into sorted order, i.e.
+ D( IDXQ( I = 1, N ) ) will be in ascending order.
+
+ PERM (output) INTEGER array, dimension ( N )
+ The permutations (from deflation and sorting) to be applied
+ to each block. Not referenced if ICOMPQ = 0.
+
+ GIVPTR (output) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem. Not referenced if ICOMPQ = 0.
+
+ GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 )
+ Each pair of numbers indicates a pair of columns taking part
+ in a Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGCOL (input) INTEGER
+ leading dimension of GIVCOL, must be at least N.
+
+ GIVNUM (output) REAL array, dimension ( LDGNUM, 2 )
+ Each number indicates the C or S value to be used in the
+ corresponding Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGNUM (input) INTEGER
+ The leading dimension of GIVNUM and POLES, must be at least N.
+
+ POLES (output) REAL array, dimension ( LDGNUM, 2 )
+ On exit, POLES(1,*) is an array containing the new singular
+ values obtained from solving the secular equation, and
+ POLES(2,*) is an array containing the poles in the secular
+ equation. Not referenced if ICOMPQ = 0.
+
+ DIFL (output) REAL array, dimension ( N )
+ On exit, DIFL(I) is the distance between I-th updated
+ (undeflated) singular value and the I-th (undeflated) old
+ singular value.
+
+ DIFR (output) REAL array,
+ dimension ( LDGNUM, 2 ) if ICOMPQ = 1 and
+ dimension ( N ) if ICOMPQ = 0.
+ On exit, DIFR(I, 1) is the distance between I-th updated
+ (undeflated) singular value and the (I+1)-th (undeflated) old
+ singular value.
+
+ If ICOMPQ = 1, DIFR(1:K,2) is an array containing the
+ normalizing factors for the right singular vector matrix.
+
+ See SLASD8 for details on DIFL and DIFR.
+
+ Z (output) REAL array, dimension ( M )
+ The first elements of this array contain the components
+ of the deflation-adjusted updating row vector.
+
+ K (output) INTEGER
+ Contains the dimension of the non-deflated matrix.
+ This is the order of the related secular equation. 1 <= K <= N.
+
+ C (output) REAL
+ C contains garbage if SQRE =0 and the C-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ S (output) REAL
+ S contains garbage if SQRE =0 and the S-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ WORK (workspace) REAL array, dimension ( 4 * M )
+
+ IWORK (workspace) INTEGER array, dimension ( 3 * N )
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --vf;
+ --vl;
+ --idxq;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ poles_dim1 = *ldgnum;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ --difl;
+ --difr;
+ --z__;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -4;
+ } else if (*ldgcol < n) {
+ *info = -14;
+ } else if (*ldgnum < n) {
+ *info = -16;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD6", &i__1);
+ return 0;
+ }
+
+/*
+ The following values are for bookkeeping purposes only. They are
+ integer pointers which indicate the portion of the workspace
+ used by a particular array in SLASD7 and SLASD8.
+*/
+
+ isigma = 1;
+ iw = isigma + n;
+ ivfw = iw + m;
+ ivlw = ivfw + m;
+
+ idx = 1;
+ idxc = idx + n;
+ idxp = idxc + n;
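+
+/*
+ Layout note (added for clarity): WORK holds ISIGMA (N reals) followed by
+ IW, IVFW and IVLW (M reals each), N + 3*M <= 4*M entries in all, which
+ matches the documented WORK dimension of 4*M; IWORK holds IDX, IDXC and
+ IDXP (N integers each) within its documented 3*N entries.
+*/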
+
+/*
+ Scale.
+
+ Computing MAX
+*/
+ r__1 = dabs(*alpha), r__2 = dabs(*beta);
+ orgnrm = dmax(r__1,r__2);
+ d__[*nl + 1] = 0.f;
+ i__1 = n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if ((r__1 = d__[i__], dabs(r__1)) > orgnrm) {
+ orgnrm = (r__1 = d__[i__], dabs(r__1));
+ }
+/* L10: */
+ }
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, &n, &c__1, &d__[1], &n,
+ info);
+ *alpha /= orgnrm;
+ *beta /= orgnrm;
+
+/* Sort and Deflate singular values. */
+
+ slasd7_(icompq, nl, nr, sqre, k, &d__[1], &z__[1], &work[iw], &vf[1], &
+ work[ivfw], &vl[1], &work[ivlw], alpha, beta, &work[isigma], &
+ iwork[idx], &iwork[idxp], &idxq[1], &perm[1], givptr, &givcol[
+ givcol_offset], ldgcol, &givnum[givnum_offset], ldgnum, c__, s,
+ info);
+
+/* Solve Secular Equation, compute DIFL, DIFR, and update VF, VL. */
+
+ slasd8_(icompq, k, &d__[1], &z__[1], &vf[1], &vl[1], &difl[1], &difr[1],
+ ldgnum, &work[isigma], &work[iw], info);
+
+/* Save the poles if ICOMPQ = 1. */
+
+ if (*icompq == 1) {
+ scopy_(k, &d__[1], &c__1, &poles[poles_dim1 + 1], &c__1);
+ scopy_(k, &work[isigma], &c__1, &poles[((poles_dim1) << (1)) + 1], &
+ c__1);
+ }
+
+/* Unscale. */
+
+ slascl_("G", &c__0, &c__0, &c_b1011, &orgnrm, &n, &c__1, &d__[1], &n,
+ info);
+
+/* Prepare the IDXQ sorting permutation. */
+
+ n1 = *k;
+ n2 = n - *k;
+ slamrg_(&n1, &n2, &d__[1], &c__1, &c_n1, &idxq[1]);
+
+ return 0;
+
+/* End of SLASD6 */
+
+} /* slasd6_ */
+
+/* Subroutine */ int slasd7_(integer *icompq, integer *nl, integer *nr,
+ integer *sqre, integer *k, real *d__, real *z__, real *zw, real *vf,
+ real *vfw, real *vl, real *vlw, real *alpha, real *beta, real *dsigma,
+ integer *idx, integer *idxp, integer *idxq, integer *perm, integer *
+ givptr, integer *givcol, integer *ldgcol, real *givnum, integer *
+ ldgnum, real *c__, real *s, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, givnum_dim1, givnum_offset, i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ static integer i__, j, m, n, k2;
+ static real z1;
+ static integer jp;
+ static real eps, tau, tol;
+ static integer nlp1, nlp2, idxi, idxj;
+ extern /* Subroutine */ int srot_(integer *, real *, integer *, real *,
+ integer *, real *, real *);
+ static integer idxjp, jprev;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slapy2_(real *, real *), slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), slamrg_(
+ integer *, integer *, real *, integer *, integer *, integer *);
+ static real hlftol;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLASD7 merges the two sets of singular values together into a single
+ sorted set. Then it tries to deflate the size of the problem. There
+ are two ways in which deflation can occur: when two or more singular
+ values are close together or if there is a tiny entry in the Z
+ vector. For each such occurrence the order of the related
+ secular equation problem is reduced by one.
+
+ SLASD7 is called from SLASD6.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed
+ in compact form, as follows:
+ = 0: Compute singular values only.
+ = 1: Compute singular vectors of upper
+ bidiagonal matrix in compact form.
+
+ NL (input) INTEGER
+ The row dimension of the upper block. NL >= 1.
+
+ NR (input) INTEGER
+ The row dimension of the lower block. NR >= 1.
+
+ SQRE (input) INTEGER
+ = 0: the lower block is an NR-by-NR square matrix.
+ = 1: the lower block is an NR-by-(NR+1) rectangular matrix.
+
+ The bidiagonal matrix has
+ N = NL + NR + 1 rows and
+ M = N + SQRE >= N columns.
+
+ K (output) INTEGER
+ Contains the dimension of the non-deflated matrix; this is
+ the order of the related secular equation. 1 <= K <= N.
+
+ D (input/output) REAL array, dimension ( N )
+ On entry D contains the singular values of the two submatrices
+ to be combined. On exit D contains the trailing (N-K) updated
+ singular values (those which were deflated) sorted into
+ increasing order.
+
+ Z (output) REAL array, dimension ( M )
+ On exit Z contains the updating row vector in the secular
+ equation.
+
+ ZW (workspace) REAL array, dimension ( M )
+ Workspace for Z.
+
+ VF (input/output) REAL array, dimension ( M )
+ On entry, VF(1:NL+1) contains the first components of all
+ right singular vectors of the upper block; and VF(NL+2:M)
+ contains the first components of all right singular vectors
+ of the lower block. On exit, VF contains the first components
+ of all right singular vectors of the bidiagonal matrix.
+
+ VFW (workspace) REAL array, dimension ( M )
+ Workspace for VF.
+
+ VL (input/output) REAL array, dimension ( M )
+ On entry, VL(1:NL+1) contains the last components of all
+ right singular vectors of the upper block; and VL(NL+2:M)
+ contains the last components of all right singular vectors
+ of the lower block. On exit, VL contains the last components
+ of all right singular vectors of the bidiagonal matrix.
+
+ VLW (workspace) REAL array, dimension ( M )
+ Workspace for VL.
+
+ ALPHA (input) REAL
+ Contains the diagonal element associated with the added row.
+
+ BETA (input) REAL
+ Contains the off-diagonal element associated with the added
+ row.
+
+ DSIGMA (output) REAL array, dimension ( N )
+ Contains a copy of the diagonal elements (K-1 singular values
+ and one zero) in the secular equation.
+
+ IDX (workspace) INTEGER array, dimension ( N )
+ This will contain the permutation used to sort the contents of
+ D into ascending order.
+
+ IDXP (workspace) INTEGER array, dimension ( N )
+ This will contain the permutation used to place deflated
+ values of D at the end of the array. On output IDXP(2:K)
+ points to the nondeflated D-values and IDXP(K+1:N)
+ points to the deflated singular values.
+
+ IDXQ (input) INTEGER array, dimension ( N )
+ This contains the permutation which separately sorts the two
+ sub-problems in D into ascending order. Note that entries in
+ the first half of this permutation must first be moved one
+ position backward; and entries in the second half
+ must first have NL+1 added to their values.
+
+ PERM (output) INTEGER array, dimension ( N )
+ The permutations (from deflation and sorting) to be applied
+ to each singular block. Not referenced if ICOMPQ = 0.
+
+ GIVPTR (output) INTEGER
+ The number of Givens rotations which took place in this
+ subproblem. Not referenced if ICOMPQ = 0.
+
+ GIVCOL (output) INTEGER array, dimension ( LDGCOL, 2 )
+ Each pair of numbers indicates a pair of columns taking part
+ in a Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGCOL (input) INTEGER
+ The leading dimension of GIVCOL, must be at least N.
+
+ GIVNUM (output) REAL array, dimension ( LDGNUM, 2 )
+ Each number indicates the C or S value to be used in the
+ corresponding Givens rotation. Not referenced if ICOMPQ = 0.
+
+ LDGNUM (input) INTEGER
+ The leading dimension of GIVNUM, must be at least N.
+
+ C (output) REAL
+ C contains garbage if SQRE =0 and the C-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ S (output) REAL
+ S contains garbage if SQRE =0 and the S-value of a Givens
+ rotation related to the right null space if SQRE = 1.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ --zw;
+ --vf;
+ --vfw;
+ --vl;
+ --vlw;
+ --dsigma;
+ --idx;
+ --idxp;
+ --idxq;
+ --perm;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ givnum_dim1 = *ldgnum;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+
+ /* Function Body */
+ *info = 0;
+ n = *nl + *nr + 1;
+ m = n + *sqre;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*nl < 1) {
+ *info = -2;
+ } else if (*nr < 1) {
+ *info = -3;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -4;
+ } else if (*ldgcol < n) {
+ *info = -22;
+ } else if (*ldgnum < n) {
+ *info = -24;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD7", &i__1);
+ return 0;
+ }
+
+ nlp1 = *nl + 1;
+ nlp2 = *nl + 2;
+ if (*icompq == 1) {
+ *givptr = 0;
+ }
+
+/*
+ Generate the first part of the vector Z and move the singular
+ values in the first part of D one position backward.
+*/
+
+ z1 = *alpha * vl[nlp1];
+ vl[nlp1] = 0.f;
+ tau = vf[nlp1];
+ for (i__ = *nl; i__ >= 1; --i__) {
+ z__[i__ + 1] = *alpha * vl[i__];
+ vl[i__] = 0.f;
+ vf[i__ + 1] = vf[i__];
+ d__[i__ + 1] = d__[i__];
+ idxq[i__ + 1] = idxq[i__] + 1;
+/* L10: */
+ }
+ vf[1] = tau;
+
+/* Generate the second part of the vector Z. */
+
+ i__1 = m;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ z__[i__] = *beta * vf[i__];
+ vf[i__] = 0.f;
+/* L20: */
+ }
+
+/* Sort the singular values into increasing order */
+
+ i__1 = n;
+ for (i__ = nlp2; i__ <= i__1; ++i__) {
+ idxq[i__] += nlp1;
+/* L30: */
+ }
+
+/* DSIGMA, ZW, VFW, and VLW are used as storage space. */
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ dsigma[i__] = d__[idxq[i__]];
+ zw[i__] = z__[idxq[i__]];
+ vfw[i__] = vf[idxq[i__]];
+ vlw[i__] = vl[idxq[i__]];
+/* L40: */
+ }
+
+ slamrg_(nl, nr, &dsigma[2], &c__1, &c__1, &idx[2]);
+
+ i__1 = n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ idxi = idx[i__] + 1;
+ d__[i__] = dsigma[idxi];
+ z__[i__] = zw[idxi];
+ vf[i__] = vfw[idxi];
+ vl[i__] = vlw[idxi];
+/* L50: */
+ }
+
+/* Calculate the allowable deflation tolerance */
+
+ eps = slamch_("Epsilon");
+/* Computing MAX */
+ r__1 = dabs(*alpha), r__2 = dabs(*beta);
+ tol = dmax(r__1,r__2);
+/* Computing MAX */
+ r__2 = (r__1 = d__[n], dabs(r__1));
+ tol = eps * 64.f * dmax(r__2,tol);
+
+/*
+ There are 2 kinds of deflation -- first a value in the z-vector
+ is small, second two (or more) singular values are very close
+ together (their difference is small).
+
+ If the value in the z-vector is small, we simply permute the
+ array so that the corresponding singular value is moved to the
+ end.
+
+ If two values in the D-vector are close, we perform a two-sided
+ rotation designed to make one of the corresponding z-vector
+ entries zero, and then permute the array so that the deflated
+ singular value is moved to the end.
+
+ If there are multiple singular values then the problem deflates.
+ Here the number of equal singular values is found. As each equal
+ singular value is found, an elementary reflector is computed to
+ rotate the corresponding singular subspace so that the
+ corresponding components of Z are zero in this new basis.
+*/
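+
+/*
+ Sketch (added for clarity) of the deflating rotation used below: with
+ c = Z(j)/tau, s = -Z(jprev)/tau and tau = sqrt(Z(jprev)^2 + Z(j)^2),
+ the Givens rotation applied to entries jprev and j gives
+
+ c*Z(jprev) + s*Z(j) = 0
+ c*Z(j) - s*Z(jprev) = tau
+
+ an orthogonal change of basis that leaves the singular values unchanged
+ while zeroing Z(jprev), which can then be deflated. VF and VL are
+ rotated by the same transformation so the factored singular vectors
+ stay consistent.
+*/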
+
+ *k = 1;
+ k2 = n + 1;
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ if ((r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ if (j == n) {
+ goto L100;
+ }
+ } else {
+ jprev = j;
+ goto L70;
+ }
+/* L60: */
+ }
+L70:
+ j = jprev;
+L80:
+ ++j;
+ if (j > n) {
+ goto L90;
+ }
+ if ((r__1 = z__[j], dabs(r__1)) <= tol) {
+
+/* Deflate due to small z component. */
+
+ --k2;
+ idxp[k2] = j;
+ } else {
+
+/* Check if singular values are close enough to allow deflation. */
+
+ if ((r__1 = d__[j] - d__[jprev], dabs(r__1)) <= tol) {
+
+/* Deflation is possible. */
+
+ *s = z__[jprev];
+ *c__ = z__[j];
+
+/*
+ Find sqrt(a**2+b**2) without overflow or
+ destructive underflow.
+*/
+
+ tau = slapy2_(c__, s);
+ z__[j] = tau;
+ z__[jprev] = 0.f;
+ *c__ /= tau;
+ *s = -(*s) / tau;
+
+/* Record the appropriate Givens rotation */
+
+ if (*icompq == 1) {
+ ++(*givptr);
+ idxjp = idxq[idx[jprev] + 1];
+ idxj = idxq[idx[j] + 1];
+ if (idxjp <= nlp1) {
+ --idxjp;
+ }
+ if (idxj <= nlp1) {
+ --idxj;
+ }
+ givcol[*givptr + ((givcol_dim1) << (1))] = idxjp;
+ givcol[*givptr + givcol_dim1] = idxj;
+ givnum[*givptr + ((givnum_dim1) << (1))] = *c__;
+ givnum[*givptr + givnum_dim1] = *s;
+ }
+ srot_(&c__1, &vf[jprev], &c__1, &vf[j], &c__1, c__, s);
+ srot_(&c__1, &vl[jprev], &c__1, &vl[j], &c__1, c__, s);
+ --k2;
+ idxp[k2] = jprev;
+ jprev = j;
+ } else {
+ ++(*k);
+ zw[*k] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+ jprev = j;
+ }
+ }
+ goto L80;
+L90:
+
+/* Record the last singular value. */
+
+ ++(*k);
+ zw[*k] = z__[jprev];
+ dsigma[*k] = d__[jprev];
+ idxp[*k] = jprev;
+
+L100:
+
+/*
+ Sort the singular values into DSIGMA. The singular values which
+ were not deflated go into the first K slots of DSIGMA, except
+ that DSIGMA(1) is treated separately.
+*/
+
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ dsigma[j] = d__[jp];
+ vfw[j] = vf[jp];
+ vlw[j] = vl[jp];
+/* L110: */
+ }
+ if (*icompq == 1) {
+ i__1 = n;
+ for (j = 2; j <= i__1; ++j) {
+ jp = idxp[j];
+ perm[j] = idxq[idx[jp] + 1];
+ if (perm[j] <= nlp1) {
+ --perm[j];
+ }
+/* L120: */
+ }
+ }
+
+/*
+ The deflated singular values go back into the last N - K slots of
+ D.
+*/
+
+ i__1 = n - *k;
+ scopy_(&i__1, &dsigma[*k + 1], &c__1, &d__[*k + 1], &c__1);
+
+/*
+ Determine DSIGMA(1), DSIGMA(2), Z(1), VF(1), VL(1), VF(M), and
+ VL(M).
+*/
+
+ dsigma[1] = 0.f;
+ hlftol = tol / 2.f;
+ if (dabs(dsigma[2]) <= hlftol) {
+ dsigma[2] = hlftol;
+ }
+ if (m > n) {
+ z__[1] = slapy2_(&z1, &z__[m]);
+ if (z__[1] <= tol) {
+ *c__ = 1.f;
+ *s = 0.f;
+ z__[1] = tol;
+ } else {
+ *c__ = z1 / z__[1];
+ *s = -z__[m] / z__[1];
+ }
+ srot_(&c__1, &vf[m], &c__1, &vf[1], &c__1, c__, s);
+ srot_(&c__1, &vl[m], &c__1, &vl[1], &c__1, c__, s);
+ } else {
+ if (dabs(z1) <= tol) {
+ z__[1] = tol;
+ } else {
+ z__[1] = z1;
+ }
+ }
+
+/* Restore Z, VF, and VL. */
+
+ i__1 = *k - 1;
+ scopy_(&i__1, &zw[2], &c__1, &z__[2], &c__1);
+ i__1 = n - 1;
+ scopy_(&i__1, &vfw[2], &c__1, &vf[2], &c__1);
+ i__1 = n - 1;
+ scopy_(&i__1, &vlw[2], &c__1, &vl[2], &c__1);
+
+ return 0;
+
+/* End of SLASD7 */
+
+} /* slasd7_ */
+
+/* Subroutine */ int slasd8_(integer *icompq, integer *k, real *d__, real *
+ z__, real *vf, real *vl, real *difl, real *difr, integer *lddifr,
+ real *dsigma, real *work, integer *info)
+{
+ /* System generated locals */
+ integer difr_dim1, difr_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ static integer i__, j;
+ static real dj, rho;
+ static integer iwk1, iwk2, iwk3;
+ static real temp;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ static integer iwk2i, iwk3i;
+ extern doublereal snrm2_(integer *, real *, integer *);
+ static real diflj, difrj, dsigj;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ extern doublereal slamc3_(real *, real *);
+ extern /* Subroutine */ int slasd4_(integer *, integer *, real *, real *,
+ real *, real *, real *, real *, integer *), xerbla_(char *,
+ integer *);
+ static real dsigjp;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), slaset_(char *, integer *, integer *, real *, real *,
+ real *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Oak Ridge National Lab, Argonne National Lab,
+ Courant Institute, NAG Ltd., and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLASD8 finds the square roots of the roots of the secular equation,
+ as defined by the values in DSIGMA and Z. It makes the appropriate
+ calls to SLASD4, and stores, for each element in D, the distance
+ to its two nearest poles (elements in DSIGMA). It also updates
+ the arrays VF and VL, the first and last components of all the
+ right singular vectors of the original bidiagonal matrix.
+
+ SLASD8 is called from SLASD6.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed in
+ factored form in the calling routine:
+ = 0: Compute singular values only.
+ = 1: Compute singular vectors in factored form as well.
+
+ K (input) INTEGER
+ The number of terms in the rational function to be solved
+ by SLASD4. K >= 1.
+
+ D (output) REAL array, dimension ( K )
+ On output, D contains the updated singular values.
+
+ Z (input) REAL array, dimension ( K )
+ The first K elements of this array contain the components
+ of the deflation-adjusted updating row vector.
+
+ VF (input/output) REAL array, dimension ( K )
+ On entry, VF contains information passed through DBEDE8.
+ On exit, VF contains the first K components of the first
+ components of all right singular vectors of the bidiagonal
+ matrix.
+
+ VL (input/output) REAL array, dimension ( K )
+ On entry, VL contains information passed through DBEDE8.
+ On exit, VL contains the first K components of the last
+ components of all right singular vectors of the bidiagonal
+ matrix.
+
+ DIFL (output) REAL array, dimension ( K )
+ On exit, DIFL(I) = D(I) - DSIGMA(I).
+
+ DIFR (output) REAL array,
+ dimension ( LDDIFR, 2 ) if ICOMPQ = 1 and
+ dimension ( K ) if ICOMPQ = 0.
+ On exit, DIFR(I,1) = D(I) - DSIGMA(I+1); DIFR(K,1) is not
+ defined and will not be referenced.
+
+ If ICOMPQ = 1, DIFR(1:K,2) is an array containing the
+ normalizing factors for the right singular vector matrix.
+
+ LDDIFR (input) INTEGER
+ The leading dimension of DIFR, must be at least K.
+
+ DSIGMA (input) REAL array, dimension ( K )
+ The first K elements of this array contain the old roots
+ of the deflated updating problem. These are the poles
+ of the secular equation.
+
+ WORK (workspace) REAL array, dimension at least 3 * K
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --z__;
+ --vf;
+ --vl;
+ --difl;
+ difr_dim1 = *lddifr;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ --dsigma;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*k < 1) {
+ *info = -2;
+ } else if (*lddifr < *k) {
+ *info = -9;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASD8", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*k == 1) {
+ d__[1] = dabs(z__[1]);
+ difl[1] = d__[1];
+ if (*icompq == 1) {
+ difl[2] = 1.f;
+ difr[((difr_dim1) << (1)) + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/*
+ Modify values DSIGMA(i) to make sure all DSIGMA(i)-DSIGMA(j) can
+ be computed with high relative accuracy (barring over/underflow).
+ This is a problem on machines without a guard digit in
+ add/subtract (Cray XMP, Cray YMP, Cray C 90 and Cray 2).
+ The following code replaces DSIGMA(I) by 2*DSIGMA(I)-DSIGMA(I),
+ which on any of these machines zeros out the bottommost
+ bit of DSIGMA(I) if it is 1; this makes the subsequent
+ subtractions DSIGMA(I)-DSIGMA(J) unproblematic when cancellation
+ occurs. On binary machines with a guard digit (almost all
+ machines) it does not change DSIGMA(I) at all. On hexadecimal
+ and decimal machines with a guard digit, it slightly
+ changes the bottommost bits of DSIGMA(I). It does not account
+ for hexadecimal or decimal machines without guard digits
+ (we know of none). We use a subroutine call to compute
+ 2*DSIGMA(I) to prevent optimizing compilers from eliminating
+ this code.
+*/
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ dsigma[i__] = slamc3_(&dsigma[i__], &dsigma[i__]) - dsigma[i__];
+/* L10: */
+ }
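+
+/*
+   Editor's note (illustrative, not part of the LAPACK source): the
+   SLAMC3 call above exists so an optimizing compiler cannot fold
+
+       dsigma[i] = slamc3_(&dsigma[i], &dsigma[i]) - dsigma[i];
+
+   i.e. fl(2*s) - s, back into plain s. On binary machines with a
+   guard digit the value is unchanged; on the guard-digit-free machines
+   listed above the bottommost bit is rounded away, which is what makes
+   the later differences DSIGMA(I)-DSIGMA(J) safe when cancellation
+   occurs.
+*/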
+
+/* Book keeping. */
+
+ iwk1 = 1;
+ iwk2 = iwk1 + *k;
+ iwk3 = iwk2 + *k;
+ iwk2i = iwk2 - 1;
+ iwk3i = iwk3 - 1;
+
+/* Normalize Z. */
+
+ rho = snrm2_(k, &z__[1], &c__1);
+ slascl_("G", &c__0, &c__0, &rho, &c_b1011, k, &c__1, &z__[1], k, info);
+ rho *= rho;
+
+/* Initialize WORK(IWK3). */
+
+ slaset_("A", k, &c__1, &c_b1011, &c_b1011, &work[iwk3], k);
+
+/*
+ Compute the updated singular values, the arrays DIFL, DIFR,
+ and the updated Z.
+*/
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ slasd4_(k, &j, &dsigma[1], &z__[1], &work[iwk1], &rho, &d__[j], &work[
+ iwk2], info);
+
+/* If the root finder fails, the computation is terminated. */
+
+ if (*info != 0) {
+ return 0;
+ }
+ work[iwk3i + j] = work[iwk3i + j] * work[j] * work[iwk2i + j];
+ difl[j] = -work[j];
+ difr[j + difr_dim1] = -work[j + 1];
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
+ i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
+ j]);
+/* L20: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[iwk3i + i__] = work[iwk3i + i__] * work[i__] * work[iwk2i +
+ i__] / (dsigma[i__] - dsigma[j]) / (dsigma[i__] + dsigma[
+ j]);
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Compute updated Z. */
+
+ i__1 = *k;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ r__2 = sqrt((r__1 = work[iwk3i + i__], dabs(r__1)));
+ z__[i__] = r_sign(&r__2, &z__[i__]);
+/* L50: */
+ }
+
+/* Update VF and VL. */
+
+ i__1 = *k;
+ for (j = 1; j <= i__1; ++j) {
+ diflj = difl[j];
+ dj = d__[j];
+ dsigj = -dsigma[j];
+ if (j < *k) {
+ difrj = -difr[j + difr_dim1];
+ dsigjp = -dsigma[j + 1];
+ }
+ work[j] = -z__[j] / diflj / (dsigma[j] + dj);
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[i__] = z__[i__] / (slamc3_(&dsigma[i__], &dsigj) - diflj) / (
+ dsigma[i__] + dj);
+/* L60: */
+ }
+ i__2 = *k;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ work[i__] = z__[i__] / (slamc3_(&dsigma[i__], &dsigjp) + difrj) /
+ (dsigma[i__] + dj);
+/* L70: */
+ }
+ temp = snrm2_(k, &work[1], &c__1);
+ work[iwk2i + j] = sdot_(k, &work[1], &c__1, &vf[1], &c__1) / temp;
+ work[iwk3i + j] = sdot_(k, &work[1], &c__1, &vl[1], &c__1) / temp;
+ if (*icompq == 1) {
+ difr[j + ((difr_dim1) << (1))] = temp;
+ }
+/* L80: */
+ }
+
+ scopy_(k, &work[iwk2], &c__1, &vf[1], &c__1);
+ scopy_(k, &work[iwk3], &c__1, &vl[1], &c__1);
+
+ return 0;
+
+/* End of SLASD8 */
+
+} /* slasd8_ */
+
+/* Subroutine */ int slasda_(integer *icompq, integer *smlsiz, integer *n,
+ integer *sqre, real *d__, real *e, real *u, integer *ldu, real *vt,
+ integer *k, real *difl, real *difr, real *z__, real *poles, integer *
+ givptr, integer *givcol, integer *ldgcol, integer *perm, real *givnum,
+ real *c__, real *s, real *work, integer *iwork, integer *info)
+{
+ /* System generated locals */
+ integer givcol_dim1, givcol_offset, perm_dim1, perm_offset, difl_dim1,
+ difl_offset, difr_dim1, difr_offset, givnum_dim1, givnum_offset,
+ poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset,
+ z_dim1, z_offset, i__1, i__2;
+
+ /* Builtin functions */
+ integer pow_ii(integer *, integer *);
+
+ /* Local variables */
+ static integer i__, j, m, i1, ic, lf, nd, ll, nl, vf, nr, vl, im1, ncc,
+ nlf, nrf, vfi, iwk, vli, lvl, nru, ndb1, nlp1, lvl2, nrp1;
+ static real beta;
+ static integer idxq, nlvl;
+ static real alpha;
+ static integer inode, ndiml, ndimr, idxqi, itemp, sqrei;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), slasd6_(integer *, integer *, integer *, integer *,
+ real *, real *, real *, real *, real *, integer *, integer *,
+ integer *, integer *, integer *, real *, integer *, real *, real *
+ , real *, real *, integer *, real *, real *, real *, integer *,
+ integer *);
+ static integer nwork1, nwork2;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slasdq_(
+ char *, integer *, integer *, integer *, integer *, integer *,
+ real *, real *, real *, integer *, real *, integer *, real *,
+ integer *, real *, integer *), slasdt_(integer *, integer
+ *, integer *, integer *, integer *, integer *, integer *),
+ slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *);
+ static integer smlszp;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ Using a divide and conquer approach, SLASDA computes the singular
+ value decomposition (SVD) of a real upper bidiagonal N-by-M matrix
+ B with diagonal D and offdiagonal E, where M = N + SQRE. The
+ algorithm computes the singular values in the SVD B = U * S * VT.
+ The orthogonal matrices U and VT are optionally computed in
+ compact form.
+
+ A related subroutine, SLASD0, computes the singular values and
+ the singular vectors in explicit form.
+
+ Arguments
+ =========
+
+ ICOMPQ (input) INTEGER
+ Specifies whether singular vectors are to be computed
+ in compact form, as follows
+ = 0: Compute singular values only.
+ = 1: Compute singular vectors of upper bidiagonal
+ matrix in compact form.
+
+ SMLSIZ (input) INTEGER
+ The maximum size of the subproblems at the bottom of the
+ computation tree.
+
+ N (input) INTEGER
+ The row dimension of the upper bidiagonal matrix. This is
+ also the dimension of the main diagonal array D.
+
+ SQRE (input) INTEGER
+ Specifies the column dimension of the bidiagonal matrix.
+ = 0: The bidiagonal matrix has column dimension M = N;
+ = 1: The bidiagonal matrix has column dimension M = N + 1.
+
+ D (input/output) REAL array, dimension ( N )
+ On entry D contains the main diagonal of the bidiagonal
+ matrix. On exit D, if INFO = 0, contains its singular values.
+
+ E (input) REAL array, dimension ( M-1 )
+ Contains the subdiagonal entries of the bidiagonal matrix.
+ On exit, E has been destroyed.
+
+ U (output) REAL array,
+ dimension ( LDU, SMLSIZ ) if ICOMPQ = 1, and not referenced
+ if ICOMPQ = 0. If ICOMPQ = 1, on exit, U contains the left
+ singular vector matrices of all subproblems at the bottom
+ level.
+
+ LDU (input) INTEGER, LDU >= N.
+ The leading dimension of arrays U, VT, DIFL, DIFR, POLES,
+ GIVNUM, and Z.
+
+ VT (output) REAL array,
+ dimension ( LDU, SMLSIZ+1 ) if ICOMPQ = 1, and not referenced
+ if ICOMPQ = 0. If ICOMPQ = 1, on exit, VT' contains the right
+ singular vector matrices of all subproblems at the bottom
+ level.
+
+ K (output) INTEGER array,
+ dimension ( N ) if ICOMPQ = 1 and dimension 1 if ICOMPQ = 0.
+ If ICOMPQ = 1, on exit, K(I) is the dimension of the I-th
+ secular equation on the computation tree.
+
+ DIFL (output) REAL array, dimension ( LDU, NLVL ),
+ where NLVL = floor(log_2 (N/SMLSIZ)).
+
+ DIFR (output) REAL array,
+ dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1 and
+ dimension ( N ) if ICOMPQ = 0.
+ If ICOMPQ = 1, on exit, DIFL(1:N, I) and DIFR(1:N, 2 * I - 1)
+ record distances between singular values on the I-th
+ level and singular values on the (I -1)-th level, and
+ DIFR(1:N, 2 * I ) contains the normalizing factors for
+ the right singular vector matrix. See SLASD8 for details.
+
+ Z (output) REAL array,
+ dimension ( LDU, NLVL ) if ICOMPQ = 1 and
+ dimension ( N ) if ICOMPQ = 0.
+ The first K elements of Z(1, I) contain the components of
+ the deflation-adjusted updating row vector for subproblems
+ on the I-th level.
+
+ POLES (output) REAL array,
+ dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not referenced
+ if ICOMPQ = 0. If ICOMPQ = 1, on exit, POLES(1, 2*I - 1) and
+ POLES(1, 2*I) contain the new and old singular values
+ involved in the secular equations on the I-th level.
+
+ GIVPTR (output) INTEGER array,
+ dimension ( N ) if ICOMPQ = 1, and not referenced if
+ ICOMPQ = 0. If ICOMPQ = 1, on exit, GIVPTR( I ) records
+ the number of Givens rotations performed on the I-th
+ problem on the computation tree.
+
+ GIVCOL (output) INTEGER array,
+ dimension ( LDGCOL, 2 * NLVL ) if ICOMPQ = 1, and not
+ referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I,
+ GIVCOL(1, 2 *I - 1) and GIVCOL(1, 2 *I) record the locations
+ of Givens rotations performed on the I-th level on the
+ computation tree.
+
+ LDGCOL (input) INTEGER, LDGCOL >= N.
+ The leading dimension of arrays GIVCOL and PERM.
+
+ PERM (output) INTEGER array,
+ dimension ( LDGCOL, NLVL ) if ICOMPQ = 1, and not referenced
+ if ICOMPQ = 0. If ICOMPQ = 1, on exit, PERM(1, I) records
+ permutations done on the I-th level of the computation tree.
+
+ GIVNUM (output) REAL array,
+ dimension ( LDU, 2 * NLVL ) if ICOMPQ = 1, and not
+ referenced if ICOMPQ = 0. If ICOMPQ = 1, on exit, for each I,
+ GIVNUM(1, 2 *I - 1) and GIVNUM(1, 2 *I) record the C- and S-
+ values of Givens rotations performed on the I-th level on
+ the computation tree.
+
+ C (output) REAL array,
+ dimension ( N ) if ICOMPQ = 1, and dimension 1 if ICOMPQ = 0.
+ If ICOMPQ = 1 and the I-th subproblem is not square, on exit,
+ C( I ) contains the C-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ S (output) REAL array, dimension ( N ) if
+ ICOMPQ = 1, and dimension 1 if ICOMPQ = 0. If ICOMPQ = 1
+ and the I-th subproblem is not square, on exit, S( I )
+ contains the S-value of a Givens rotation related to
+ the right null space of the I-th subproblem.
+
+ WORK (workspace) REAL array, dimension
+ (6 * N + (SMLSIZ + 1)*(SMLSIZ + 1)).
+
+ IWORK (workspace) INTEGER array.
+ Dimension must be at least (7 * N).
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: if INFO = 1, a singular value did not converge
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ givnum_dim1 = *ldu;
+ givnum_offset = 1 + givnum_dim1;
+ givnum -= givnum_offset;
+ poles_dim1 = *ldu;
+ poles_offset = 1 + poles_dim1;
+ poles -= poles_offset;
+ z_dim1 = *ldu;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ difr_dim1 = *ldu;
+ difr_offset = 1 + difr_dim1;
+ difr -= difr_offset;
+ difl_dim1 = *ldu;
+ difl_offset = 1 + difl_dim1;
+ difl -= difl_offset;
+ vt_dim1 = *ldu;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ --k;
+ --givptr;
+ perm_dim1 = *ldgcol;
+ perm_offset = 1 + perm_dim1;
+ perm -= perm_offset;
+ givcol_dim1 = *ldgcol;
+ givcol_offset = 1 + givcol_dim1;
+ givcol -= givcol_offset;
+ --c__;
+ --s;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+
+ if ((*icompq < 0) || (*icompq > 1)) {
+ *info = -1;
+ } else if (*smlsiz < 3) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -4;
+ } else if (*ldu < *n + *sqre) {
+ *info = -8;
+ } else if (*ldgcol < *n) {
+ *info = -17;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASDA", &i__1);
+ return 0;
+ }
+
+ m = *n + *sqre;
+
+/* If the input matrix is too small, call SLASDQ to find the SVD. */
+
+ if (*n <= *smlsiz) {
+ if (*icompq == 0) {
+ slasdq_("U", sqre, n, &c__0, &c__0, &c__0, &d__[1], &e[1], &vt[
+ vt_offset], ldu, &u[u_offset], ldu, &u[u_offset], ldu, &
+ work[1], info);
+ } else {
+ slasdq_("U", sqre, n, &m, n, &c__0, &d__[1], &e[1], &vt[vt_offset]
+ , ldu, &u[u_offset], ldu, &u[u_offset], ldu, &work[1],
+ info);
+ }
+ return 0;
+ }
+
+/* Book-keeping and set up the computation tree. */
+
+ inode = 1;
+ ndiml = inode + *n;
+ ndimr = ndiml + *n;
+ idxq = ndimr + *n;
+ iwk = idxq + *n;
+
+ ncc = 0;
+ nru = 0;
+
+ smlszp = *smlsiz + 1;
+ vf = 1;
+ vl = vf + m;
+ nwork1 = vl + m;
+ nwork2 = nwork1 + smlszp * smlszp;
+
+ slasdt_(n, &nlvl, &nd, &iwork[inode], &iwork[ndiml], &iwork[ndimr],
+ smlsiz);
+
+/*
+ for the nodes on bottom level of the tree, solve
+ their subproblems by SLASDQ.
+*/
+
+ ndb1 = (nd + 1) / 2;
+ i__1 = nd;
+ for (i__ = ndb1; i__ <= i__1; ++i__) {
+
+/*
+ IC : center row of each node
+ NL : number of rows of left subproblem
+ NR : number of rows of right subproblem
+ NLF: starting row of the left subproblem
+ NRF: starting row of the right subproblem
+*/
+
+ i1 = i__ - 1;
+ ic = iwork[inode + i1];
+ nl = iwork[ndiml + i1];
+ nlp1 = nl + 1;
+ nr = iwork[ndimr + i1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ idxqi = idxq + nlf - 2;
+ vfi = vf + nlf - 1;
+ vli = vl + nlf - 1;
+ sqrei = 1;
+ if (*icompq == 0) {
+ slaset_("A", &nlp1, &nlp1, &c_b320, &c_b1011, &work[nwork1], &
+ smlszp);
+ slasdq_("U", &sqrei, &nl, &nlp1, &nru, &ncc, &d__[nlf], &e[nlf], &
+ work[nwork1], &smlszp, &work[nwork2], &nl, &work[nwork2],
+ &nl, &work[nwork2], info);
+ itemp = nwork1 + nl * smlszp;
+ scopy_(&nlp1, &work[nwork1], &c__1, &work[vfi], &c__1);
+ scopy_(&nlp1, &work[itemp], &c__1, &work[vli], &c__1);
+ } else {
+ slaset_("A", &nl, &nl, &c_b320, &c_b1011, &u[nlf + u_dim1], ldu);
+ slaset_("A", &nlp1, &nlp1, &c_b320, &c_b1011, &vt[nlf + vt_dim1],
+ ldu);
+ slasdq_("U", &sqrei, &nl, &nlp1, &nl, &ncc, &d__[nlf], &e[nlf], &
+ vt[nlf + vt_dim1], ldu, &u[nlf + u_dim1], ldu, &u[nlf +
+ u_dim1], ldu, &work[nwork1], info);
+ scopy_(&nlp1, &vt[nlf + vt_dim1], &c__1, &work[vfi], &c__1);
+ scopy_(&nlp1, &vt[nlf + nlp1 * vt_dim1], &c__1, &work[vli], &c__1)
+ ;
+ }
+ if (*info != 0) {
+ return 0;
+ }
+ i__2 = nl;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[idxqi + j] = j;
+/* L10: */
+ }
+ if (i__ == nd && *sqre == 0) {
+ sqrei = 0;
+ } else {
+ sqrei = 1;
+ }
+ idxqi += nlp1;
+ vfi += nlp1;
+ vli += nlp1;
+ nrp1 = nr + sqrei;
+ if (*icompq == 0) {
+ slaset_("A", &nrp1, &nrp1, &c_b320, &c_b1011, &work[nwork1], &
+ smlszp);
+ slasdq_("U", &sqrei, &nr, &nrp1, &nru, &ncc, &d__[nrf], &e[nrf], &
+ work[nwork1], &smlszp, &work[nwork2], &nr, &work[nwork2],
+ &nr, &work[nwork2], info);
+ itemp = nwork1 + (nrp1 - 1) * smlszp;
+ scopy_(&nrp1, &work[nwork1], &c__1, &work[vfi], &c__1);
+ scopy_(&nrp1, &work[itemp], &c__1, &work[vli], &c__1);
+ } else {
+ slaset_("A", &nr, &nr, &c_b320, &c_b1011, &u[nrf + u_dim1], ldu);
+ slaset_("A", &nrp1, &nrp1, &c_b320, &c_b1011, &vt[nrf + vt_dim1],
+ ldu);
+ slasdq_("U", &sqrei, &nr, &nrp1, &nr, &ncc, &d__[nrf], &e[nrf], &
+ vt[nrf + vt_dim1], ldu, &u[nrf + u_dim1], ldu, &u[nrf +
+ u_dim1], ldu, &work[nwork1], info);
+ scopy_(&nrp1, &vt[nrf + vt_dim1], &c__1, &work[vfi], &c__1);
+ scopy_(&nrp1, &vt[nrf + nrp1 * vt_dim1], &c__1, &work[vli], &c__1)
+ ;
+ }
+ if (*info != 0) {
+ return 0;
+ }
+ i__2 = nr;
+ for (j = 1; j <= i__2; ++j) {
+ iwork[idxqi + j] = j;
+/* L20: */
+ }
+/* L30: */
+ }
+
+/* Now conquer each subproblem bottom-up. */
+
+ j = pow_ii(&c__2, &nlvl);
+ for (lvl = nlvl; lvl >= 1; --lvl) {
+ lvl2 = ((lvl) << (1)) - 1;
+
+/*
+ Find the first node LF and last node LL on
+ the current level LVL.
+*/
+
+ if (lvl == 1) {
+ lf = 1;
+ ll = 1;
+ } else {
+ i__1 = lvl - 1;
+ lf = pow_ii(&c__2, &i__1);
+ ll = ((lf) << (1)) - 1;
+ }
+ i__1 = ll;
+ for (i__ = lf; i__ <= i__1; ++i__) {
+ im1 = i__ - 1;
+ ic = iwork[inode + im1];
+ nl = iwork[ndiml + im1];
+ nr = iwork[ndimr + im1];
+ nlf = ic - nl;
+ nrf = ic + 1;
+ if (i__ == ll) {
+ sqrei = *sqre;
+ } else {
+ sqrei = 1;
+ }
+ vfi = vf + nlf - 1;
+ vli = vl + nlf - 1;
+ idxqi = idxq + nlf - 1;
+ alpha = d__[ic];
+ beta = e[ic];
+ if (*icompq == 0) {
+ slasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
+ work[vli], &alpha, &beta, &iwork[idxqi], &perm[
+ perm_offset], &givptr[1], &givcol[givcol_offset],
+ ldgcol, &givnum[givnum_offset], ldu, &poles[
+ poles_offset], &difl[difl_offset], &difr[difr_offset],
+ &z__[z_offset], &k[1], &c__[1], &s[1], &work[nwork1],
+ &iwork[iwk], info);
+ } else {
+ --j;
+ slasd6_(icompq, &nl, &nr, &sqrei, &d__[nlf], &work[vfi], &
+ work[vli], &alpha, &beta, &iwork[idxqi], &perm[nlf +
+ lvl * perm_dim1], &givptr[j], &givcol[nlf + lvl2 *
+ givcol_dim1], ldgcol, &givnum[nlf + lvl2 *
+ givnum_dim1], ldu, &poles[nlf + lvl2 * poles_dim1], &
+ difl[nlf + lvl * difl_dim1], &difr[nlf + lvl2 *
+ difr_dim1], &z__[nlf + lvl * z_dim1], &k[j], &c__[j],
+ &s[j], &work[nwork1], &iwork[iwk], info);
+ }
+ if (*info != 0) {
+ return 0;
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+
+ return 0;
+
+/* End of SLASDA */
+
+} /* slasda_ */
+
+/* Subroutine */ int slasdq_(char *uplo, integer *sqre, integer *n, integer *
+ ncvt, integer *nru, integer *ncc, real *d__, real *e, real *vt,
+ integer *ldvt, real *u, integer *ldu, real *c__, integer *ldc, real *
+ work, integer *info)
+{
+ /* System generated locals */
+ integer c_dim1, c_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
+ i__2;
+
+ /* Local variables */
+ static integer i__, j;
+ static real r__, cs, sn;
+ static integer np1, isub;
+ static real smin;
+ static integer sqre1;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int slasr_(char *, char *, char *, integer *,
+ integer *, real *, real *, real *, integer *);
+ static integer iuplo;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), xerbla_(char *, integer *), slartg_(real *,
+ real *, real *, real *, real *);
+ static logical rotate;
+ extern /* Subroutine */ int sbdsqr_(char *, integer *, integer *, integer
+ *, integer *, real *, real *, real *, integer *, real *, integer *
+ , real *, integer *, real *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SLASDQ computes the singular value decomposition (SVD) of a real
+ (upper or lower) bidiagonal matrix with diagonal D and offdiagonal
+ E, accumulating the transformations if desired. Letting B denote
+ the input bidiagonal matrix, the algorithm computes orthogonal
+ matrices Q and P such that B = Q * S * P' (P' denotes the transpose
+ of P). The singular values S are overwritten on D.
+
+ The input matrix U is changed to U * Q if desired.
+ The input matrix VT is changed to P' * VT if desired.
+ The input matrix C is changed to Q' * C if desired.
+
+ See "Computing Small Singular Values of Bidiagonal Matrices With
+ Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan,
+ LAPACK Working Note #3, for a detailed description of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ On entry, UPLO specifies whether the input bidiagonal matrix
+ is upper or lower bidiagonal, and whether it is square or
+ not.
+ UPLO = 'U' or 'u' B is upper bidiagonal.
+ UPLO = 'L' or 'l' B is lower bidiagonal.
+
+ SQRE (input) INTEGER
+ = 0: then the input matrix is N-by-N.
+ = 1: then the input matrix is N-by-(N+1) if UPLO = 'U' and
+ (N+1)-by-N if UPLO = 'L'.
+
+ The bidiagonal matrix has N rows and
+ M = N + SQRE >= N columns.
+
+ N (input) INTEGER
+ On entry, N specifies the number of rows and columns
+ in the matrix. N must be at least 0.
+
+ NCVT (input) INTEGER
+ On entry, NCVT specifies the number of columns of
+ the matrix VT. NCVT must be at least 0.
+
+ NRU (input) INTEGER
+ On entry, NRU specifies the number of rows of
+ the matrix U. NRU must be at least 0.
+
+ NCC (input) INTEGER
+ On entry, NCC specifies the number of columns of
+ the matrix C. NCC must be at least 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, D contains the diagonal entries of the
+ bidiagonal matrix whose SVD is desired. On normal exit,
+ D contains the singular values in ascending order.
+
+ E (input/output) REAL array.
+ dimension is (N-1) if SQRE = 0 and N if SQRE = 1.
+ On entry, the entries of E contain the offdiagonal entries
+ of the bidiagonal matrix whose SVD is desired. On normal
+ exit, E will contain 0. If the algorithm does not converge,
+ D and E will contain the diagonal and superdiagonal entries
+ of a bidiagonal matrix orthogonally equivalent to the one
+ given as input.
+
+ VT (input/output) REAL array, dimension (LDVT, NCVT)
+ On entry, contains a matrix which on exit has been
+ premultiplied by P', dimension N-by-NCVT if SQRE = 0
+ and (N+1)-by-NCVT if SQRE = 1 (not referenced if NCVT=0).
+
+ LDVT (input) INTEGER
+ On entry, LDVT specifies the leading dimension of VT as
+ declared in the calling (sub) program. LDVT must be at
+ least 1. If NCVT is nonzero LDVT must also be at least N.
+
+ U (input/output) REAL array, dimension (LDU, N)
+ On entry, contains a matrix which on exit has been
+ postmultiplied by Q, dimension NRU-by-N if SQRE = 0
+ and NRU-by-(N+1) if SQRE = 1 (not referenced if NRU=0).
+
+ LDU (input) INTEGER
+ On entry, LDU specifies the leading dimension of U as
+ declared in the calling (sub) program. LDU must be at
+ least max( 1, NRU ) .
+
+ C (input/output) REAL array, dimension (LDC, NCC)
+ On entry, contains an N-by-NCC matrix which on exit
+ has been premultiplied by Q' dimension N-by-NCC if SQRE = 0
+ and (N+1)-by-NCC if SQRE = 1 (not referenced if NCC=0).
+
+ LDC (input) INTEGER
+ On entry, LDC specifies the leading dimension of C as
+ declared in the calling (sub) program. LDC must be at
+ least 1. If NCC is nonzero, LDC must also be at least N.
+
+ WORK (workspace) REAL array, dimension (4*N)
+ Workspace. Only referenced if one of NCVT, NRU, or NCC is
+ nonzero, and if N is at least 2.
+
+ INFO (output) INTEGER
+ On exit, a value of 0 indicates a successful exit.
+ If INFO < 0, argument number -INFO is illegal.
+ If INFO > 0, the algorithm did not converge, and INFO
+ specifies how many superdiagonals did not converge.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ vt_dim1 = *ldvt;
+ vt_offset = 1 + vt_dim1;
+ vt -= vt_offset;
+ u_dim1 = *ldu;
+ u_offset = 1 + u_dim1;
+ u -= u_offset;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ iuplo = 0;
+ if (lsame_(uplo, "U")) {
+ iuplo = 1;
+ }
+ if (lsame_(uplo, "L")) {
+ iuplo = 2;
+ }
+ if (iuplo == 0) {
+ *info = -1;
+ } else if ((*sqre < 0) || (*sqre > 1)) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*ncvt < 0) {
+ *info = -4;
+ } else if (*nru < 0) {
+ *info = -5;
+ } else if (*ncc < 0) {
+ *info = -6;
+ } else if ((*ncvt == 0 && *ldvt < 1) || (*ncvt > 0 && *ldvt < max(1,*n)))
+ {
+ *info = -10;
+ } else if (*ldu < max(1,*nru)) {
+ *info = -12;
+ } else if ((*ncc == 0 && *ldc < 1) || (*ncc > 0 && *ldc < max(1,*n))) {
+ *info = -14;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASDQ", &i__1);
+ return 0;
+ }
+ if (*n == 0) {
+ return 0;
+ }
+
+/* ROTATE is true if any singular vectors desired, false otherwise */
+
+ rotate = ((*ncvt > 0) || (*nru > 0)) || (*ncc > 0);
+ np1 = *n + 1;
+ sqre1 = *sqre;
+
+/*
+ If matrix non-square upper bidiagonal, rotate to be lower
+ bidiagonal. The rotations are on the right.
+*/
+
+ if (iuplo == 1 && sqre1 == 1) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (rotate) {
+ work[i__] = cs;
+ work[*n + i__] = sn;
+ }
+/* L10: */
+ }
+ slartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
+ d__[*n] = r__;
+ e[*n] = 0.f;
+ if (rotate) {
+ work[*n] = cs;
+ work[*n + *n] = sn;
+ }
+ iuplo = 2;
+ sqre1 = 0;
+
+/* Update singular vectors if desired. */
+
+ if (*ncvt > 0) {
+ slasr_("L", "V", "F", &np1, ncvt, &work[1], &work[np1], &vt[
+ vt_offset], ldvt);
+ }
+ }
+
+/*
+ If matrix lower bidiagonal, rotate to be upper bidiagonal
+ by applying Givens rotations on the left.
+*/
+
+ if (iuplo == 2) {
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ slartg_(&d__[i__], &e[i__], &cs, &sn, &r__);
+ d__[i__] = r__;
+ e[i__] = sn * d__[i__ + 1];
+ d__[i__ + 1] = cs * d__[i__ + 1];
+ if (rotate) {
+ work[i__] = cs;
+ work[*n + i__] = sn;
+ }
+/* L20: */
+ }
+
+/*
+ If matrix (N+1)-by-N lower bidiagonal, one additional
+ rotation is needed.
+*/
+
+ if (sqre1 == 1) {
+ slartg_(&d__[*n], &e[*n], &cs, &sn, &r__);
+ d__[*n] = r__;
+ if (rotate) {
+ work[*n] = cs;
+ work[*n + *n] = sn;
+ }
+ }
+
+/* Update singular vectors if desired. */
+
+ if (*nru > 0) {
+ if (sqre1 == 0) {
+ slasr_("R", "V", "F", nru, n, &work[1], &work[np1], &u[
+ u_offset], ldu);
+ } else {
+ slasr_("R", "V", "F", nru, &np1, &work[1], &work[np1], &u[
+ u_offset], ldu);
+ }
+ }
+ if (*ncc > 0) {
+ if (sqre1 == 0) {
+ slasr_("L", "V", "F", n, ncc, &work[1], &work[np1], &c__[
+ c_offset], ldc);
+ } else {
+ slasr_("L", "V", "F", &np1, ncc, &work[1], &work[np1], &c__[
+ c_offset], ldc);
+ }
+ }
+ }
+
+/*
+ Call SBDSQR to compute the SVD of the reduced real
+ N-by-N upper bidiagonal matrix.
+*/
+
+ sbdsqr_("U", n, ncvt, nru, ncc, &d__[1], &e[1], &vt[vt_offset], ldvt, &u[
+ u_offset], ldu, &c__[c_offset], ldc, &work[1], info);
+
+/*
+ Sort the singular values into ascending order (insertion sort on
+ singular values, but only one transposition per singular vector)
+*/
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Scan for smallest D(I). */
+
+ isub = i__;
+ smin = d__[i__];
+ i__2 = *n;
+ for (j = i__ + 1; j <= i__2; ++j) {
+ if (d__[j] < smin) {
+ isub = j;
+ smin = d__[j];
+ }
+/* L30: */
+ }
+ if (isub != i__) {
+
+/* Swap singular values and vectors. */
+
+ d__[isub] = d__[i__];
+ d__[i__] = smin;
+ if (*ncvt > 0) {
+ sswap_(ncvt, &vt[isub + vt_dim1], ldvt, &vt[i__ + vt_dim1],
+ ldvt);
+ }
+ if (*nru > 0) {
+ sswap_(nru, &u[isub * u_dim1 + 1], &c__1, &u[i__ * u_dim1 + 1]
+ , &c__1);
+ }
+ if (*ncc > 0) {
+ sswap_(ncc, &c__[isub + c_dim1], ldc, &c__[i__ + c_dim1], ldc)
+ ;
+ }
+ }
+/* L40: */
+ }
+
+ return 0;
+
+/* End of SLASDQ */
+
+} /* slasdq_ */
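+
+/*
+   Editor's sketch (illustrative only, not part of LAPACK): the final
+   loop of SLASDQ is a selection sort arranged so that each singular
+   vector is moved by at most one SSWAP per position. The stand-alone
+   version below (hypothetical names, demo only, never compiled) shows
+   the same pattern on a value array with an attached tag per entry.
+*/
+#if 0
+static void sort_values_with_tags(integer n, real *d, integer *tag)
+{
+    integer i, j, isub, t;
+    real smin;
+    for (i = 0; i < n; ++i) {
+        /* Scan for the smallest remaining value. */
+        isub = i;
+        smin = d[i];
+        for (j = i + 1; j < n; ++j) {
+            if (d[j] < smin) {
+                isub = j;
+                smin = d[j];
+            }
+        }
+        if (isub != i) {
+            /* One transposition, mirroring the single swap of each
+               singular vector in SLASDQ above. */
+            d[isub] = d[i];
+            d[i] = smin;
+            t = tag[isub];
+            tag[isub] = tag[i];
+            tag[i] = t;
+        }
+    }
+}
+#endif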
+
+/* Subroutine */ int slasdt_(integer *n, integer *lvl, integer *nd, integer *
+ inode, integer *ndiml, integer *ndimr, integer *msub)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+
+ /* Local variables */
+ static integer i__, il, ir, maxn;
+ static real temp;
+ static integer nlvl, llst, ncrnt;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SLASDT creates a tree of subproblems for bidiagonal divide and
+ conquer.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ On entry, the number of diagonal elements of the
+ bidiagonal matrix.
+
+ LVL (output) INTEGER
+ On exit, the number of levels on the computation tree.
+
+ ND (output) INTEGER
+ On exit, the number of nodes on the tree.
+
+ INODE (output) INTEGER array, dimension ( N )
+ On exit, centers of subproblems.
+
+ NDIML (output) INTEGER array, dimension ( N )
+ On exit, row dimensions of left children.
+
+ NDIMR (output) INTEGER array, dimension ( N )
+ On exit, row dimensions of right children.
+
+ MSUB (input) INTEGER.
+ On entry, the maximum row dimension that a subproblem at
+ the bottom of the tree may have.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Ming Gu and Huan Ren, Computer Science Division, University of
+ California at Berkeley, USA
+
+ =====================================================================
+
+
+ Find the number of levels on the tree.
+*/
+
+ /* Parameter adjustments */
+ --ndimr;
+ --ndiml;
+ --inode;
+
+ /* Function Body */
+ maxn = max(1,*n);
+ temp = log((real) maxn / (real) (*msub + 1)) / log(2.f);
+ *lvl = (integer) temp + 1;
+
+ i__ = *n / 2;
+ inode[1] = i__ + 1;
+ ndiml[1] = i__;
+ ndimr[1] = *n - i__ - 1;
+ il = 0;
+ ir = 1;
+ llst = 1;
+ i__1 = *lvl - 1;
+ for (nlvl = 1; nlvl <= i__1; ++nlvl) {
+
+/*
+ Constructing the tree at (NLVL+1)-st level. The number of
+ nodes created on this level is LLST * 2.
+*/
+
+ i__2 = llst - 1;
+ for (i__ = 0; i__ <= i__2; ++i__) {
+ il += 2;
+ ir += 2;
+ ncrnt = llst + i__;
+ ndiml[il] = ndiml[ncrnt] / 2;
+ ndimr[il] = ndiml[ncrnt] - ndiml[il] - 1;
+ inode[il] = inode[ncrnt] - ndimr[il] - 1;
+ ndiml[ir] = ndimr[ncrnt] / 2;
+ ndimr[ir] = ndimr[ncrnt] - ndiml[ir] - 1;
+ inode[ir] = inode[ncrnt] + ndiml[ir] + 1;
+/* L10: */
+ }
+ llst <<= 1;
+/* L20: */
+ }
+ *nd = ((llst) << (1)) - 1;
+
+ return 0;
+
+/* End of SLASDT */
+
+} /* slasdt_ */
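+
+/*
+   Editor's sketch (illustrative, not part of LAPACK): SLASDT's level
+   count reduces to LVL = floor( log2( max(1,N) / (MSUB+1) ) ) + 1, and
+   each split leaves a divider row between a left child of floor(m/2)
+   rows and a right child of m - floor(m/2) - 1 rows. A hypothetical
+   stand-alone version of the level formula (demo only, never compiled):
+*/
+#if 0
+static integer slasdt_levels_demo(integer n, integer msub)
+{
+    integer maxn = max(1, n);
+    real temp = log((real) maxn / (real) (msub + 1)) / log(2.f);
+    return (integer) temp + 1; /* same arithmetic as SLASDT above */
+}
+#endif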
+
+/* Subroutine */ int slaset_(char *uplo, integer *m, integer *n, real *alpha,
+ real *beta, real *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j;
+ extern logical lsame_(char *, char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLASET initializes an m-by-n matrix A to BETA on the diagonal and
+ ALPHA on the offdiagonals.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies the part of the matrix A to be set.
+ = 'U': Upper triangular part is set; the strictly lower
+ triangular part of A is not changed.
+ = 'L': Lower triangular part is set; the strictly upper
+ triangular part of A is not changed.
+ Otherwise: All of the matrix A is set.
+
+ M (input) INTEGER
+ The number of rows of the matrix A. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. N >= 0.
+
+ ALPHA (input) REAL
+ The constant to which the offdiagonal elements are to be set.
+
+ BETA (input) REAL
+ The constant to which the diagonal elements are to be set.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On exit, the leading m-by-n submatrix of A is set as follows:
+
+ if UPLO = 'U', A(i,j) = ALPHA, 1<=i<=j-1, 1<=j<=n,
+ if UPLO = 'L', A(i,j) = ALPHA, j+1<=i<=m, 1<=j<=n,
+ otherwise, A(i,j) = ALPHA, 1<=i<=m, 1<=j<=n, i.ne.j,
+
+ and, for all UPLO, A(i,i) = BETA, 1<=i<=min(m,n).
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ if (lsame_(uplo, "U")) {
+
+/*
+ Set the strictly upper triangular or trapezoidal part of the
+ array to ALPHA.
+*/
+
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+/* Computing MIN */
+ i__3 = j - 1;
+ i__2 = min(i__3,*m);
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = *alpha;
+/* L10: */
+ }
+/* L20: */
+ }
+
+ } else if (lsame_(uplo, "L")) {
+
+/*
+ Set the strictly lower triangular or trapezoidal part of the
+ array to ALPHA.
+*/
+
+ i__1 = min(*m,*n);
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = *alpha;
+/* L30: */
+ }
+/* L40: */
+ }
+
+ } else {
+
+/* Set the leading m-by-n submatrix to ALPHA. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = *alpha;
+/* L50: */
+ }
+/* L60: */
+ }
+ }
+
+/* Set the first min(M,N) diagonal elements to BETA. */
+
+ i__1 = min(*m,*n);
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ a[i__ + i__ * a_dim1] = *beta;
+/* L70: */
+ }
+
+ return 0;
+
+/* End of SLASET */
+
+} /* slaset_ */
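+
+/*
+   Editor's usage sketch (not part of LAPACK, demo only, never
+   compiled): filling a 3-by-3 array with the identity via SLASET;
+   UPLO = 'A' sets every off-diagonal entry to ALPHA = 0 and every
+   diagonal entry to BETA = 1.
+*/
+#if 0
+static void slaset_identity_demo(void)
+{
+    static real a[9]; /* column-major 3x3 */
+    integer m = 3, n = 3, lda = 3;
+    real zero = 0.f, one = 1.f;
+    slaset_("A", &m, &n, &zero, &one, a, &lda);
+}
+#endif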
+
+/* Subroutine */ int slasq1_(integer *n, real *d__, real *e, real *work,
+ integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__;
+ static real eps;
+ extern /* Subroutine */ int slas2_(real *, real *, real *, real *, real *)
+ ;
+ static real scale;
+ static integer iinfo;
+ static real sigmn, sigmx;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *), slasq2_(integer *, real *, integer *);
+ extern doublereal slamch_(char *);
+ static real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *), slascl_(
+ char *, integer *, integer *, real *, real *, integer *, integer *
+ , real *, integer *, integer *), slasrt_(char *, integer *
+ , real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SLASQ1 computes the singular values of a real N-by-N bidiagonal
+ matrix with diagonal D and off-diagonal E. The singular values
+ are computed to high relative accuracy, in the absence of
+ denormalization, underflow and overflow. The algorithm was first
+ presented in
+
+ "Accurate singular values and differential qd algorithms" by K. V.
+ Fernando and B. N. Parlett, Numer. Math., Vol. 67, No. 2, pp. 191-230,
+ 1994,
+
+ and the present implementation is described in "An implementation of
+ the dqds Algorithm (Positive Case)", LAPACK Working Note.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of rows and columns in the matrix. N >= 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, D contains the diagonal elements of the
+ bidiagonal matrix whose SVD is desired. On normal exit,
+ D contains the singular values in decreasing order.
+
+ E (input/output) REAL array, dimension (N)
+ On entry, elements E(1:N-1) contain the off-diagonal elements
+ of the bidiagonal matrix whose SVD is desired.
+ On exit, E is overwritten.
+
+ WORK (workspace) REAL array, dimension (4*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: the algorithm failed
+ = 1, a split was marked by a positive value in E
+ = 2, current block of Z not diagonalized after 30*N
+ iterations (in inner while loop)
+ = 3, termination criterion of outer while loop not met
+ (program created more than N unreduced blocks)
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --work;
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ if (*n < 0) {
+ *info = -2;
+ i__1 = -(*info);
+ xerbla_("SLASQ1", &i__1);
+ return 0;
+ } else if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+ d__[1] = dabs(d__[1]);
+ return 0;
+ } else if (*n == 2) {
+ slas2_(&d__[1], &e[1], &d__[2], &sigmn, &sigmx);
+ d__[1] = sigmx;
+ d__[2] = sigmn;
+ return 0;
+ }
+
+/* Estimate the largest singular value. */
+
+ sigmx = 0.f;
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = (r__1 = d__[i__], dabs(r__1));
+/* Computing MAX */
+ r__2 = sigmx, r__3 = (r__1 = e[i__], dabs(r__1));
+ sigmx = dmax(r__2,r__3);
+/* L10: */
+ }
+ d__[*n] = (r__1 = d__[*n], dabs(r__1));
+
+/* Early return if SIGMX is zero (matrix is already diagonal). */
+
+ if (sigmx == 0.f) {
+ slasrt_("D", n, &d__[1], &iinfo);
+ return 0;
+ }
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing MAX */
+ r__1 = sigmx, r__2 = d__[i__];
+ sigmx = dmax(r__1,r__2);
+/* L20: */
+ }
+
+/*
+ Copy D and E into WORK (in the Z format) and scale (squaring the
+ input data makes scaling by a power of the radix pointless).
+*/
+
+ eps = slamch_("Precision");
+ safmin = slamch_("Safe minimum");
+ scale = sqrt(eps / safmin);
+ scopy_(n, &d__[1], &c__1, &work[1], &c__2);
+ i__1 = *n - 1;
+ scopy_(&i__1, &e[1], &c__1, &work[2], &c__2);
+ i__1 = ((*n) << (1)) - 1;
+ i__2 = ((*n) << (1)) - 1;
+ slascl_("G", &c__0, &c__0, &sigmx, &scale, &i__1, &c__1, &work[1], &i__2,
+ &iinfo);
+
+/* Compute the q's and e's. */
+
+ i__1 = ((*n) << (1)) - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+ r__1 = work[i__];
+ work[i__] = r__1 * r__1;
+/* L30: */
+ }
+ work[*n * 2] = 0.f;
+
+ slasq2_(n, &work[1], info);
+
+ if (*info == 0) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ d__[i__] = sqrt(work[i__]);
+/* L40: */
+ }
+ slascl_("G", &c__0, &c__0, &scale, &sigmx, n, &c__1, &d__[1], n, &
+ iinfo);
+ }
+
+ return 0;
+
+/* End of SLASQ1 */
+
+} /* slasq1_ */
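+
+/*
+   Editor's note (illustrative, not part of LAPACK): the two strided
+   SCOPY calls above interleave D and E into WORK as
+
+       WORK = ( d1, e1, d2, e2, ..., e(n-1), dn ),
+
+   and the squaring loop then turns WORK into the qd array
+   ( q1, e1, q2, e2, ... ) expected by SLASQ2, with q(i) = d(i)^2 and
+   the e entries likewise squared.
+*/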
+
+/* Subroutine */ int slasq2_(integer *n, real *z__, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2, i__3;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real d__, e;
+ static integer k;
+ static real s, t;
+ static integer i0, i4, n0, pp;
+ static real eps, tol;
+ static integer ipn4;
+ static real tol2;
+ static logical ieee;
+ static integer nbig;
+ static real dmin__, emin, emax;
+ static integer ndiv, iter;
+ static real qmin, temp, qmax, zmax;
+ static integer splt, nfail;
+ static real desig, trace, sigma;
+ static integer iinfo;
+ extern /* Subroutine */ int slasq3_(integer *, integer *, real *, integer
+ *, real *, real *, real *, real *, integer *, integer *, integer *
+ , logical *);
+ extern doublereal slamch_(char *);
+ static integer iwhila, iwhilb;
+ static real oldemn, safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SLASQ2 computes all the eigenvalues of the symmetric positive
+ definite tridiagonal matrix associated with the qd array Z, to high
+ relative accuracy in the absence of denormalization, underflow and
+ overflow.
+
+ To see the relation of Z to the tridiagonal matrix, let L be a
+ unit lower bidiagonal matrix with subdiagonals Z(2,4,6,,..) and
+ let U be an upper bidiagonal matrix with 1's above and diagonal
+ Z(1,3,5,,..). The tridiagonal is L*U or, if you prefer, the
+ symmetric tridiagonal to which it is similar.
+
+ Note : SLASQ2 defines a logical variable, IEEE, which is true
+ on machines which follow ieee-754 floating-point standard in their
+ handling of infinities and NaNs, and false otherwise. This variable
+ is passed to SLASQ3.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of rows and columns in the matrix. N >= 0.
+
+ Z (workspace) REAL array, dimension ( 4*N )
+ On entry Z holds the qd array. On exit, entries 1 to N hold
+ the eigenvalues in decreasing order, Z( 2*N+1 ) holds the
+ trace, and Z( 2*N+2 ) holds the sum of the eigenvalues. If
+ N > 2, then Z( 2*N+3 ) holds the iteration count, Z( 2*N+4 )
+ holds NDIVS/NIN^2, and Z( 2*N+5 ) holds the percentage of
+ shifts that failed.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if the i-th argument is a scalar and had an illegal
+ value, then INFO = -i, if the i-th argument is an
+ array and the j-entry had an illegal value, then
+ INFO = -(i*100+j)
+ > 0: the algorithm failed
+ = 1, a split was marked by a positive value in E
+ = 2, current block of Z not diagonalized after 30*N
+ iterations (in inner while loop)
+ = 3, termination criterion of outer while loop not met
+ (program created more than N unreduced blocks)
+
+ Further Details
+ ===============
+ Local Variables: I0:N0 defines a current unreduced segment of Z.
+ The shifts are accumulated in SIGMA. Iteration count is in ITER.
+ Ping-pong is controlled by PP (alternates between 0 and 1).
+
+ =====================================================================
+
+
+ Test the input arguments.
+ (in case SLASQ2 is not called by SLASQ1)
+*/
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ *info = 0;
+ eps = slamch_("Precision");
+ safmin = slamch_("Safe minimum");
+ tol = eps * 100.f;
+/* Computing 2nd power */
+ r__1 = tol;
+ tol2 = r__1 * r__1;
+
+ if (*n < 0) {
+ *info = -1;
+ xerbla_("SLASQ2", &c__1);
+ return 0;
+ } else if (*n == 0) {
+ return 0;
+ } else if (*n == 1) {
+
+/* 1-by-1 case. */
+
+ if (z__[1] < 0.f) {
+ *info = -201;
+ xerbla_("SLASQ2", &c__2);
+ }
+ return 0;
+ } else if (*n == 2) {
+
+/* 2-by-2 case. */
+
+ if ((z__[2] < 0.f) || (z__[3] < 0.f)) {
+ *info = -2;
+ xerbla_("SLASQ2", &c__2);
+ return 0;
+ } else if (z__[3] > z__[1]) {
+ d__ = z__[3];
+ z__[3] = z__[1];
+ z__[1] = d__;
+ }
+ z__[5] = z__[1] + z__[2] + z__[3];
+ if (z__[2] > z__[3] * tol2) {
+ t = (z__[1] - z__[3] + z__[2]) * .5f;
+ s = z__[3] * (z__[2] / t);
+ if (s <= t) {
+ s = z__[3] * (z__[2] / (t * (sqrt(s / t + 1.f) + 1.f)));
+ } else {
+ s = z__[3] * (z__[2] / (t + sqrt(t) * sqrt(t + s)));
+ }
+ t = z__[1] + (s + z__[2]);
+ z__[3] *= z__[1] / t;
+ z__[1] = t;
+ }
+ z__[2] = z__[3];
+ z__[6] = z__[2] + z__[1];
+ return 0;
+ }
+
+/* Check for negative data and compute sums of q's and e's. */
+
+ z__[*n * 2] = 0.f;
+ emin = z__[2];
+ qmax = 0.f;
+ zmax = 0.f;
+ d__ = 0.f;
+ e = 0.f;
+
+ i__1 = (*n - 1) << (1);
+ for (k = 1; k <= i__1; k += 2) {
+ if (z__[k] < 0.f) {
+ *info = -(k + 200);
+ xerbla_("SLASQ2", &c__2);
+ return 0;
+ } else if (z__[k + 1] < 0.f) {
+ *info = -(k + 201);
+ xerbla_("SLASQ2", &c__2);
+ return 0;
+ }
+ d__ += z__[k];
+ e += z__[k + 1];
+/* Computing MAX */
+ r__1 = qmax, r__2 = z__[k];
+ qmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[k + 1];
+ emin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = max(qmax,zmax), r__2 = z__[k + 1];
+ zmax = dmax(r__1,r__2);
+/* L10: */
+ }
+ if (z__[((*n) << (1)) - 1] < 0.f) {
+ *info = -(((*n) << (1)) + 199);
+ xerbla_("SLASQ2", &c__2);
+ return 0;
+ }
+ d__ += z__[((*n) << (1)) - 1];
+/* Computing MAX */
+ r__1 = qmax, r__2 = z__[((*n) << (1)) - 1];
+ qmax = dmax(r__1,r__2);
+ zmax = dmax(qmax,zmax);
+
+/* Check for diagonality. */
+
+ if (e == 0.f) {
+ i__1 = *n;
+ for (k = 2; k <= i__1; ++k) {
+ z__[k] = z__[((k) << (1)) - 1];
+/* L20: */
+ }
+ slasrt_("D", n, &z__[1], &iinfo);
+ z__[((*n) << (1)) - 1] = d__;
+ return 0;
+ }
+
+ trace = d__ + e;
+
+/* Check for zero data. */
+
+ if (trace == 0.f) {
+ z__[((*n) << (1)) - 1] = 0.f;
+ return 0;
+ }
+
+/* Check whether the machine is IEEE conformable. */
+
+ ieee = ilaenv_(&c__10, "SLASQ2", "N", &c__1, &c__2, &c__3, &c__4, (ftnlen)
+ 6, (ftnlen)1) == 1 && ilaenv_(&c__11, "SLASQ2", "N", &c__1, &c__2,
+ &c__3, &c__4, (ftnlen)6, (ftnlen)1) == 1;
+
+/* Rearrange data for locality: Z=(q1,qq1,e1,ee1,q2,qq2,e2,ee2,...). */
+
+ for (k = (*n) << (1); k >= 2; k += -2) {
+ z__[k * 2] = 0.f;
+ z__[((k) << (1)) - 1] = z__[k];
+ z__[((k) << (1)) - 2] = 0.f;
+ z__[((k) << (1)) - 3] = z__[k - 1];
+/* L30: */
+ }
+
+ i0 = 1;
+ n0 = *n;
+
+/* Reverse the qd-array, if warranted. */
+
+ if (z__[((i0) << (2)) - 3] * 1.5f < z__[((n0) << (2)) - 3]) {
+ ipn4 = (i0 + n0) << (2);
+ i__1 = (i0 + n0 - 1) << (1);
+ for (i4 = (i0) << (2); i4 <= i__1; i4 += 4) {
+ temp = z__[i4 - 3];
+ z__[i4 - 3] = z__[ipn4 - i4 - 3];
+ z__[ipn4 - i4 - 3] = temp;
+ temp = z__[i4 - 1];
+ z__[i4 - 1] = z__[ipn4 - i4 - 5];
+ z__[ipn4 - i4 - 5] = temp;
+/* L40: */
+ }
+ }
+
+/* Initial split checking via dqd and Li's test. */
+
+ pp = 0;
+
+ for (k = 1; k <= 2; ++k) {
+
+ d__ = z__[((n0) << (2)) + pp - 3];
+ i__1 = ((i0) << (2)) + pp;
+ for (i4 = ((n0 - 1) << (2)) + pp; i4 >= i__1; i4 += -4) {
+ if (z__[i4 - 1] <= tol2 * d__) {
+ z__[i4 - 1] = -0.f;
+ d__ = z__[i4 - 3];
+ } else {
+ d__ = z__[i4 - 3] * (d__ / (d__ + z__[i4 - 1]));
+ }
+/* L50: */
+ }
+
+/* dqd maps Z to ZZ plus Li's test. */
+
+ emin = z__[((i0) << (2)) + pp + 1];
+ d__ = z__[((i0) << (2)) + pp - 3];
+ i__1 = ((n0 - 1) << (2)) + pp;
+ for (i4 = ((i0) << (2)) + pp; i4 <= i__1; i4 += 4) {
+ z__[i4 - ((pp) << (1)) - 2] = d__ + z__[i4 - 1];
+ if (z__[i4 - 1] <= tol2 * d__) {
+ z__[i4 - 1] = -0.f;
+ z__[i4 - ((pp) << (1)) - 2] = d__;
+ z__[i4 - ((pp) << (1))] = 0.f;
+ d__ = z__[i4 + 1];
+ } else if (safmin * z__[i4 + 1] < z__[i4 - ((pp) << (1)) - 2] &&
+ safmin * z__[i4 - ((pp) << (1)) - 2] < z__[i4 + 1]) {
+ temp = z__[i4 + 1] / z__[i4 - ((pp) << (1)) - 2];
+ z__[i4 - ((pp) << (1))] = z__[i4 - 1] * temp;
+ d__ *= temp;
+ } else {
+ z__[i4 - ((pp) << (1))] = z__[i4 + 1] * (z__[i4 - 1] / z__[i4
+ - ((pp) << (1)) - 2]);
+ d__ = z__[i4 + 1] * (d__ / z__[i4 - ((pp) << (1)) - 2]);
+ }
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[i4 - ((pp) << (1))];
+ emin = dmin(r__1,r__2);
+/* L60: */
+ }
+ z__[((n0) << (2)) - pp - 2] = d__;
+
+/* Now find qmax. */
+
+ qmax = z__[((i0) << (2)) - pp - 2];
+ i__1 = ((n0) << (2)) - pp - 2;
+ for (i4 = ((i0) << (2)) - pp + 2; i4 <= i__1; i4 += 4) {
+/* Computing MAX */
+ r__1 = qmax, r__2 = z__[i4];
+ qmax = dmax(r__1,r__2);
+/* L70: */
+ }
+
+/* Prepare for the next iteration on K. */
+
+ pp = 1 - pp;
+/* L80: */
+ }
+
+ iter = 2;
+ nfail = 0;
+ ndiv = (n0 - i0) << (1);
+
+ i__1 = *n + 1;
+ for (iwhila = 1; iwhila <= i__1; ++iwhila) {
+ if (n0 < 1) {
+ goto L150;
+ }
+
+/*
+ While array unfinished do
+
+ E(N0) holds the value of SIGMA when submatrix in I0:N0
+ splits from the rest of the array, but is negated.
+*/
+
+ desig = 0.f;
+ if (n0 == *n) {
+ sigma = 0.f;
+ } else {
+ sigma = -z__[((n0) << (2)) - 1];
+ }
+ if (sigma < 0.f) {
+ *info = 1;
+ return 0;
+ }
+
+/*
+ Find last unreduced submatrix's top index I0, find QMAX and
+ EMIN. Find Gershgorin-type bound if Q's much greater than E's.
+*/
+
+ emax = 0.f;
+ if (n0 > i0) {
+ emin = (r__1 = z__[((n0) << (2)) - 5], dabs(r__1));
+ } else {
+ emin = 0.f;
+ }
+ qmin = z__[((n0) << (2)) - 3];
+ qmax = qmin;
+ for (i4 = (n0) << (2); i4 >= 8; i4 += -4) {
+ if (z__[i4 - 5] <= 0.f) {
+ goto L100;
+ }
+ if (qmin >= emax * 4.f) {
+/* Computing MIN */
+ r__1 = qmin, r__2 = z__[i4 - 3];
+ qmin = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = emax, r__2 = z__[i4 - 5];
+ emax = dmax(r__1,r__2);
+ }
+/* Computing MAX */
+ r__1 = qmax, r__2 = z__[i4 - 7] + z__[i4 - 5];
+ qmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[i4 - 5];
+ emin = dmin(r__1,r__2);
+/* L90: */
+ }
+ i4 = 4;
+
+L100:
+ i0 = i4 / 4;
+
+/* Store EMIN for passing to SLASQ3. */
+
+ z__[((n0) << (2)) - 1] = emin;
+
+/*
+ Put -(initial shift) into DMIN.
+
+ Computing MAX
+*/
+ r__1 = 0.f, r__2 = qmin - sqrt(qmin) * 2.f * sqrt(emax);
+ dmin__ = -dmax(r__1,r__2);
+
+/* Now I0:N0 is unreduced. PP = 0 for ping, PP = 1 for pong. */
+
+ pp = 0;
+
+ nbig = (n0 - i0 + 1) * 30;
+ i__2 = nbig;
+ for (iwhilb = 1; iwhilb <= i__2; ++iwhilb) {
+ if (i0 > n0) {
+ goto L130;
+ }
+
+/* While submatrix unfinished take a good dqds step. */
+
+ slasq3_(&i0, &n0, &z__[1], &pp, &dmin__, &sigma, &desig, &qmax, &
+ nfail, &iter, &ndiv, &ieee);
+
+ pp = 1 - pp;
+
+/* When EMIN is very small check for splits. */
+
+ if (pp == 0 && n0 - i0 >= 3) {
+ if ((z__[n0 * 4] <= tol2 * qmax) || (z__[((n0) << (2)) - 1] <=
+ tol2 * sigma)) {
+ splt = i0 - 1;
+ qmax = z__[((i0) << (2)) - 3];
+ emin = z__[((i0) << (2)) - 1];
+ oldemn = z__[i0 * 4];
+ i__3 = (n0 - 3) << (2);
+ for (i4 = (i0) << (2); i4 <= i__3; i4 += 4) {
+ if ((z__[i4] <= tol2 * z__[i4 - 3]) || (z__[i4 - 1] <=
+ tol2 * sigma)) {
+ z__[i4 - 1] = -sigma;
+ splt = i4 / 4;
+ qmax = 0.f;
+ emin = z__[i4 + 3];
+ oldemn = z__[i4 + 4];
+ } else {
+/* Computing MAX */
+ r__1 = qmax, r__2 = z__[i4 + 1];
+ qmax = dmax(r__1,r__2);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[i4 - 1];
+ emin = dmin(r__1,r__2);
+/* Computing MIN */
+ r__1 = oldemn, r__2 = z__[i4];
+ oldemn = dmin(r__1,r__2);
+ }
+/* L110: */
+ }
+ z__[((n0) << (2)) - 1] = emin;
+ z__[n0 * 4] = oldemn;
+ i0 = splt + 1;
+ }
+ }
+
+/* L120: */
+ }
+
+ *info = 2;
+ return 0;
+
+/* end IWHILB */
+
+L130:
+
+/* L140: */
+ ;
+ }
+
+ *info = 3;
+ return 0;
+
+/* end IWHILA */
+
+L150:
+
+/* Move q's to the front. */
+
+ i__1 = *n;
+ for (k = 2; k <= i__1; ++k) {
+ z__[k] = z__[((k) << (2)) - 3];
+/* L160: */
+ }
+
+/* Sort and compute sum of eigenvalues. */
+
+ slasrt_("D", n, &z__[1], &iinfo);
+
+ e = 0.f;
+ for (k = *n; k >= 1; --k) {
+ e += z__[k];
+/* L170: */
+ }
+
+/* Store trace, sum(eigenvalues) and information on performance. */
+
+ z__[((*n) << (1)) + 1] = trace;
+ z__[((*n) << (1)) + 2] = e;
+ z__[((*n) << (1)) + 3] = (real) iter;
+/* Computing 2nd power */
+ i__1 = *n;
+ z__[((*n) << (1)) + 4] = (real) ndiv / (real) (i__1 * i__1);
+ z__[((*n) << (1)) + 5] = nfail * 100.f / (real) iter;
+ return 0;
+
+/* End of SLASQ2 */
+
+} /* slasq2_ */
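+
+/*
+   Editor's sketch (illustrative, not part of LAPACK): per the SLASQ2
+   header, with q(i) = Z(1,3,5,...) and e(i) = Z(2,4,6,...), T = L*U is
+   tridiagonal, and the symmetric tridiagonal similar to it has diagonal
+   q(i) + e(i-1) (taking e(0) = 0) and offdiagonal sqrt( q(i)*e(i) ).
+   A hypothetical helper forming that matrix (demo only, never
+   compiled):
+*/
+#if 0
+static void qd_to_symmetric_tridiag_demo(integer n, real *q, real *e,
+    real *diag, real *offd)
+{
+    integer i;
+    for (i = 0; i < n; ++i) {
+        diag[i] = q[i] + (i > 0 ? e[i - 1] : 0.f);
+        if (i < n - 1) {
+            offd[i] = sqrt(q[i] * e[i]);
+        }
+    }
+}
+#endif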
+
+/* Subroutine */ int slasq3_(integer *i0, integer *n0, real *z__, integer *pp,
+ real *dmin__, real *sigma, real *desig, real *qmax, integer *nfail,
+ integer *iter, integer *ndiv, logical *ieee)
+{
+ /* Initialized data */
+
+ static integer ttype = 0;
+ static real dmin1 = 0.f;
+ static real dmin2 = 0.f;
+ static real dn = 0.f;
+ static real dn1 = 0.f;
+ static real dn2 = 0.f;
+ static real tau = 0.f;
+
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real s, t;
+ static integer j4, nn;
+ static real eps, tol;
+ static integer n0in, ipn4;
+ static real tol2, temp;
+ extern /* Subroutine */ int slasq4_(integer *, integer *, real *, integer
+ *, integer *, real *, real *, real *, real *, real *, real *,
+ real *, integer *), slasq5_(integer *, integer *, real *, integer
+ *, real *, real *, real *, real *, real *, real *, real *,
+ logical *), slasq6_(integer *, integer *, real *, integer *, real
+ *, real *, real *, real *, real *, real *);
+ extern doublereal slamch_(char *);
+ static real safmin;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ May 17, 2000
+
+
+ Purpose
+ =======
+
+ SLASQ3 checks for deflation, computes a shift (TAU) and calls dqds.
+ In case of failure it changes shifts, and tries again until output
+ is positive.
+
+ Arguments
+ =========
+
+ I0 (input) INTEGER
+ First index.
+
+ N0 (input) INTEGER
+ Last index.
+
+ Z (input) REAL array, dimension ( 4*N )
+ Z holds the qd array.
+
+ PP (input) INTEGER
+ PP=0 for ping, PP=1 for pong.
+
+ DMIN (output) REAL
+ Minimum value of d.
+
+ SIGMA (output) REAL
+ Sum of shifts used in current segment.
+
+ DESIG (input/output) REAL
+ Lower order part of SIGMA.
+
+ QMAX (input) REAL
+ Maximum value of q.
+
+ NFAIL (output) INTEGER
+ Number of times shift was too big.
+
+ ITER (output) INTEGER
+ Number of iterations.
+
+ NDIV (output) INTEGER
+ Number of divisions.
+
+ TTYPE (output) INTEGER
+ Shift type.
+
+ IEEE (input) LOGICAL
+ Flag for IEEE or non IEEE arithmetic (passed to SLASQ5).
+
+ =====================================================================
+*/
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+
+ n0in = *n0;
+ eps = slamch_("Precision");
+ safmin = slamch_("Safe minimum");
+ tol = eps * 100.f;
+/* Computing 2nd power */
+ r__1 = tol;
+ tol2 = r__1 * r__1;
+
+/* Check for deflation. */
+
+L10:
+
+ if (*n0 < *i0) {
+ return 0;
+ }
+ if (*n0 == *i0) {
+ goto L20;
+ }
+ nn = ((*n0) << (2)) + *pp;
+ if (*n0 == *i0 + 1) {
+ goto L40;
+ }
+
+/* Check whether E(N0-1) is negligible, 1 eigenvalue. */
+
+ if (z__[nn - 5] > tol2 * (*sigma + z__[nn - 3]) && z__[nn - ((*pp) << (1))
+ - 4] > tol2 * z__[nn - 7]) {
+ goto L30;
+ }
+
+L20:
+
+ z__[((*n0) << (2)) - 3] = z__[((*n0) << (2)) + *pp - 3] + *sigma;
+ --(*n0);
+ goto L10;
+
+/* Check whether E(N0-2) is negligible, 2 eigenvalues. */
+
+L30:
+
+ if (z__[nn - 9] > tol2 * *sigma && z__[nn - ((*pp) << (1)) - 8] > tol2 *
+ z__[nn - 11]) {
+ goto L50;
+ }
+
+L40:
+
+ if (z__[nn - 3] > z__[nn - 7]) {
+ s = z__[nn - 3];
+ z__[nn - 3] = z__[nn - 7];
+ z__[nn - 7] = s;
+ }
+ if (z__[nn - 5] > z__[nn - 3] * tol2) {
+ t = (z__[nn - 7] - z__[nn - 3] + z__[nn - 5]) * .5f;
+ s = z__[nn - 3] * (z__[nn - 5] / t);
+ if (s <= t) {
+ s = z__[nn - 3] * (z__[nn - 5] / (t * (sqrt(s / t + 1.f) + 1.f)));
+ } else {
+ s = z__[nn - 3] * (z__[nn - 5] / (t + sqrt(t) * sqrt(t + s)));
+ }
+ t = z__[nn - 7] + (s + z__[nn - 5]);
+ z__[nn - 3] *= z__[nn - 7] / t;
+ z__[nn - 7] = t;
+ }
+ z__[((*n0) << (2)) - 7] = z__[nn - 7] + *sigma;
+ z__[((*n0) << (2)) - 3] = z__[nn - 3] + *sigma;
+ *n0 += -2;
+ goto L10;
+
+L50:
+
+/* Reverse the qd-array, if warranted. */
+
+ if ((*dmin__ <= 0.f) || (*n0 < n0in)) {
+ if (z__[((*i0) << (2)) + *pp - 3] * 1.5f < z__[((*n0) << (2)) + *pp -
+ 3]) {
+ ipn4 = (*i0 + *n0) << (2);
+ i__1 = (*i0 + *n0 - 1) << (1);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ temp = z__[j4 - 3];
+ z__[j4 - 3] = z__[ipn4 - j4 - 3];
+ z__[ipn4 - j4 - 3] = temp;
+ temp = z__[j4 - 2];
+ z__[j4 - 2] = z__[ipn4 - j4 - 2];
+ z__[ipn4 - j4 - 2] = temp;
+ temp = z__[j4 - 1];
+ z__[j4 - 1] = z__[ipn4 - j4 - 5];
+ z__[ipn4 - j4 - 5] = temp;
+ temp = z__[j4];
+ z__[j4] = z__[ipn4 - j4 - 4];
+ z__[ipn4 - j4 - 4] = temp;
+/* L60: */
+ }
+ if (*n0 - *i0 <= 4) {
+ z__[((*n0) << (2)) + *pp - 1] = z__[((*i0) << (2)) + *pp - 1];
+ z__[((*n0) << (2)) - *pp] = z__[((*i0) << (2)) - *pp];
+ }
+/* Computing MIN */
+ r__1 = dmin2, r__2 = z__[((*n0) << (2)) + *pp - 1];
+ dmin2 = dmin(r__1,r__2);
+/* Computing MIN */
+ r__1 = z__[((*n0) << (2)) + *pp - 1], r__2 = z__[((*i0) << (2)) +
+ *pp - 1], r__1 = min(r__1,r__2), r__2 = z__[((*i0) << (2))
+ + *pp + 3];
+ z__[((*n0) << (2)) + *pp - 1] = dmin(r__1,r__2);
+/* Computing MIN */
+ r__1 = z__[((*n0) << (2)) - *pp], r__2 = z__[((*i0) << (2)) - *pp]
+ , r__1 = min(r__1,r__2), r__2 = z__[((*i0) << (2)) - *pp
+ + 4];
+ z__[((*n0) << (2)) - *pp] = dmin(r__1,r__2);
+/* Computing MAX */
+ r__1 = *qmax, r__2 = z__[((*i0) << (2)) + *pp - 3], r__1 = max(
+ r__1,r__2), r__2 = z__[((*i0) << (2)) + *pp + 1];
+ *qmax = dmax(r__1,r__2);
+ *dmin__ = -0.f;
+ }
+ }
+
+/*
+ L70:
+
+ Computing MIN
+*/
+ r__1 = z__[((*n0) << (2)) + *pp - 1], r__2 = z__[((*n0) << (2)) + *pp - 9]
+ , r__1 = min(r__1,r__2), r__2 = dmin2 + z__[((*n0) << (2)) - *pp];
+ if ((*dmin__ < 0.f) || (safmin * *qmax < dmin(r__1,r__2))) {
+
+/* Choose a shift. */
+
+ slasq4_(i0, n0, &z__[1], pp, &n0in, dmin__, &dmin1, &dmin2, &dn, &dn1,
+ &dn2, &tau, &ttype);
+
+/* Call dqds until DMIN > 0. */
+
+L80:
+
+ slasq5_(i0, n0, &z__[1], pp, &tau, dmin__, &dmin1, &dmin2, &dn, &dn1,
+ &dn2, ieee);
+
+ *ndiv += *n0 - *i0 + 2;
+ ++(*iter);
+
+/* Check status. */
+
+ if (*dmin__ >= 0.f && dmin1 > 0.f) {
+
+/* Success. */
+
+ goto L100;
+
+ } else if (*dmin__ < 0.f && dmin1 > 0.f && z__[((*n0 - 1) << (2)) - *
+ pp] < tol * (*sigma + dn1) && dabs(dn) < tol * *sigma) {
+
+/* Convergence hidden by negative DN. */
+
+ z__[((*n0 - 1) << (2)) - *pp + 2] = 0.f;
+ *dmin__ = 0.f;
+ goto L100;
+ } else if (*dmin__ < 0.f) {
+
+/* TAU too big. Select new TAU and try again. */
+
+ ++(*nfail);
+ if (ttype < -22) {
+
+/* Failed twice. Play it safe. */
+
+ tau = 0.f;
+ } else if (dmin1 > 0.f) {
+
+/* Late failure. Gives excellent shift. */
+
+ tau = (tau + *dmin__) * (1.f - eps * 2.f);
+ ttype += -11;
+ } else {
+
+/* Early failure. Divide by 4. */
+
+ tau *= .25f;
+ ttype += -12;
+ }
+ goto L80;
+ } else if (*dmin__ != *dmin__) {
+
+/* NaN. */
+
+ tau = 0.f;
+ goto L80;
+ } else {
+
+/* Possible underflow. Play it safe. */
+
+ goto L90;
+ }
+ }
+
+/* Risk of underflow. */
+
+L90:
+ slasq6_(i0, n0, &z__[1], pp, dmin__, &dmin1, &dmin2, &dn, &dn1, &dn2);
+ *ndiv += *n0 - *i0 + 2;
+ ++(*iter);
+ tau = 0.f;
+
+L100:
+ if (tau < *sigma) {
+ *desig += tau;
+ t = *sigma + *desig;
+ *desig -= t - *sigma;
+ } else {
+ t = *sigma + tau;
+ *desig = *sigma - (t - tau) + *desig;
+ }
+ *sigma = t;
+
+ return 0;
+
+/* End of SLASQ3 */
+
+} /* slasq3_ */
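+
+/*
+   Illustrative sketch, not part of the translated LAPACK source: the
+   SIGMA update at label L100 above is a two-piece (compensated)
+   accumulation.  The rounding error of each SIGMA + TAU sum is kept in
+   DESIG so the accumulated shift stays accurate.  The core idea, with
+   hypothetical names and assuming sigma >= tau >= 0:
+
+       float t = sigma + tau;            // rounded sum
+       desig += tau - (t - sigma);       // bits the rounding discarded
+       sigma  = t;                       // sigma + desig ~ exact sum
+*/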
+
+/* Subroutine */ int slasq4_(integer *i0, integer *n0, real *z__, integer *pp,
+ integer *n0in, real *dmin__, real *dmin1, real *dmin2, real *dn,
+ real *dn1, real *dn2, real *tau, integer *ttype)
+{
+ /* Initialized data */
+
+ static real g = 0.f;
+
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real s, a2, b1, b2;
+ static integer i4, nn, np;
+ static real gam, gap1, gap2;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SLASQ4 computes an approximation TAU to the smallest eigenvalue
+ using values of d from the previous transform.
+
+ I0 (input) INTEGER
+ First index.
+
+ N0 (input) INTEGER
+ Last index.
+
+ Z (input) REAL array, dimension ( 4*N )
+ Z holds the qd array.
+
+ PP (input) INTEGER
+ PP=0 for ping, PP=1 for pong.
+
+ N0IN (input) INTEGER
+ The value of N0 at start of EIGTEST.
+
+ DMIN (input) REAL
+ Minimum value of d.
+
+ DMIN1 (input) REAL
+ Minimum value of d, excluding D( N0 ).
+
+ DMIN2 (input) REAL
+ Minimum value of d, excluding D( N0 ) and D( N0-1 ).
+
+ DN (input) REAL
+ d(N)
+
+ DN1 (input) REAL
+ d(N-1)
+
+ DN2 (input) REAL
+ d(N-2)
+
+ TAU (output) REAL
+ This is the shift.
+
+ TTYPE (output) INTEGER
+ Shift type.
+
+ Further Details
+ ===============
+ CNST1 = 9/16
+
+ =====================================================================
+*/
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+
+/*
+ A negative DMIN forces the shift to take that absolute value.
+ TTYPE records the type of shift.
+*/
+
+ if (*dmin__ <= 0.f) {
+ *tau = -(*dmin__);
+ *ttype = -1;
+ return 0;
+ }
+
+ nn = ((*n0) << (2)) + *pp;
+ if (*n0in == *n0) {
+
+/* No eigenvalues deflated. */
+
+ if ((*dmin__ == *dn) || (*dmin__ == *dn1)) {
+
+ b1 = sqrt(z__[nn - 3]) * sqrt(z__[nn - 5]);
+ b2 = sqrt(z__[nn - 7]) * sqrt(z__[nn - 9]);
+ a2 = z__[nn - 7] + z__[nn - 5];
+
+/* Cases 2 and 3. */
+
+ if (*dmin__ == *dn && *dmin1 == *dn1) {
+ gap2 = *dmin2 - a2 - *dmin2 * .25f;
+ if (gap2 > 0.f && gap2 > b2) {
+ gap1 = a2 - *dn - b2 / gap2 * b2;
+ } else {
+ gap1 = a2 - *dn - (b1 + b2);
+ }
+ if (gap1 > 0.f && gap1 > b1) {
+/* Computing MAX */
+ r__1 = *dn - b1 / gap1 * b1, r__2 = *dmin__ * .5f;
+ s = dmax(r__1,r__2);
+ *ttype = -2;
+ } else {
+ s = 0.f;
+ if (*dn > b1) {
+ s = *dn - b1;
+ }
+ if (a2 > b1 + b2) {
+/* Computing MIN */
+ r__1 = s, r__2 = a2 - (b1 + b2);
+ s = dmin(r__1,r__2);
+ }
+/* Computing MAX */
+ r__1 = s, r__2 = *dmin__ * .333f;
+ s = dmax(r__1,r__2);
+ *ttype = -3;
+ }
+ } else {
+
+/* Case 4. */
+
+ *ttype = -4;
+ s = *dmin__ * .25f;
+ if (*dmin__ == *dn) {
+ gam = *dn;
+ a2 = 0.f;
+ if (z__[nn - 5] > z__[nn - 7]) {
+ return 0;
+ }
+ b2 = z__[nn - 5] / z__[nn - 7];
+ np = nn - 9;
+ } else {
+ np = nn - ((*pp) << (1));
+ b2 = z__[np - 2];
+ gam = *dn1;
+ if (z__[np - 4] > z__[np - 2]) {
+ return 0;
+ }
+ a2 = z__[np - 4] / z__[np - 2];
+ if (z__[nn - 9] > z__[nn - 11]) {
+ return 0;
+ }
+ b2 = z__[nn - 9] / z__[nn - 11];
+ np = nn - 13;
+ }
+
+/* Approximate contribution to norm squared from I < NN-1. */
+
+ a2 += b2;
+ i__1 = ((*i0) << (2)) - 1 + *pp;
+ for (i4 = np; i4 >= i__1; i4 += -4) {
+ if (b2 == 0.f) {
+ goto L20;
+ }
+ b1 = b2;
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b2 *= z__[i4] / z__[i4 - 2];
+ a2 += b2;
+ if ((dmax(b2,b1) * 100.f < a2) || (.563f < a2)) {
+ goto L20;
+ }
+/* L10: */
+ }
+L20:
+ a2 *= 1.05f;
+
+/* Rayleigh quotient residual bound. */
+
+ if (a2 < .563f) {
+ s = gam * (1.f - sqrt(a2)) / (a2 + 1.f);
+ }
+ }
+ } else if (*dmin__ == *dn2) {
+
+/* Case 5. */
+
+ *ttype = -5;
+ s = *dmin__ * .25f;
+
+/* Compute contribution to norm squared from I > NN-2. */
+
+ np = nn - ((*pp) << (1));
+ b1 = z__[np - 2];
+ b2 = z__[np - 6];
+ gam = *dn2;
+ if ((z__[np - 8] > b2) || (z__[np - 4] > b1)) {
+ return 0;
+ }
+ a2 = z__[np - 8] / b2 * (z__[np - 4] / b1 + 1.f);
+
+/* Approximate contribution to norm squared from I < NN-2. */
+
+ if (*n0 - *i0 > 2) {
+ b2 = z__[nn - 13] / z__[nn - 15];
+ a2 += b2;
+ i__1 = ((*i0) << (2)) - 1 + *pp;
+ for (i4 = nn - 17; i4 >= i__1; i4 += -4) {
+ if (b2 == 0.f) {
+ goto L40;
+ }
+ b1 = b2;
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b2 *= z__[i4] / z__[i4 - 2];
+ a2 += b2;
+ if ((dmax(b2,b1) * 100.f < a2) || (.563f < a2)) {
+ goto L40;
+ }
+/* L30: */
+ }
+L40:
+ a2 *= 1.05f;
+ }
+
+ if (a2 < .563f) {
+ s = gam * (1.f - sqrt(a2)) / (a2 + 1.f);
+ }
+ } else {
+
+/* Case 6, no information to guide us. */
+
+ if (*ttype == -6) {
+ g += (1.f - g) * .333f;
+ } else if (*ttype == -18) {
+ g = .083250000000000005f;
+ } else {
+ g = .25f;
+ }
+ s = g * *dmin__;
+ *ttype = -6;
+ }
+
+ } else if (*n0in == *n0 + 1) {
+
+/* One eigenvalue just deflated. Use DMIN1, DN1 for DMIN and DN. */
+
+ if (*dmin1 == *dn1 && *dmin2 == *dn2) {
+
+/* Cases 7 and 8. */
+
+ *ttype = -7;
+ s = *dmin1 * .333f;
+ if (z__[nn - 5] > z__[nn - 7]) {
+ return 0;
+ }
+ b1 = z__[nn - 5] / z__[nn - 7];
+ b2 = b1;
+ if (b2 == 0.f) {
+ goto L60;
+ }
+ i__1 = ((*i0) << (2)) - 1 + *pp;
+ for (i4 = ((*n0) << (2)) - 9 + *pp; i4 >= i__1; i4 += -4) {
+ a2 = b1;
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b1 *= z__[i4] / z__[i4 - 2];
+ b2 += b1;
+ if (dmax(b1,a2) * 100.f < b2) {
+ goto L60;
+ }
+/* L50: */
+ }
+L60:
+ b2 = sqrt(b2 * 1.05f);
+/* Computing 2nd power */
+ r__1 = b2;
+ a2 = *dmin1 / (r__1 * r__1 + 1.f);
+ gap2 = *dmin2 * .5f - a2;
+ if (gap2 > 0.f && gap2 > b2 * a2) {
+/* Computing MAX */
+ r__1 = s, r__2 = a2 * (1.f - a2 * 1.01f * (b2 / gap2) * b2);
+ s = dmax(r__1,r__2);
+ } else {
+/* Computing MAX */
+ r__1 = s, r__2 = a2 * (1.f - b2 * 1.01f);
+ s = dmax(r__1,r__2);
+ *ttype = -8;
+ }
+ } else {
+
+/* Case 9. */
+
+ s = *dmin1 * .25f;
+ if (*dmin1 == *dn1) {
+ s = *dmin1 * .5f;
+ }
+ *ttype = -9;
+ }
+
+ } else if (*n0in == *n0 + 2) {
+
+/*
+ Two eigenvalues deflated. Use DMIN2, DN2 for DMIN and DN.
+
+ Cases 10 and 11.
+*/
+
+ if (*dmin2 == *dn2 && z__[nn - 5] * 2.f < z__[nn - 7]) {
+ *ttype = -10;
+ s = *dmin2 * .333f;
+ if (z__[nn - 5] > z__[nn - 7]) {
+ return 0;
+ }
+ b1 = z__[nn - 5] / z__[nn - 7];
+ b2 = b1;
+ if (b2 == 0.f) {
+ goto L80;
+ }
+ i__1 = ((*i0) << (2)) - 1 + *pp;
+ for (i4 = ((*n0) << (2)) - 9 + *pp; i4 >= i__1; i4 += -4) {
+ if (z__[i4] > z__[i4 - 2]) {
+ return 0;
+ }
+ b1 *= z__[i4] / z__[i4 - 2];
+ b2 += b1;
+ if (b1 * 100.f < b2) {
+ goto L80;
+ }
+/* L70: */
+ }
+L80:
+ b2 = sqrt(b2 * 1.05f);
+/* Computing 2nd power */
+ r__1 = b2;
+ a2 = *dmin2 / (r__1 * r__1 + 1.f);
+ gap2 = z__[nn - 7] + z__[nn - 9] - sqrt(z__[nn - 11]) * sqrt(z__[
+ nn - 9]) - a2;
+ if (gap2 > 0.f && gap2 > b2 * a2) {
+/* Computing MAX */
+ r__1 = s, r__2 = a2 * (1.f - a2 * 1.01f * (b2 / gap2) * b2);
+ s = dmax(r__1,r__2);
+ } else {
+/* Computing MAX */
+ r__1 = s, r__2 = a2 * (1.f - b2 * 1.01f);
+ s = dmax(r__1,r__2);
+ }
+ } else {
+ s = *dmin2 * .25f;
+ *ttype = -11;
+ }
+ } else if (*n0in > *n0 + 2) {
+
+/* Case 12, more than two eigenvalues deflated. No information. */
+
+ s = 0.f;
+ *ttype = -12;
+ }
+
+ *tau = s;
+ return 0;
+
+/* End of SLASQ4 */
+
+} /* slasq4_ */
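+
+/*
+   Illustrative sketch, not part of the translated LAPACK source: in this
+   f2c translation every argument, scalars included, is passed by pointer.
+   A hypothetical direct call to slasq4_ (all values made up) would read:
+
+       integer i0 = 1, n0 = 4, pp = 0, n0in = 4, ttype = 0;
+       real dmin = 1e-3f, dmin1 = 2e-3f, dmin2 = 3e-3f;
+       real dn = 1e-3f, dn1 = 2e-3f, dn2 = 3e-3f, tau;
+       real z[16];                       // qd array, dimension 4*N
+       // ... fill z with qd data ...
+       slasq4_(&i0, &n0, z, &pp, &n0in, &dmin, &dmin1, &dmin2,
+               &dn, &dn1, &dn2, &tau, &ttype);
+       // tau now holds the chosen shift, ttype its classification
+*/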
+
+/* Subroutine */ int slasq5_(integer *i0, integer *n0, real *z__, integer *pp,
+ real *tau, real *dmin__, real *dmin1, real *dmin2, real *dn, real *
+ dnm1, real *dnm2, logical *ieee)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ static real d__;
+ static integer j4, j4p2;
+ static real emin, temp;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ May 17, 2000
+
+
+ Purpose
+ =======
+
+ SLASQ5 computes one dqds transform in ping-pong form: one
+ version for IEEE machines, another for non-IEEE machines.
+
+ Arguments
+ =========
+
+ I0 (input) INTEGER
+ First index.
+
+ N0 (input) INTEGER
+ Last index.
+
+ Z (input) REAL array, dimension ( 4*N )
+ Z holds the qd array. EMIN is stored in Z(4*N0) to avoid
+ an extra argument.
+
+ PP (input) INTEGER
+ PP=0 for ping, PP=1 for pong.
+
+ TAU (input) REAL
+ This is the shift.
+
+ DMIN (output) REAL
+ Minimum value of d.
+
+ DMIN1 (output) REAL
+ Minimum value of d, excluding D( N0 ).
+
+ DMIN2 (output) REAL
+ Minimum value of d, excluding D( N0 ) and D( N0-1 ).
+
+ DN (output) REAL
+ d(N0), the last value of d.
+
+ DNM1 (output) REAL
+ d(N0-1).
+
+ DNM2 (output) REAL
+ d(N0-2).
+
+ IEEE (input) LOGICAL
+ Flag for IEEE or non-IEEE arithmetic.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ if (*n0 - *i0 - 1 <= 0) {
+ return 0;
+ }
+
+ j4 = ((*i0) << (2)) + *pp - 3;
+ emin = z__[j4 + 4];
+ d__ = z__[j4] - *tau;
+ *dmin__ = d__;
+ *dmin1 = -z__[j4];
+
+ if (*ieee) {
+
+/* Code for IEEE arithmetic. */
+
+ if (*pp == 0) {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 2] = d__ + z__[j4 - 1];
+ temp = z__[j4 + 1] / z__[j4 - 2];
+ d__ = d__ * temp - *tau;
+ *dmin__ = dmin(*dmin__,d__);
+ z__[j4] = z__[j4 - 1] * temp;
+/* Computing MIN */
+ r__1 = z__[j4];
+ emin = dmin(r__1,emin);
+/* L10: */
+ }
+ } else {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 3] = d__ + z__[j4];
+ temp = z__[j4 + 2] / z__[j4 - 3];
+ d__ = d__ * temp - *tau;
+ *dmin__ = dmin(*dmin__,d__);
+ z__[j4 - 1] = z__[j4] * temp;
+/* Computing MIN */
+ r__1 = z__[j4 - 1];
+ emin = dmin(r__1,emin);
+/* L20: */
+ }
+ }
+
+/* Unroll last two steps. */
+
+ *dnm2 = d__;
+ *dmin2 = *dmin__;
+ j4 = ((*n0 - 2) << (2)) - *pp;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm2 + z__[j4p2];
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
+ *dmin__ = dmin(*dmin__,*dnm1);
+
+ *dmin1 = *dmin__;
+ j4 += 4;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm1 + z__[j4p2];
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
+ *dmin__ = dmin(*dmin__,*dn);
+
+ } else {
+
+/* Code for non IEEE arithmetic. */
+
+ if (*pp == 0) {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 2] = d__ + z__[j4 - 1];
+ if (d__ < 0.f) {
+ return 0;
+ } else {
+ z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
+ d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]) - *tau;
+ }
+ *dmin__ = dmin(*dmin__,d__);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[j4];
+ emin = dmin(r__1,r__2);
+/* L30: */
+ }
+ } else {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 3] = d__ + z__[j4];
+ if (d__ < 0.f) {
+ return 0;
+ } else {
+ z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
+ d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]) - *tau;
+ }
+ *dmin__ = dmin(*dmin__,d__);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[j4 - 1];
+ emin = dmin(r__1,r__2);
+/* L40: */
+ }
+ }
+
+/* Unroll last two steps. */
+
+ *dnm2 = d__;
+ *dmin2 = *dmin__;
+ j4 = ((*n0 - 2) << (2)) - *pp;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm2 + z__[j4p2];
+ if (*dnm2 < 0.f) {
+ return 0;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]) - *tau;
+ }
+ *dmin__ = dmin(*dmin__,*dnm1);
+
+ *dmin1 = *dmin__;
+ j4 += 4;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm1 + z__[j4p2];
+ if (*dnm1 < 0.f) {
+ return 0;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]) - *tau;
+ }
+ *dmin__ = dmin(*dmin__,*dn);
+
+ }
+
+ z__[j4 + 2] = *dn;
+ z__[((*n0) << (2)) - *pp] = emin;
+ return 0;
+
+/* End of SLASQ5 */
+
+} /* slasq5_ */
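+
+/*
+   Illustrative sketch, not part of the translated LAPACK source: stripped
+   of the ping-pong indexing, the IEEE branch above performs the textbook
+   shifted dqds recurrence.  With hypothetical arrays q[], e[] (inputs)
+   and qq[], ee[] (outputs), one sweep is:
+
+       d = q[0] - tau;
+       for (j = 0; j < n - 1; j++) {
+           qq[j] = d + e[j];             // new q
+           t     = q[j + 1] / qq[j];
+           ee[j] = e[j] * t;             // new e
+           d     = d * t - tau;          // next d
+           if (d < dmin) dmin = d;
+       }
+       qq[n - 1] = d;                    // becomes DN
+*/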
+
+/* Subroutine */ int slasq6_(integer *i0, integer *n0, real *z__, integer *pp,
+ real *dmin__, real *dmin1, real *dmin2, real *dn, real *dnm1, real *
+ dnm2)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2;
+
+ /* Local variables */
+ static real d__;
+ static integer j4, j4p2;
+ static real emin, temp;
+ extern doublereal slamch_(char *);
+ static real safmin;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1999
+
+
+ Purpose
+ =======
+
+ SLASQ6 computes one dqd (shift equal to zero) transform in
+ ping-pong form, with protection against underflow and overflow.
+
+ Arguments
+ =========
+
+ I0 (input) INTEGER
+ First index.
+
+ N0 (input) INTEGER
+ Last index.
+
+ Z (input) REAL array, dimension ( 4*N )
+ Z holds the qd array. EMIN is stored in Z(4*N0) to avoid
+ an extra argument.
+
+ PP (input) INTEGER
+ PP=0 for ping, PP=1 for pong.
+
+ DMIN (output) REAL
+ Minimum value of d.
+
+ DMIN1 (output) REAL
+ Minimum value of d, excluding D( N0 ).
+
+ DMIN2 (output) REAL
+ Minimum value of d, excluding D( N0 ) and D( N0-1 ).
+
+ DN (output) REAL
+ d(N0), the last value of d.
+
+ DNM1 (output) REAL
+ d(N0-1).
+
+ DNM2 (output) REAL
+ d(N0-2).
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --z__;
+
+ /* Function Body */
+ if (*n0 - *i0 - 1 <= 0) {
+ return 0;
+ }
+
+ safmin = slamch_("Safe minimum");
+ j4 = ((*i0) << (2)) + *pp - 3;
+ emin = z__[j4 + 4];
+ d__ = z__[j4];
+ *dmin__ = d__;
+
+ if (*pp == 0) {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 2] = d__ + z__[j4 - 1];
+ if (z__[j4 - 2] == 0.f) {
+ z__[j4] = 0.f;
+ d__ = z__[j4 + 1];
+ *dmin__ = d__;
+ emin = 0.f;
+ } else if (safmin * z__[j4 + 1] < z__[j4 - 2] && safmin * z__[j4
+ - 2] < z__[j4 + 1]) {
+ temp = z__[j4 + 1] / z__[j4 - 2];
+ z__[j4] = z__[j4 - 1] * temp;
+ d__ *= temp;
+ } else {
+ z__[j4] = z__[j4 + 1] * (z__[j4 - 1] / z__[j4 - 2]);
+ d__ = z__[j4 + 1] * (d__ / z__[j4 - 2]);
+ }
+ *dmin__ = dmin(*dmin__,d__);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[j4];
+ emin = dmin(r__1,r__2);
+/* L10: */
+ }
+ } else {
+ i__1 = (*n0 - 3) << (2);
+ for (j4 = (*i0) << (2); j4 <= i__1; j4 += 4) {
+ z__[j4 - 3] = d__ + z__[j4];
+ if (z__[j4 - 3] == 0.f) {
+ z__[j4 - 1] = 0.f;
+ d__ = z__[j4 + 2];
+ *dmin__ = d__;
+ emin = 0.f;
+ } else if (safmin * z__[j4 + 2] < z__[j4 - 3] && safmin * z__[j4
+ - 3] < z__[j4 + 2]) {
+ temp = z__[j4 + 2] / z__[j4 - 3];
+ z__[j4 - 1] = z__[j4] * temp;
+ d__ *= temp;
+ } else {
+ z__[j4 - 1] = z__[j4 + 2] * (z__[j4] / z__[j4 - 3]);
+ d__ = z__[j4 + 2] * (d__ / z__[j4 - 3]);
+ }
+ *dmin__ = dmin(*dmin__,d__);
+/* Computing MIN */
+ r__1 = emin, r__2 = z__[j4 - 1];
+ emin = dmin(r__1,r__2);
+/* L20: */
+ }
+ }
+
+/* Unroll last two steps. */
+
+ *dnm2 = d__;
+ *dmin2 = *dmin__;
+ j4 = ((*n0 - 2) << (2)) - *pp;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm2 + z__[j4p2];
+ if (z__[j4 - 2] == 0.f) {
+ z__[j4] = 0.f;
+ *dnm1 = z__[j4p2 + 2];
+ *dmin__ = *dnm1;
+ emin = 0.f;
+ } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
+ z__[j4p2 + 2]) {
+ temp = z__[j4p2 + 2] / z__[j4 - 2];
+ z__[j4] = z__[j4p2] * temp;
+ *dnm1 = *dnm2 * temp;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dnm1 = z__[j4p2 + 2] * (*dnm2 / z__[j4 - 2]);
+ }
+ *dmin__ = dmin(*dmin__,*dnm1);
+
+ *dmin1 = *dmin__;
+ j4 += 4;
+ j4p2 = j4 + ((*pp) << (1)) - 1;
+ z__[j4 - 2] = *dnm1 + z__[j4p2];
+ if (z__[j4 - 2] == 0.f) {
+ z__[j4] = 0.f;
+ *dn = z__[j4p2 + 2];
+ *dmin__ = *dn;
+ emin = 0.f;
+ } else if (safmin * z__[j4p2 + 2] < z__[j4 - 2] && safmin * z__[j4 - 2] <
+ z__[j4p2 + 2]) {
+ temp = z__[j4p2 + 2] / z__[j4 - 2];
+ z__[j4] = z__[j4p2] * temp;
+ *dn = *dnm1 * temp;
+ } else {
+ z__[j4] = z__[j4p2 + 2] * (z__[j4p2] / z__[j4 - 2]);
+ *dn = z__[j4p2 + 2] * (*dnm1 / z__[j4 - 2]);
+ }
+ *dmin__ = dmin(*dmin__,*dn);
+
+ z__[j4 + 2] = *dn;
+ z__[((*n0) << (2)) - *pp] = emin;
+ return 0;
+
+/* End of SLASQ6 */
+
+} /* slasq6_ */
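+
+/*
+   Illustrative sketch, not part of the translated LAPACK source: the
+   SAFMIN tests above decide how each quotient is formed.  The direct
+   ratio is taken only when it and its reciprocal are safely inside the
+   representable range; otherwise the products are reassociated so every
+   intermediate stays bounded.  With hypothetical names:
+
+       if (safmin * qnext < qnew && safmin * qnew < qnext) {
+           t    = qnext / qnew;          // ratio is safe to form
+           enew = e * t;
+           d    = d * t;
+       } else {
+           enew = qnext * (e / qnew);    // reassociated, overflow-safe
+           d    = qnext * (d / qnew);
+       }
+*/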
+
+/* Subroutine */ int slasr_(char *side, char *pivot, char *direct, integer *m,
+ integer *n, real *c__, real *s, real *a, integer *lda)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, info;
+ static real temp;
+ extern logical lsame_(char *, char *);
+ static real ctemp, stemp;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLASR performs the transformation
+
+ A := P*A, when SIDE = 'L' or 'l' ( Left-hand side )
+
+ A := A*P', when SIDE = 'R' or 'r' ( Right-hand side )
+
+ where A is an m by n real matrix and P is an orthogonal matrix,
+ consisting of a sequence of plane rotations determined by the
+ parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l'
+ and z = n when SIDE = 'R' or 'r' ):
+
+ When DIRECT = 'F' or 'f' ( Forward sequence ) then
+
+ P = P( z - 1 )*...*P( 2 )*P( 1 ),
+
+ and when DIRECT = 'B' or 'b' ( Backward sequence ) then
+
+ P = P( 1 )*P( 2 )*...*P( z - 1 ),
+
+ where P( k ) is a plane rotation matrix for the following planes:
+
+ when PIVOT = 'V' or 'v' ( Variable pivot ),
+ the plane ( k, k + 1 )
+
+ when PIVOT = 'T' or 't' ( Top pivot ),
+ the plane ( 1, k + 1 )
+
+ when PIVOT = 'B' or 'b' ( Bottom pivot ),
+ the plane ( k, z )
+
+ c( k ) and s( k ) must contain the cosine and sine that define the
+ matrix P( k ). The two by two plane rotation part of the matrix
+ P( k ), R( k ), is assumed to be of the form
+
+ R( k ) = ( c( k ) s( k ) ).
+ ( -s( k ) c( k ) )
+
+ This version vectorises across rows of the array A when SIDE = 'L'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ Specifies whether the plane rotation matrix P is applied to
+ A on the left or the right.
+ = 'L': Left, compute A := P*A
+ = 'R': Right, compute A:= A*P'
+
+ DIRECT (input) CHARACTER*1
+ Specifies whether P is a forward or backward sequence of
+ plane rotations.
+ = 'F': Forward, P = P( z - 1 )*...*P( 2 )*P( 1 )
+ = 'B': Backward, P = P( 1 )*P( 2 )*...*P( z - 1 )
+
+ PIVOT (input) CHARACTER*1
+ Specifies the plane for which P(k) is a plane rotation
+ matrix.
+ = 'V': Variable pivot, the plane (k,k+1)
+ = 'T': Top pivot, the plane (1,k+1)
+ = 'B': Bottom pivot, the plane (k,z)
+
+ M (input) INTEGER
+ The number of rows of the matrix A. If m <= 1, an immediate
+ return is effected.
+
+ N (input) INTEGER
+ The number of columns of the matrix A. If n <= 1, an
+ immediate return is effected.
+
+ C, S (input) REAL arrays, dimension
+ (M-1) if SIDE = 'L'
+ (N-1) if SIDE = 'R'
+ c(k) and s(k) contain the cosine and sine that define the
+ matrix P(k). The two by two plane rotation part of the
+ matrix P(k), R(k), is assumed to be of the form
+ R( k ) = ( c( k ) s( k ) ).
+ ( -s( k ) c( k ) )
+
+ A (input/output) REAL array, dimension (LDA,N)
+ The m by n matrix A. On exit, A is overwritten by P*A if
+ SIDE = 'L' or by A*P' if SIDE = 'R'.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ --c__;
+ --s;
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ info = 0;
+ if (! ((lsame_(side, "L")) || (lsame_(side, "R")))) {
+ info = 1;
+ } else if (! (((lsame_(pivot, "V")) || (lsame_(
+ pivot, "T"))) || (lsame_(pivot, "B")))) {
+ info = 2;
+ } else if (! ((lsame_(direct, "F")) || (lsame_(
+ direct, "B")))) {
+ info = 3;
+ } else if (*m < 0) {
+ info = 4;
+ } else if (*n < 0) {
+ info = 5;
+ } else if (*lda < max(1,*m)) {
+ info = 9;
+ }
+ if (info != 0) {
+ xerbla_("SLASR ", &info);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+ if (lsame_(side, "L")) {
+
+/* Form P * A */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + 1 + i__ * a_dim1];
+ a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
+ a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
+ + i__ * a_dim1];
+/* L10: */
+ }
+ }
+/* L20: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + 1 + i__ * a_dim1];
+ a[j + 1 + i__ * a_dim1] = ctemp * temp - stemp *
+ a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * temp + ctemp * a[j
+ + i__ * a_dim1];
+/* L30: */
+ }
+ }
+/* L40: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
+ i__ * a_dim1 + 1];
+ a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
+ i__ * a_dim1 + 1];
+/* L50: */
+ }
+ }
+/* L60: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = ctemp * temp - stemp * a[
+ i__ * a_dim1 + 1];
+ a[i__ * a_dim1 + 1] = stemp * temp + ctemp * a[
+ i__ * a_dim1 + 1];
+/* L70: */
+ }
+ }
+/* L80: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *m - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
+ + ctemp * temp;
+ a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
+ a_dim1] - stemp * temp;
+/* L90: */
+ }
+ }
+/* L100: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *m - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[j + i__ * a_dim1];
+ a[j + i__ * a_dim1] = stemp * a[*m + i__ * a_dim1]
+ + ctemp * temp;
+ a[*m + i__ * a_dim1] = ctemp * a[*m + i__ *
+ a_dim1] - stemp * temp;
+/* L110: */
+ }
+ }
+/* L120: */
+ }
+ }
+ }
+ } else if (lsame_(side, "R")) {
+
+/* Form A * P' */
+
+ if (lsame_(pivot, "V")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + (j + 1) * a_dim1];
+ a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
+ a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
+ i__ + j * a_dim1];
+/* L130: */
+ }
+ }
+/* L140: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + (j + 1) * a_dim1];
+ a[i__ + (j + 1) * a_dim1] = ctemp * temp - stemp *
+ a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * temp + ctemp * a[
+ i__ + j * a_dim1];
+/* L150: */
+ }
+ }
+/* L160: */
+ }
+ }
+ } else if (lsame_(pivot, "T")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
+ i__ + a_dim1];
+ a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
+ a_dim1];
+/* L170: */
+ }
+ }
+/* L180: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n; j >= 2; --j) {
+ ctemp = c__[j - 1];
+ stemp = s[j - 1];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = ctemp * temp - stemp * a[
+ i__ + a_dim1];
+ a[i__ + a_dim1] = stemp * temp + ctemp * a[i__ +
+ a_dim1];
+/* L190: */
+ }
+ }
+/* L200: */
+ }
+ }
+ } else if (lsame_(pivot, "B")) {
+ if (lsame_(direct, "F")) {
+ i__1 = *n - 1;
+ for (j = 1; j <= i__1; ++j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__2 = *m;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
+ + ctemp * temp;
+ a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
+ a_dim1] - stemp * temp;
+/* L210: */
+ }
+ }
+/* L220: */
+ }
+ } else if (lsame_(direct, "B")) {
+ for (j = *n - 1; j >= 1; --j) {
+ ctemp = c__[j];
+ stemp = s[j];
+ if ((ctemp != 1.f) || (stemp != 0.f)) {
+ i__1 = *m;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ temp = a[i__ + j * a_dim1];
+ a[i__ + j * a_dim1] = stemp * a[i__ + *n * a_dim1]
+ + ctemp * temp;
+ a[i__ + *n * a_dim1] = ctemp * a[i__ + *n *
+ a_dim1] - stemp * temp;
+/* L230: */
+ }
+ }
+/* L240: */
+ }
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SLASR */
+
+} /* slasr_ */
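+
+/*
+   Illustrative sketch, not part of the translated LAPACK source: each
+   P( k ) applied above is a 2-by-2 plane rotation.  For SIDE = 'L' with
+   PIVOT = 'V' it mixes rows j and j+1 of A; in plain row-major C (a
+   hypothetical a[][] rather than the Fortran layout used here):
+
+       for (i = 0; i < n; i++) {
+           t           = a[j + 1][i];
+           a[j + 1][i] = c * t - s * a[j][i];
+           a[j][i]     = s * t + c * a[j][i];
+       }
+*/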
+
+/* Subroutine */ int slasrt_(char *id, integer *n, real *d__, integer *info)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j;
+ static real d1, d2, d3;
+ static integer dir;
+ static real tmp;
+ static integer endd;
+ extern logical lsame_(char *, char *);
+ static integer stack[64] /* was [2][32] */;
+ static real dmnmx;
+ static integer start;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static integer stkpnt;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ Sort the numbers in D in increasing order (if ID = 'I') or
+ in decreasing order (if ID = 'D').
+
+ Use Quick Sort, reverting to Insertion sort on arrays of
+ size <= 20. Dimension of STACK limits N to about 2**32.
+
+ Arguments
+ =========
+
+ ID (input) CHARACTER*1
+ = 'I': sort D in increasing order;
+ = 'D': sort D in decreasing order.
+
+ N (input) INTEGER
+ The length of the array D.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the array to be sorted.
+ On exit, D has been sorted into increasing order
+ (D(1) <= ... <= D(N) ) or into decreasing order
+ (D(1) >= ... >= D(N) ), depending on ID.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+ dir = -1;
+ if (lsame_(id, "D")) {
+ dir = 0;
+ } else if (lsame_(id, "I")) {
+ dir = 1;
+ }
+ if (dir == -1) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLASRT", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 1) {
+ return 0;
+ }
+
+ stkpnt = 1;
+ stack[0] = 1;
+ stack[1] = *n;
+L10:
+ start = stack[((stkpnt) << (1)) - 2];
+ endd = stack[((stkpnt) << (1)) - 1];
+ --stkpnt;
+ if (endd - start <= 20 && endd - start > 0) {
+
+/* Do Insertion sort on D( START:ENDD ) */
+
+ if (dir == 0) {
+
+/* Sort into decreasing order */
+
+ i__1 = endd;
+ for (i__ = start + 1; i__ <= i__1; ++i__) {
+ i__2 = start + 1;
+ for (j = i__; j >= i__2; --j) {
+ if (d__[j] > d__[j - 1]) {
+ dmnmx = d__[j];
+ d__[j] = d__[j - 1];
+ d__[j - 1] = dmnmx;
+ } else {
+ goto L30;
+ }
+/* L20: */
+ }
+L30:
+ ;
+ }
+
+ } else {
+
+/* Sort into increasing order */
+
+ i__1 = endd;
+ for (i__ = start + 1; i__ <= i__1; ++i__) {
+ i__2 = start + 1;
+ for (j = i__; j >= i__2; --j) {
+ if (d__[j] < d__[j - 1]) {
+ dmnmx = d__[j];
+ d__[j] = d__[j - 1];
+ d__[j - 1] = dmnmx;
+ } else {
+ goto L50;
+ }
+/* L40: */
+ }
+L50:
+ ;
+ }
+
+ }
+
+ } else if (endd - start > 20) {
+
+/*
+ Partition D( START:ENDD ) and stack parts, largest one first
+
+ Choose partition entry as median of 3
+*/
+
+ d1 = d__[start];
+ d2 = d__[endd];
+ i__ = (start + endd) / 2;
+ d3 = d__[i__];
+ if (d1 < d2) {
+ if (d3 < d1) {
+ dmnmx = d1;
+ } else if (d3 < d2) {
+ dmnmx = d3;
+ } else {
+ dmnmx = d2;
+ }
+ } else {
+ if (d3 < d2) {
+ dmnmx = d2;
+ } else if (d3 < d1) {
+ dmnmx = d3;
+ } else {
+ dmnmx = d1;
+ }
+ }
+
+ if (dir == 0) {
+
+/* Sort into decreasing order */
+
+ i__ = start - 1;
+ j = endd + 1;
+L60:
+L70:
+ --j;
+ if (d__[j] < dmnmx) {
+ goto L70;
+ }
+L80:
+ ++i__;
+ if (d__[i__] > dmnmx) {
+ goto L80;
+ }
+ if (i__ < j) {
+ tmp = d__[i__];
+ d__[i__] = d__[j];
+ d__[j] = tmp;
+ goto L60;
+ }
+ if (j - start > endd - j - 1) {
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = start;
+ stack[((stkpnt) << (1)) - 1] = j;
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = j + 1;
+ stack[((stkpnt) << (1)) - 1] = endd;
+ } else {
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = j + 1;
+ stack[((stkpnt) << (1)) - 1] = endd;
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = start;
+ stack[((stkpnt) << (1)) - 1] = j;
+ }
+ } else {
+
+/* Sort into increasing order */
+
+ i__ = start - 1;
+ j = endd + 1;
+L90:
+L100:
+ --j;
+ if (d__[j] > dmnmx) {
+ goto L100;
+ }
+L110:
+ ++i__;
+ if (d__[i__] < dmnmx) {
+ goto L110;
+ }
+ if (i__ < j) {
+ tmp = d__[i__];
+ d__[i__] = d__[j];
+ d__[j] = tmp;
+ goto L90;
+ }
+ if (j - start > endd - j - 1) {
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = start;
+ stack[((stkpnt) << (1)) - 1] = j;
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = j + 1;
+ stack[((stkpnt) << (1)) - 1] = endd;
+ } else {
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = j + 1;
+ stack[((stkpnt) << (1)) - 1] = endd;
+ ++stkpnt;
+ stack[((stkpnt) << (1)) - 2] = start;
+ stack[((stkpnt) << (1)) - 1] = j;
+ }
+ }
+ }
+ if (stkpnt > 0) {
+ goto L10;
+ }
+ return 0;
+
+/* End of SLASRT */
+
+} /* slasrt_ */
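+
+/*
+   Illustrative sketch, not part of the translated LAPACK source: a
+   hypothetical direct call sorting five values into increasing order:
+
+       real d[5] = {3.f, 1.f, 5.f, 2.f, 4.f};
+       integer n = 5, info;
+       slasrt_("I", &n, d, &info);
+       // d is now {1, 2, 3, 4, 5} and info == 0
+*/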
+
+/* Subroutine */ int slassq_(integer *n, real *x, integer *incx, real *scale,
+ real *sumsq)
+{
+ /* System generated locals */
+ integer i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ static integer ix;
+ static real absxi;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLASSQ returns the values scl and smsq such that
+
+ ( scl**2 )*smsq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq,
+
+ where x( i ) = X( 1 + ( i - 1 )*INCX ). The value of sumsq is
+ assumed to be non-negative and scl returns the value
+
+ scl = max( scale, abs( x( i ) ) ).
+
+ scale and sumsq must be supplied in SCALE and SUMSQ and
+ scl and smsq are overwritten on SCALE and SUMSQ respectively.
+
+ The routine makes only one pass through the vector x.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of elements to be used from the vector X.
+
+ X (input) REAL array, dimension (N)
+ The vector for which a scaled sum of squares is computed.
+ x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n.
+
+ INCX (input) INTEGER
+ The increment between successive values of the vector X.
+ INCX > 0.
+
+ SCALE (input/output) REAL
+ On entry, the value scale in the equation above.
+ On exit, SCALE is overwritten with scl, the scaling factor
+ for the sum of squares.
+
+ SUMSQ (input/output) REAL
+ On entry, the value sumsq in the equation above.
+ On exit, SUMSQ is overwritten with smsq, the basic sum of
+ squares from which scl has been factored out.
+
+ =====================================================================
+*/
+
+
+ /* Parameter adjustments */
+ --x;
+
+ /* Function Body */
+ if (*n > 0) {
+ i__1 = (*n - 1) * *incx + 1;
+ i__2 = *incx;
+ for (ix = 1; i__2 < 0 ? ix >= i__1 : ix <= i__1; ix += i__2) {
+ if (x[ix] != 0.f) {
+ absxi = (r__1 = x[ix], dabs(r__1));
+ if (*scale < absxi) {
+/* Computing 2nd power */
+ r__1 = *scale / absxi;
+ *sumsq = *sumsq * (r__1 * r__1) + 1;
+ *scale = absxi;
+ } else {
+/* Computing 2nd power */
+ r__1 = absxi / *scale;
+ *sumsq += r__1 * r__1;
+ }
+ }
+/* L10: */
+ }
+ }
+ return 0;
+
+/* End of SLASSQ */
+
+} /* slassq_ */
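+
+/*
+   Illustrative sketch, not part of the translated LAPACK source: the
+   usual reason to call SLASSQ is to form a 2-norm that neither overflows
+   nor underflows.  With a hypothetical vector x of length n:
+
+       real scale = 0.f, sumsq = 1.f, nrm2;
+       integer incx = 1;
+       slassq_(&n, x, &incx, &scale, &sumsq);
+       nrm2 = scale * sqrtf(sumsq);      // ||x||_2, overflow-safe
+*/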
+
+/* Subroutine */ int slasv2_(real *f, real *g, real *h__, real *ssmin, real *
+ ssmax, real *snr, real *csr, real *snl, real *csl)
+{
+ /* System generated locals */
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ static real a, d__, l, m, r__, s, t, fa, ga, ha, ft, gt, ht, mm, tt, clt,
+ crt, slt, srt;
+ static integer pmax;
+ static real temp;
+ static logical swap;
+ static real tsign;
+ static logical gasmal;
+ extern doublereal slamch_(char *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLASV2 computes the singular value decomposition of a 2-by-2
+ triangular matrix
+ [ F G ]
+ [ 0 H ].
+ On return, abs(SSMAX) is the larger singular value, abs(SSMIN) is the
+ smaller singular value, and (CSL,SNL) and (CSR,SNR) are the left and
+ right singular vectors for abs(SSMAX), giving the decomposition
+
+ [ CSL SNL ] [ F G ] [ CSR -SNR ] = [ SSMAX 0 ]
+ [-SNL CSL ] [ 0 H ] [ SNR CSR ] [ 0 SSMIN ].
+
+ Arguments
+ =========
+
+ F (input) REAL
+ The (1,1) element of the 2-by-2 matrix.
+
+ G (input) REAL
+ The (1,2) element of the 2-by-2 matrix.
+
+ H (input) REAL
+ The (2,2) element of the 2-by-2 matrix.
+
+ SSMIN (output) REAL
+ abs(SSMIN) is the smaller singular value.
+
+ SSMAX (output) REAL
+ abs(SSMAX) is the larger singular value.
+
+ SNL (output) REAL
+ CSL (output) REAL
+ The vector (CSL, SNL) is a unit left singular vector for the
+ singular value abs(SSMAX).
+
+ SNR (output) REAL
+ CSR (output) REAL
+ The vector (CSR, SNR) is a unit right singular vector for the
+ singular value abs(SSMAX).
+
+ Further Details
+ ===============
+
+ Any input parameter may be aliased with any output parameter.
+
+ Barring over/underflow and assuming a guard digit in subtraction, all
+ output quantities are correct to within a few units in the last
+ place (ulps).
+
+ In IEEE arithmetic, the code works correctly if one matrix element is
+ infinite.
+
+ Overflow will not occur unless the largest singular value itself
+ overflows or is within a few ulps of overflow. (On machines with
+ partial overflow, like the Cray, overflow may occur if the largest
+ singular value is within a factor of 2 of overflow.)
+
+ Underflow is harmless if underflow is gradual. Otherwise, results
+ may correspond to a matrix modified by perturbations of size near
+ the underflow threshold.
+
+ =====================================================================
+*/
+
+
+ ft = *f;
+ fa = dabs(ft);
+ ht = *h__;
+ ha = dabs(*h__);
+
+/*
+ PMAX points to the maximum absolute element of matrix
+ PMAX = 1 if F largest in absolute values
+ PMAX = 2 if G largest in absolute values
+ PMAX = 3 if H largest in absolute values
+*/
+
+ pmax = 1;
+ swap = ha > fa;
+ if (swap) {
+ pmax = 3;
+ temp = ft;
+ ft = ht;
+ ht = temp;
+ temp = fa;
+ fa = ha;
+ ha = temp;
+
+/* Now FA .ge. HA */
+
+ }
+ gt = *g;
+ ga = dabs(gt);
+ if (ga == 0.f) {
+
+/* Diagonal matrix */
+
+ *ssmin = ha;
+ *ssmax = fa;
+ clt = 1.f;
+ crt = 1.f;
+ slt = 0.f;
+ srt = 0.f;
+ } else {
+ gasmal = TRUE_;
+ if (ga > fa) {
+ pmax = 2;
+ if (fa / ga < slamch_("EPS")) {
+
+/* Case of very large GA */
+
+ gasmal = FALSE_;
+ *ssmax = ga;
+ if (ha > 1.f) {
+ *ssmin = fa / (ga / ha);
+ } else {
+ *ssmin = fa / ga * ha;
+ }
+ clt = 1.f;
+ slt = ht / gt;
+ srt = 1.f;
+ crt = ft / gt;
+ }
+ }
+ if (gasmal) {
+
+/* Normal case */
+
+ d__ = fa - ha;
+ if (d__ == fa) {
+
+/* Copes with infinite F or H */
+
+ l = 1.f;
+ } else {
+ l = d__ / fa;
+ }
+
+/* Note that 0 .le. L .le. 1 */
+
+ m = gt / ft;
+
+/* Note that abs(M) .le. 1/macheps */
+
+ t = 2.f - l;
+
+/* Note that T .ge. 1 */
+
+ mm = m * m;
+ tt = t * t;
+ s = sqrt(tt + mm);
+
+/* Note that 1 .le. S .le. 1 + 1/macheps */
+
+ if (l == 0.f) {
+ r__ = dabs(m);
+ } else {
+ r__ = sqrt(l * l + mm);
+ }
+
+/* Note that 0 .le. R .le. 1 + 1/macheps */
+
+ a = (s + r__) * .5f;
+
+/* Note that 1 .le. A .le. 1 + abs(M) */
+
+ *ssmin = ha / a;
+ *ssmax = fa * a;
+ if (mm == 0.f) {
+
+/* Note that M is very tiny */
+
+ if (l == 0.f) {
+ t = r_sign(&c_b9647, &ft) * r_sign(&c_b1011, &gt);
+ } else {
+ t = gt / r_sign(&d__, &ft) + m / t;
+ }
+ } else {
+ t = (m / (s + t) + m / (r__ + l)) * (a + 1.f);
+ }
+ l = sqrt(t * t + 4.f);
+ crt = 2.f / l;
+ srt = t / l;
+ clt = (crt + srt * m) / a;
+ slt = ht / ft * srt / a;
+ }
+ }
+ if (swap) {
+ *csl = srt;
+ *snl = crt;
+ *csr = slt;
+ *snr = clt;
+ } else {
+ *csl = clt;
+ *snl = slt;
+ *csr = crt;
+ *snr = srt;
+ }
+
+/* Correct signs of SSMAX and SSMIN */
+
+ if (pmax == 1) {
+ tsign = r_sign(&c_b1011, csr) * r_sign(&c_b1011, csl) * r_sign(&
+ c_b1011, f);
+ }
+ if (pmax == 2) {
+ tsign = r_sign(&c_b1011, snr) * r_sign(&c_b1011, csl) * r_sign(&
+ c_b1011, g);
+ }
+ if (pmax == 3) {
+ tsign = r_sign(&c_b1011, snr) * r_sign(&c_b1011, snl) * r_sign(&
+ c_b1011, h__);
+ }
+ *ssmax = r_sign(ssmax, &tsign);
+ r__1 = tsign * r_sign(&c_b1011, f) * r_sign(&c_b1011, h__);
+ *ssmin = r_sign(ssmin, &r__1);
+ return 0;
+
+/* End of SLASV2 */
+
+} /* slasv2_ */
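+
+/*
+   Illustrative sketch, not part of the translated LAPACK source: a
+   hypothetical call and the identity the outputs satisfy, per the
+   Purpose section above:
+
+       real f = 4.f, g = 3.f, h = 2.f;
+       real ssmin, ssmax, snr, csr, snl, csl;
+       slasv2_(&f, &g, &h, &ssmin, &ssmax, &snr, &csr, &snl, &csl);
+
+       [ csl  snl ] [ f  g ] [ csr  -snr ]   [ ssmax    0   ]
+       [-snl  csl ] [ 0  h ] [ snr   csr ] = [   0    ssmin ]
+
+   and |ssmax * ssmin| equals |f * h| up to rounding, since the two
+   rotations preserve the determinant.
+*/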
+
+/* Subroutine */ int slaswp_(integer *n, real *a, integer *lda, integer *k1,
+ integer *k2, integer *ipiv, integer *incx)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, j, k, i1, i2, n32, ip, ix, ix0, inc;
+ static real temp;
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SLASWP performs a series of row interchanges on the matrix A.
+ One row interchange is initiated for each of rows K1 through K2 of A.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The number of columns of the matrix A.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the matrix of column dimension N to which the row
+ interchanges will be applied.
+ On exit, the permuted matrix.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+
+ K1 (input) INTEGER
+ The first element of IPIV for which a row interchange will
+ be done.
+
+ K2 (input) INTEGER
+ The last element of IPIV for which a row interchange will
+ be done.
+
+ IPIV (input) INTEGER array, dimension (K2*abs(INCX))
+ The vector of pivot indices. Only the elements in positions
+ K1 through K2 of IPIV are accessed.
+ IPIV(K) = L implies rows K and L are to be interchanged.
+
+ INCX (input) INTEGER
+ The increment between successive values of IPIV. If INCX
+ is negative, the pivots are applied in reverse order.
+
+ Further Details
+ ===============
+
+ Modified by
+ R. C. Whaley, Computer Science Dept., Univ. of Tenn., Knoxville, USA
+
+ =====================================================================
+
+
+ Interchange row I with row IPIV(I) for each of rows K1 through K2.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --ipiv;
+
+ /* Function Body */
+ if (*incx > 0) {
+ ix0 = *k1;
+ i1 = *k1;
+ i2 = *k2;
+ inc = 1;
+ } else if (*incx < 0) {
+ ix0 = (1 - *k2) * *incx + 1;
+ i1 = *k2;
+ i2 = *k1;
+ inc = -1;
+ } else {
+ return 0;
+ }
+
+ n32 = (*n / 32) << (5);
+ if (n32 != 0) {
+ i__1 = n32;
+ for (j = 1; j <= i__1; j += 32) {
+ ix = ix0;
+ i__2 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__3)
+ {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__4 = j + 31;
+ for (k = j; k <= i__4; ++k) {
+ temp = a[i__ + k * a_dim1];
+ a[i__ + k * a_dim1] = a[ip + k * a_dim1];
+ a[ip + k * a_dim1] = temp;
+/* L10: */
+ }
+ }
+ ix += *incx;
+/* L20: */
+ }
+/* L30: */
+ }
+ }
+ if (n32 != *n) {
+ ++n32;
+ ix = ix0;
+ i__1 = i2;
+ i__3 = inc;
+ for (i__ = i1; i__3 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__3) {
+ ip = ipiv[ix];
+ if (ip != i__) {
+ i__2 = *n;
+ for (k = n32; k <= i__2; ++k) {
+ temp = a[i__ + k * a_dim1];
+ a[i__ + k * a_dim1] = a[ip + k * a_dim1];
+ a[ip + k * a_dim1] = temp;
+/* L40: */
+ }
+ }
+ ix += *incx;
+/* L50: */
+ }
+ }
+
+ return 0;
+
+/* End of SLASWP */
+
+} /* slaswp_ */
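+
+/*
+   Illustrative sketch, not part of the translated LAPACK source: the
+   typical caller is an LU solve, which replays the SGETRF pivots on a
+   right-hand-side block B.  With hypothetical nrhs, b, ldb, ipiv and n:
+
+       integer k1 = 1, k2 = n, incx = 1;
+       slaswp_(&nrhs, b, &ldb, &k1, &k2, ipiv, &incx);
+       // rows of b are now swapped as the rows of A were during SGETRF
+*/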
+
+/* Subroutine */ int slatrd_(char *uplo, integer *n, integer *nb, real *a,
+ integer *lda, real *e, real *tau, real *w, integer *ldw)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, w_dim1, w_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, iw;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ static real alpha;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *), saxpy_(
+ integer *, real *, real *, integer *, real *, integer *), ssymv_(
+ char *, integer *, real *, real *, integer *, real *, integer *,
+ real *, real *, integer *), slarfg_(integer *, real *,
+ real *, integer *, real *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SLATRD reduces NB rows and columns of a real symmetric matrix A to
+ symmetric tridiagonal form by an orthogonal similarity
+ transformation Q' * A * Q, and returns the matrices V and W which are
+ needed to apply the transformation to the unreduced part of A.
+
+ If UPLO = 'U', SLATRD reduces the last NB rows and columns of a
+ matrix, of which the upper triangle is supplied;
+ if UPLO = 'L', SLATRD reduces the first NB rows and columns of a
+ matrix, of which the lower triangle is supplied.
+
+ This is an auxiliary routine called by SSYTRD.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER
+ Specifies whether the upper or lower triangular part of the
+ symmetric matrix A is stored:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the matrix A.
+
+ NB (input) INTEGER
+ The number of rows and columns to be reduced.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ n-by-n upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n-by-n lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+ On exit:
+ if UPLO = 'U', the last NB columns have been reduced to
+ tridiagonal form, with the diagonal elements overwriting
+ the diagonal elements of A; the elements above the diagonal,
+ together with the array TAU, represent the orthogonal matrix Q as a
+ product of elementary reflectors;
+ if UPLO = 'L', the first NB columns have been reduced to
+ tridiagonal form, with the diagonal elements overwriting
+ the diagonal elements of A; the elements below the diagonal,
+ together with the array TAU, represent the orthogonal matrix Q as a
+ product of elementary reflectors.
+ See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ E (output) REAL array, dimension (N-1)
+ If UPLO = 'U', E(n-nb:n-1) contains the superdiagonal
+ elements of the last NB columns of the reduced matrix;
+ if UPLO = 'L', E(1:nb) contains the subdiagonal elements of
+ the first NB columns of the reduced matrix.
+
+ TAU (output) REAL array, dimension (N-1)
+ The scalar factors of the elementary reflectors, stored in
+ TAU(n-nb:n-1) if UPLO = 'U', and in TAU(1:nb) if UPLO = 'L'.
+ See Further Details.
+
+ W (output) REAL array, dimension (LDW,NB)
+ The n-by-nb matrix W required to update the unreduced part
+ of A.
+
+ LDW (input) INTEGER
+ The leading dimension of the array W. LDW >= max(1,N).
+
+ Further Details
+ ===============
+
+ If UPLO = 'U', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(n) H(n-1) . . . H(n-nb+1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(i:n) = 0 and v(i-1) = 1; v(1:i-1) is stored on exit in A(1:i-1,i),
+ and tau in TAU(i-1).
+
+ If UPLO = 'L', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(1) H(2) . . . H(nb).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0 and v(i+1) = 1; v(i+1:n) is stored on exit in A(i+1:n,i),
+ and tau in TAU(i).
+
+ The elements of the vectors v together form the n-by-nb matrix V
+ which is needed, with W, to apply the transformation to the unreduced
+ part of the matrix, using a symmetric rank-2k update of the form:
+ A := A - V*W' - W*V'.
+
+ The contents of A on exit are illustrated by the following examples
+ with n = 5 and nb = 2:
+
+ if UPLO = 'U': if UPLO = 'L':
+
+ ( a a a v4 v5 ) ( d )
+ ( a a v4 v5 ) ( 1 d )
+ ( a 1 v5 ) ( v1 1 a )
+ ( d 1 ) ( v1 v2 a a )
+ ( d ) ( v1 v2 a a a )
+
+ where d denotes a diagonal element of the reduced matrix, a denotes
+ an element of the original matrix that is unchanged, and vi denotes
+ an element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Quick return if possible
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --e;
+ --tau;
+ w_dim1 = *ldw;
+ w_offset = 1 + w_dim1;
+ w -= w_offset;
+
+ /* Function Body */
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (lsame_(uplo, "U")) {
+
+/* Reduce last NB columns of upper triangle */
+
+ i__1 = *n - *nb + 1;
+ for (i__ = *n; i__ >= i__1; --i__) {
+ iw = i__ - *n + *nb;
+ if (i__ < *n) {
+
+/* Update A(1:i,i) */
+
+ i__2 = *n - i__;
+ sgemv_("No transpose", &i__, &i__2, &c_b1290, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &w[i__ + (iw + 1) * w_dim1], ldw, &
+ c_b1011, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ sgemv_("No transpose", &i__, &i__2, &c_b1290, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ c_b1011, &a[i__ * a_dim1 + 1], &c__1);
+ }
+ if (i__ > 1) {
+
+/*
+ Generate elementary reflector H(i) to annihilate
+ A(1:i-2,i)
+*/
+
+ i__2 = i__ - 1;
+ slarfg_(&i__2, &a[i__ - 1 + i__ * a_dim1], &a[i__ * a_dim1 +
+ 1], &c__1, &tau[i__ - 1]);
+ e[i__ - 1] = a[i__ - 1 + i__ * a_dim1];
+ a[i__ - 1 + i__ * a_dim1] = 1.f;
+
+/* Compute W(1:i-1,i) */
+
+ i__2 = i__ - 1;
+ ssymv_("Upper", &i__2, &c_b1011, &a[a_offset], lda, &a[i__ *
+ a_dim1 + 1], &c__1, &c_b320, &w[iw * w_dim1 + 1], &
+ c__1);
+ if (i__ < *n) {
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &w[(iw + 1) *
+ w_dim1 + 1], ldw, &a[i__ * a_dim1 + 1], &c__1, &
+ c_b320, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &a[(i__ +
+ 1) * a_dim1 + 1], lda, &w[i__ + 1 + iw * w_dim1],
+ &c__1, &c_b1011, &w[iw * w_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ * a_dim1 + 1], &c__1, &
+ c_b320, &w[i__ + 1 + iw * w_dim1], &c__1);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &w[(iw + 1)
+ * w_dim1 + 1], ldw, &w[i__ + 1 + iw * w_dim1], &
+ c__1, &c_b1011, &w[iw * w_dim1 + 1], &c__1);
+ }
+ i__2 = i__ - 1;
+ sscal_(&i__2, &tau[i__ - 1], &w[iw * w_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ alpha = tau[i__ - 1] * -.5f * sdot_(&i__2, &w[iw * w_dim1 + 1]
+ , &c__1, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = i__ - 1;
+ saxpy_(&i__2, &alpha, &a[i__ * a_dim1 + 1], &c__1, &w[iw *
+ w_dim1 + 1], &c__1);
+ }
+
+/* L10: */
+ }
+ } else {
+
+/* Reduce first NB columns of lower triangle */
+
+ i__1 = *nb;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/* Update A(i:n,i) */
+
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &a[i__ + a_dim1],
+ lda, &w[i__ + w_dim1], ldw, &c_b1011, &a[i__ + i__ *
+ a_dim1], &c__1);
+ i__2 = *n - i__ + 1;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &w[i__ + w_dim1],
+ ldw, &a[i__ + a_dim1], lda, &c_b1011, &a[i__ + i__ *
+ a_dim1], &c__1);
+ if (i__ < *n) {
+
+/*
+ Generate elementary reflector H(i) to annihilate
+ A(i+2:n,i)
+*/
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ slarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) +
+ i__ * a_dim1], &c__1, &tau[i__]);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+ a[i__ + 1 + i__ * a_dim1] = 1.f;
+
+/* Compute W(i+1:n,i) */
+
+ i__2 = *n - i__;
+ ssymv_("Lower", &i__2, &c_b1011, &a[i__ + 1 + (i__ + 1) *
+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b320, &w[i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &w[i__ + 1 +
+ w_dim1], ldw, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b320, &w[i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &a[i__ + 1 +
+ a_dim1], lda, &w[i__ * w_dim1 + 1], &c__1, &c_b1011, &
+ w[i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &a[i__ + 1 +
+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ c_b320, &w[i__ * w_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &w[i__ + 1 +
+ w_dim1], ldw, &w[i__ * w_dim1 + 1], &c__1, &c_b1011, &
+ w[i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ sscal_(&i__2, &tau[i__], &w[i__ + 1 + i__ * w_dim1], &c__1);
+ i__2 = *n - i__;
+ alpha = tau[i__] * -.5f * sdot_(&i__2, &w[i__ + 1 + i__ *
+ w_dim1], &c__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+ i__2 = *n - i__;
+ saxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &w[
+ i__ + 1 + i__ * w_dim1], &c__1);
+ }
+
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of SLATRD */
+
+} /* slatrd_ */
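+
+/*
+   Illustrative sketch, not part of the translated LAPACK source: once
+   SLATRD has produced V (stored in A) and W, the caller applies the
+   rank-2k update A := A - V*W' - W*V' to the unreduced block with a
+   single Level 3 BLAS call, schematically (hypothetical pointers and
+   dimensions):
+
+       real one = 1.f, minus_one = -1.f;
+       ssyr2k_("Lower", "No transpose", &n_rem, &nb, &minus_one,
+               v, &ldv, w, &ldw, &one, a22, &lda);
+       // a22 points at the trailing (unreduced) part of A
+*/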
+
+/* Subroutine */ int slauu2_(char *uplo, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__;
+ static real aii;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SLAUU2 computes the product U * U' or L' * L, where the triangular
+ factor U or L is stored in the upper or lower triangular part of
+ the array A.
+
+ If UPLO = 'U' or 'u' then the upper triangle of the result is stored,
+ overwriting the factor U in A.
+ If UPLO = 'L' or 'l' then the lower triangle of the result is stored,
+ overwriting the factor L in A.
+
+ This is the unblocked form of the algorithm, calling Level 2 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the triangular factor stored in the array A
+ is upper or lower triangular:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the triangular factor U or L. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the triangular factor U or L.
+ On exit, if UPLO = 'U', the upper triangle of A is
+ overwritten with the upper triangle of the product U * U';
+ if UPLO = 'L', the lower triangle of A is overwritten with
+ the lower triangle of the product L' * L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAUU2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the product U * U'. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aii = a[i__ + i__ * a_dim1];
+ if (i__ < *n) {
+ i__2 = *n - i__ + 1;
+ a[i__ + i__ * a_dim1] = sdot_(&i__2, &a[i__ + i__ * a_dim1],
+ lda, &a[i__ + i__ * a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1011, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ aii, &a[i__ * a_dim1 + 1], &c__1);
+ } else {
+ sscal_(&i__, &aii, &a[i__ * a_dim1 + 1], &c__1);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the product L' * L. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ aii = a[i__ + i__ * a_dim1];
+ if (i__ < *n) {
+ i__2 = *n - i__ + 1;
+ a[i__ + i__ * a_dim1] = sdot_(&i__2, &a[i__ + i__ * a_dim1], &
+ c__1, &a[i__ + i__ * a_dim1], &c__1);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1011, &a[i__ + 1 +
+ a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &aii,
+ &a[i__ + a_dim1], lda);
+ } else {
+ sscal_(&i__, &aii, &a[i__ + a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of SLAUU2 */
+
+} /* slauu2_ */
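+
+/*
+   Illustrative worked case, not part of the translated LAPACK source:
+   for UPLO = 'U' and n = 2, with U = [ u11 u12 ; 0 u22 ],
+
+       U * U' = [ u11^2 + u12^2   u12*u22 ]
+                [     u12*u22       u22^2 ]
+
+   and only the upper triangle (u11^2 + u12^2, u12*u22, u22^2) is written
+   back over U, which is what the sdot_/sgemv_/sscal_ calls above compute
+   one column at a time.
+*/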
+
+/* Subroutine */ int slauum_(char *uplo, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, ib, nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+ ), ssyrk_(char *, char *, integer
+ *, integer *, real *, real *, integer *, real *, real *, integer *
+ ), slauu2_(char *, integer *, real *, integer *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SLAUUM computes the product U * U' or L' * L, where the triangular
+ factor U or L is stored in the upper or lower triangular part of
+ the array A.
+
+ If UPLO = 'U' or 'u' then the upper triangle of the result is stored,
+ overwriting the factor U in A.
+ If UPLO = 'L' or 'l' then the lower triangle of the result is stored,
+ overwriting the factor L in A.
+
+ This is the blocked form of the algorithm, calling Level 3 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the triangular factor stored in the array A
+ is upper or lower triangular:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the triangular factor U or L. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the triangular factor U or L.
+ On exit, if UPLO = 'U', the upper triangle of A is
+ overwritten with the upper triangle of the product U * U';
+ if UPLO = 'L', the lower triangle of A is overwritten with
+ the lower triangle of the product L' * L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SLAUUM", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "SLAUUM", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+
+ if ((nb <= 1) || (nb >= *n)) {
+
+/* Use unblocked code */
+
+ slauu2_(uplo, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute the product U * U'. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - i__ + 1;
+ ib = min(i__3,i__4);
+ i__3 = i__ - 1;
+ strmm_("Right", "Upper", "Transpose", "Non-unit", &i__3, &ib,
+ &c_b1011, &a[i__ + i__ * a_dim1], lda, &a[i__ *
+ a_dim1 + 1], lda);
+ slauu2_("Upper", &ib, &a[i__ + i__ * a_dim1], lda, info);
+ if (i__ + ib <= *n) {
+ i__3 = i__ - 1;
+ i__4 = *n - i__ - ib + 1;
+ sgemm_("No transpose", "Transpose", &i__3, &ib, &i__4, &
+ c_b1011, &a[(i__ + ib) * a_dim1 + 1], lda, &a[i__
+ + (i__ + ib) * a_dim1], lda, &c_b1011, &a[i__ *
+ a_dim1 + 1], lda);
+ i__3 = *n - i__ - ib + 1;
+ ssyrk_("Upper", "No transpose", &ib, &i__3, &c_b1011, &a[
+ i__ + (i__ + ib) * a_dim1], lda, &c_b1011, &a[i__
+ + i__ * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the product L' * L. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - i__ + 1;
+ ib = min(i__3,i__4);
+ i__3 = i__ - 1;
+ strmm_("Left", "Lower", "Transpose", "Non-unit", &ib, &i__3, &
+ c_b1011, &a[i__ + i__ * a_dim1], lda, &a[i__ + a_dim1]
+ , lda);
+ slauu2_("Lower", &ib, &a[i__ + i__ * a_dim1], lda, info);
+ if (i__ + ib <= *n) {
+ i__3 = i__ - 1;
+ i__4 = *n - i__ - ib + 1;
+ sgemm_("Transpose", "No transpose", &ib, &i__3, &i__4, &
+ c_b1011, &a[i__ + ib + i__ * a_dim1], lda, &a[i__
+ + ib + a_dim1], lda, &c_b1011, &a[i__ + a_dim1],
+ lda);
+ i__3 = *n - i__ - ib + 1;
+ ssyrk_("Lower", "Transpose", &ib, &i__3, &c_b1011, &a[i__
+ + ib + i__ * a_dim1], lda, &c_b1011, &a[i__ + i__
+ * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of SLAUUM */
+
+} /* slauum_ */
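+
+/*
+   A minimal usage sketch (disabled with #if 0): driving slauum_ above on a
+   small upper-triangular factor.  The example name and the 3x3 data are
+   hypothetical; integer and real are the f2c typedefs used throughout
+   this file.
+*/
+#if 0
+static void example_slauum(void)
+{
+    /* 3x3 upper-triangular U, column-major, lda = 3 (zeros below) */
+    real a[9] = { 1.f, 0.f, 0.f,  2.f, 3.f, 0.f,  4.f, 5.f, 6.f };
+    integer n = 3, lda = 3, info = 0;
+
+    slauum_("U", &n, a, &lda, &info);
+    /* info == 0: the upper triangle of a now holds U * U' */
+}
+#endif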
+
+/* Subroutine */ int sorg2r_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ slarf_(char *, integer *, integer *, real *, integer *, real *,
+ real *, integer *, real *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SORG2R generates an m by n real matrix Q with orthonormal columns,
+ which is defined as the first n columns of a product of k elementary
+ reflectors of order m
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by SGEQRF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. M >= N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. N >= K >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the i-th column must contain the vector which
+ defines the elementary reflector H(i), for i = 1,2,...,k, as
+ returned by SGEQRF in the first k columns of its array
+ argument A.
+ On exit, the m-by-n matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) REAL array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SGEQRF.
+
+ WORK (workspace) REAL array, dimension (N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if ((*n < 0) || (*n > *m)) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORG2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+/* Initialise columns k+1:n to columns of the unit matrix */
+
+ i__1 = *n;
+ for (j = *k + 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = 1; l <= i__2; ++l) {
+ a[l + j * a_dim1] = 0.f;
+/* L10: */
+ }
+ a[j + j * a_dim1] = 1.f;
+/* L20: */
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the left */
+
+ if (i__ < *n) {
+ a[i__ + i__ * a_dim1] = 1.f;
+ i__1 = *m - i__ + 1;
+ i__2 = *n - i__;
+ slarf_("Left", &i__1, &i__2, &a[i__ + i__ * a_dim1], &c__1, &tau[
+ i__], &a[i__ + (i__ + 1) * a_dim1], lda, &work[1]);
+ }
+ if (i__ < *m) {
+ i__1 = *m - i__;
+ r__1 = -tau[i__];
+ sscal_(&i__1, &r__1, &a[i__ + 1 + i__ * a_dim1], &c__1);
+ }
+ a[i__ + i__ * a_dim1] = 1.f - tau[i__];
+
+/* Set A(1:i-1,i) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ a[l + i__ * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of SORG2R */
+
+} /* sorg2r_ */
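+
+/*
+   The elementary reflectors accumulated above have the form
+   H = I - tau*v*v'.  A minimal sketch (disabled with #if 0) of that single
+   building block applied to one vector; apply_householder is a
+   hypothetical helper name using plain C types.
+*/
+#if 0
+static void apply_householder(int m, float tau, const float *v, float *x)
+{
+    /* x := (I - tau*v*v') * x  =  x - tau*(v'x)*v */
+    float dot = 0.f;
+    int i;
+    for (i = 0; i < m; ++i) {
+        dot += v[i] * x[i];
+    }
+    for (i = 0; i < m; ++i) {
+        x[i] -= tau * dot * v[i];
+    }
+}
+#endif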
+
+/* Subroutine */ int sorgbr_(char *vect, integer *m, integer *n, integer *k,
+ real *a, integer *lda, real *tau, real *work, integer *lwork, integer
+ *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, nb, mn;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ static logical wantq;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int sorglq_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *), sorgqr_(
+ integer *, integer *, integer *, real *, integer *, real *, real *
+ , integer *, integer *);
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SORGBR generates one of the real orthogonal matrices Q or P**T
+ determined by SGEBRD when reducing a real matrix A to bidiagonal
+ form: A = Q * B * P**T. Q and P**T are defined as products of
+ elementary reflectors H(i) or G(i) respectively.
+
+ If VECT = 'Q', A is assumed to have been an M-by-K matrix, and Q
+ is of order M:
+ if m >= k, Q = H(1) H(2) . . . H(k) and SORGBR returns the first n
+ columns of Q, where m >= n >= k;
+ if m < k, Q = H(1) H(2) . . . H(m-1) and SORGBR returns Q as an
+ M-by-M matrix.
+
+ If VECT = 'P', A is assumed to have been a K-by-N matrix, and P**T
+ is of order N:
+ if k < n, P**T = G(k) . . . G(2) G(1) and SORGBR returns the first m
+ rows of P**T, where n >= m >= k;
+ if k >= n, P**T = G(n-1) . . . G(2) G(1) and SORGBR returns P**T as
+ an N-by-N matrix.
+
+ Arguments
+ =========
+
+ VECT (input) CHARACTER*1
+ Specifies whether the matrix Q or the matrix P**T is
+ required, as defined in the transformation applied by SGEBRD:
+ = 'Q': generate Q;
+ = 'P': generate P**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix Q or P**T to be returned.
+ M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q or P**T to be returned.
+ N >= 0.
+ If VECT = 'Q', M >= N >= min(M,K);
+ if VECT = 'P', N >= M >= min(N,K).
+
+ K (input) INTEGER
+ If VECT = 'Q', the number of columns in the original M-by-K
+ matrix reduced by SGEBRD.
+ If VECT = 'P', the number of rows in the original K-by-N
+ matrix reduced by SGEBRD.
+ K >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the vectors which define the elementary reflectors,
+ as returned by SGEBRD.
+ On exit, the M-by-N matrix Q or P**T.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) REAL array, dimension
+ (min(M,K)) if VECT = 'Q'
+ (min(N,K)) if VECT = 'P'
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i) or G(i), which determines Q or P**T, as
+ returned by SGEBRD in its array argument TAUQ or TAUP.
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,min(M,N)).
+ For optimum performance LWORK >= min(M,N)*NB, where NB
+ is the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ wantq = lsame_(vect, "Q");
+ mn = min(*m,*n);
+ lquery = *lwork == -1;
+ if (! wantq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (*m < 0) {
+ *info = -2;
+ } else if (((*n < 0) || (wantq && ((*n > *m) || (*n < min(*m,*k))))) || (!
+ wantq && ((*m > *n) || (*m < min(*n,*k))))) {
+ *info = -3;
+ } else if (*k < 0) {
+ *info = -4;
+ } else if (*lda < max(1,*m)) {
+ *info = -6;
+ } else if (*lwork < max(1,mn) && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+ if (wantq) {
+ nb = ilaenv_(&c__1, "SORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ } else {
+ nb = ilaenv_(&c__1, "SORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ }
+ lwkopt = max(1,mn) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*m == 0) || (*n == 0)) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ if (wantq) {
+
+/*
+ Form Q, determined by a call to SGEBRD to reduce an m-by-k
+ matrix
+*/
+
+ if (*m >= *k) {
+
+/* If m >= k, assume m >= n >= k */
+
+ sorgqr_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/*
+ If m < k, assume m = n
+
+ Shift the vectors which define the elementary reflectors one
+ column to the right, and set the first row and column of Q
+ to those of the unit matrix
+*/
+
+ for (j = *m; j >= 2; --j) {
+ a[j * a_dim1 + 1] = 0.f;
+ i__1 = *m;
+ for (i__ = j + 1; i__ <= i__1; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+ a[a_dim1 + 1] = 1.f;
+ i__1 = *m;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ a[i__ + a_dim1] = 0.f;
+/* L30: */
+ }
+ if (*m > 1) {
+
+/* Form Q(2:m,2:m) */
+
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ sorgqr_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
+ tau[1], &work[1], lwork, &iinfo);
+ }
+ }
+ } else {
+
+/*
+ Form P', determined by a call to SGEBRD to reduce a k-by-n
+ matrix
+*/
+
+ if (*k < *n) {
+
+/* If k < n, assume k <= m <= n */
+
+ sorglq_(m, n, k, &a[a_offset], lda, &tau[1], &work[1], lwork, &
+ iinfo);
+
+ } else {
+
+/*
+ If k >= n, assume m = n
+
+ Shift the vectors which define the elementary reflectors one
+ row downward, and set the first row and column of P' to
+ those of the unit matrix
+*/
+
+ a[a_dim1 + 1] = 1.f;
+ i__1 = *n;
+ for (i__ = 2; i__ <= i__1; ++i__) {
+ a[i__ + a_dim1] = 0.f;
+/* L40: */
+ }
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ for (i__ = j - 1; i__ >= 2; --i__) {
+ a[i__ + j * a_dim1] = a[i__ - 1 + j * a_dim1];
+/* L50: */
+ }
+ a[j * a_dim1 + 1] = 0.f;
+/* L60: */
+ }
+ if (*n > 1) {
+
+/* Form P'(2:n,2:n) */
+
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ sorglq_(&i__1, &i__2, &i__3, &a[((a_dim1) << (1)) + 2], lda, &
+ tau[1], &work[1], lwork, &iinfo);
+ }
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORGBR */
+
+} /* sorgbr_ */
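+
+/*
+   A minimal usage sketch (disabled with #if 0): reduce an m-by-n matrix
+   (m >= n) to bidiagonal form with sgebrd_, then overwrite it with the
+   first n columns of Q via sorgbr_ above.  The wrapper name is
+   hypothetical; the sgebrd_ prototype follows the f2c convention used in
+   this file, and all scratch arrays are caller-provided.
+*/
+#if 0
+extern int sgebrd_(integer *m, integer *n, real *a, integer *lda, real *d__,
+                   real *e, real *tauq, real *taup, real *work,
+                   integer *lwork, integer *info);
+
+static int example_form_q_from_sgebrd(integer m, integer n, real *a,
+                                      real *d__, real *e, real *tauq,
+                                      real *taup, real *work, integer lwork)
+{
+    integer lda = m, info = 0;
+
+    sgebrd_(&m, &n, a, &lda, d__, e, tauq, taup, work, &lwork, &info);
+    if (info != 0) {
+        return (int) info;
+    }
+    /* VECT = 'Q', k = n: a is overwritten with the first n columns of Q */
+    sorgbr_("Q", &m, &n, &n, a, &lda, tauq, work, &lwork, &info);
+    return (int) info;
+}
+#endif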
+
+/* Subroutine */ int sorghr_(integer *n, integer *ilo, integer *ihi, real *a,
+ integer *lda, real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, j, nb, nh, iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int sorgqr_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *, integer *);
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SORGHR generates a real orthogonal matrix Q which is defined as the
+ product of IHI-ILO elementary reflectors of order N, as returned by
+ SGEHRD:
+
+ Q = H(ilo) H(ilo+1) . . . H(ihi-1).
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix Q. N >= 0.
+
+ ILO (input) INTEGER
+ IHI (input) INTEGER
+ ILO and IHI must have the same values as in the previous call
+ of SGEHRD. Q is equal to the unit matrix except in the
+ submatrix Q(ilo+1:ihi,ilo+1:ihi).
+ 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the vectors which define the elementary reflectors,
+ as returned by SGEHRD.
+ On exit, the N-by-N orthogonal matrix Q.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ TAU (input) REAL array, dimension (N-1)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SGEHRD.
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= IHI-ILO.
+ For optimum performance LWORK >= (IHI-ILO)*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nh = *ihi - *ilo;
+ lquery = *lwork == -1;
+ if (*n < 0) {
+ *info = -1;
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
+ *info = -2;
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < max(1,nh) && ! lquery) {
+ *info = -8;
+ }
+
+ if (*info == 0) {
+ nb = ilaenv_(&c__1, "SORGQR", " ", &nh, &nh, &nh, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ lwkopt = max(1,nh) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGHR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+/*
+ Shift the vectors which define the elementary reflectors one
+ column to the right, and set the first ilo and the last n-ihi
+ rows and columns to those of the unit matrix
+*/
+
+ i__1 = *ilo + 1;
+ for (j = *ihi; j >= i__1; --j) {
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L10: */
+ }
+ i__2 = *ihi;
+ for (i__ = j + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = a[i__ + (j - 1) * a_dim1];
+/* L20: */
+ }
+ i__2 = *n;
+ for (i__ = *ihi + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ i__1 = *ilo;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L50: */
+ }
+ a[j + j * a_dim1] = 1.f;
+/* L60: */
+ }
+ i__1 = *n;
+ for (j = *ihi + 1; j <= i__1; ++j) {
+ i__2 = *n;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L70: */
+ }
+ a[j + j * a_dim1] = 1.f;
+/* L80: */
+ }
+
+ if (nh > 0) {
+
+/* Generate Q(ilo+1:ihi,ilo+1:ihi) */
+
+ sorgqr_(&nh, &nh, &nh, &a[*ilo + 1 + (*ilo + 1) * a_dim1], lda, &tau[*
+ ilo], &work[1], lwork, &iinfo);
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORGHR */
+
+} /* sorghr_ */
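+
+/*
+   A minimal usage sketch (disabled with #if 0): reduce an n-by-n matrix to
+   Hessenberg form with sgehrd_, then form the orthogonal Q in place via
+   sorghr_ above.  The wrapper name is hypothetical; the sgehrd_ prototype
+   follows the f2c convention used in this file.
+*/
+#if 0
+extern int sgehrd_(integer *n, integer *ilo, integer *ihi, real *a,
+                   integer *lda, real *tau, real *work, integer *lwork,
+                   integer *info);
+
+static int example_form_q_from_sgehrd(integer n, real *a, real *tau,
+                                      real *work, integer lwork)
+{
+    /* no balancing: ilo = 1, ihi = n; tau has length n-1, lda = n */
+    integer ilo = 1, ihi = n, lda = n, info = 0;
+
+    sgehrd_(&n, &ilo, &ihi, a, &lda, tau, work, &lwork, &info);
+    if (info != 0) {
+        return (int) info;
+    }
+    sorghr_(&n, &ilo, &ihi, a, &lda, tau, work, &lwork, &info);
+    return (int) info;
+}
+#endif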
+
+/* Subroutine */ int sorgl2_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ real r__1;
+
+ /* Local variables */
+ static integer i__, j, l;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ slarf_(char *, integer *, integer *, real *, integer *, real *,
+ real *, integer *, real *), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SORGL2 generates an m by n real matrix Q with orthonormal rows,
+ which is defined as the first m rows of a product of k elementary
+ reflectors of order n
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by SGELQF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. N >= M.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. M >= K >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the i-th row must contain the vector which defines
+ the elementary reflector H(i), for i = 1,2,...,k, as returned
+ by SGELQF in the first k rows of its array argument A.
+ On exit, the m-by-n matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) REAL array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SGELQF.
+
+ WORK (workspace) REAL array, dimension (M)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *m)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGL2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ return 0;
+ }
+
+ if (*k < *m) {
+
+/* Initialise rows k+1:m to rows of the unit matrix */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (l = *k + 1; l <= i__2; ++l) {
+ a[l + j * a_dim1] = 0.f;
+/* L10: */
+ }
+ if (j > *k && j <= *m) {
+ a[j + j * a_dim1] = 1.f;
+ }
+/* L20: */
+ }
+ }
+
+ for (i__ = *k; i__ >= 1; --i__) {
+
+/* Apply H(i) to A(i:m,i:n) from the right */
+
+ if (i__ < *n) {
+ if (i__ < *m) {
+ a[i__ + i__ * a_dim1] = 1.f;
+ i__1 = *m - i__;
+ i__2 = *n - i__ + 1;
+ slarf_("Right", &i__1, &i__2, &a[i__ + i__ * a_dim1], lda, &
+ tau[i__], &a[i__ + 1 + i__ * a_dim1], lda, &work[1]);
+ }
+ i__1 = *n - i__;
+ r__1 = -tau[i__];
+ sscal_(&i__1, &r__1, &a[i__ + (i__ + 1) * a_dim1], lda);
+ }
+ a[i__ + i__ * a_dim1] = 1.f - tau[i__];
+
+/* Set A(i,1:i-1) to zero */
+
+ i__1 = i__ - 1;
+ for (l = 1; l <= i__1; ++l) {
+ a[i__ + l * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+ return 0;
+
+/* End of SORGL2 */
+
+} /* sorgl2_ */
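+
+/*
+   A quick way to sanity-check the matrix produced above: its rows should
+   be orthonormal, i.e. Q*Q' = I.  A minimal sketch (disabled with #if 0);
+   rows_orthonormality_error is a hypothetical helper name using plain C
+   types.
+*/
+#if 0
+static float rows_orthonormality_error(int m, int n, const float *q, int lda)
+{
+    /* returns max |(Q*Q')(i,j) - delta(i,j)|; small for orthonormal rows */
+    float err = 0.f;
+    int i, j, l;
+    for (i = 0; i < m; ++i) {
+        for (j = 0; j < m; ++j) {
+            float s = (i == j) ? -1.f : 0.f;
+            for (l = 0; l < n; ++l) {
+                s += q[i + l * lda] * q[j + l * lda];
+            }
+            if (s < 0.f) {
+                s = -s;
+            }
+            if (s > err) {
+                err = s;
+            }
+        }
+    }
+    return err;
+}
+#endif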
+
+/* Subroutine */ int sorglq_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sorgl2_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *), slarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, real *, integer *
+ , real *, integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SORGLQ generates an M-by-N real matrix Q with orthonormal rows,
+ which is defined as the first M rows of a product of K elementary
+ reflectors of order N
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by SGELQF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. N >= M.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. M >= K >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the i-th row must contain the vector which defines
+ the elementary reflector H(i), for i = 1,2,...,k, as returned
+ by SGELQF in the first k rows of its array argument A.
+ On exit, the M-by-N matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) REAL array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SGELQF.
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,M).
+ For optimum performance LWORK >= M*NB, where NB is
+ the optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "SORGLQ", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
+ lwkopt = max(1,*m) * nb;
+ work[1] = (real) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if (*n < *m) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *m)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*lwork < max(1,*m) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*m <= 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *m;
+ if (nb > 1 && nb < *k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SORGLQ", " ", m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *m;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORGLQ", " ", m, n, k, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/*
+ Use blocked code after the last block.
+ The first kk rows are handled by the block method.
+*/
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(kk+1:m,1:kk) to zero. */
+
+ i__1 = kk;
+ for (j = 1; j <= i__1; ++j) {
+ i__2 = *m;
+ for (i__ = kk + 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *m) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ sorgl2_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *m) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__2 = *n - i__ + 1;
+ slarft_("Forward", "Rowwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H' to A(i+ib:m,i:n) from the right */
+
+ i__2 = *m - i__ - ib + 1;
+ i__3 = *n - i__ + 1;
+ slarfb_("Right", "Transpose", "Forward", "Rowwise", &i__2, &
+ i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[1], &
+ ldwork, &a[i__ + ib + i__ * a_dim1], lda, &work[ib +
+ 1], &ldwork);
+ }
+
+/* Apply H' to columns i:n of current block */
+
+ i__2 = *n - i__ + 1;
+ sorgl2_(&ib, &i__2, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set columns 1:i-1 of current block to zero */
+
+ i__2 = i__ - 1;
+ for (j = 1; j <= i__2; ++j) {
+ i__3 = i__ + ib - 1;
+ for (l = i__; l <= i__3; ++l) {
+ a[l + j * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SORGLQ */
+
+} /* sorglq_ */
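+
+/*
+   The LWORK = -1 convention documented above supports a two-call idiom:
+   query the optimal workspace size first, then run with it.  A minimal
+   sketch (disabled with #if 0); the wrapper name is hypothetical.
+*/
+#if 0
+#include <stdlib.h>
+static int example_sorglq_with_query(integer m, integer n, integer k,
+                                     real *a, integer lda, real *tau)
+{
+    integer info = 0, lwork = -1;
+    real wkopt, *work;
+
+    /* workspace query: the optimal LWORK comes back in wkopt */
+    sorglq_(&m, &n, &k, a, &lda, tau, &wkopt, &lwork, &info);
+    if (info != 0) {
+        return (int) info;
+    }
+    lwork = (integer) wkopt;
+    work = (real *) malloc((size_t) lwork * sizeof(real));
+    if (work == 0) {
+        return -1;
+    }
+    sorglq_(&m, &n, &k, a, &lda, tau, work, &lwork, &info);
+    free(work);
+    return (int) info;
+}
+#endif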
+
+/* Subroutine */ int sorgqr_(integer *m, integer *n, integer *k, real *a,
+ integer *lda, real *tau, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, l, ib, nb, ki, kk, nx, iws, nbmin, iinfo;
+ extern /* Subroutine */ int sorg2r_(integer *, integer *, integer *, real
+ *, integer *, real *, real *, integer *), slarfb_(char *, char *,
+ char *, char *, integer *, integer *, integer *, real *, integer *
+ , real *, integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SORGQR generates an M-by-N real matrix Q with orthonormal columns,
+ which is defined as the first N columns of a product of K elementary
+ reflectors of order M
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by SGEQRF.
+
+ Arguments
+ =========
+
+ M (input) INTEGER
+ The number of rows of the matrix Q. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix Q. M >= N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines the
+ matrix Q. N >= K >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the i-th column must contain the vector which
+ defines the elementary reflector H(i), for i = 1,2,...,k, as
+ returned by SGEQRF in the first k columns of its array
+ argument A.
+ On exit, the M-by-N matrix Q.
+
+ LDA (input) INTEGER
+ The first dimension of the array A. LDA >= max(1,M).
+
+ TAU (input) REAL array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SGEQRF.
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= max(1,N).
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ nb = ilaenv_(&c__1, "SORGQR", " ", m, n, k, &c_n1, (ftnlen)6, (ftnlen)1);
+ lwkopt = max(1,*n) * nb;
+ work[1] = (real) lwkopt;
+ lquery = *lwork == -1;
+ if (*m < 0) {
+ *info = -1;
+ } else if ((*n < 0) || (*n > *m)) {
+ *info = -2;
+ } else if ((*k < 0) || (*k > *n)) {
+ *info = -3;
+ } else if (*lda < max(1,*m)) {
+ *info = -5;
+ } else if (*lwork < max(1,*n) && ! lquery) {
+ *info = -8;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORGQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ nx = 0;
+ iws = *n;
+ if (nb > 1 && nb < *k) {
+
+/*
+ Determine when to cross over from blocked to unblocked code.
+
+ Computing MAX
+*/
+ i__1 = 0, i__2 = ilaenv_(&c__3, "SORGQR", " ", m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *k) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: reduce NB and
+ determine the minimum value of NB.
+*/
+
+ nb = *lwork / ldwork;
+/* Computing MAX */
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORGQR", " ", m, n, k, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ nbmin = max(i__1,i__2);
+ }
+ }
+ }
+
+ if (nb >= nbmin && nb < *k && nx < *k) {
+
+/*
+ Use blocked code after the last block.
+ The first kk columns are handled by the block method.
+*/
+
+ ki = (*k - nx - 1) / nb * nb;
+/* Computing MIN */
+ i__1 = *k, i__2 = ki + nb;
+ kk = min(i__1,i__2);
+
+/* Set A(1:kk,kk+1:n) to zero. */
+
+ i__1 = *n;
+ for (j = kk + 1; j <= i__1; ++j) {
+ i__2 = kk;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ a[i__ + j * a_dim1] = 0.f;
+/* L10: */
+ }
+/* L20: */
+ }
+ } else {
+ kk = 0;
+ }
+
+/* Use unblocked code for the last or only block. */
+
+ if (kk < *n) {
+ i__1 = *m - kk;
+ i__2 = *n - kk;
+ i__3 = *k - kk;
+ sorg2r_(&i__1, &i__2, &i__3, &a[kk + 1 + (kk + 1) * a_dim1], lda, &
+ tau[kk + 1], &work[1], &iinfo);
+ }
+
+ if (kk > 0) {
+
+/* Use blocked code */
+
+ i__1 = -nb;
+ for (i__ = ki + 1; i__1 < 0 ? i__ >= 1 : i__ <= 1; i__ += i__1) {
+/* Computing MIN */
+ i__2 = nb, i__3 = *k - i__ + 1;
+ ib = min(i__2,i__3);
+ if (i__ + ib <= *n) {
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__2 = *m - i__ + 1;
+ slarft_("Forward", "Columnwise", &i__2, &ib, &a[i__ + i__ *
+ a_dim1], lda, &tau[i__], &work[1], &ldwork);
+
+/* Apply H to A(i:m,i+ib:n) from the left */
+
+ i__2 = *m - i__ + 1;
+ i__3 = *n - i__ - ib + 1;
+ slarfb_("Left", "No transpose", "Forward", "Columnwise", &
+ i__2, &i__3, &ib, &a[i__ + i__ * a_dim1], lda, &work[
+ 1], &ldwork, &a[i__ + (i__ + ib) * a_dim1], lda, &
+ work[ib + 1], &ldwork);
+ }
+
+/* Apply H to rows i:m of current block */
+
+ i__2 = *m - i__ + 1;
+ sorg2r_(&i__2, &ib, &ib, &a[i__ + i__ * a_dim1], lda, &tau[i__], &
+ work[1], &iinfo);
+
+/* Set rows 1:i-1 of current block to zero */
+
+ i__2 = i__ + ib - 1;
+ for (j = i__; j <= i__2; ++j) {
+ i__3 = i__ - 1;
+ for (l = 1; l <= i__3; ++l) {
+ a[l + j * a_dim1] = 0.f;
+/* L30: */
+ }
+/* L40: */
+ }
+/* L50: */
+ }
+ }
+
+ work[1] = (real) iws;
+ return 0;
+
+/* End of SORGQR */
+
+} /* sorgqr_ */
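+
+/*
+   A minimal end-to-end sketch (disabled with #if 0): factor A = Q*R with
+   sgeqrf_, then overwrite the factored storage with the explicit m-by-n Q
+   via sorgqr_ above (k = n <= m).  The wrapper name is hypothetical; the
+   sgeqrf_ prototype follows the f2c convention used in this file.
+*/
+#if 0
+extern int sgeqrf_(integer *m, integer *n, real *a, integer *lda, real *tau,
+                   real *work, integer *lwork, integer *info);
+
+static int example_explicit_q_from_qr(integer m, integer n, real *a,
+                                      real *tau, real *work, integer lwork)
+{
+    /* a is m-by-n column-major with lda = m; tau has length n */
+    integer lda = m, info = 0;
+
+    sgeqrf_(&m, &n, a, &lda, tau, work, &lwork, &info);
+    if (info != 0) {
+        return (int) info;
+    }
+    sorgqr_(&m, &n, &n, a, &lda, tau, work, &lwork, &info);
+    return (int) info;
+}
+#endif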
+
+/* Subroutine */ int sorm2l_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, i1, i2, i3, mi, ni, nq;
+ static real aii;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SORM2L overwrites the general real m by n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q' * C if SIDE = 'L' and TRANS = 'T', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'T',
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by SGEQLF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'T': apply Q' (Transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) REAL array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ SGEQLF in the last k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) REAL array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SGEQLF.
+
+ C (input/output) REAL array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) REAL array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORM2L", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ return 0;
+ }
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(1:m-k+i,1:n) */
+
+ mi = *m - *k + i__;
+ } else {
+
+/* H(i) is applied to C(1:m,1:n-k+i) */
+
+ ni = *n - *k + i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[nq - *k + i__ + i__ * a_dim1];
+ a[nq - *k + i__ + i__ * a_dim1] = 1.f;
+ slarf_(side, &mi, &ni, &a[i__ * a_dim1 + 1], &c__1, &tau[i__], &c__[
+ c_offset], ldc, &work[1]);
+ a[nq - *k + i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of SORM2L */
+
+} /* sorm2l_ */
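+
+/*
+   A minimal usage sketch (disabled with #if 0): applying Q' from an SGEQLF
+   factorization to C from the left without ever forming Q explicitly.
+   The wrapper name is hypothetical; work must have length at least n for
+   SIDE = 'L'.
+*/
+#if 0
+static int example_apply_qt_sorm2l(integer m, integer n, integer k, real *a,
+                                   integer lda, real *tau, real *c__,
+                                   integer ldc, real *work)
+{
+    /* C := Q' * C, with Q of order m defined by the k reflectors in a */
+    integer info = 0;
+
+    sorm2l_("L", "T", &m, &n, &k, a, &lda, tau, c__, &ldc, work, &info);
+    return (int) info;
+}
+#endif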
+
+/* Subroutine */ int sorm2r_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ static real aii;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SORM2R overwrites the general real m by n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q' * C if SIDE = 'L' and TRANS = 'T', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'T',
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by SGEQRF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'T': apply Q' (Transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) REAL array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ SGEQRF in the first k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) REAL array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SGEQRF.
+
+ C (input/output) REAL array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) REAL array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORM2R", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ return 0;
+ }
+
+ if ((left && ! notran) || (! left && notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.f;
+ slarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], &c__1, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of SORM2R */
+
+} /* sorm2r_ */
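+
+/*
+   A minimal sketch (disabled with #if 0) of the classic least-squares use
+   of the routine above: after sgeqrf_, overwrite a right-hand side b with
+   Q'*b; back-substitution against R then finishes min ||Ax - b||.  The
+   wrapper name is hypothetical; work needs only one element since
+   nrhs = 1.
+*/
+#if 0
+static int example_qt_times_b(integer m, integer n, real *qr, integer lda,
+                              real *tau, real *b, real *work)
+{
+    /* C is the m-by-1 column b, so ldc = m and k = n reflectors */
+    integer nrhs = 1, info = 0;
+
+    sorm2r_("L", "T", &m, &nrhs, &n, qr, &lda, tau, b, &m, work, &info);
+    return (int) info;
+}
+#endif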
+
+/* Subroutine */ int sormbr_(char *vect, char *side, char *trans, integer *m,
+ integer *n, integer *k, real *a, integer *lda, real *tau, real *c__,
+ integer *ldc, real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2];
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i1, i2, nb, mi, ni, nq, nw;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical notran, applyq;
+ static char transt[1];
+ extern /* Subroutine */ int sormlq_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ static integer lwkopt;
+ static logical lquery;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ If VECT = 'Q', SORMBR overwrites the general real M-by-N matrix C
+ with
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ If VECT = 'P', SORMBR overwrites the general real M-by-N matrix C
+ with
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': P * C C * P
+ TRANS = 'T': P**T * C C * P**T
+
+ Here Q and P**T are the orthogonal matrices determined by SGEBRD when
+ reducing a real matrix A to bidiagonal form: A = Q * B * P**T. Q and
+ P**T are defined as products of elementary reflectors H(i) and G(i)
+ respectively.
+
+ Let nq = m if SIDE = 'L' and nq = n if SIDE = 'R'. Thus nq is the
+ order of the orthogonal matrix Q or P**T that is applied.
+
+ If VECT = 'Q', A is assumed to have been an NQ-by-K matrix:
+ if nq >= k, Q = H(1) H(2) . . . H(k);
+ if nq < k, Q = H(1) H(2) . . . H(nq-1).
+
+ If VECT = 'P', A is assumed to have been a K-by-NQ matrix:
+ if k < nq, P = G(1) G(2) . . . G(k);
+ if k >= nq, P = G(1) G(2) . . . G(nq-1).
+
+ Arguments
+ =========
+
+ VECT (input) CHARACTER*1
+ = 'Q': apply Q or Q**T;
+ = 'P': apply P or P**T.
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q, Q**T, P or P**T from the Left;
+ = 'R': apply Q, Q**T, P or P**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q or P;
+ = 'T': Transpose, apply Q**T or P**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ If VECT = 'Q', the number of columns in the original
+ matrix reduced by SGEBRD.
+ If VECT = 'P', the number of rows in the original
+ matrix reduced by SGEBRD.
+ K >= 0.
+
+ A (input) REAL array, dimension
+ (LDA,min(nq,K)) if VECT = 'Q'
+ (LDA,nq) if VECT = 'P'
+ The vectors which define the elementary reflectors H(i) and
+ G(i), whose products determine the matrices Q and P, as
+ returned by SGEBRD.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If VECT = 'Q', LDA >= max(1,nq);
+ if VECT = 'P', LDA >= max(1,min(nq,K)).
+
+ TAU (input) REAL array, dimension (min(nq,K))
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i) or G(i) which determines Q or P, as returned
+ by SGEBRD in the array argument TAUQ or TAUP.
+
+ C (input/output) REAL array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q
+ or P*C or P**T*C or C*P or C*P**T.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ applyq = lsame_(vect, "Q");
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q or P and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! applyq && ! lsame_(vect, "P")) {
+ *info = -1;
+ } else if (! left && ! lsame_(side, "R")) {
+ *info = -2;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*k < 0) {
+ *info = -6;
+ } else /* if(complicated condition) */ {
+/* Computing MAX */
+ i__1 = 1, i__2 = min(nq,*k);
+ if ((applyq && *lda < max(1,nq)) || (! applyq && *lda < max(i__1,i__2)
+ )) {
+ *info = -8;
+ } else if (*ldc < max(1,*m)) {
+ *info = -11;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -13;
+ }
+ }
+
+ if (*info == 0) {
+ if (applyq) {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "SORMQR", ch__1, &i__1, n, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "SORMQR", ch__1, m, &i__1, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *m - 1;
+ i__2 = *m - 1;
+ nb = ilaenv_(&c__1, "SORMLQ", ch__1, &i__1, n, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = *n - 1;
+ i__2 = *n - 1;
+ nb = ilaenv_(&c__1, "SORMLQ", ch__1, m, &i__1, &i__2, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMBR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ work[1] = 1.f;
+ if ((*m == 0) || (*n == 0)) {
+ return 0;
+ }
+
+ if (applyq) {
+
+/* Apply Q */
+
+ if (nq >= *k) {
+
+/* Q was determined by a call to SGEBRD with nq >= k */
+
+ sormqr_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* Q was determined by a call to SGEBRD with nq < k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ sormqr_(side, trans, &mi, &ni, &i__1, &a[a_dim1 + 2], lda, &tau[1]
+ , &c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ } else {
+
+/* Apply P */
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+ if (nq > *k) {
+
+/* P was determined by a call to SGEBRD with nq > k */
+
+ sormlq_(side, transt, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], lwork, &iinfo);
+ } else if (nq > 1) {
+
+/* P was determined by a call to SGEBRD with nq <= k */
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ i1 = 2;
+ i2 = 1;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ i1 = 1;
+ i2 = 2;
+ }
+ i__1 = nq - 1;
+ sormlq_(side, transt, &mi, &ni, &i__1, &a[((a_dim1) << (1)) + 1],
+ lda, &tau[1], &c__[i1 + i2 * c_dim1], ldc, &work[1],
+ lwork, &iinfo);
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMBR */
+
+} /* sormbr_ */
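+
+/*
+   A minimal usage sketch (disabled with #if 0): applying Q**T from an
+   SGEBRD reduction to an M-by-N matrix C, e.g. when accumulating left
+   transformations.  The wrapper name is hypothetical; lwork should
+   satisfy the bounds documented above.
+*/
+#if 0
+static int example_apply_qt_sormbr(integer m, integer n, integer k, real *a,
+                                   integer lda, real *tau, real *c__,
+                                   integer ldc, real *work, integer lwork)
+{
+    /* C := Q**T * C, with Q from SGEBRD (VECT = 'Q', SIDE = 'L') */
+    integer info = 0;
+
+    sormbr_("Q", "L", "T", &m, &n, &k, a, &lda, tau, c__, &ldc,
+            work, &lwork, &info);
+    return (int) info;
+}
+#endif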
+
+/* Subroutine */ int sorml2_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq;
+ static real aii;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int slarf_(char *, integer *, integer *, real *,
+ integer *, real *, real *, integer *, real *), xerbla_(
+ char *, integer *);
+ static logical notran;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SORML2 overwrites the general real m by n matrix C with
+
+ Q * C if SIDE = 'L' and TRANS = 'N', or
+
+ Q' * C if SIDE = 'L' and TRANS = 'T', or
+
+ C * Q if SIDE = 'R' and TRANS = 'N', or
+
+ C * Q' if SIDE = 'R' and TRANS = 'T',
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by SGELQF. Q is of order m if SIDE = 'L' and of order n
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q' from the Left
+ = 'R': apply Q or Q' from the Right
+
+ TRANS (input) CHARACTER*1
+ = 'N': apply Q (No transpose)
+ = 'T': apply Q' (Transpose)
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) REAL array, dimension
+ (LDA,M) if SIDE = 'L',
+ (LDA,N) if SIDE = 'R'
+ The i-th row must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ SGELQF in the first k rows of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,K).
+
+ TAU (input) REAL array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SGELQF.
+
+ C (input/output) REAL array, dimension (LDC,N)
+ On entry, the m by n matrix C.
+ On exit, C is overwritten by Q*C or Q'*C or C*Q' or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace) REAL array, dimension
+ (N) if SIDE = 'L',
+ (M) if SIDE = 'R'
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+
+/* NQ is the order of Q */
+
+ if (left) {
+ nq = *m;
+ } else {
+ nq = *n;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORML2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ return 0;
+ }
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = 1;
+ } else {
+ i1 = *k;
+ i2 = 1;
+ i3 = -1;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+ if (left) {
+
+/* H(i) is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H(i) is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H(i) */
+
+ aii = a[i__ + i__ * a_dim1];
+ a[i__ + i__ * a_dim1] = 1.f;
+ slarf_(side, &mi, &ni, &a[i__ + i__ * a_dim1], lda, &tau[i__], &c__[
+ ic + jc * c_dim1], ldc, &work[1]);
+ a[i__ + i__ * a_dim1] = aii;
+/* L10: */
+ }
+ return 0;
+
+/* End of SORML2 */
+
+} /* sorml2_ */
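+
+/*
+   A minimal usage sketch (disabled with #if 0): the right-side
+   counterpart, C := C * Q with Q from an SGELQF factorization.  The
+   wrapper name is hypothetical; work must have length at least m for
+   SIDE = 'R'.
+*/
+#if 0
+static int example_apply_q_right_sorml2(integer m, integer n, integer k,
+                                        real *a, integer lda, real *tau,
+                                        real *c__, integer ldc, real *work)
+{
+    /* Q is of order n here, defined by the k reflectors in the rows of a */
+    integer info = 0;
+
+    sorml2_("R", "N", &m, &n, &k, a, &lda, tau, c__, &ldc, work, &info);
+    return (int) info;
+}
+#endif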
+
+/* Subroutine */ int sormlq_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ static real t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int sorml2_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *), slarfb_(char *, char *, char *, char *
+ , integer *, integer *, integer *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ static logical notran;
+ static integer ldwork;
+ static char transt[1];
+ static integer lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SORMLQ overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by SGELQF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) REAL array, dimension
+ (LDA,M) if SIDE = 'L',
+ (LDA,N) if SIDE = 'R'
+ The i-th row must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ SGELQF in the first k rows of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,K).
+
+ TAU (input) REAL array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SGELQF.
+
+ C (input/output) REAL array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,*k)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "SORMLQ", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMLQ", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORMLQ", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if ((nb < nbmin) || (nb >= *k)) {
+
+/* Use unblocked code */
+
+ sorml2_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ if (notran) {
+ *(unsigned char *)transt = 'T';
+ } else {
+ *(unsigned char *)transt = 'N';
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__4 = nq - i__ + 1;
+ slarft_("Forward", "Rowwise", &i__4, &ib, &a[i__ + i__ * a_dim1],
+ lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ slarfb_(side, transt, "Forward", "Rowwise", &mi, &ni, &ib, &a[i__
+ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc * c_dim1],
+ ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMLQ */
+
+} /* sormlq_ */
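+
+/*
+    A minimal usage sketch (illustrative only, not part of the library)
+    of the LWORK = -1 workspace-query protocol documented above.  It
+    forms C := C*Q**T for the Q of an LQ factorization; sgelqf_ is
+    assumed to have filled a and tau beforehand, and malloc/free are
+    assumptions of the sketch.
+*/
+#if 0
+#include <stdlib.h>
+static void example_sormlq(real *a, real *tau, real *c__, integer m,
+	integer n, integer p)
+{
+    /* a: m x n (m <= n) LQ factor, tau: m scalars, c__: p x n. */
+    integer lda = m, ldc = p, lwork = -1, info = 0;
+    real wkopt, *work;
+
+    /* First call only reports the optimal LWORK in wkopt. */
+    sormlq_("Right", "Transpose", &p, &n, &m, a, &lda, tau, c__, &ldc,
+	    &wkopt, &lwork, &info);
+    lwork = (integer) wkopt;
+    work = (real *) malloc(lwork * sizeof(real));
+    sormlq_("Right", "Transpose", &p, &n, &m, a, &lda, tau, c__, &ldc,
+	    work, &lwork, &info);
+    free(work);
+}
+#endif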
+
+/* Subroutine */ int sormql_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ static real t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int sorm2l_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *), slarfb_(char *, char *, char *, char *
+ , integer *, integer *, integer *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ static logical notran;
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SORMQL overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(k) . . . H(2) H(1)
+
+ as returned by SGEQLF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) REAL array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ SGEQLF in the last k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) REAL array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SGEQLF.
+
+ C (input/output) REAL array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "SORMQL", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMQL", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORMQL", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if ((nb < nbmin) || (nb >= *k)) {
+
+/* Use unblocked code */
+
+ sorm2l_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && notran) || (! left && ! notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ } else {
+ mi = *m;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i+ib-1) . . . H(i+1) H(i)
+*/
+
+ i__4 = nq - *k + i__ + ib - 1;
+ slarft_("Backward", "Columnwise", &i__4, &ib, &a[i__ * a_dim1 + 1]
+ , lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(1:m-k+i+ib-1,1:n) */
+
+ mi = *m - *k + i__ + ib - 1;
+ } else {
+
+/* H or H' is applied to C(1:m,1:n-k+i+ib-1) */
+
+ ni = *n - *k + i__ + ib - 1;
+ }
+
+/* Apply H or H' */
+
+ slarfb_(side, trans, "Backward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ * a_dim1 + 1], lda, t, &c__65, &c__[c_offset], ldc, &
+ work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMQL */
+
+} /* sormql_ */
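+
+/*
+    A minimal sketch (illustrative only): SORMQL pairs with SGEQLF, which
+    stores its reflectors in the last k columns of A.  Applying Q**T from
+    the left to an n x 1 right-hand side with the minimum LWORK of 1,
+    which selects the unblocked path through SORM2L:
+*/
+#if 0
+static void example_sormql(real *a, real *tau, real *b, integer n)
+{
+    integer one = 1, lda = n, ldb = n, lwork = 1, info = 0;
+    real work[1];        /* LWORK >= max(1,1) since C has one column */
+
+    sormql_("Left", "Transpose", &n, &one, &n, a, &lda, tau, b, &ldb,
+	    work, &lwork, &info);
+}
+#endif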
+
+/* Subroutine */ int sormqr_(char *side, char *trans, integer *m, integer *n,
+ integer *k, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2], i__4,
+ i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i__;
+ static real t[4160] /* was [65][64] */;
+ static integer i1, i2, i3, ib, ic, jc, nb, mi, ni, nq, nw, iws;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ extern /* Subroutine */ int sorm2r_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *), slarfb_(char *, char *, char *, char *
+ , integer *, integer *, integer *, real *, integer *, real *,
+ integer *, real *, integer *, real *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slarft_(char *, char *, integer *, integer *,
+ real *, integer *, real *, real *, integer *);
+ static logical notran;
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SORMQR overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix defined as the product of k
+ elementary reflectors
+
+ Q = H(1) H(2) . . . H(k)
+
+ as returned by SGEQRF. Q is of order M if SIDE = 'L' and of order N
+ if SIDE = 'R'.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ K (input) INTEGER
+ The number of elementary reflectors whose product defines
+ the matrix Q.
+ If SIDE = 'L', M >= K >= 0;
+ if SIDE = 'R', N >= K >= 0.
+
+ A (input) REAL array, dimension (LDA,K)
+ The i-th column must contain the vector which defines the
+ elementary reflector H(i), for i = 1,2,...,k, as returned by
+ SGEQRF in the first k columns of its array argument A.
+ A is modified by the routine but restored on exit.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ If SIDE = 'L', LDA >= max(1,M);
+ if SIDE = 'R', LDA >= max(1,N).
+
+ TAU (input) REAL array, dimension (K)
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SGEQRF.
+
+ C (input/output) REAL array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ notran = lsame_(trans, "N");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! notran && ! lsame_(trans, "T")) {
+ *info = -2;
+ } else if (*m < 0) {
+ *info = -3;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if ((*k < 0) || (*k > nq)) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+
+/*
+ Determine the block size. NB may be at most NBMAX, where NBMAX
+ is used to define the local array T.
+
+ Computing MIN
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 64, i__2 = ilaenv_(&c__1, "SORMQR", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nb = min(i__1,i__2);
+ lwkopt = max(1,nw) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SORMQR", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nbmin = 2;
+ ldwork = nw;
+ if (nb > 1 && nb < *k) {
+ iws = nw * nb;
+ if (*lwork < iws) {
+ nb = *lwork / ldwork;
+/*
+ Computing MAX
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = side;
+ i__3[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ i__1 = 2, i__2 = ilaenv_(&c__2, "SORMQR", ch__1, m, n, k, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ nbmin = max(i__1,i__2);
+ }
+ } else {
+ iws = nw;
+ }
+
+ if ((nb < nbmin) || (nb >= *k)) {
+
+/* Use unblocked code */
+
+ sorm2r_(side, trans, m, n, k, &a[a_offset], lda, &tau[1], &c__[
+ c_offset], ldc, &work[1], &iinfo);
+ } else {
+
+/* Use blocked code */
+
+ if ((left && ! notran) || (! left && notran)) {
+ i1 = 1;
+ i2 = *k;
+ i3 = nb;
+ } else {
+ i1 = (*k - 1) / nb * nb + 1;
+ i2 = 1;
+ i3 = -nb;
+ }
+
+ if (left) {
+ ni = *n;
+ jc = 1;
+ } else {
+ mi = *m;
+ ic = 1;
+ }
+
+ i__1 = i2;
+ i__2 = i3;
+ for (i__ = i1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *k - i__ + 1;
+ ib = min(i__4,i__5);
+
+/*
+ Form the triangular factor of the block reflector
+ H = H(i) H(i+1) . . . H(i+ib-1)
+*/
+
+ i__4 = nq - i__ + 1;
+	    slarft_("Forward", "Columnwise", &i__4, &ib, &a[i__ + i__ *
+		    a_dim1], lda, &tau[i__], t, &c__65);
+ if (left) {
+
+/* H or H' is applied to C(i:m,1:n) */
+
+ mi = *m - i__ + 1;
+ ic = i__;
+ } else {
+
+/* H or H' is applied to C(1:m,i:n) */
+
+ ni = *n - i__ + 1;
+ jc = i__;
+ }
+
+/* Apply H or H' */
+
+ slarfb_(side, trans, "Forward", "Columnwise", &mi, &ni, &ib, &a[
+ i__ + i__ * a_dim1], lda, t, &c__65, &c__[ic + jc *
+ c_dim1], ldc, &work[1], &ldwork);
+/* L10: */
+ }
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMQR */
+
+} /* sormqr_ */
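+
+/*
+    A minimal sketch (illustrative only) of the classic use of SORMQR:
+    solving a least-squares problem after SGEQRF.  b(1:n) ends up holding
+    the solution of min ||b - A*x||; sgeqrf_ is assumed to have produced
+    a and tau, and strsm_ performs the final triangular solve.
+*/
+#if 0
+static void example_lstsq(real *a, real *tau, real *b, integer m,
+	integer n)
+{
+    /* a: m x n (m >= n) QR factor, tau: n scalars, b: m x 1. */
+    integer one = 1, lda = m, ldb = m, lwork = 1, info = 0;
+    real work[1], one_r = 1.f;
+
+    /* b := Q**T * b, using the n reflectors of the factorization. */
+    sormqr_("Left", "Transpose", &m, &one, &n, a, &lda, tau, b, &ldb,
+	    work, &lwork, &info);
+    /* Solve R*x = b(1:n) in place; R is the upper triangle of a. */
+    strsm_("Left", "Upper", "No transpose", "Non-unit", &n, &one,
+	    &one_r, a, &lda, b, &ldb);
+}
+#endif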
+
+/* Subroutine */ int sormtr_(char *side, char *uplo, char *trans, integer *m,
+ integer *n, real *a, integer *lda, real *tau, real *c__, integer *ldc,
+ real *work, integer *lwork, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer i1, i2, nb, mi, ni, nq, nw;
+ static logical left;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int sormql_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+ static integer lwkopt;
+ static logical lquery;
+ extern /* Subroutine */ int sormqr_(char *, char *, integer *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SORMTR overwrites the general real M-by-N matrix C with
+
+ SIDE = 'L' SIDE = 'R'
+ TRANS = 'N': Q * C C * Q
+ TRANS = 'T': Q**T * C C * Q**T
+
+ where Q is a real orthogonal matrix of order nq, with nq = m if
+ SIDE = 'L' and nq = n if SIDE = 'R'. Q is defined as the product of
+ nq-1 elementary reflectors, as returned by SSYTRD:
+
+ if UPLO = 'U', Q = H(nq-1) . . . H(2) H(1);
+
+ if UPLO = 'L', Q = H(1) H(2) . . . H(nq-1).
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'L': apply Q or Q**T from the Left;
+ = 'R': apply Q or Q**T from the Right.
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A contains elementary reflectors
+ from SSYTRD;
+ = 'L': Lower triangle of A contains elementary reflectors
+ from SSYTRD.
+
+ TRANS (input) CHARACTER*1
+ = 'N': No transpose, apply Q;
+ = 'T': Transpose, apply Q**T.
+
+ M (input) INTEGER
+ The number of rows of the matrix C. M >= 0.
+
+ N (input) INTEGER
+ The number of columns of the matrix C. N >= 0.
+
+ A (input) REAL array, dimension
+ (LDA,M) if SIDE = 'L'
+ (LDA,N) if SIDE = 'R'
+ The vectors which define the elementary reflectors, as
+ returned by SSYTRD.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A.
+ LDA >= max(1,M) if SIDE = 'L'; LDA >= max(1,N) if SIDE = 'R'.
+
+ TAU (input) REAL array, dimension
+ (M-1) if SIDE = 'L'
+ (N-1) if SIDE = 'R'
+ TAU(i) must contain the scalar factor of the elementary
+ reflector H(i), as returned by SSYTRD.
+
+ C (input/output) REAL array, dimension (LDC,N)
+ On entry, the M-by-N matrix C.
+ On exit, C is overwritten by Q*C or Q**T*C or C*Q**T or C*Q.
+
+ LDC (input) INTEGER
+ The leading dimension of the array C. LDC >= max(1,M).
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If SIDE = 'L', LWORK >= max(1,N);
+ if SIDE = 'R', LWORK >= max(1,M).
+ For optimum performance LWORK >= N*NB if SIDE = 'L', and
+ LWORK >= M*NB if SIDE = 'R', where NB is the optimal
+ blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input arguments
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --tau;
+ c_dim1 = *ldc;
+ c_offset = 1 + c_dim1;
+ c__ -= c_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ left = lsame_(side, "L");
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+
+/* NQ is the order of Q and NW is the minimum dimension of WORK */
+
+ if (left) {
+ nq = *m;
+ nw = *n;
+ } else {
+ nq = *n;
+ nw = *m;
+ }
+ if (! left && ! lsame_(side, "R")) {
+ *info = -1;
+ } else if (! upper && ! lsame_(uplo, "L")) {
+ *info = -2;
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "T")) {
+ *info = -3;
+ } else if (*m < 0) {
+ *info = -4;
+ } else if (*n < 0) {
+ *info = -5;
+ } else if (*lda < max(1,nq)) {
+ *info = -7;
+ } else if (*ldc < max(1,*m)) {
+ *info = -10;
+ } else if (*lwork < max(1,nw) && ! lquery) {
+ *info = -12;
+ }
+
+ if (*info == 0) {
+ if (upper) {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "SORMQL", ch__1, &i__2, n, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "SORMQL", ch__1, m, &i__2, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ } else {
+ if (left) {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *m - 1;
+ i__3 = *m - 1;
+ nb = ilaenv_(&c__1, "SORMQR", ch__1, &i__2, n, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ } else {
+/* Writing concatenation */
+ i__1[0] = 1, a__1[0] = side;
+ i__1[1] = 1, a__1[1] = trans;
+ s_cat(ch__1, a__1, i__1, &c__2, (ftnlen)2);
+ i__2 = *n - 1;
+ i__3 = *n - 1;
+ nb = ilaenv_(&c__1, "SORMQR", ch__1, m, &i__2, &i__3, &c_n1, (
+ ftnlen)6, (ftnlen)2);
+ }
+ }
+ lwkopt = max(1,nw) * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__2 = -(*info);
+ xerbla_("SORMTR", &i__2);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (((*m == 0) || (*n == 0)) || (nq == 1)) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ if (left) {
+ mi = *m - 1;
+ ni = *n;
+ } else {
+ mi = *m;
+ ni = *n - 1;
+ }
+
+ if (upper) {
+
+/* Q was determined by a call to SSYTRD with UPLO = 'U' */
+
+ i__2 = nq - 1;
+ sormql_(side, trans, &mi, &ni, &i__2, &a[((a_dim1) << (1)) + 1], lda,
+ &tau[1], &c__[c_offset], ldc, &work[1], lwork, &iinfo);
+ } else {
+
+/* Q was determined by a call to SSYTRD with UPLO = 'L' */
+
+ if (left) {
+ i1 = 2;
+ i2 = 1;
+ } else {
+ i1 = 1;
+ i2 = 2;
+ }
+ i__2 = nq - 1;
+ sormqr_(side, trans, &mi, &ni, &i__2, &a[a_dim1 + 2], lda, &tau[1], &
+ c__[i1 + i2 * c_dim1], ldc, &work[1], lwork, &iinfo);
+ }
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SORMTR */
+
+} /* sormtr_ */
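+
+/*
+    A minimal sketch (illustrative only) of the usual role of SORMTR:
+    back-transforming eigenvectors of the tridiagonal matrix T to
+    eigenvectors of the original symmetric matrix.  It assumes A was
+    reduced by ssytrd_ with UPLO = 'L' (leaving Q in a/tau) and that Z
+    holds eigenvectors of T, e.g. from ssteqr_ with COMPZ = 'I'.
+*/
+#if 0
+static void example_sormtr(real *a, real *tau, real *z__, real *work,
+	integer *lwork, integer n)
+{
+    integer lda = n, ldz = n, info = 0;
+
+    /* Z := Q * Z */
+    sormtr_("Left", "Lower", "No transpose", &n, &n, a, &lda, tau,
+	    z__, &ldz, work, lwork, &info);
+}
+#endif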
+
+/* Subroutine */ int spotf2_(char *uplo, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer j;
+ static real ajj;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *),
+ sgemv_(char *, integer *, integer *, real *, real *, integer *,
+ real *, integer *, real *, real *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ SPOTF2 computes the Cholesky factorization of a real symmetric
+ positive definite matrix A.
+
+ The factorization has the form
+         A = U' * U, if UPLO = 'U', or
+ A = L * L', if UPLO = 'L',
+ where U is an upper triangular matrix and L is lower triangular.
+
+ This is the unblocked version of the algorithm, calling Level 2 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the upper or lower triangular part of the
+ symmetric matrix A is stored.
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ n by n upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n by n lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+
+ On exit, if INFO = 0, the factor U or L from the Cholesky
+ factorization A = U'*U or A = L*L'.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+ > 0: if INFO = k, the leading minor of order k is not
+ positive definite, and the factorization could not be
+ completed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPOTF2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute U(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j - 1;
+ ajj = a[j + j * a_dim1] - sdot_(&i__2, &a[j * a_dim1 + 1], &c__1,
+ &a[j * a_dim1 + 1], &c__1);
+ if (ajj <= 0.f) {
+ a[j + j * a_dim1] = ajj;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of row J. */
+
+ if (j < *n) {
+ i__2 = j - 1;
+ i__3 = *n - j;
+ sgemv_("Transpose", &i__2, &i__3, &c_b1290, &a[(j + 1) *
+ a_dim1 + 1], lda, &a[j * a_dim1 + 1], &c__1, &c_b1011,
+ &a[j + (j + 1) * a_dim1], lda);
+ i__2 = *n - j;
+ r__1 = 1.f / ajj;
+ sscal_(&i__2, &r__1, &a[j + (j + 1) * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+
+/* Compute L(J,J) and test for non-positive-definiteness. */
+
+ i__2 = j - 1;
+ ajj = a[j + j * a_dim1] - sdot_(&i__2, &a[j + a_dim1], lda, &a[j
+ + a_dim1], lda);
+ if (ajj <= 0.f) {
+ a[j + j * a_dim1] = ajj;
+ goto L30;
+ }
+ ajj = sqrt(ajj);
+ a[j + j * a_dim1] = ajj;
+
+/* Compute elements J+1:N of column J. */
+
+ if (j < *n) {
+ i__2 = *n - j;
+ i__3 = j - 1;
+ sgemv_("No transpose", &i__2, &i__3, &c_b1290, &a[j + 1 +
+ a_dim1], lda, &a[j + a_dim1], lda, &c_b1011, &a[j + 1
+ + j * a_dim1], &c__1);
+ i__2 = *n - j;
+ r__1 = 1.f / ajj;
+ sscal_(&i__2, &r__1, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+ goto L40;
+
+L30:
+ *info = j;
+
+L40:
+ return 0;
+
+/* End of SPOTF2 */
+
+} /* spotf2_ */
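+
+/*
+    A worked instance (illustrative only) of the column recurrence above.
+    For A = [4 2; 2 3] with UPLO = 'U': U(1,1) = sqrt(4) = 2,
+    U(1,2) = 2/2 = 1, U(2,2) = sqrt(3 - 1*1) = sqrt(2).
+*/
+#if 0
+static void example_spotf2(void)
+{
+    integer n = 2, lda = 2, info = 0;
+    /* column-major: { A(1,1), A(2,1), A(1,2), A(2,2) } */
+    real a[4] = { 4.f, 2.f, 2.f, 3.f };
+
+    spotf2_("U", &n, a, &lda, &info);
+    /* a[0] = 2, a[2] = 1, a[3] = sqrt(2); a[1] is not referenced. */
+}
+#endif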
+
+/* Subroutine */ int spotrf_(char *uplo, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer j, jb, nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+ ), ssyrk_(char *, char *, integer
+ *, integer *, real *, real *, integer *, real *, real *, integer *
+ ), spotf2_(char *, integer *, real *, integer *,
+ integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ SPOTRF computes the Cholesky factorization of a real symmetric
+ positive definite matrix A.
+
+ The factorization has the form
+ A = U**T * U, if UPLO = 'U', or
+ A = L * L**T, if UPLO = 'L',
+ where U is an upper triangular matrix and L is lower triangular.
+
+ This is the block version of the algorithm, calling Level 3 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ N-by-N upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading N-by-N lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+
+ On exit, if INFO = 0, the factor U or L from the Cholesky
+ factorization A = U**T*U or A = L*L**T.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, the leading minor of order i is not
+ positive definite, and the factorization could not be
+ completed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPOTRF", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "SPOTRF", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+ if ((nb <= 1) || (nb >= *n)) {
+
+/* Use unblocked code. */
+
+ spotf2_(uplo, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code. */
+
+ if (upper) {
+
+/* Compute the Cholesky factorization A = U'*U. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+
+/*
+ Update and factorize the current diagonal block and test
+ for non-positive-definiteness.
+
+ Computing MIN
+*/
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ ssyrk_("Upper", "Transpose", &jb, &i__3, &c_b1290, &a[j *
+ a_dim1 + 1], lda, &c_b1011, &a[j + j * a_dim1], lda);
+ spotf2_("Upper", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block row. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ sgemm_("Transpose", "No transpose", &jb, &i__3, &i__4, &
+ c_b1290, &a[j * a_dim1 + 1], lda, &a[(j + jb) *
+ a_dim1 + 1], lda, &c_b1011, &a[j + (j + jb) *
+ a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ strsm_("Left", "Upper", "Transpose", "Non-unit", &jb, &
+ i__3, &c_b1011, &a[j + j * a_dim1], lda, &a[j + (
+ j + jb) * a_dim1], lda);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the Cholesky factorization A = L*L'. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (j = 1; i__1 < 0 ? j >= i__2 : j <= i__2; j += i__1) {
+
+/*
+ Update and factorize the current diagonal block and test
+ for non-positive-definiteness.
+
+ Computing MIN
+*/
+ i__3 = nb, i__4 = *n - j + 1;
+ jb = min(i__3,i__4);
+ i__3 = j - 1;
+ ssyrk_("Lower", "No transpose", &jb, &i__3, &c_b1290, &a[j +
+ a_dim1], lda, &c_b1011, &a[j + j * a_dim1], lda);
+ spotf2_("Lower", &jb, &a[j + j * a_dim1], lda, info);
+ if (*info != 0) {
+ goto L30;
+ }
+ if (j + jb <= *n) {
+
+/* Compute the current block column. */
+
+ i__3 = *n - j - jb + 1;
+ i__4 = j - 1;
+ sgemm_("No transpose", "Transpose", &i__3, &jb, &i__4, &
+ c_b1290, &a[j + jb + a_dim1], lda, &a[j + a_dim1],
+ lda, &c_b1011, &a[j + jb + j * a_dim1], lda);
+ i__3 = *n - j - jb + 1;
+ strsm_("Right", "Lower", "Transpose", "Non-unit", &i__3, &
+ jb, &c_b1011, &a[j + j * a_dim1], lda, &a[j + jb
+ + j * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+ goto L40;
+
+L30:
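+/*     The INFO set by SPOTF2 is relative to the diagonal block starting
+       at column J; adding J - 1 converts it to the index of the first
+       non-positive-definite leading minor of the full matrix. */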
+ *info = *info + j - 1;
+
+L40:
+ return 0;
+
+/* End of SPOTRF */
+
+} /* spotrf_ */
+
+/* Subroutine */ int spotri_(char *uplo, integer *n, real *a, integer *lda,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), slauum_(
+ char *, integer *, real *, integer *, integer *), strtri_(
+ char *, char *, integer *, real *, integer *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ SPOTRI computes the inverse of a real symmetric positive definite
+ matrix A using the Cholesky factorization A = U**T*U or A = L*L**T
+ computed by SPOTRF.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the triangular factor U or L from the Cholesky
+ factorization A = U**T*U or A = L*L**T, as computed by
+ SPOTRF.
+ On exit, the upper or lower triangle of the (symmetric)
+ inverse of A, overwriting the input factor U or L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, the (i,i) element of the factor U or L is
+ zero, and the inverse could not be computed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPOTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+ strtri_(uplo, "Non-unit", n, &a[a_offset], lda, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+/* Form inv(U)*inv(U)' or inv(L)'*inv(L). */
+
+ slauum_(uplo, n, &a[a_offset], lda, info);
+
+ return 0;
+
+/* End of SPOTRI */
+
+} /* spotri_ */
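+
+/*
+    A minimal sketch (illustrative only) of the factor-then-invert
+    pipeline SPOTRI belongs to.  Only the UPLO triangle is stored, and
+    on success it is overwritten with the same triangle of A**-1.
+*/
+#if 0
+static void example_spd_inverse(real *a, integer n)
+{
+    integer lda = n, info = 0;
+
+    spotrf_("U", &n, a, &lda, &info);       /* A = U**T * U */
+    if (info == 0) {
+	spotri_("U", &n, a, &lda, &info);   /* upper triangle of A**-1 */
+    }
+}
+#endif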
+
+/* Subroutine */ int spotrs_(char *uplo, integer *n, integer *nrhs, real *a,
+ integer *lda, real *b, integer *ldb, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ static logical upper;
+ extern /* Subroutine */ int strsm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+ ), xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ SPOTRS solves a system of linear equations A*X = B with a symmetric
+ positive definite matrix A using the Cholesky factorization
+ A = U**T*U or A = L*L**T computed by SPOTRF.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrix B. NRHS >= 0.
+
+ A (input) REAL array, dimension (LDA,N)
+ The triangular factor U or L from the Cholesky factorization
+ A = U**T*U or A = L*L**T, as computed by SPOTRF.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ B (input/output) REAL array, dimension (LDB,NRHS)
+ On entry, the right hand side matrix B.
+ On exit, the solution matrix X.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SPOTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*n == 0) || (*nrhs == 0)) {
+ return 0;
+ }
+
+ if (upper) {
+
+/*
+ Solve A*X = B where A = U'*U.
+
+ Solve U'*X = B, overwriting B with X.
+*/
+
+ strsm_("Left", "Upper", "Transpose", "Non-unit", n, nrhs, &c_b1011, &
+ a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ strsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b1011,
+ &a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/*
+ Solve A*X = B where A = L*L'.
+
+ Solve L*X = B, overwriting B with X.
+*/
+
+ strsm_("Left", "Lower", "No transpose", "Non-unit", n, nrhs, &c_b1011,
+ &a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ strsm_("Left", "Lower", "Transpose", "Non-unit", n, nrhs, &c_b1011, &
+ a[a_offset], lda, &b[b_offset], ldb);
+ }
+
+ return 0;
+
+/* End of SPOTRS */
+
+} /* spotrs_ */
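+
+/*
+    A minimal sketch (illustrative only): SPOTRS is the second half of
+    the standard solve path for a symmetric positive definite system,
+    performing exactly the two triangular solves shown above for each
+    right-hand side.
+*/
+#if 0
+static void example_spd_solve(real *a, real *b, integer n, integer nrhs)
+{
+    integer lda = n, ldb = n, info = 0;
+
+    spotrf_("L", &n, a, &lda, &info);       /* A = L * L**T */
+    if (info == 0) {
+	spotrs_("L", &n, &nrhs, a, &lda, b, &ldb, &info);
+    }
+}
+#endif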
+
+/* Subroutine */ int sstedc_(char *compz, integer *n, real *d__, real *e,
+ real *z__, integer *ldz, real *work, integer *lwork, integer *iwork,
+ integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double log(doublereal);
+ integer pow_ii(integer *, integer *);
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j, k, m;
+ static real p;
+ static integer ii, end, lgn;
+ static real eps, tiny;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
+ integer *, real *, real *, integer *, real *, integer *, real *,
+ real *, integer *);
+ static integer lwmin, start;
+ extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
+ integer *), slaed0_(integer *, integer *, integer *, real *, real
+ *, real *, integer *, real *, integer *, real *, integer *,
+ integer *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *), slacpy_(char *, integer *, integer *, real *, integer *,
+ real *, integer *), slaset_(char *, integer *, integer *,
+ real *, real *, real *, integer *);
+ static integer liwmin, icompz;
+ static real orgnrm;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *),
+ slasrt_(char *, integer *, real *, integer *);
+ static logical lquery;
+ static integer smlsiz;
+ extern /* Subroutine */ int ssteqr_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *);
+ static integer storez, strtrw;
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SSTEDC computes all eigenvalues and, optionally, eigenvectors of a
+ symmetric tridiagonal matrix using the divide and conquer method.
+ The eigenvectors of a full or band real symmetric matrix can also be
+ found if SSYTRD or SSPTRD or SSBTRD has been used to reduce this
+ matrix to tridiagonal form.
+
+ This code makes very mild assumptions about floating point
+ arithmetic. It will work on machines with a guard digit in
+ add/subtract, or on those binary machines without guard digits
+ which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or Cray-2.
+ It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none. See SLAED3 for details.
+
+ Arguments
+ =========
+
+ COMPZ (input) CHARACTER*1
+ = 'N': Compute eigenvalues only.
+ = 'I': Compute eigenvectors of tridiagonal matrix also.
+ = 'V': Compute eigenvectors of original dense symmetric
+ matrix also. On entry, Z contains the orthogonal
+ matrix used to reduce the original matrix to
+ tridiagonal form.
+
+ N (input) INTEGER
+ The dimension of the symmetric tridiagonal matrix. N >= 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the diagonal elements of the tridiagonal matrix.
+ On exit, if INFO = 0, the eigenvalues in ascending order.
+
+ E (input/output) REAL array, dimension (N-1)
+ On entry, the subdiagonal elements of the tridiagonal matrix.
+ On exit, E has been destroyed.
+
+ Z (input/output) REAL array, dimension (LDZ,N)
+ On entry, if COMPZ = 'V', then Z contains the orthogonal
+ matrix used in the reduction to tridiagonal form.
+ On exit, if INFO = 0, then if COMPZ = 'V', Z contains the
+ orthonormal eigenvectors of the original symmetric matrix,
+ and if COMPZ = 'I', Z contains the orthonormal eigenvectors
+ of the symmetric tridiagonal matrix.
+ If COMPZ = 'N', then Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= 1.
+ If eigenvectors are desired, then LDZ >= max(1,N).
+
+ WORK (workspace/output) REAL array,
+ dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If COMPZ = 'N' or N <= 1 then LWORK must be at least 1.
+ If COMPZ = 'V' and N > 1 then LWORK must be at least
+ ( 1 + 3*N + 2*N*lg N + 3*N**2 ),
+ where lg( N ) = smallest integer k such
+ that 2**k >= N.
+ If COMPZ = 'I' and N > 1 then LWORK must be at least
+ ( 1 + 4*N + N**2 ).
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ IWORK (workspace/output) INTEGER array, dimension (LIWORK)
+ On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
+
+ LIWORK (input) INTEGER
+ The dimension of the array IWORK.
+ If COMPZ = 'N' or N <= 1 then LIWORK must be at least 1.
+ If COMPZ = 'V' and N > 1 then LIWORK must be at least
+ ( 6 + 6*N + 5*N*lg N ).
+ If COMPZ = 'I' and N > 1 then LIWORK must be at least
+ ( 3 + 5*N ).
+
+ If LIWORK = -1, then a workspace query is assumed; the
+ routine only calculates the optimal size of the IWORK array,
+ returns this value as the first entry of the IWORK array, and
+ no error message related to LIWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit.
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+ > 0: The algorithm failed to compute an eigenvalue while
+ working on the submatrix lying in rows and columns
+ INFO/(N+1) through mod(INFO,N+1).
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+ Modified by Francoise Tisseur, University of Tennessee.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ *info = 0;
+ lquery = (*lwork == -1) || (*liwork == -1);
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if ((*n <= 1) || (icompz <= 0)) {
+ liwmin = 1;
+ lwmin = 1;
+ } else {
+ lgn = (integer) (log((real) (*n)) / log(2.f));
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (pow_ii(&c__2, &lgn) < *n) {
+ ++lgn;
+ }
+ if (icompz == 1) {
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = *n * 3 + 1 + ((*n) << (1)) * lgn + i__1 * i__1 * 3;
+ liwmin = *n * 6 + 6 + *n * 5 * lgn;
+ } else if (icompz == 2) {
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = ((*n) << (2)) + 1 + i__1 * i__1;
+ liwmin = *n * 5 + 3;
+ }
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if ((*ldz < 1) || (icompz > 0 && *ldz < max(1,*n))) {
+ *info = -6;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -8;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSTEDC", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+ if (*n == 1) {
+ if (icompz != 0) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+
+ smlsiz = ilaenv_(&c__9, "SSTEDC", " ", &c__0, &c__0, &c__0, &c__0, (
+ ftnlen)6, (ftnlen)1);
+
+/*
+ If the following conditional clause is removed, then the routine
+ will use the Divide and Conquer routine to compute only the
+ eigenvalues, which requires (3N + 3N**2) real workspace and
+ (2 + 5N + 2N lg(N)) integer workspace.
+ Since on many architectures SSTERF is much faster than any other
+ algorithm for finding eigenvalues only, it is used here
+ as the default.
+
+ If COMPZ = 'N', use SSTERF to compute the eigenvalues.
+*/
+
+ if (icompz == 0) {
+ ssterf_(n, &d__[1], &e[1], info);
+ return 0;
+ }
+
+/*
+ If N is smaller than the minimum divide size (SMLSIZ+1), then
+ solve the problem with another solver.
+*/
+
+ if (*n <= smlsiz) {
+ if (icompz == 0) {
+ ssterf_(n, &d__[1], &e[1], info);
+ return 0;
+ } else if (icompz == 2) {
+ ssteqr_("I", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1],
+ info);
+ return 0;
+ } else {
+ ssteqr_("V", n, &d__[1], &e[1], &z__[z_offset], ldz, &work[1],
+ info);
+ return 0;
+ }
+ }
+
+/*
+ If COMPZ = 'V', the Z matrix must be stored elsewhere for later
+ use.
+*/
+
+ if (icompz == 1) {
+ storez = *n * *n + 1;
+ } else {
+ storez = 1;
+ }
+
+ if (icompz == 2) {
+ slaset_("Full", n, n, &c_b320, &c_b1011, &z__[z_offset], ldz);
+ }
+
+/* Scale. */
+
+ orgnrm = slanst_("M", n, &d__[1], &e[1]);
+ if (orgnrm == 0.f) {
+ return 0;
+ }
+
+ eps = slamch_("Epsilon");
+
+ start = 1;
+
+/* while ( START <= N ) */
+
+L10:
+ if (start <= *n) {
+
+/*
+ Let END be the position of the next subdiagonal entry such that
+ E( END ) <= TINY or END = N if no such subdiagonal exists. The
+ matrix identified by the elements between START and END
+ constitutes an independent sub-problem.
+*/
+
+ end = start;
+L20:
+ if (end < *n) {
+ tiny = eps * sqrt((r__1 = d__[end], dabs(r__1))) * sqrt((r__2 =
+ d__[end + 1], dabs(r__2)));
+ if ((r__1 = e[end], dabs(r__1)) > tiny) {
+ ++end;
+ goto L20;
+ }
+ }
+
+/* (Sub) Problem determined. Compute its size and solve it. */
+
+ m = end - start + 1;
+ if (m == 1) {
+ start = end + 1;
+ goto L10;
+ }
+ if (m > smlsiz) {
+ *info = smlsiz;
+
+/* Scale. */
+
+ orgnrm = slanst_("M", &m, &d__[start], &e[start]);
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, &m, &c__1, &d__[
+ start], &m, info);
+ i__1 = m - 1;
+ i__2 = m - 1;
+ slascl_("G", &c__0, &c__0, &orgnrm, &c_b1011, &i__1, &c__1, &e[
+ start], &i__2, info);
+
+ if (icompz == 1) {
+ strtrw = 1;
+ } else {
+ strtrw = start;
+ }
+ slaed0_(&icompz, n, &m, &d__[start], &e[start], &z__[strtrw +
+ start * z_dim1], ldz, &work[1], n, &work[storez], &iwork[
+ 1], info);
+ if (*info != 0) {
+ *info = (*info / (m + 1) + start - 1) * (*n + 1) + *info % (m
+ + 1) + start - 1;
+ return 0;
+ }
+
+/* Scale back. */
+
+ slascl_("G", &c__0, &c__0, &c_b1011, &orgnrm, &m, &c__1, &d__[
+ start], &m, info);
+
+ } else {
+ if (icompz == 1) {
+
+/*
+ Since QR won't update a Z matrix which is larger than the
+ length of D, we must solve the sub-problem in a workspace and
+ then multiply back into Z.
+*/
+
+ ssteqr_("I", &m, &d__[start], &e[start], &work[1], &m, &work[
+ m * m + 1], info);
+ slacpy_("A", n, &m, &z__[start * z_dim1 + 1], ldz, &work[
+ storez], n);
+/*              WORK(STOREZ) was filled by SLACPY above with leading
+                dimension N, so N (not LDZ) is its leading dimension in
+                this product. */
+		sgemm_("N", "N", n, &m, &m, &c_b1011, &work[storez], n, &
+		    work[1], &m, &c_b320, &z__[start * z_dim1 + 1], ldz);
+ } else if (icompz == 2) {
+ ssteqr_("I", &m, &d__[start], &e[start], &z__[start + start *
+ z_dim1], ldz, &work[1], info);
+ } else {
+ ssterf_(&m, &d__[start], &e[start], info);
+ }
+ if (*info != 0) {
+ *info = start * (*n + 1) + end;
+ return 0;
+ }
+ }
+
+ start = end + 1;
+ goto L10;
+ }
+
+/*
+ endwhile
+
+ If the problem split any number of times, then the eigenvalues
+ will not be properly ordered. Here we permute the eigenvalues
+ (and the associated eigenvectors) into ascending order.
+*/
+
+ if (m != *n) {
+ if (icompz == 0) {
+
+/* Use Quick Sort */
+
+ slasrt_("I", n, &d__[1], info);
+
+ } else {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L30: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1
+ + 1], &c__1);
+ }
+/* L40: */
+ }
+ }
+ }
+
+ work[1] = (real) lwmin;
+ iwork[1] = liwmin;
+
+ return 0;
+
+/* End of SSTEDC */
+
+} /* sstedc_ */
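+
+/*
+    A minimal sketch (illustrative only) of the double workspace query
+    (LWORK = LIWORK = -1) documented above, computing all eigenvalues
+    and eigenvectors of a symmetric tridiagonal matrix (COMPZ = 'I').
+    malloc/free are assumptions of the sketch, not of the library.
+*/
+#if 0
+#include <stdlib.h>
+static void example_sstedc(real *d__, real *e, real *z__, integer n)
+{
+    integer ldz = n, lwork = -1, liwork = -1, info = 0, iwkopt;
+    real wkopt, *work;
+    integer *iwork;
+
+    /* One call with both sizes = -1 reports both optima. */
+    sstedc_("I", &n, d__, e, z__, &ldz, &wkopt, &lwork, &iwkopt,
+	    &liwork, &info);
+    lwork = (integer) wkopt;
+    liwork = iwkopt;
+    work = (real *) malloc(lwork * sizeof(real));
+    iwork = (integer *) malloc(liwork * sizeof(integer));
+    sstedc_("I", &n, d__, e, z__, &ldz, work, &lwork, iwork, &liwork,
+	    &info);
+    free(work);
+    free(iwork);
+}
+#endif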
+
+/* Subroutine */ int ssteqr_(char *compz, integer *n, real *d__, real *e,
+ real *z__, integer *ldz, real *work, integer *info)
+{
+ /* System generated locals */
+ integer z_dim1, z_offset, i__1, i__2;
+ real r__1, r__2;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ static real b, c__, f, g;
+ static integer i__, j, k, l, m;
+ static real p, r__, s;
+ static integer l1, ii, mm, lm1, mm1, nm1;
+ static real rt1, rt2, eps;
+ static integer lsv;
+ static real tst, eps2;
+ static integer lend, jtot;
+ extern /* Subroutine */ int slae2_(real *, real *, real *, real *, real *)
+ ;
+ extern logical lsame_(char *, char *);
+ static real anorm;
+ extern /* Subroutine */ int slasr_(char *, char *, char *, integer *,
+ integer *, real *, real *, real *, integer *), sswap_(integer *, real *, integer *, real *, integer *);
+ static integer lendm1, lendp1;
+ extern /* Subroutine */ int slaev2_(real *, real *, real *, real *, real *
+ , real *, real *);
+ extern doublereal slapy2_(real *, real *);
+ static integer iscale;
+ extern doublereal slamch_(char *);
+ static real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static real safmax;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ static integer lendsv;
+ extern /* Subroutine */ int slartg_(real *, real *, real *, real *, real *
+ ), slaset_(char *, integer *, integer *, real *, real *, real *,
+ integer *);
+ static real ssfmin;
+ static integer nmaxit, icompz;
+ static real ssfmax;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ SSTEQR computes all eigenvalues and, optionally, eigenvectors of a
+ symmetric tridiagonal matrix using the implicit QL or QR method.
+ The eigenvectors of a full or band symmetric matrix can also be found
+ if SSYTRD or SSPTRD or SSBTRD has been used to reduce this matrix to
+ tridiagonal form.
+
+ Arguments
+ =========
+
+ COMPZ (input) CHARACTER*1
+ = 'N': Compute eigenvalues only.
+ = 'V': Compute eigenvalues and eigenvectors of the original
+ symmetric matrix. On entry, Z must contain the
+ orthogonal matrix used to reduce the original matrix
+ to tridiagonal form.
+ = 'I': Compute eigenvalues and eigenvectors of the
+ tridiagonal matrix. Z is initialized to the identity
+ matrix.
+
+ N (input) INTEGER
+ The order of the matrix. N >= 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the diagonal elements of the tridiagonal matrix.
+ On exit, if INFO = 0, the eigenvalues in ascending order.
+
+ E (input/output) REAL array, dimension (N-1)
+ On entry, the (n-1) subdiagonal elements of the tridiagonal
+ matrix.
+ On exit, E has been destroyed.
+
+ Z (input/output) REAL array, dimension (LDZ, N)
+ On entry, if COMPZ = 'V', then Z contains the orthogonal
+ matrix used in the reduction to tridiagonal form.
+ On exit, if INFO = 0, then if COMPZ = 'V', Z contains the
+ orthonormal eigenvectors of the original symmetric matrix,
+ and if COMPZ = 'I', Z contains the orthonormal eigenvectors
+ of the symmetric tridiagonal matrix.
+ If COMPZ = 'N', then Z is not referenced.
+
+ LDZ (input) INTEGER
+ The leading dimension of the array Z. LDZ >= 1, and if
+ eigenvectors are desired, then LDZ >= max(1,N).
+
+ WORK (workspace) REAL array, dimension (max(1,2*N-2))
+ If COMPZ = 'N', then WORK is not referenced.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: the algorithm has failed to find all the eigenvalues in
+ a total of 30*N iterations; if INFO = i, then i
+ elements of E have not converged to zero; on exit, D
+ and E contain the elements of a symmetric tridiagonal
+ matrix which is orthogonally similar to the original
+ matrix.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --d__;
+ --e;
+ z_dim1 = *ldz;
+ z_offset = 1 + z_dim1;
+ z__ -= z_offset;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+
+ if (lsame_(compz, "N")) {
+ icompz = 0;
+ } else if (lsame_(compz, "V")) {
+ icompz = 1;
+ } else if (lsame_(compz, "I")) {
+ icompz = 2;
+ } else {
+ icompz = -1;
+ }
+ if (icompz < 0) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if ((*ldz < 1) || (icompz > 0 && *ldz < max(1,*n))) {
+ *info = -6;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSTEQR", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ if (icompz == 2) {
+ z__[z_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/* Determine the unit roundoff and over/underflow thresholds. */
+
+ eps = slamch_("E");
+/* Computing 2nd power */
+ r__1 = eps;
+ eps2 = r__1 * r__1;
+ safmin = slamch_("S");
+ safmax = 1.f / safmin;
+ ssfmax = sqrt(safmax) / 3.f;
+ ssfmin = sqrt(safmin) / eps2;
+
+/*
+ Compute the eigenvalues and eigenvectors of the tridiagonal
+ matrix.
+*/
+
+ if (icompz == 2) {
+ slaset_("Full", n, n, &c_b320, &c_b1011, &z__[z_offset], ldz);
+ }
+
+ nmaxit = *n * 30;
+ jtot = 0;
+
+/*
+ Determine where the matrix splits and choose QL or QR iteration
+ for each block, according to whether top or bottom diagonal
+ element is smaller.
+*/
+
+ l1 = 1;
+ nm1 = *n - 1;
+
+L10:
+ if (l1 > *n) {
+ goto L160;
+ }
+ if (l1 > 1) {
+ e[l1 - 1] = 0.f;
+ }
+ if (l1 <= nm1) {
+ i__1 = nm1;
+ for (m = l1; m <= i__1; ++m) {
+ tst = (r__1 = e[m], dabs(r__1));
+ if (tst == 0.f) {
+ goto L30;
+ }
+ if (tst <= sqrt((r__1 = d__[m], dabs(r__1))) * sqrt((r__2 = d__[m
+ + 1], dabs(r__2))) * eps) {
+ e[m] = 0.f;
+ goto L30;
+ }
+/* L20: */
+ }
+ }
+ m = *n;
+
+L30:
+ l = l1;
+ lsv = l;
+ lend = m;
+ lendsv = lend;
+ l1 = m + 1;
+ if (lend == l) {
+ goto L10;
+ }
+
+/* Scale submatrix in rows and columns L to LEND */
+
+ i__1 = lend - l + 1;
+ anorm = slanst_("I", &i__1, &d__[l], &e[l]);
+ iscale = 0;
+ if (anorm == 0.f) {
+ goto L10;
+ }
+ if (anorm > ssfmax) {
+ iscale = 1;
+ i__1 = lend - l + 1;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+ info);
+ } else if (anorm < ssfmin) {
+ iscale = 2;
+ i__1 = lend - l + 1;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+ info);
+ }
+
+/* Choose between QL and QR iteration */
+
+ if ((r__1 = d__[lend], dabs(r__1)) < (r__2 = d__[l], dabs(r__2))) {
+ lend = lsv;
+ l = lendsv;
+ }
+
+ if (lend > l) {
+
+/*
+ QL Iteration
+
+ Look for small subdiagonal element.
+*/
+
+L40:
+ if (l != lend) {
+ lendm1 = lend - 1;
+ i__1 = lendm1;
+ for (m = l; m <= i__1; ++m) {
+/* Computing 2nd power */
+ r__2 = (r__1 = e[m], dabs(r__1));
+ tst = r__2 * r__2;
+ if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 = d__[m
+ + 1], dabs(r__2)) + safmin) {
+ goto L60;
+ }
+/* L50: */
+ }
+ }
+
+ m = lend;
+
+L60:
+ if (m < lend) {
+ e[m] = 0.f;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L80;
+ }
+
+/*
+ If remaining matrix is 2-by-2, use SLAE2 or SLAEV2
+ to compute its eigensystem.
+*/
+
+ if (m == l + 1) {
+ if (icompz > 0) {
+ slaev2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2, &c__, &s);
+ work[l] = c__;
+ work[*n - 1 + l] = s;
+ slasr_("R", "V", "B", n, &c__2, &work[l], &work[*n - 1 + l], &
+ z__[l * z_dim1 + 1], ldz);
+ } else {
+ slae2_(&d__[l], &e[l], &d__[l + 1], &rt1, &rt2);
+ }
+ d__[l] = rt1;
+ d__[l + 1] = rt2;
+ e[l] = 0.f;
+ l += 2;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l + 1] - p) / (e[l] * 2.f);
+ r__ = slapy2_(&g, &c_b1011);
+ g = d__[m] - p + e[l] / (g + r_sign(&r__, &g));
+
+ s = 1.f;
+ c__ = 1.f;
+ p = 0.f;
+
+/* Inner loop */
+
+ mm1 = m - 1;
+ i__1 = l;
+ for (i__ = mm1; i__ >= i__1; --i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ slartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m - 1) {
+ e[i__ + 1] = r__;
+ }
+ g = d__[i__ + 1] - p;
+ r__ = (d__[i__] - g) * s + c__ * 2.f * b;
+ p = s * r__;
+ d__[i__ + 1] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = -s;
+ }
+
+/* L70: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = m - l + 1;
+ slasr_("R", "V", "B", n, &mm, &work[l], &work[*n - 1 + l], &z__[l
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[l] = g;
+ goto L40;
+
+/* Eigenvalue found. */
+
+L80:
+ d__[l] = p;
+
+ ++l;
+ if (l <= lend) {
+ goto L40;
+ }
+ goto L140;
+
+ } else {
+
+/*
+ QR Iteration
+
+ Look for small superdiagonal element.
+*/
+
+L90:
+ if (l != lend) {
+ lendp1 = lend + 1;
+ i__1 = lendp1;
+ for (m = l; m >= i__1; --m) {
+/* Computing 2nd power */
+ r__2 = (r__1 = e[m - 1], dabs(r__1));
+ tst = r__2 * r__2;
+ if (tst <= eps2 * (r__1 = d__[m], dabs(r__1)) * (r__2 = d__[m
+ - 1], dabs(r__2)) + safmin) {
+ goto L110;
+ }
+/* L100: */
+ }
+ }
+
+ m = lend;
+
+L110:
+ if (m > lend) {
+ e[m - 1] = 0.f;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L130;
+ }
+
+/*
+ If remaining matrix is 2-by-2, use SLAE2 or SLAEV2
+ to compute its eigensystem.
+*/
+
+ if (m == l - 1) {
+ if (icompz > 0) {
+ slaev2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2, &c__, &s)
+ ;
+ work[m] = c__;
+ work[*n - 1 + m] = s;
+ slasr_("R", "V", "F", n, &c__2, &work[m], &work[*n - 1 + m], &
+ z__[(l - 1) * z_dim1 + 1], ldz);
+ } else {
+ slae2_(&d__[l - 1], &e[l - 1], &d__[l], &rt1, &rt2);
+ }
+ d__[l - 1] = rt1;
+ d__[l] = rt2;
+ e[l - 1] = 0.f;
+ l += -2;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+ }
+
+ if (jtot == nmaxit) {
+ goto L140;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ g = (d__[l - 1] - p) / (e[l - 1] * 2.f);
+ r__ = slapy2_(&g, &c_b1011);
+ g = d__[m] - p + e[l - 1] / (g + r_sign(&r__, &g));
+
+ s = 1.f;
+ c__ = 1.f;
+ p = 0.f;
+
+/* Inner loop */
+
+ lm1 = l - 1;
+ i__1 = lm1;
+ for (i__ = m; i__ <= i__1; ++i__) {
+ f = s * e[i__];
+ b = c__ * e[i__];
+ slartg_(&g, &f, &c__, &s, &r__);
+ if (i__ != m) {
+ e[i__ - 1] = r__;
+ }
+ g = d__[i__] - p;
+ r__ = (d__[i__ + 1] - g) * s + c__ * 2.f * b;
+ p = s * r__;
+ d__[i__] = g + p;
+ g = c__ * r__ - b;
+
+/* If eigenvectors are desired, then save rotations. */
+
+ if (icompz > 0) {
+ work[i__] = c__;
+ work[*n - 1 + i__] = s;
+ }
+
+/* L120: */
+ }
+
+/* If eigenvectors are desired, then apply saved rotations. */
+
+ if (icompz > 0) {
+ mm = l - m + 1;
+ slasr_("R", "V", "F", n, &mm, &work[m], &work[*n - 1 + m], &z__[m
+ * z_dim1 + 1], ldz);
+ }
+
+ d__[l] -= p;
+ e[lm1] = g;
+ goto L90;
+
+/* Eigenvalue found. */
+
+L130:
+ d__[l] = p;
+
+ --l;
+ if (l >= lend) {
+ goto L90;
+ }
+ goto L140;
+
+ }
+
+/* Undo scaling if necessary */
+
+L140:
+ if (iscale == 1) {
+ i__1 = lendsv - lsv + 1;
+ slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ } else if (iscale == 2) {
+ i__1 = lendsv - lsv + 1;
+ slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ i__1 = lendsv - lsv;
+ slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &e[lsv], n,
+ info);
+ }
+
+/*
+ Check for no convergence to an eigenvalue after a total
+ of N*MAXIT iterations.
+*/
+
+ if (jtot < nmaxit) {
+ goto L10;
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.f) {
+ ++(*info);
+ }
+/* L150: */
+ }
+ goto L190;
+
+/* Order eigenvalues and eigenvectors. */
+
+L160:
+ if (icompz == 0) {
+
+/* Use Quick Sort */
+
+ slasrt_("I", n, &d__[1], info);
+
+ } else {
+
+/* Use Selection Sort to minimize swaps of eigenvectors */
+
+ i__1 = *n;
+ for (ii = 2; ii <= i__1; ++ii) {
+ i__ = ii - 1;
+ k = i__;
+ p = d__[i__];
+ i__2 = *n;
+ for (j = ii; j <= i__2; ++j) {
+ if (d__[j] < p) {
+ k = j;
+ p = d__[j];
+ }
+/* L170: */
+ }
+ if (k != i__) {
+ d__[k] = d__[i__];
+ d__[i__] = p;
+ sswap_(n, &z__[i__ * z_dim1 + 1], &c__1, &z__[k * z_dim1 + 1],
+ &c__1);
+ }
+/* L180: */
+ }
+ }
+
+L190:
+ return 0;
+
+/* End of SSTEQR */
+
+} /* ssteqr_ */
+
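+/*
+   A hedged caller-side sketch of ssteqr_ (disabled; not part of
+   LAPACK). It assumes the f2c conventions used throughout this file:
+   every argument passed by pointer, column-major storage. With
+   COMPZ = "I" the routine diagonalizes the tridiagonal matrix and
+   accumulates the eigenvectors into Z starting from the identity.
+   The example_* name is hypothetical.
+*/
+#if 0
+static int example_ssteqr(void)
+{
+    integer n = 3, ldz = 3, info;
+    real d[3] = {2.f, 2.f, 2.f};   /* diagonal */
+    real e[2] = {-1.f, -1.f};      /* subdiagonal; destroyed on exit */
+    real z[9];                     /* 3x3 eigenvector matrix */
+    real work[4];                  /* max(1, 2*N-2) */
+
+    ssteqr_("I", &n, d, e, z, &ldz, work, &info);
+    /* On success d holds the eigenvalues in ascending order and
+       column j of z holds the eigenvector belonging to d[j]. */
+    return (int) info;
+}
+#endif
+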
+/* Subroutine */ int ssterf_(integer *n, real *d__, real *e, integer *info)
+{
+ /* System generated locals */
+ integer i__1;
+ real r__1, r__2, r__3;
+
+ /* Builtin functions */
+ double sqrt(doublereal), r_sign(real *, real *);
+
+ /* Local variables */
+ static real c__;
+ static integer i__, l, m;
+ static real p, r__, s;
+ static integer l1;
+ static real bb, rt1, rt2, eps, rte;
+ static integer lsv;
+ static real eps2, oldc;
+ static integer lend, jtot;
+ extern /* Subroutine */ int slae2_(real *, real *, real *, real *, real *)
+ ;
+ static real gamma, alpha, sigma, anorm;
+ extern doublereal slapy2_(real *, real *);
+ static integer iscale;
+ static real oldgam;
+ extern doublereal slamch_(char *);
+ static real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static real safmax;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ static integer lendsv;
+ static real ssfmin;
+ static integer nmaxit;
+ static real ssfmax;
+ extern doublereal slanst_(char *, integer *, real *, real *);
+ extern /* Subroutine */ int slasrt_(char *, integer *, real *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SSTERF computes all eigenvalues of a symmetric tridiagonal matrix
+ using the Pal-Walker-Kahan variant of the QL or QR algorithm.
+
+ Arguments
+ =========
+
+ N (input) INTEGER
+ The order of the matrix. N >= 0.
+
+ D (input/output) REAL array, dimension (N)
+ On entry, the n diagonal elements of the tridiagonal matrix.
+ On exit, if INFO = 0, the eigenvalues in ascending order.
+
+ E (input/output) REAL array, dimension (N-1)
+ On entry, the (n-1) subdiagonal elements of the tridiagonal
+ matrix.
+ On exit, E has been destroyed.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: the algorithm failed to find all of the eigenvalues in
+ a total of 30*N iterations; if INFO = i, then i
+ elements of E have not converged to zero.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ --e;
+ --d__;
+
+ /* Function Body */
+ *info = 0;
+
+/* Quick return if possible */
+
+ if (*n < 0) {
+ *info = -1;
+ i__1 = -(*info);
+ xerbla_("SSTERF", &i__1);
+ return 0;
+ }
+ if (*n <= 1) {
+ return 0;
+ }
+
+/* Determine the unit roundoff for this environment. */
+
+ eps = slamch_("E");
+/* Computing 2nd power */
+ r__1 = eps;
+ eps2 = r__1 * r__1;
+ safmin = slamch_("S");
+ safmax = 1.f / safmin;
+ ssfmax = sqrt(safmax) / 3.f;
+ ssfmin = sqrt(safmin) / eps2;
+
+/* Compute the eigenvalues of the tridiagonal matrix. */
+
+ nmaxit = *n * 30;
+ sigma = 0.f;
+ jtot = 0;
+
+/*
+ Determine where the matrix splits and choose QL or QR iteration
+ for each block, according to whether top or bottom diagonal
+ element is smaller.
+*/
+
+ l1 = 1;
+
+L10:
+ if (l1 > *n) {
+ goto L170;
+ }
+ if (l1 > 1) {
+ e[l1 - 1] = 0.f;
+ }
+ i__1 = *n - 1;
+ for (m = l1; m <= i__1; ++m) {
+ if ((r__3 = e[m], dabs(r__3)) <= sqrt((r__1 = d__[m], dabs(r__1))) *
+ sqrt((r__2 = d__[m + 1], dabs(r__2))) * eps) {
+ e[m] = 0.f;
+ goto L30;
+ }
+/* L20: */
+ }
+ m = *n;
+
+L30:
+ l = l1;
+ lsv = l;
+ lend = m;
+ lendsv = lend;
+ l1 = m + 1;
+ if (lend == l) {
+ goto L10;
+ }
+
+/* Scale submatrix in rows and columns L to LEND */
+
+ i__1 = lend - l + 1;
+ anorm = slanst_("I", &i__1, &d__[l], &e[l]);
+ iscale = 0;
+ if (anorm > ssfmax) {
+ iscale = 1;
+ i__1 = lend - l + 1;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmax, &i__1, &c__1, &e[l], n,
+ info);
+ } else if (anorm < ssfmin) {
+ iscale = 2;
+ i__1 = lend - l + 1;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &d__[l], n,
+ info);
+ i__1 = lend - l;
+ slascl_("G", &c__0, &c__0, &anorm, &ssfmin, &i__1, &c__1, &e[l], n,
+ info);
+ }
+
+ i__1 = lend - 1;
+ for (i__ = l; i__ <= i__1; ++i__) {
+/* Computing 2nd power */
+ r__1 = e[i__];
+ e[i__] = r__1 * r__1;
+/* L40: */
+ }
+
+/* Choose between QL and QR iteration */
+
+ if ((r__1 = d__[lend], dabs(r__1)) < (r__2 = d__[l], dabs(r__2))) {
+ lend = lsv;
+ l = lendsv;
+ }
+
+ if (lend >= l) {
+
+/*
+ QL Iteration
+
+ Look for small subdiagonal element.
+*/
+
+L50:
+ if (l != lend) {
+ i__1 = lend - 1;
+ for (m = l; m <= i__1; ++m) {
+ if ((r__2 = e[m], dabs(r__2)) <= eps2 * (r__1 = d__[m] * d__[
+ m + 1], dabs(r__1))) {
+ goto L70;
+ }
+/* L60: */
+ }
+ }
+ m = lend;
+
+L70:
+ if (m < lend) {
+ e[m] = 0.f;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L90;
+ }
+
+/*
+ If remaining matrix is 2 by 2, use SLAE2 to compute its
+ eigenvalues.
+*/
+
+ if (m == l + 1) {
+ rte = sqrt(e[l]);
+ slae2_(&d__[l], &rte, &d__[l + 1], &rt1, &rt2);
+ d__[l] = rt1;
+ d__[l + 1] = rt2;
+ e[l] = 0.f;
+ l += 2;
+ if (l <= lend) {
+ goto L50;
+ }
+ goto L150;
+ }
+
+ if (jtot == nmaxit) {
+ goto L150;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ rte = sqrt(e[l]);
+ sigma = (d__[l + 1] - p) / (rte * 2.f);
+ r__ = slapy2_(&sigma, &c_b1011);
+ sigma = p - rte / (sigma + r_sign(&r__, &sigma));
+
+ c__ = 1.f;
+ s = 0.f;
+ gamma = d__[m] - sigma;
+ p = gamma * gamma;
+
+/* Inner loop */
+
+ i__1 = l;
+ for (i__ = m - 1; i__ >= i__1; --i__) {
+ bb = e[i__];
+ r__ = p + bb;
+ if (i__ != m - 1) {
+ e[i__ + 1] = s * r__;
+ }
+ oldc = c__;
+ c__ = p / r__;
+ s = bb / r__;
+ oldgam = gamma;
+ alpha = d__[i__];
+ gamma = c__ * (alpha - sigma) - s * oldgam;
+ d__[i__ + 1] = oldgam + (alpha - gamma);
+ if (c__ != 0.f) {
+ p = gamma * gamma / c__;
+ } else {
+ p = oldc * bb;
+ }
+/* L80: */
+ }
+
+ e[l] = s * p;
+ d__[l] = sigma + gamma;
+ goto L50;
+
+/* Eigenvalue found. */
+
+L90:
+ d__[l] = p;
+
+ ++l;
+ if (l <= lend) {
+ goto L50;
+ }
+ goto L150;
+
+ } else {
+
+/*
+ QR Iteration
+
+ Look for small superdiagonal element.
+*/
+
+L100:
+ i__1 = lend + 1;
+ for (m = l; m >= i__1; --m) {
+ if ((r__2 = e[m - 1], dabs(r__2)) <= eps2 * (r__1 = d__[m] * d__[
+ m - 1], dabs(r__1))) {
+ goto L120;
+ }
+/* L110: */
+ }
+ m = lend;
+
+L120:
+ if (m > lend) {
+ e[m - 1] = 0.f;
+ }
+ p = d__[l];
+ if (m == l) {
+ goto L140;
+ }
+
+/*
+ If remaining matrix is 2 by 2, use SLAE2 to compute its
+ eigenvalues.
+*/
+
+ if (m == l - 1) {
+ rte = sqrt(e[l - 1]);
+ slae2_(&d__[l], &rte, &d__[l - 1], &rt1, &rt2);
+ d__[l] = rt1;
+ d__[l - 1] = rt2;
+ e[l - 1] = 0.f;
+ l += -2;
+ if (l >= lend) {
+ goto L100;
+ }
+ goto L150;
+ }
+
+ if (jtot == nmaxit) {
+ goto L150;
+ }
+ ++jtot;
+
+/* Form shift. */
+
+ rte = sqrt(e[l - 1]);
+ sigma = (d__[l - 1] - p) / (rte * 2.f);
+ r__ = slapy2_(&sigma, &c_b1011);
+ sigma = p - rte / (sigma + r_sign(&r__, &sigma));
+
+ c__ = 1.f;
+ s = 0.f;
+ gamma = d__[m] - sigma;
+ p = gamma * gamma;
+
+/* Inner loop */
+
+ i__1 = l - 1;
+ for (i__ = m; i__ <= i__1; ++i__) {
+ bb = e[i__];
+ r__ = p + bb;
+ if (i__ != m) {
+ e[i__ - 1] = s * r__;
+ }
+ oldc = c__;
+ c__ = p / r__;
+ s = bb / r__;
+ oldgam = gamma;
+ alpha = d__[i__ + 1];
+ gamma = c__ * (alpha - sigma) - s * oldgam;
+ d__[i__] = oldgam + (alpha - gamma);
+ if (c__ != 0.f) {
+ p = gamma * gamma / c__;
+ } else {
+ p = oldc * bb;
+ }
+/* L130: */
+ }
+
+ e[l - 1] = s * p;
+ d__[l] = sigma + gamma;
+ goto L100;
+
+/* Eigenvalue found. */
+
+L140:
+ d__[l] = p;
+
+ --l;
+ if (l >= lend) {
+ goto L100;
+ }
+ goto L150;
+
+ }
+
+/* Undo scaling if necessary */
+
+L150:
+ if (iscale == 1) {
+ i__1 = lendsv - lsv + 1;
+ slascl_("G", &c__0, &c__0, &ssfmax, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ }
+ if (iscale == 2) {
+ i__1 = lendsv - lsv + 1;
+ slascl_("G", &c__0, &c__0, &ssfmin, &anorm, &i__1, &c__1, &d__[lsv],
+ n, info);
+ }
+
+/*
+ Check for no convergence to an eigenvalue after a total
+ of N*MAXIT iterations.
+*/
+
+ if (jtot < nmaxit) {
+ goto L10;
+ }
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ if (e[i__] != 0.f) {
+ ++(*info);
+ }
+/* L160: */
+ }
+ goto L180;
+
+/* Sort eigenvalues in increasing order. */
+
+L170:
+ slasrt_("I", n, &d__[1], info);
+
+L180:
+ return 0;
+
+/* End of SSTERF */
+
+} /* ssterf_ */
+
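+/*
+   When no eigenvectors are wanted, ssterf_ is the cheapest entry
+   point: the Pal-Walker-Kahan recurrence carries only D and E (note
+   how the routine squares the off-diagonal elements once per block
+   before iterating). A disabled, illustrative caller sketch under
+   the same f2c conventions as the rest of this file; the example_*
+   name is hypothetical.
+*/
+#if 0
+static int example_ssterf(void)
+{
+    integer n = 4, info;
+    real d[4] = {1.f, 2.f, 3.f, 4.f}; /* diagonal */
+    real e[3] = {.5f, .5f, .5f};      /* subdiagonal; destroyed on exit */
+
+    ssterf_(&n, d, e, &info);
+    /* info == 0: d now holds the eigenvalues in ascending order. */
+    return (int) info;
+}
+#endif
+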
+/* Subroutine */ int ssyevd_(char *jobz, char *uplo, integer *n, real *a,
+ integer *lda, real *w, real *work, integer *lwork, integer *iwork,
+ integer *liwork, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ real r__1;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static real eps;
+ static integer inde;
+ static real anrm, rmin, rmax;
+ static integer lopt;
+ static real sigma;
+ extern logical lsame_(char *, char *);
+ static integer iinfo;
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ static integer lwmin, liopt;
+ static logical lower, wantz;
+ static integer indwk2, llwrk2, iscale;
+ extern doublereal slamch_(char *);
+ static real safmin;
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static real bignum;
+ extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
+ real *, integer *, integer *, real *, integer *, integer *);
+ static integer indtau;
+ extern /* Subroutine */ int sstedc_(char *, integer *, real *, real *,
+ real *, integer *, real *, integer *, integer *, integer *,
+ integer *), slacpy_(char *, integer *, integer *, real *,
+ integer *, real *, integer *);
+ static integer indwrk, liwmin;
+ extern /* Subroutine */ int ssterf_(integer *, real *, real *, integer *);
+ extern doublereal slansy_(char *, char *, integer *, real *, integer *,
+ real *);
+ static integer llwork;
+ static real smlnum;
+ static logical lquery;
+ extern /* Subroutine */ int sormtr_(char *, char *, char *, integer *,
+ integer *, real *, integer *, real *, real *, integer *, real *,
+ integer *, integer *), ssytrd_(char *,
+ integer *, real *, integer *, real *, real *, real *, real *,
+ integer *, integer *);
+
+
+/*
+ -- LAPACK driver routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SSYEVD computes all eigenvalues and, optionally, eigenvectors of a
+ real symmetric matrix A. If eigenvectors are desired, it uses a
+ divide and conquer algorithm.
+
+ The divide and conquer algorithm makes very mild assumptions about
+ floating point arithmetic. It will work on machines with a guard
+ digit in add/subtract, or on those binary machines without guard
+ digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or
+ Cray-2. It could conceivably fail on hexadecimal or decimal machines
+ without guard digits, but we know of none.
+
+    Because it makes heavy use of level 3 BLAS, SSYEVD needs N**2 more
+    workspace than SSYEVX.
+
+ Arguments
+ =========
+
+ JOBZ (input) CHARACTER*1
+ = 'N': Compute eigenvalues only;
+ = 'V': Compute eigenvalues and eigenvectors.
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA, N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the
+ leading N-by-N upper triangular part of A contains the
+ upper triangular part of the matrix A. If UPLO = 'L',
+ the leading N-by-N lower triangular part of A contains
+ the lower triangular part of the matrix A.
+ On exit, if JOBZ = 'V', then if INFO = 0, A contains the
+ orthonormal eigenvectors of the matrix A.
+ If JOBZ = 'N', then on exit the lower triangle (if UPLO='L')
+ or the upper triangle (if UPLO='U') of A, including the
+ diagonal, is destroyed.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ W (output) REAL array, dimension (N)
+ If INFO = 0, the eigenvalues in ascending order.
+
+ WORK (workspace/output) REAL array,
+ dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK.
+ If N <= 1, LWORK must be at least 1.
+ If JOBZ = 'N' and N > 1, LWORK must be at least 2*N+1.
+ If JOBZ = 'V' and N > 1, LWORK must be at least
+ 1 + 6*N + 2*N**2.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ IWORK (workspace/output) INTEGER array, dimension (LIWORK)
+ On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK.
+
+ LIWORK (input) INTEGER
+ The dimension of the array IWORK.
+ If N <= 1, LIWORK must be at least 1.
+ If JOBZ = 'N' and N > 1, LIWORK must be at least 1.
+ If JOBZ = 'V' and N > 1, LIWORK must be at least 3 + 5*N.
+
+ If LIWORK = -1, then a workspace query is assumed; the
+ routine only calculates the optimal size of the IWORK array,
+ returns this value as the first entry of the IWORK array, and
+ no error message related to LIWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, the algorithm failed to converge; i
+ off-diagonal elements of an intermediate tridiagonal
+ form did not converge to zero.
+
+ Further Details
+ ===============
+
+ Based on contributions by
+ Jeff Rutter, Computer Science Division, University of California
+ at Berkeley, USA
+ Modified by Francoise Tisseur, University of Tennessee.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --w;
+ --work;
+ --iwork;
+
+ /* Function Body */
+ wantz = lsame_(jobz, "V");
+ lower = lsame_(uplo, "L");
+ lquery = (*lwork == -1) || (*liwork == -1);
+
+ *info = 0;
+ if (*n <= 1) {
+ liwmin = 1;
+ lwmin = 1;
+ lopt = lwmin;
+ liopt = liwmin;
+ } else {
+ if (wantz) {
+ liwmin = *n * 5 + 3;
+/* Computing 2nd power */
+ i__1 = *n;
+ lwmin = *n * 6 + 1 + ((i__1 * i__1) << (1));
+ } else {
+ liwmin = 1;
+ lwmin = ((*n) << (1)) + 1;
+ }
+ lopt = lwmin;
+ liopt = liwmin;
+ }
+ if (! ((wantz) || (lsame_(jobz, "N")))) {
+ *info = -1;
+ } else if (! ((lower) || (lsame_(uplo, "U")))) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*lwork < lwmin && ! lquery) {
+ *info = -8;
+ } else if (*liwork < liwmin && ! lquery) {
+ *info = -10;
+ }
+
+ if (*info == 0) {
+ work[1] = (real) lopt;
+ iwork[1] = liopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSYEVD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (*n == 1) {
+ w[1] = a[a_dim1 + 1];
+ if (wantz) {
+ a[a_dim1 + 1] = 1.f;
+ }
+ return 0;
+ }
+
+/* Get machine constants. */
+
+ safmin = slamch_("Safe minimum");
+ eps = slamch_("Precision");
+ smlnum = safmin / eps;
+ bignum = 1.f / smlnum;
+ rmin = sqrt(smlnum);
+ rmax = sqrt(bignum);
+
+/* Scale matrix to allowable range, if necessary. */
+
+ anrm = slansy_("M", uplo, n, &a[a_offset], lda, &work[1]);
+ iscale = 0;
+ if (anrm > 0.f && anrm < rmin) {
+ iscale = 1;
+ sigma = rmin / anrm;
+ } else if (anrm > rmax) {
+ iscale = 1;
+ sigma = rmax / anrm;
+ }
+ if (iscale == 1) {
+ slascl_(uplo, &c__0, &c__0, &c_b1011, &sigma, n, n, &a[a_offset], lda,
+ info);
+ }
+
+/* Call SSYTRD to reduce symmetric matrix to tridiagonal form. */
+
+ inde = 1;
+ indtau = inde + *n;
+ indwrk = indtau + *n;
+ llwork = *lwork - indwrk + 1;
+ indwk2 = indwrk + *n * *n;
+ llwrk2 = *lwork - indwk2 + 1;
+
+ ssytrd_(uplo, n, &a[a_offset], lda, &w[1], &work[inde], &work[indtau], &
+ work[indwrk], &llwork, &iinfo);
+ lopt = ((*n) << (1)) + work[indwrk];
+
+/*
+ For eigenvalues only, call SSTERF. For eigenvectors, first call
+ SSTEDC to generate the eigenvector matrix, WORK(INDWRK), of the
+ tridiagonal matrix, then call SORMTR to multiply it by the
+ Householder transformations stored in A.
+*/
+
+ if (! wantz) {
+ ssterf_(n, &w[1], &work[inde], info);
+ } else {
+ sstedc_("I", n, &w[1], &work[inde], &work[indwrk], n, &work[indwk2], &
+ llwrk2, &iwork[1], liwork, info);
+ sormtr_("L", uplo, "N", n, n, &a[a_offset], lda, &work[indtau], &work[
+ indwrk], n, &work[indwk2], &llwrk2, &iinfo);
+ slacpy_("A", n, n, &work[indwrk], n, &a[a_offset], lda);
+/*
+ Computing MAX
+ Computing 2nd power
+*/
+ i__3 = *n;
+ i__1 = lopt, i__2 = *n * 6 + 1 + ((i__3 * i__3) << (1));
+ lopt = max(i__1,i__2);
+ }
+
+/* If matrix was scaled, then rescale eigenvalues appropriately. */
+
+ if (iscale == 1) {
+ r__1 = 1.f / sigma;
+ sscal_(n, &r__1, &w[1], &c__1);
+ }
+
+ work[1] = (real) lopt;
+ iwork[1] = liopt;
+
+ return 0;
+
+/* End of SSYEVD */
+
+} /* ssyevd_ */
+
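+/*
+   The LWORK/LIWORK = -1 query convention documented above is the
+   intended way to size the workspaces. A disabled, illustrative
+   sketch of the two-call pattern (the example_* name is
+   hypothetical; malloc/free are assumed from <stdlib.h>):
+*/
+#if 0
+static int example_ssyevd(integer n, real *a, integer lda, real *w)
+{
+    integer info, lwork = -1, liwork = -1, iquery;
+    real query;
+    real *work;
+    integer *iwork;
+
+    /* First call: workspace query only. The optimal sizes come back
+       in query and iquery; nothing else is touched. */
+    ssyevd_("V", "U", &n, a, &lda, w, &query, &lwork, &iquery,
+            &liwork, &info);
+    if (info != 0) {
+        return (int) info;
+    }
+    lwork = (integer) query;
+    liwork = iquery;
+    work = (real *) malloc(lwork * sizeof(real));
+    iwork = (integer *) malloc(liwork * sizeof(integer));
+
+    /* Second call: the actual eigendecomposition. Eigenvalues land
+       in w; eigenvectors overwrite a. */
+    ssyevd_("V", "U", &n, a, &lda, w, work, &lwork, iwork, &liwork,
+            &info);
+    free(work);
+    free(iwork);
+    return (int) info;
+}
+#endif
+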
+/* Subroutine */ int ssytd2_(char *uplo, integer *n, real *a, integer *lda,
+ real *d__, real *e, real *tau, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__;
+ static real taui;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ extern /* Subroutine */ int ssyr2_(char *, integer *, real *, real *,
+ integer *, real *, integer *, real *, integer *);
+ static real alpha;
+ extern logical lsame_(char *, char *);
+ static logical upper;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *), ssymv_(char *, integer *, real *, real *,
+ integer *, real *, integer *, real *, real *, integer *),
+ xerbla_(char *, integer *), slarfg_(integer *, real *,
+ real *, integer *, real *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ October 31, 1992
+
+
+ Purpose
+ =======
+
+ SSYTD2 reduces a real symmetric matrix A to symmetric tridiagonal
+ form T by an orthogonal similarity transformation: Q' * A * Q = T.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the upper or lower triangular part of the
+ symmetric matrix A is stored:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ n-by-n upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n-by-n lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+ On exit, if UPLO = 'U', the diagonal and first superdiagonal
+ of A are overwritten by the corresponding elements of the
+ tridiagonal matrix T, and the elements above the first
+ superdiagonal, with the array TAU, represent the orthogonal
+ matrix Q as a product of elementary reflectors; if UPLO
+ = 'L', the diagonal and first subdiagonal of A are over-
+ written by the corresponding elements of the tridiagonal
+ matrix T, and the elements below the first subdiagonal, with
+ the array TAU, represent the orthogonal matrix Q as a product
+ of elementary reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ D (output) REAL array, dimension (N)
+ The diagonal elements of the tridiagonal matrix T:
+ D(i) = A(i,i).
+
+ E (output) REAL array, dimension (N-1)
+ The off-diagonal elements of the tridiagonal matrix T:
+ E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
+
+ TAU (output) REAL array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value.
+
+ Further Details
+ ===============
+
+ If UPLO = 'U', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(n-1) . . . H(2) H(1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
+ A(1:i-1,i+1), and tau in TAU(i).
+
+ If UPLO = 'L', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(1) H(2) . . . H(n-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
+ and tau in TAU(i).
+
+ The contents of A on exit are illustrated by the following examples
+ with n = 5:
+
+ if UPLO = 'U': if UPLO = 'L':
+
+ ( d e v2 v3 v4 ) ( d )
+ ( d e v3 v4 ) ( e d )
+ ( d e v4 ) ( v1 e d )
+ ( d e ) ( v1 v2 e d )
+ ( d ) ( v1 v2 v3 e d )
+
+ where d and e denote diagonal and off-diagonal elements of T, and vi
+ denotes an element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSYTD2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n <= 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Reduce the upper triangle of A */
+
+ for (i__ = *n - 1; i__ >= 1; --i__) {
+
+/*
+ Generate elementary reflector H(i) = I - tau * v * v'
+ to annihilate A(1:i-1,i+1)
+*/
+
+ slarfg_(&i__, &a[i__ + (i__ + 1) * a_dim1], &a[(i__ + 1) * a_dim1
+ + 1], &c__1, &taui);
+ e[i__] = a[i__ + (i__ + 1) * a_dim1];
+
+ if (taui != 0.f) {
+
+/* Apply H(i) from both sides to A(1:i,1:i) */
+
+ a[i__ + (i__ + 1) * a_dim1] = 1.f;
+
+/* Compute x := tau * A * v storing x in TAU(1:i) */
+
+ ssymv_(uplo, &i__, &taui, &a[a_offset], lda, &a[(i__ + 1) *
+ a_dim1 + 1], &c__1, &c_b320, &tau[1], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+ alpha = taui * -.5f * sdot_(&i__, &tau[1], &c__1, &a[(i__ + 1)
+ * a_dim1 + 1], &c__1);
+ saxpy_(&i__, &alpha, &a[(i__ + 1) * a_dim1 + 1], &c__1, &tau[
+ 1], &c__1);
+
+/*
+ Apply the transformation as a rank-2 update:
+ A := A - v * w' - w * v'
+*/
+
+ ssyr2_(uplo, &i__, &c_b1290, &a[(i__ + 1) * a_dim1 + 1], &
+ c__1, &tau[1], &c__1, &a[a_offset], lda);
+
+ a[i__ + (i__ + 1) * a_dim1] = e[i__];
+ }
+ d__[i__ + 1] = a[i__ + 1 + (i__ + 1) * a_dim1];
+ tau[i__] = taui;
+/* L10: */
+ }
+ d__[1] = a[a_dim1 + 1];
+ } else {
+
+/* Reduce the lower triangle of A */
+
+ i__1 = *n - 1;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+
+/*
+ Generate elementary reflector H(i) = I - tau * v * v'
+ to annihilate A(i+2:n,i)
+*/
+
+ i__2 = *n - i__;
+/* Computing MIN */
+ i__3 = i__ + 2;
+ slarfg_(&i__2, &a[i__ + 1 + i__ * a_dim1], &a[min(i__3,*n) + i__ *
+ a_dim1], &c__1, &taui);
+ e[i__] = a[i__ + 1 + i__ * a_dim1];
+
+ if (taui != 0.f) {
+
+/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
+
+ a[i__ + 1 + i__ * a_dim1] = 1.f;
+
+/*              Compute x := tau * A * v  storing x in TAU(i:n-1) */
+
+ i__2 = *n - i__;
+ ssymv_(uplo, &i__2, &taui, &a[i__ + 1 + (i__ + 1) * a_dim1],
+ lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &c_b320, &tau[
+ i__], &c__1);
+
+/* Compute w := x - 1/2 * tau * (x'*v) * v */
+
+ i__2 = *n - i__;
+ alpha = taui * -.5f * sdot_(&i__2, &tau[i__], &c__1, &a[i__ +
+ 1 + i__ * a_dim1], &c__1);
+ i__2 = *n - i__;
+ saxpy_(&i__2, &alpha, &a[i__ + 1 + i__ * a_dim1], &c__1, &tau[
+ i__], &c__1);
+
+/*
+ Apply the transformation as a rank-2 update:
+ A := A - v * w' - w * v'
+*/
+
+ i__2 = *n - i__;
+ ssyr2_(uplo, &i__2, &c_b1290, &a[i__ + 1 + i__ * a_dim1], &
+ c__1, &tau[i__], &c__1, &a[i__ + 1 + (i__ + 1) *
+ a_dim1], lda);
+
+ a[i__ + 1 + i__ * a_dim1] = e[i__];
+ }
+ d__[i__] = a[i__ + i__ * a_dim1];
+ tau[i__] = taui;
+/* L20: */
+ }
+ d__[*n] = a[*n + *n * a_dim1];
+ }
+
+ return 0;
+
+/* End of SSYTD2 */
+
+} /* ssytd2_ */
+
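+/*
+   The Further Details section above represents Q as a product of
+   elementary reflectors H(i) = I - tau * v * v'. Applying such a
+   reflector to a vector never needs the matrix form: it is just
+   x := x - tau * (v' * x) * v. A disabled, illustrative sketch (the
+   example_* name is hypothetical):
+*/
+#if 0
+static void example_apply_reflector(integer n, real tau, const real *v,
+        real *x)
+{
+    integer i;
+    real dot = 0.f;
+
+    /* dot = v' * x */
+    for (i = 0; i < n; ++i) {
+        dot += v[i] * x[i];
+    }
+    /* x := (I - tau * v * v') * x */
+    for (i = 0; i < n; ++i) {
+        x[i] -= tau * dot * v[i];
+    }
+}
+#endif
+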
+/* Subroutine */ int ssytrd_(char *uplo, integer *n, real *a, integer *lda,
+ real *d__, real *e, real *tau, real *work, integer *lwork, integer *
+ info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+
+ /* Local variables */
+ static integer i__, j, nb, kk, nx, iws;
+ extern logical lsame_(char *, char *);
+ static integer nbmin, iinfo;
+ static logical upper;
+ extern /* Subroutine */ int ssytd2_(char *, integer *, real *, integer *,
+ real *, real *, real *, integer *), ssyr2k_(char *, char *
+ , integer *, integer *, real *, real *, integer *, real *,
+ integer *, real *, real *, integer *), xerbla_(
+ char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ extern /* Subroutine */ int slatrd_(char *, integer *, integer *, real *,
+ integer *, real *, real *, real *, integer *);
+ static integer ldwork, lwkopt;
+ static logical lquery;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ SSYTRD reduces a real symmetric matrix A to real symmetric
+ tridiagonal form T by an orthogonal similarity transformation:
+ Q**T * A * Q = T.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the symmetric matrix A. If UPLO = 'U', the leading
+ N-by-N upper triangular part of A contains the upper
+ triangular part of the matrix A, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading N-by-N lower triangular part of A contains the lower
+ triangular part of the matrix A, and the strictly upper
+ triangular part of A is not referenced.
+ On exit, if UPLO = 'U', the diagonal and first superdiagonal
+ of A are overwritten by the corresponding elements of the
+ tridiagonal matrix T, and the elements above the first
+ superdiagonal, with the array TAU, represent the orthogonal
+ matrix Q as a product of elementary reflectors; if UPLO
+ = 'L', the diagonal and first subdiagonal of A are over-
+ written by the corresponding elements of the tridiagonal
+ matrix T, and the elements below the first subdiagonal, with
+ the array TAU, represent the orthogonal matrix Q as a product
+ of elementary reflectors. See Further Details.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ D (output) REAL array, dimension (N)
+ The diagonal elements of the tridiagonal matrix T:
+ D(i) = A(i,i).
+
+ E (output) REAL array, dimension (N-1)
+ The off-diagonal elements of the tridiagonal matrix T:
+ E(i) = A(i,i+1) if UPLO = 'U', E(i) = A(i+1,i) if UPLO = 'L'.
+
+ TAU (output) REAL array, dimension (N-1)
+ The scalar factors of the elementary reflectors (see Further
+ Details).
+
+ WORK (workspace/output) REAL array, dimension (LWORK)
+ On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
+
+ LWORK (input) INTEGER
+ The dimension of the array WORK. LWORK >= 1.
+ For optimum performance LWORK >= N*NB, where NB is the
+ optimal blocksize.
+
+ If LWORK = -1, then a workspace query is assumed; the routine
+ only calculates the optimal size of the WORK array, returns
+ this value as the first entry of the WORK array, and no error
+ message related to LWORK is issued by XERBLA.
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+ If UPLO = 'U', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(n-1) . . . H(2) H(1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(i+1:n) = 0 and v(i) = 1; v(1:i-1) is stored on exit in
+ A(1:i-1,i+1), and tau in TAU(i).
+
+ If UPLO = 'L', the matrix Q is represented as a product of elementary
+ reflectors
+
+ Q = H(1) H(2) . . . H(n-1).
+
+ Each H(i) has the form
+
+ H(i) = I - tau * v * v'
+
+ where tau is a real scalar, and v is a real vector with
+ v(1:i) = 0 and v(i+1) = 1; v(i+2:n) is stored on exit in A(i+2:n,i),
+ and tau in TAU(i).
+
+ The contents of A on exit are illustrated by the following examples
+ with n = 5:
+
+ if UPLO = 'U': if UPLO = 'L':
+
+ ( d e v2 v3 v4 ) ( d )
+ ( d e v3 v4 ) ( e d )
+ ( d e v4 ) ( v1 e d )
+ ( d e ) ( v1 v2 e d )
+ ( d ) ( v1 v2 v3 e d )
+
+ where d and e denote diagonal and off-diagonal elements of T, and vi
+ denotes an element of the vector defining H(i).
+
+ =====================================================================
+
+
+ Test the input parameters
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ --d__;
+ --e;
+ --tau;
+ --work;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ lquery = *lwork == -1;
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ } else if (*lwork < 1 && ! lquery) {
+ *info = -9;
+ }
+
+ if (*info == 0) {
+
+/* Determine the block size. */
+
+ nb = ilaenv_(&c__1, "SSYTRD", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6,
+ (ftnlen)1);
+ lwkopt = *n * nb;
+ work[1] = (real) lwkopt;
+ }
+
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("SSYTRD", &i__1);
+ return 0;
+ } else if (lquery) {
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ work[1] = 1.f;
+ return 0;
+ }
+
+ nx = *n;
+ iws = 1;
+ if (nb > 1 && nb < *n) {
+
+/*
+ Determine when to cross over from blocked to unblocked code
+ (last block is always handled by unblocked code).
+
+ Computing MAX
+*/
+ i__1 = nb, i__2 = ilaenv_(&c__3, "SSYTRD", uplo, n, &c_n1, &c_n1, &
+ c_n1, (ftnlen)6, (ftnlen)1);
+ nx = max(i__1,i__2);
+ if (nx < *n) {
+
+/* Determine if workspace is large enough for blocked code. */
+
+ ldwork = *n;
+ iws = ldwork * nb;
+ if (*lwork < iws) {
+
+/*
+ Not enough workspace to use optimal NB: determine the
+ minimum value of NB, and reduce NB or force use of
+ unblocked code by setting NX = N.
+
+ Computing MAX
+*/
+ i__1 = *lwork / ldwork;
+ nb = max(i__1,1);
+ nbmin = ilaenv_(&c__2, "SSYTRD", uplo, n, &c_n1, &c_n1, &c_n1,
+ (ftnlen)6, (ftnlen)1);
+ if (nb < nbmin) {
+ nx = *n;
+ }
+ }
+ } else {
+ nx = *n;
+ }
+ } else {
+ nb = 1;
+ }
+
+ if (upper) {
+
+/*
+ Reduce the upper triangle of A.
+ Columns 1:kk are handled by the unblocked method.
+*/
+
+ kk = *n - (*n - nx + nb - 1) / nb * nb;
+ i__1 = kk + 1;
+ i__2 = -nb;
+ for (i__ = *n - nb + 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
+ i__2) {
+
+/*
+ Reduce columns i:i+nb-1 to tridiagonal form and form the
+ matrix W which is needed to update the unreduced part of
+ the matrix
+*/
+
+ i__3 = i__ + nb - 1;
+ slatrd_(uplo, &i__3, &nb, &a[a_offset], lda, &e[1], &tau[1], &
+ work[1], &ldwork);
+
+/*
+ Update the unreduced submatrix A(1:i-1,1:i-1), using an
+ update of the form: A := A - V*W' - W*V'
+*/
+
+ i__3 = i__ - 1;
+ ssyr2k_(uplo, "No transpose", &i__3, &nb, &c_b1290, &a[i__ *
+ a_dim1 + 1], lda, &work[1], &ldwork, &c_b1011, &a[
+ a_offset], lda);
+
+/*
+ Copy superdiagonal elements back into A, and diagonal
+ elements into D
+*/
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j - 1 + j * a_dim1] = e[j - 1];
+ d__[j] = a[j + j * a_dim1];
+/* L10: */
+ }
+/* L20: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ ssytd2_(uplo, &kk, &a[a_offset], lda, &d__[1], &e[1], &tau[1], &iinfo);
+ } else {
+
+/* Reduce the lower triangle of A */
+
+ i__2 = *n - nx;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+
+/*
+ Reduce columns i:i+nb-1 to tridiagonal form and form the
+ matrix W which is needed to update the unreduced part of
+ the matrix
+*/
+
+ i__3 = *n - i__ + 1;
+ slatrd_(uplo, &i__3, &nb, &a[i__ + i__ * a_dim1], lda, &e[i__], &
+ tau[i__], &work[1], &ldwork);
+
+/*
+       Update the unreduced submatrix A(i+nb:n,i+nb:n), using
+ an update of the form: A := A - V*W' - W*V'
+*/
+
+ i__3 = *n - i__ - nb + 1;
+ ssyr2k_(uplo, "No transpose", &i__3, &nb, &c_b1290, &a[i__ + nb +
+ i__ * a_dim1], lda, &work[nb + 1], &ldwork, &c_b1011, &a[
+ i__ + nb + (i__ + nb) * a_dim1], lda);
+
+/*
+ Copy subdiagonal elements back into A, and diagonal
+ elements into D
+*/
+
+ i__3 = i__ + nb - 1;
+ for (j = i__; j <= i__3; ++j) {
+ a[j + 1 + j * a_dim1] = e[j];
+ d__[j] = a[j + j * a_dim1];
+/* L30: */
+ }
+/* L40: */
+ }
+
+/* Use unblocked code to reduce the last or only block */
+
+ i__1 = *n - i__ + 1;
+ ssytd2_(uplo, &i__1, &a[i__ + i__ * a_dim1], lda, &d__[i__], &e[i__],
+ &tau[i__], &iinfo);
+ }
+
+ work[1] = (real) lwkopt;
+ return 0;
+
+/* End of SSYTRD */
+
+} /* ssytrd_ */
+
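+/*
+   A disabled sketch of the column partitioning used by the blocked
+   driver above for UPLO = 'U': full nb-wide panels are peeled off
+   the high end, and the leftover kk = n - ceil((n - nx)/nb) * nb
+   columns fall through to the unblocked ssytd2_. Illustrative only;
+   the example_* name is hypothetical.
+*/
+#if 0
+static void example_partition(integer n, integer nb, integer nx)
+{
+    /* Same arithmetic as the kk assignment in ssytrd_ above. */
+    integer kk = n - (n - nx + nb - 1) / nb * nb;
+    integer i;
+
+    for (i = n - nb + 1; i >= kk + 1; i -= nb) {
+        /* Blocked panel: columns i .. i+nb-1 (slatrd_ + ssyr2k_). */
+    }
+    /* Unblocked tail: columns 1 .. kk (ssytd2_). */
+}
+#endif
+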
+/* Subroutine */ int strevc_(char *side, char *howmny, logical *select,
+ integer *n, real *t, integer *ldt, real *vl, integer *ldvl, real *vr,
+ integer *ldvr, integer *mm, integer *m, real *work, integer *info)
+{
+ /* System generated locals */
+ integer t_dim1, t_offset, vl_dim1, vl_offset, vr_dim1, vr_offset, i__1,
+ i__2, i__3;
+ real r__1, r__2, r__3, r__4;
+
+ /* Builtin functions */
+ double sqrt(doublereal);
+
+ /* Local variables */
+ static integer i__, j, k;
+ static real x[4] /* was [2][2] */;
+ static integer j1, j2, n2, ii, ki, ip, is;
+ static real wi, wr, rec, ulp, beta, emax;
+ static logical pair, allv;
+ static integer ierr;
+ static real unfl, ovfl, smin;
+ extern doublereal sdot_(integer *, real *, integer *, real *, integer *);
+ static logical over;
+ static real vmax;
+ static integer jnxt;
+ static real scale;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ static real remax;
+ static logical leftv;
+ extern /* Subroutine */ int sgemv_(char *, integer *, integer *, real *,
+ real *, integer *, real *, integer *, real *, real *, integer *);
+ static logical bothv;
+ static real vcrit;
+ static logical somev;
+ extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
+ integer *);
+ static real xnorm;
+ extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
+ real *, integer *), slaln2_(logical *, integer *, integer *, real
+ *, real *, real *, integer *, real *, real *, real *, integer *,
+ real *, real *, real *, integer *, real *, real *, integer *),
+ slabad_(real *, real *);
+ extern doublereal slamch_(char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *);
+ static real bignum;
+ extern integer isamax_(integer *, real *, integer *);
+ static logical rightv;
+ static real smlnum;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ June 30, 1999
+
+
+ Purpose
+ =======
+
+ STREVC computes some or all of the right and/or left eigenvectors of
+ a real upper quasi-triangular matrix T.
+
+ The right eigenvector x and the left eigenvector y of T corresponding
+ to an eigenvalue w are defined by:
+
+ T*x = w*x, y'*T = w*y'
+
+ where y' denotes the conjugate transpose of the vector y.
+
+ If all eigenvectors are requested, the routine may either return the
+ matrices X and/or Y of right or left eigenvectors of T, or the
+ products Q*X and/or Q*Y, where Q is an input orthogonal
+    matrix.  If T was obtained from the real Schur factorization of an
+ original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of
+ right or left eigenvectors of A.
+
+ T must be in Schur canonical form (as returned by SHSEQR), that is,
+ block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each
+ 2-by-2 diagonal block has its diagonal elements equal and its
+ off-diagonal elements of opposite sign. Corresponding to each 2-by-2
+ diagonal block is a complex conjugate pair of eigenvalues and
+ eigenvectors; only one eigenvector of the pair is computed, namely
+ the one corresponding to the eigenvalue with positive imaginary part.
+
+ Arguments
+ =========
+
+ SIDE (input) CHARACTER*1
+ = 'R': compute right eigenvectors only;
+ = 'L': compute left eigenvectors only;
+ = 'B': compute both right and left eigenvectors.
+
+ HOWMNY (input) CHARACTER*1
+ = 'A': compute all right and/or left eigenvectors;
+ = 'B': compute all right and/or left eigenvectors,
+ and backtransform them using the input matrices
+ supplied in VR and/or VL;
+ = 'S': compute selected right and/or left eigenvectors,
+ specified by the logical array SELECT.
+
+ SELECT (input/output) LOGICAL array, dimension (N)
+ If HOWMNY = 'S', SELECT specifies the eigenvectors to be
+ computed.
+ If HOWMNY = 'A' or 'B', SELECT is not referenced.
+ To select the real eigenvector corresponding to a real
+ eigenvalue w(j), SELECT(j) must be set to .TRUE.. To select
+ the complex eigenvector corresponding to a complex conjugate
+ pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must be
+ set to .TRUE.; then on exit SELECT(j) is .TRUE. and
+ SELECT(j+1) is .FALSE..
+
+ N (input) INTEGER
+ The order of the matrix T. N >= 0.
+
+ T (input) REAL array, dimension (LDT,N)
+ The upper quasi-triangular matrix T in Schur canonical form.
+
+ LDT (input) INTEGER
+ The leading dimension of the array T. LDT >= max(1,N).
+
+ VL (input/output) REAL array, dimension (LDVL,MM)
+ On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must
+ contain an N-by-N matrix Q (usually the orthogonal matrix Q
+ of Schur vectors returned by SHSEQR).
+ On exit, if SIDE = 'L' or 'B', VL contains:
+ if HOWMNY = 'A', the matrix Y of left eigenvectors of T;
+ VL has the same quasi-lower triangular form
+ as T'. If T(i,i) is a real eigenvalue, then
+ the i-th column VL(i) of VL is its
+ corresponding eigenvector. If T(i:i+1,i:i+1)
+ is a 2-by-2 block whose eigenvalues are
+ complex-conjugate eigenvalues of T, then
+ VL(i)+sqrt(-1)*VL(i+1) is the complex
+ eigenvector corresponding to the eigenvalue
+ with positive real part.
+ if HOWMNY = 'B', the matrix Q*Y;
+ if HOWMNY = 'S', the left eigenvectors of T specified by
+ SELECT, stored consecutively in the columns
+ of VL, in the same order as their
+ eigenvalues.
+ A complex eigenvector corresponding to a complex eigenvalue
+ is stored in two consecutive columns, the first holding the
+ real part, and the second the imaginary part.
+ If SIDE = 'R', VL is not referenced.
+
+ LDVL (input) INTEGER
+ The leading dimension of the array VL. LDVL >= max(1,N) if
+ SIDE = 'L' or 'B'; LDVL >= 1 otherwise.
+
+ VR (input/output) REAL array, dimension (LDVR,MM)
+ On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must
+ contain an N-by-N matrix Q (usually the orthogonal matrix Q
+ of Schur vectors returned by SHSEQR).
+ On exit, if SIDE = 'R' or 'B', VR contains:
+ if HOWMNY = 'A', the matrix X of right eigenvectors of T;
+ VR has the same quasi-upper triangular form
+ as T. If T(i,i) is a real eigenvalue, then
+ the i-th column VR(i) of VR is its
+ corresponding eigenvector. If T(i:i+1,i:i+1)
+ is a 2-by-2 block whose eigenvalues are
+ complex-conjugate eigenvalues of T, then
+ VR(i)+sqrt(-1)*VR(i+1) is the complex
+ eigenvector corresponding to the eigenvalue
+ with positive real part.
+ if HOWMNY = 'B', the matrix Q*X;
+ if HOWMNY = 'S', the right eigenvectors of T specified by
+ SELECT, stored consecutively in the columns
+ of VR, in the same order as their
+ eigenvalues.
+ A complex eigenvector corresponding to a complex eigenvalue
+ is stored in two consecutive columns, the first holding the
+ real part and the second the imaginary part.
+ If SIDE = 'L', VR is not referenced.
+
+ LDVR (input) INTEGER
+ The leading dimension of the array VR. LDVR >= max(1,N) if
+ SIDE = 'R' or 'B'; LDVR >= 1 otherwise.
+
+ MM (input) INTEGER
+ The number of columns in the arrays VL and/or VR. MM >= M.
+
+ M (output) INTEGER
+ The number of columns in the arrays VL and/or VR actually
+ used to store the eigenvectors.
+ If HOWMNY = 'A' or 'B', M is set to N.
+ Each selected real eigenvector occupies one column and each
+ selected complex eigenvector occupies two columns.
+
+ WORK (workspace) REAL array, dimension (3*N)
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ Further Details
+ ===============
+
+    The algorithm used in this program is basically backward (forward)
+    substitution, with scaling to make the code robust against
+    possible overflow.
+
+ Each eigenvector is normalized so that the element of largest
+ magnitude has magnitude 1; here the magnitude of a complex number
+ (x,y) is taken to be |x| + |y|.
+
+ =====================================================================
+
+
+ Decode and test the input parameters
+*/
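+
+/*
+   A disabled sketch of the normalization rule stated above: each
+   eigenvector is scaled so its largest entry has magnitude 1, a
+   complex entry (x,y) counting as |x| + |y|. Illustrative only, for
+   one complex eigenvector stored as two real columns; fabsf is
+   assumed from <math.h> and the example_* name is hypothetical.
+*/
+#if 0
+static void example_normalize_pair(integer n, real *re, real *im)
+{
+    integer i;
+    real emax = 0.f, scale;
+
+    /* Largest |x| + |y| over the vector. */
+    for (i = 0; i < n; ++i) {
+        real mag = fabsf(re[i]) + fabsf(im[i]);
+        if (mag > emax) {
+            emax = mag;
+        }
+    }
+    if (emax == 0.f) {
+        return;
+    }
+    scale = 1.f / emax;
+    for (i = 0; i < n; ++i) {
+        re[i] *= scale;
+        im[i] *= scale;
+    }
+}
+#endif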
+
+ /* Parameter adjustments */
+ --select;
+ t_dim1 = *ldt;
+ t_offset = 1 + t_dim1;
+ t -= t_offset;
+ vl_dim1 = *ldvl;
+ vl_offset = 1 + vl_dim1;
+ vl -= vl_offset;
+ vr_dim1 = *ldvr;
+ vr_offset = 1 + vr_dim1;
+ vr -= vr_offset;
+ --work;
+
+ /* Function Body */
+ bothv = lsame_(side, "B");
+ rightv = (lsame_(side, "R")) || (bothv);
+ leftv = (lsame_(side, "L")) || (bothv);
+
+ allv = lsame_(howmny, "A");
+ over = lsame_(howmny, "B");
+ somev = lsame_(howmny, "S");
+
+ *info = 0;
+ if (! rightv && ! leftv) {
+ *info = -1;
+ } else if (! allv && ! over && ! somev) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -4;
+ } else if (*ldt < max(1,*n)) {
+ *info = -6;
+ } else if ((*ldvl < 1) || (leftv && *ldvl < *n)) {
+ *info = -8;
+ } else if ((*ldvr < 1) || (rightv && *ldvr < *n)) {
+ *info = -10;
+ } else {
+
+/*
+ Set M to the number of columns required to store the selected
+ eigenvectors, standardize the array SELECT if necessary, and
+ test MM.
+*/
+
+ if (somev) {
+ *m = 0;
+ pair = FALSE_;
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (pair) {
+ pair = FALSE_;
+ select[j] = FALSE_;
+ } else {
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] == 0.f) {
+ if (select[j]) {
+ ++(*m);
+ }
+ } else {
+ pair = TRUE_;
+ if ((select[j]) || (select[j + 1])) {
+ select[j] = TRUE_;
+ *m += 2;
+ }
+ }
+ } else {
+ if (select[*n]) {
+ ++(*m);
+ }
+ }
+ }
+/* L10: */
+ }
+ } else {
+ *m = *n;
+ }
+
+ if (*mm < *m) {
+ *info = -11;
+ }
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STREVC", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible. */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Set the constants to control overflow. */
+
+ unfl = slamch_("Safe minimum");
+ ovfl = 1.f / unfl;
+ slabad_(&unfl, &ovfl);
+ ulp = slamch_("Precision");
+ smlnum = unfl * (*n / ulp);
+ bignum = (1.f - ulp) / smlnum;
+
+/*
+ Compute 1-norm of each column of strictly upper triangular
+ part of T to control overflow in triangular solver.
+*/
+
+ work[1] = 0.f;
+ i__1 = *n;
+ for (j = 2; j <= i__1; ++j) {
+ work[j] = 0.f;
+ i__2 = j - 1;
+ for (i__ = 1; i__ <= i__2; ++i__) {
+ work[j] += (r__1 = t[i__ + j * t_dim1], dabs(r__1));
+/* L20: */
+ }
+/* L30: */
+ }
+
+/*
+ Index IP is used to specify the real or complex eigenvalue:
+ IP = 0, real eigenvalue,
+ 1, first of conjugate complex pair: (wr,wi)
+ -1, second of conjugate complex pair: (wr,wi)
+*/
+
+ n2 = (*n) << (1);
+
+ if (rightv) {
+
+/* Compute right eigenvectors. */
+
+ ip = 0;
+ is = *m;
+ for (ki = *n; ki >= 1; --ki) {
+
+ if (ip == 1) {
+ goto L130;
+ }
+ if (ki == 1) {
+ goto L40;
+ }
+ if (t[ki + (ki - 1) * t_dim1] == 0.f) {
+ goto L40;
+ }
+ ip = -1;
+
+L40:
+ if (somev) {
+ if (ip == 0) {
+ if (! select[ki]) {
+ goto L130;
+ }
+ } else {
+ if (! select[ki - 1]) {
+ goto L130;
+ }
+ }
+ }
+
+/* Compute the KI-th eigenvalue (WR,WI). */
+
+ wr = t[ki + ki * t_dim1];
+ wi = 0.f;
+ if (ip != 0) {
+ wi = sqrt((r__1 = t[ki + (ki - 1) * t_dim1], dabs(r__1))) *
+ sqrt((r__2 = t[ki - 1 + ki * t_dim1], dabs(r__2)));
+ }
+/* Computing MAX */
+ r__1 = ulp * (dabs(wr) + dabs(wi));
+ smin = dmax(r__1,smlnum);
+
+ if (ip == 0) {
+
+/* Real right eigenvector */
+
+ work[ki + *n] = 1.f;
+
+/* Form right-hand side */
+
+ i__1 = ki - 1;
+ for (k = 1; k <= i__1; ++k) {
+ work[k + *n] = -t[k + ki * t_dim1];
+/* L50: */
+ }
+
+/*
+ Solve the upper quasi-triangular system:
+ (T(1:KI-1,1:KI-1) - WR)*X = SCALE*WORK.
+*/
+
+ jnxt = ki - 1;
+ for (j = ki - 1; j >= 1; --j) {
+ if (j > jnxt) {
+ goto L60;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.f) {
+ j1 = j - 1;
+ jnxt = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+ slaln2_(&c_false, &c__1, &c__1, &smin, &c_b1011, &t[j
+ + j * t_dim1], ldt, &c_b1011, &c_b1011, &work[
+ j + *n], n, &wr, &c_b320, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/*
+ Scale X(1,1) to avoid overflow when updating
+ the right-hand side.
+*/
+
+ if (xnorm > 1.f) {
+ if (work[j] > bignum / xnorm) {
+ x[0] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ sscal_(&ki, &scale, &work[*n + 1], &c__1);
+ }
+ work[j + *n] = x[0];
+
+/* Update right-hand side */
+
+ i__1 = j - 1;
+ r__1 = -x[0];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+ slaln2_(&c_false, &c__2, &c__1, &smin, &c_b1011, &t[j
+ - 1 + (j - 1) * t_dim1], ldt, &c_b1011, &
+ c_b1011, &work[j - 1 + *n], n, &wr, &c_b320,
+ x, &c__2, &scale, &xnorm, &ierr);
+
+/*
+ Scale X(1,1) and X(2,1) to avoid overflow when
+ updating the right-hand side.
+*/
+
+ if (xnorm > 1.f) {
+/* Computing MAX */
+ r__1 = work[j - 1], r__2 = work[j];
+ beta = dmax(r__1,r__2);
+ if (beta > bignum / xnorm) {
+ x[0] /= xnorm;
+ x[1] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ sscal_(&ki, &scale, &work[*n + 1], &c__1);
+ }
+ work[j - 1 + *n] = x[0];
+ work[j + *n] = x[1];
+
+/* Update right-hand side */
+
+ i__1 = j - 2;
+ r__1 = -x[0];
+ saxpy_(&i__1, &r__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[*n + 1], &c__1);
+ i__1 = j - 2;
+ r__1 = -x[1];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ }
+L60:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ scopy_(&ki, &work[*n + 1], &c__1, &vr[is * vr_dim1 + 1], &
+ c__1);
+
+ ii = isamax_(&ki, &vr[is * vr_dim1 + 1], &c__1);
+ remax = 1.f / (r__1 = vr[ii + is * vr_dim1], dabs(r__1));
+ sscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ vr[k + is * vr_dim1] = 0.f;
+/* L70: */
+ }
+ } else {
+ if (ki > 1) {
+ i__1 = ki - 1;
+ sgemv_("N", n, &i__1, &c_b1011, &vr[vr_offset], ldvr,
+ &work[*n + 1], &c__1, &work[ki + *n], &vr[ki *
+ vr_dim1 + 1], &c__1);
+ }
+
+ ii = isamax_(n, &vr[ki * vr_dim1 + 1], &c__1);
+ remax = 1.f / (r__1 = vr[ii + ki * vr_dim1], dabs(r__1));
+ sscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+
+ } else {
+
+/*
+ Complex right eigenvector.
+
+ Initial solve
+ [ (T(KI-1,KI-1) T(KI-1,KI) ) - (WR + I* WI)]*X = 0.
+ [ (T(KI,KI-1) T(KI,KI) ) ]
+*/
+
+ if ((r__1 = t[ki - 1 + ki * t_dim1], dabs(r__1)) >= (r__2 = t[
+ ki + (ki - 1) * t_dim1], dabs(r__2))) {
+ work[ki - 1 + *n] = 1.f;
+ work[ki + n2] = wi / t[ki - 1 + ki * t_dim1];
+ } else {
+ work[ki - 1 + *n] = -wi / t[ki + (ki - 1) * t_dim1];
+ work[ki + n2] = 1.f;
+ }
+ work[ki + *n] = 0.f;
+ work[ki - 1 + n2] = 0.f;
+
+/* Form right-hand side */
+
+ i__1 = ki - 2;
+ for (k = 1; k <= i__1; ++k) {
+ work[k + *n] = -work[ki - 1 + *n] * t[k + (ki - 1) *
+ t_dim1];
+ work[k + n2] = -work[ki + n2] * t[k + ki * t_dim1];
+/* L80: */
+ }
+
+/*
+ Solve upper quasi-triangular system:
+ (T(1:KI-2,1:KI-2) - (WR+i*WI))*X = SCALE*(WORK+i*WORK2)
+*/
+
+ jnxt = ki - 2;
+ for (j = ki - 2; j >= 1; --j) {
+ if (j > jnxt) {
+ goto L90;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j - 1;
+ if (j > 1) {
+ if (t[j + (j - 1) * t_dim1] != 0.f) {
+ j1 = j - 1;
+ jnxt = j - 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/* 1-by-1 diagonal block */
+
+ slaln2_(&c_false, &c__1, &c__2, &smin, &c_b1011, &t[j
+ + j * t_dim1], ldt, &c_b1011, &c_b1011, &work[
+ j + *n], n, &wr, &wi, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/*
+ Scale X(1,1) and X(1,2) to avoid overflow when
+ updating the right-hand side.
+*/
+
+ if (xnorm > 1.f) {
+ if (work[j] > bignum / xnorm) {
+ x[0] /= xnorm;
+ x[2] /= xnorm;
+ scale /= xnorm;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ sscal_(&ki, &scale, &work[*n + 1], &c__1);
+ sscal_(&ki, &scale, &work[n2 + 1], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+
+/* Update the right-hand side */
+
+ i__1 = j - 1;
+ r__1 = -x[0];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ i__1 = j - 1;
+ r__1 = -x[2];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ n2 + 1], &c__1);
+
+ } else {
+
+/* 2-by-2 diagonal block */
+
+ slaln2_(&c_false, &c__2, &c__2, &smin, &c_b1011, &t[j
+ - 1 + (j - 1) * t_dim1], ldt, &c_b1011, &
+ c_b1011, &work[j - 1 + *n], n, &wr, &wi, x, &
+ c__2, &scale, &xnorm, &ierr);
+
+/*
+ Scale X to avoid overflow when updating
+ the right-hand side.
+*/
+
+ if (xnorm > 1.f) {
+/* Computing MAX */
+ r__1 = work[j - 1], r__2 = work[j];
+ beta = dmax(r__1,r__2);
+ if (beta > bignum / xnorm) {
+ rec = 1.f / xnorm;
+ x[0] *= rec;
+ x[2] *= rec;
+ x[1] *= rec;
+ x[3] *= rec;
+ scale *= rec;
+ }
+ }
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ sscal_(&ki, &scale, &work[*n + 1], &c__1);
+ sscal_(&ki, &scale, &work[n2 + 1], &c__1);
+ }
+ work[j - 1 + *n] = x[0];
+ work[j + *n] = x[1];
+ work[j - 1 + n2] = x[2];
+ work[j + n2] = x[3];
+
+/* Update the right-hand side */
+
+ i__1 = j - 2;
+ r__1 = -x[0];
+ saxpy_(&i__1, &r__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[*n + 1], &c__1);
+ i__1 = j - 2;
+ r__1 = -x[1];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ *n + 1], &c__1);
+ i__1 = j - 2;
+ r__1 = -x[2];
+ saxpy_(&i__1, &r__1, &t[(j - 1) * t_dim1 + 1], &c__1,
+ &work[n2 + 1], &c__1);
+ i__1 = j - 2;
+ r__1 = -x[3];
+ saxpy_(&i__1, &r__1, &t[j * t_dim1 + 1], &c__1, &work[
+ n2 + 1], &c__1);
+ }
+L90:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VR and normalize. */
+
+ if (! over) {
+ scopy_(&ki, &work[*n + 1], &c__1, &vr[(is - 1) * vr_dim1
+ + 1], &c__1);
+ scopy_(&ki, &work[n2 + 1], &c__1, &vr[is * vr_dim1 + 1], &
+ c__1);
+
+ emax = 0.f;
+ i__1 = ki;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ r__3 = emax, r__4 = (r__1 = vr[k + (is - 1) * vr_dim1]
+ , dabs(r__1)) + (r__2 = vr[k + is * vr_dim1],
+ dabs(r__2));
+ emax = dmax(r__3,r__4);
+/* L100: */
+ }
+
+ remax = 1.f / emax;
+ sscal_(&ki, &remax, &vr[(is - 1) * vr_dim1 + 1], &c__1);
+ sscal_(&ki, &remax, &vr[is * vr_dim1 + 1], &c__1);
+
+ i__1 = *n;
+ for (k = ki + 1; k <= i__1; ++k) {
+ vr[k + (is - 1) * vr_dim1] = 0.f;
+ vr[k + is * vr_dim1] = 0.f;
+/* L110: */
+ }
+
+ } else {
+
+ if (ki > 2) {
+ i__1 = ki - 2;
+ sgemv_("N", n, &i__1, &c_b1011, &vr[vr_offset], ldvr,
+ &work[*n + 1], &c__1, &work[ki - 1 + *n], &vr[
+ (ki - 1) * vr_dim1 + 1], &c__1);
+ i__1 = ki - 2;
+ sgemv_("N", n, &i__1, &c_b1011, &vr[vr_offset], ldvr,
+ &work[n2 + 1], &c__1, &work[ki + n2], &vr[ki *
+ vr_dim1 + 1], &c__1);
+ } else {
+ sscal_(n, &work[ki - 1 + *n], &vr[(ki - 1) * vr_dim1
+ + 1], &c__1);
+ sscal_(n, &work[ki + n2], &vr[ki * vr_dim1 + 1], &
+ c__1);
+ }
+
+ emax = 0.f;
+ i__1 = *n;
+ for (k = 1; k <= i__1; ++k) {
+/* Computing MAX */
+ r__3 = emax, r__4 = (r__1 = vr[k + (ki - 1) * vr_dim1]
+ , dabs(r__1)) + (r__2 = vr[k + ki * vr_dim1],
+ dabs(r__2));
+ emax = dmax(r__3,r__4);
+/* L120: */
+ }
+ remax = 1.f / emax;
+ sscal_(n, &remax, &vr[(ki - 1) * vr_dim1 + 1], &c__1);
+ sscal_(n, &remax, &vr[ki * vr_dim1 + 1], &c__1);
+ }
+ }
+
+ --is;
+ if (ip != 0) {
+ --is;
+ }
+L130:
+ if (ip == 1) {
+ ip = 0;
+ }
+ if (ip == -1) {
+ ip = 1;
+ }
+/* L140: */
+ }
+ }
+
+ if (leftv) {
+
+/* Compute left eigenvectors. */
+
+ ip = 0;
+ is = 1;
+ i__1 = *n;
+ for (ki = 1; ki <= i__1; ++ki) {
+
+ if (ip == -1) {
+ goto L250;
+ }
+ if (ki == *n) {
+ goto L150;
+ }
+ if (t[ki + 1 + ki * t_dim1] == 0.f) {
+ goto L150;
+ }
+ ip = 1;
+
+L150:
+ if (somev) {
+ if (! select[ki]) {
+ goto L250;
+ }
+ }
+
+/* Compute the KI-th eigenvalue (WR,WI). */
+
+ wr = t[ki + ki * t_dim1];
+ wi = 0.f;
+ if (ip != 0) {
+ wi = sqrt((r__1 = t[ki + (ki + 1) * t_dim1], dabs(r__1))) *
+ sqrt((r__2 = t[ki + 1 + ki * t_dim1], dabs(r__2)));
+ }
+/* Computing MAX */
+ r__1 = ulp * (dabs(wr) + dabs(wi));
+ smin = dmax(r__1,smlnum);
+
+ if (ip == 0) {
+
+/* Real left eigenvector. */
+
+ work[ki + *n] = 1.f;
+
+/* Form right-hand side */
+
+ i__2 = *n;
+ for (k = ki + 1; k <= i__2; ++k) {
+ work[k + *n] = -t[ki + k * t_dim1];
+/* L160: */
+ }
+
+/*
+ Solve the quasi-triangular system:
+ (T(KI+1:N,KI+1:N) - WR)'*X = SCALE*WORK
+*/
+
+ vmax = 1.f;
+ vcrit = bignum;
+
+ jnxt = ki + 1;
+ i__2 = *n;
+ for (j = ki + 1; j <= i__2; ++j) {
+ if (j < jnxt) {
+ goto L170;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.f) {
+ j2 = j + 1;
+ jnxt = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/*
+ 1-by-1 diagonal block
+
+ Scale if necessary to avoid overflow when forming
+ the right-hand side.
+*/
+
+ if (work[j] > vcrit) {
+ rec = 1.f / vmax;
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ vmax = 1.f;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 1;
+ work[j + *n] -= sdot_(&i__3, &t[ki + 1 + j * t_dim1],
+ &c__1, &work[ki + 1 + *n], &c__1);
+
+/* Solve (T(J,J)-WR)'*X = WORK */
+
+ slaln2_(&c_false, &c__1, &c__1, &smin, &c_b1011, &t[j
+ + j * t_dim1], ldt, &c_b1011, &c_b1011, &work[
+ j + *n], n, &wr, &c_b320, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ }
+ work[j + *n] = x[0];
+/* Computing MAX */
+ r__2 = (r__1 = work[j + *n], dabs(r__1));
+ vmax = dmax(r__2,vmax);
+ vcrit = bignum / vmax;
+
+ } else {
+
+/*
+ 2-by-2 diagonal block
+
+ Scale if necessary to avoid overflow when forming
+ the right-hand side.
+
+ Computing MAX
+*/
+ r__1 = work[j], r__2 = work[j + 1];
+ beta = dmax(r__1,r__2);
+ if (beta > vcrit) {
+ rec = 1.f / vmax;
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ vmax = 1.f;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 1;
+ work[j + *n] -= sdot_(&i__3, &t[ki + 1 + j * t_dim1],
+ &c__1, &work[ki + 1 + *n], &c__1);
+
+ i__3 = j - ki - 1;
+ work[j + 1 + *n] -= sdot_(&i__3, &t[ki + 1 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 1 + *n], &c__1);
+
+/*
+ Solve
+ [T(J,J)-WR T(J,J+1) ]'* X = SCALE*( WORK1 )
+ [T(J+1,J) T(J+1,J+1)-WR] ( WORK2 )
+*/
+
+ slaln2_(&c_true, &c__2, &c__1, &smin, &c_b1011, &t[j
+ + j * t_dim1], ldt, &c_b1011, &c_b1011, &work[
+ j + *n], n, &wr, &c_b320, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + 1 + *n] = x[1];
+
+/* Computing MAX */
+ r__3 = (r__1 = work[j + *n], dabs(r__1)), r__4 = (
+ r__2 = work[j + 1 + *n], dabs(r__2)), r__3 =
+ max(r__3,r__4);
+ vmax = dmax(r__3,vmax);
+ vcrit = bignum / vmax;
+
+ }
+L170:
+ ;
+ }
+
+/* Copy the vector x or Q*x to VL and normalize. */
+
+ if (! over) {
+ i__2 = *n - ki + 1;
+ scopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
+ vl_dim1], &c__1);
+
+ i__2 = *n - ki + 1;
+ ii = isamax_(&i__2, &vl[ki + is * vl_dim1], &c__1) + ki -
+ 1;
+ remax = 1.f / (r__1 = vl[ii + is * vl_dim1], dabs(r__1));
+ i__2 = *n - ki + 1;
+ sscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ vl[k + is * vl_dim1] = 0.f;
+/* L180: */
+ }
+
+ } else {
+
+ if (ki < *n) {
+ i__2 = *n - ki;
+ sgemv_("N", n, &i__2, &c_b1011, &vl[(ki + 1) *
+ vl_dim1 + 1], ldvl, &work[ki + 1 + *n], &c__1,
+ &work[ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
+ }
+
+ ii = isamax_(n, &vl[ki * vl_dim1 + 1], &c__1);
+ remax = 1.f / (r__1 = vl[ii + ki * vl_dim1], dabs(r__1));
+ sscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+
+ }
+
+ } else {
+
+/*
+ Complex left eigenvector.
+
+ Initial solve:
+ ((T(KI,KI) T(KI,KI+1) )' - (WR - I* WI))*X = 0.
+ ((T(KI+1,KI) T(KI+1,KI+1)) )
+*/
+
+ if ((r__1 = t[ki + (ki + 1) * t_dim1], dabs(r__1)) >= (r__2 =
+ t[ki + 1 + ki * t_dim1], dabs(r__2))) {
+ work[ki + *n] = wi / t[ki + (ki + 1) * t_dim1];
+ work[ki + 1 + n2] = 1.f;
+ } else {
+ work[ki + *n] = 1.f;
+ work[ki + 1 + n2] = -wi / t[ki + 1 + ki * t_dim1];
+ }
+ work[ki + 1 + *n] = 0.f;
+ work[ki + n2] = 0.f;
+
+/* Form right-hand side */
+
+ i__2 = *n;
+ for (k = ki + 2; k <= i__2; ++k) {
+ work[k + *n] = -work[ki + *n] * t[ki + k * t_dim1];
+ work[k + n2] = -work[ki + 1 + n2] * t[ki + 1 + k * t_dim1]
+ ;
+/* L190: */
+ }
+
+/*
+ Solve complex quasi-triangular system:
+ ( T(KI+2:N,KI+2:N) - (WR-i*WI) )*X = WORK1+i*WORK2
+*/
+
+ vmax = 1.f;
+ vcrit = bignum;
+
+ jnxt = ki + 2;
+ i__2 = *n;
+ for (j = ki + 2; j <= i__2; ++j) {
+ if (j < jnxt) {
+ goto L200;
+ }
+ j1 = j;
+ j2 = j;
+ jnxt = j + 1;
+ if (j < *n) {
+ if (t[j + 1 + j * t_dim1] != 0.f) {
+ j2 = j + 1;
+ jnxt = j + 2;
+ }
+ }
+
+ if (j1 == j2) {
+
+/*
+ 1-by-1 diagonal block
+
+ Scale if necessary to avoid overflow when
+ forming the right-hand side elements.
+*/
+
+ if (work[j] > vcrit) {
+ rec = 1.f / vmax;
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + n2], &c__1);
+ vmax = 1.f;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 2;
+ work[j + *n] -= sdot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + *n], &c__1);
+ i__3 = j - ki - 2;
+ work[j + n2] -= sdot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + n2], &c__1);
+
+/* Solve (T(J,J)-(WR-i*WI))*(X11+i*X12)= WK+I*WK2 */
+
+ r__1 = -wi;
+ slaln2_(&c_false, &c__1, &c__2, &smin, &c_b1011, &t[j
+ + j * t_dim1], ldt, &c_b1011, &c_b1011, &work[
+ j + *n], n, &wr, &r__1, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + n2], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+/* Computing MAX */
+ r__3 = (r__1 = work[j + *n], dabs(r__1)), r__4 = (
+ r__2 = work[j + n2], dabs(r__2)), r__3 = max(
+ r__3,r__4);
+ vmax = dmax(r__3,vmax);
+ vcrit = bignum / vmax;
+
+ } else {
+
+/*
+ 2-by-2 diagonal block
+
+ Scale if necessary to avoid overflow when forming
+ the right-hand side elements.
+
+ Computing MAX
+*/
+ r__1 = work[j], r__2 = work[j + 1];
+ beta = dmax(r__1,r__2);
+ if (beta > vcrit) {
+ rec = 1.f / vmax;
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &rec, &work[ki + n2], &c__1);
+ vmax = 1.f;
+ vcrit = bignum;
+ }
+
+ i__3 = j - ki - 2;
+ work[j + *n] -= sdot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + *n], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + n2] -= sdot_(&i__3, &t[ki + 2 + j * t_dim1],
+ &c__1, &work[ki + 2 + n2], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + 1 + *n] -= sdot_(&i__3, &t[ki + 2 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 2 + *n], &c__1);
+
+ i__3 = j - ki - 2;
+ work[j + 1 + n2] -= sdot_(&i__3, &t[ki + 2 + (j + 1) *
+ t_dim1], &c__1, &work[ki + 2 + n2], &c__1);
+
+/*
+ Solve 2-by-2 complex linear equation
+ ([T(j,j) T(j,j+1) ]'-(wr-i*wi)*I)*X = SCALE*B
+ ([T(j+1,j) T(j+1,j+1)] )
+*/
+
+ r__1 = -wi;
+ slaln2_(&c_true, &c__2, &c__2, &smin, &c_b1011, &t[j
+ + j * t_dim1], ldt, &c_b1011, &c_b1011, &work[
+ j + *n], n, &wr, &r__1, x, &c__2, &scale, &
+ xnorm, &ierr);
+
+/* Scale if necessary */
+
+ if (scale != 1.f) {
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + *n], &c__1);
+ i__3 = *n - ki + 1;
+ sscal_(&i__3, &scale, &work[ki + n2], &c__1);
+ }
+ work[j + *n] = x[0];
+ work[j + n2] = x[2];
+ work[j + 1 + *n] = x[1];
+ work[j + 1 + n2] = x[3];
+/* Computing MAX */
+ r__1 = dabs(x[0]), r__2 = dabs(x[2]), r__1 = max(r__1,
+ r__2), r__2 = dabs(x[1]), r__1 = max(r__1,
+ r__2), r__2 = dabs(x[3]), r__1 = max(r__1,
+ r__2);
+ vmax = dmax(r__1,vmax);
+ vcrit = bignum / vmax;
+
+ }
+L200:
+ ;
+ }
+
+/*
+ Copy the vector x or Q*x to VL and normalize.
+
+ L210:
+*/
+ if (! over) {
+ i__2 = *n - ki + 1;
+ scopy_(&i__2, &work[ki + *n], &c__1, &vl[ki + is *
+ vl_dim1], &c__1);
+ i__2 = *n - ki + 1;
+ scopy_(&i__2, &work[ki + n2], &c__1, &vl[ki + (is + 1) *
+ vl_dim1], &c__1);
+
+ emax = 0.f;
+ i__2 = *n;
+ for (k = ki; k <= i__2; ++k) {
+/* Computing MAX */
+ r__3 = emax, r__4 = (r__1 = vl[k + is * vl_dim1],
+ dabs(r__1)) + (r__2 = vl[k + (is + 1) *
+ vl_dim1], dabs(r__2));
+ emax = dmax(r__3,r__4);
+/* L220: */
+ }
+ remax = 1.f / emax;
+ i__2 = *n - ki + 1;
+ sscal_(&i__2, &remax, &vl[ki + is * vl_dim1], &c__1);
+ i__2 = *n - ki + 1;
+ sscal_(&i__2, &remax, &vl[ki + (is + 1) * vl_dim1], &c__1)
+ ;
+
+ i__2 = ki - 1;
+ for (k = 1; k <= i__2; ++k) {
+ vl[k + is * vl_dim1] = 0.f;
+ vl[k + (is + 1) * vl_dim1] = 0.f;
+/* L230: */
+ }
+ } else {
+ if (ki < *n - 1) {
+ i__2 = *n - ki - 1;
+ sgemv_("N", n, &i__2, &c_b1011, &vl[(ki + 2) *
+ vl_dim1 + 1], ldvl, &work[ki + 2 + *n], &c__1,
+ &work[ki + *n], &vl[ki * vl_dim1 + 1], &c__1);
+ i__2 = *n - ki - 1;
+ sgemv_("N", n, &i__2, &c_b1011, &vl[(ki + 2) *
+ vl_dim1 + 1], ldvl, &work[ki + 2 + n2], &c__1,
+ &work[ki + 1 + n2], &vl[(ki + 1) * vl_dim1 +
+ 1], &c__1);
+ } else {
+ sscal_(n, &work[ki + *n], &vl[ki * vl_dim1 + 1], &
+ c__1);
+ sscal_(n, &work[ki + 1 + n2], &vl[(ki + 1) * vl_dim1
+ + 1], &c__1);
+ }
+
+ emax = 0.f;
+ i__2 = *n;
+ for (k = 1; k <= i__2; ++k) {
+/* Computing MAX */
+ r__3 = emax, r__4 = (r__1 = vl[k + ki * vl_dim1],
+ dabs(r__1)) + (r__2 = vl[k + (ki + 1) *
+ vl_dim1], dabs(r__2));
+ emax = dmax(r__3,r__4);
+/* L240: */
+ }
+ remax = 1.f / emax;
+ sscal_(n, &remax, &vl[ki * vl_dim1 + 1], &c__1);
+ sscal_(n, &remax, &vl[(ki + 1) * vl_dim1 + 1], &c__1);
+
+ }
+
+ }
+
+ ++is;
+ if (ip != 0) {
+ ++is;
+ }
+L250:
+ if (ip == -1) {
+ ip = 0;
+ }
+ if (ip == 1) {
+ ip = -1;
+ }
+
+/* L260: */
+ }
+
+ }
+
+ return 0;
+
+/* End of STREVC */
+
+} /* strevc_ */
+
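The loops above fill VR (and VL further up) using LAPACK's packed convention: a real eigenvalue gets one column, while a complex conjugate pair gets two consecutive columns holding the real and imaginary parts of the eigenvector for the eigenvalue with positive imaginary part; its conjugate is implied. A minimal sketch of unpacking that layout (illustrative only — the array below is hypothetical; column-major indexing matches the f2c translation):

    #include <stdio.h>

    /* Unpack one conjugate pair from STREVC output: column j holds Re(v),
       column j+1 holds Im(v); the second eigenvector is conj(v). */
    static void print_pair(const float *vr, int ldvr, int n, int j)
    {
        int k;
        for (k = 0; k < n; ++k) {
            float re = vr[k + j * ldvr];
            float im = vr[k + (j + 1) * ldvr];
            printf("v[%d] = %g%+gi, conjugate: %g%+gi\n", k, re, im, re, -im);
        }
    }

    int main(void)
    {
        /* hypothetical 2x2 VR holding a single conjugate pair */
        float vr[4] = { 1.0f, 0.0f,    /* column 0: real parts      */
                        0.0f, 1.0f };  /* column 1: imaginary parts */
        print_pair(vr, 2, 2, 0);
        return 0;
    }
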
+/* Subroutine */ int strti2_(char *uplo, char *diag, integer *n, real *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+
+ /* Local variables */
+ static integer j;
+ static real ajj;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int strmv_(char *, char *, char *, integer *,
+ real *, integer *, real *, integer *),
+ xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ February 29, 1992
+
+
+ Purpose
+ =======
+
+ STRTI2 computes the inverse of a real upper or lower triangular
+ matrix.
+
+ This is the Level 2 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the matrix A is upper or lower triangular.
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ DIAG (input) CHARACTER*1
+ Specifies whether or not the matrix A is unit triangular.
+ = 'N': Non-unit triangular
+ = 'U': Unit triangular
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the triangular matrix A. If UPLO = 'U', the
+ leading n by n upper triangular part of the array A contains
+ the upper triangular matrix, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n by n lower triangular part of the array A contains
+ the lower triangular matrix, and the strictly upper
+ triangular part of A is not referenced. If DIAG = 'U', the
+ diagonal elements of A are also not referenced and are
+ assumed to be 1.
+
+ On exit, the (triangular) inverse of the original matrix, in
+ the same storage format.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRTI2", &i__1);
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (nounit) {
+ a[j + j * a_dim1] = 1.f / a[j + j * a_dim1];
+ ajj = -a[j + j * a_dim1];
+ } else {
+ ajj = -1.f;
+ }
+
+/* Compute elements 1:j-1 of j-th column. */
+
+ i__2 = j - 1;
+ strmv_("Upper", "No transpose", diag, &i__2, &a[a_offset], lda, &
+ a[j * a_dim1 + 1], &c__1);
+ i__2 = j - 1;
+ sscal_(&i__2, &ajj, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix. */
+
+ for (j = *n; j >= 1; --j) {
+ if (nounit) {
+ a[j + j * a_dim1] = 1.f / a[j + j * a_dim1];
+ ajj = -a[j + j * a_dim1];
+ } else {
+ ajj = -1.f;
+ }
+ if (j < *n) {
+
+/* Compute elements j+1:n of j-th column. */
+
+ i__1 = *n - j;
+ strmv_("Lower", "No transpose", diag, &i__1, &a[j + 1 + (j +
+ 1) * a_dim1], lda, &a[j + 1 + j * a_dim1], &c__1);
+ i__1 = *n - j;
+ sscal_(&i__1, &ajj, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of STRTI2 */
+
+} /* strti2_ */
+
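A minimal driver sketch for strti2_ follows, assuming the usual f2c typedefs from f2c.h (integer may be long int on some platforms) and column-major storage; the 2-by-2 matrix and its expected inverse are worked by hand:

    #include <stdio.h>

    typedef int integer;   /* assumption: f2c.h may define this as long int */
    typedef float real;

    extern int strti2_(char *uplo, char *diag, integer *n, real *a,
                       integer *lda, integer *info);

    int main(void)
    {
        /* A = [2 1; 0 4] stored column-major */
        real a[4] = { 2.f, 0.f, 1.f, 4.f };
        integer n = 2, lda = 2, info = 0;

        strti2_("U", "N", &n, a, &lda, &info);
        /* expect inv(A) = [0.5 -0.125; 0 0.25] */
        printf("info=%d inv=[%g %g; %g %g]\n",
               (int)info, a[0], a[2], a[1], a[3]);
        return 0;
    }
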
+/* Subroutine */ int strtri_(char *uplo, char *diag, integer *n, real *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, i__1, i__2[2], i__3, i__4, i__5;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer j, jb, nb, nn;
+ extern logical lsame_(char *, char *);
+ static logical upper;
+ extern /* Subroutine */ int strmm_(char *, char *, char *, char *,
+ integer *, integer *, real *, real *, integer *, real *, integer *
+ ), strsm_(char *, char *, char *,
+ char *, integer *, integer *, real *, real *, integer *, real *,
+ integer *), strti2_(char *, char *
+ , integer *, real *, integer *, integer *),
+ xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical nounit;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ STRTRI computes the inverse of a real upper or lower triangular
+ matrix A.
+
+ This is the Level 3 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': A is upper triangular;
+ = 'L': A is lower triangular.
+
+ DIAG (input) CHARACTER*1
+ = 'N': A is non-unit triangular;
+ = 'U': A is unit triangular.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) REAL array, dimension (LDA,N)
+ On entry, the triangular matrix A. If UPLO = 'U', the
+ leading N-by-N upper triangular part of the array A contains
+ the upper triangular matrix, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading N-by-N lower triangular part of the array A contains
+ the lower triangular matrix, and the strictly upper
+ triangular part of A is not referenced. If DIAG = 'U', the
+ diagonal elements of A are also not referenced and are
+ assumed to be 1.
+ On exit, the (triangular) inverse of the original matrix, in
+ the same storage format.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, A(i,i) is exactly zero. The triangular
+ matrix is singular and its inverse cannot be computed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("STRTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity if non-unit. */
+
+ if (nounit) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ if (a[*info + *info * a_dim1] == 0.f) {
+ return 0;
+ }
+/* L10: */
+ }
+ *info = 0;
+ }
+
+/*
+ Determine the block size for this environment.
+
+ Writing concatenation
+*/
+ i__2[0] = 1, a__1[0] = uplo;
+ i__2[1] = 1, a__1[1] = diag;
+ s_cat(ch__1, a__1, i__2, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "STRTRI", ch__1, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+ if ((nb <= 1) || (nb >= *n)) {
+
+/* Use unblocked code */
+
+ strti2_(uplo, diag, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix */
+
+ i__1 = *n;
+ i__3 = nb;
+ for (j = 1; i__3 < 0 ? j >= i__1 : j <= i__1; j += i__3) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *n - j + 1;
+ jb = min(i__4,i__5);
+
+/* Compute rows 1:j-1 of current block column */
+
+ i__4 = j - 1;
+ strmm_("Left", "Upper", "No transpose", diag, &i__4, &jb, &
+ c_b1011, &a[a_offset], lda, &a[j * a_dim1 + 1], lda);
+ i__4 = j - 1;
+ strsm_("Right", "Upper", "No transpose", diag, &i__4, &jb, &
+ c_b1290, &a[j + j * a_dim1], lda, &a[j * a_dim1 + 1],
+ lda);
+
+/* Compute inverse of current diagonal block */
+
+ strti2_("Upper", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L20: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix */
+
+ nn = (*n - 1) / nb * nb + 1;
+ i__3 = -nb;
+ for (j = nn; i__3 < 0 ? j >= 1 : j <= 1; j += i__3) {
+/* Computing MIN */
+ i__1 = nb, i__4 = *n - j + 1;
+ jb = min(i__1,i__4);
+ if (j + jb <= *n) {
+
+/* Compute rows j+jb:n of current block column */
+
+ i__1 = *n - j - jb + 1;
+ strmm_("Left", "Lower", "No transpose", diag, &i__1, &jb,
+ &c_b1011, &a[j + jb + (j + jb) * a_dim1], lda, &a[
+ j + jb + j * a_dim1], lda);
+ i__1 = *n - j - jb + 1;
+ strsm_("Right", "Lower", "No transpose", diag, &i__1, &jb,
+ &c_b1290, &a[j + j * a_dim1], lda, &a[j + jb + j
+ * a_dim1], lda);
+ }
+
+/* Compute inverse of current diagonal block */
+
+ strti2_("Lower", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L30: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of STRTRI */
+
+} /* strtri_ */
+
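Unlike strti2_, the blocked driver checks the diagonal up front and reports a zero entry through INFO rather than dividing by it. A small sketch of that contract, under the same f2c typedef assumptions as above:

    #include <stdio.h>

    typedef int integer;   /* assumption: see note above */
    typedef float real;

    extern int strtri_(char *uplo, char *diag, integer *n, real *a,
                       integer *lda, integer *info);

    int main(void)
    {
        /* A(2,2) == 0: strtri_ should return info == 2, not divide by zero */
        real a[4] = { 1.f, 0.f, 3.f, 0.f };
        integer n = 2, lda = 2, info = 0;

        strtri_("U", "N", &n, a, &lda, &info);
        printf("info = %d (expect 2: A(2,2) is exactly zero)\n", (int)info);
        return 0;
    }
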
diff --git a/numpy/linalg/f2c.h b/numpy/linalg/lapack_lite/f2c.h
index e27d7ae57..e27d7ae57 100644
--- a/numpy/linalg/f2c.h
+++ b/numpy/linalg/lapack_lite/f2c.h
diff --git a/numpy/linalg/f2c_lite.c b/numpy/linalg/lapack_lite/f2c_lite.c
index 6402271c9..c0814b3bf 100644
--- a/numpy/linalg/f2c_lite.c
+++ b/numpy/linalg/lapack_lite/f2c_lite.c
@@ -21,6 +21,34 @@ s_rnge(char *var, int index, char *routine, int lineno)
abort();
}
+#ifdef KR_headers
+extern float sqrtf();
+double f__cabsf(real, imag) float real, imag;
+#else
+#undef abs
+
+double f__cabsf(float real, float imag)
+#endif
+{
+float temp;
+
+if(real < 0.0f)
+ real = -real;
+if(imag < 0.0f)
+ imag = -imag;
+if(imag > real){
+ temp = real;
+ real = imag;
+ imag = temp;
+}
+if((imag+real) == real)
+ return((float)real);
+
+temp = imag/real;
+temp = real*sqrtf(1.0f + temp*temp); /*overflow!!*/
+return(temp);
+}
+
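f__cabsf avoids the overflow in the naive sqrt(x*x + y*y): after ordering so that |x| >= |y| it computes |x+iy| = |x|*sqrt(1 + (y/x)^2), keeping the squared term at most 1. A standalone sketch of the same scaling (the test value is illustrative):

    #include <math.h>
    #include <stdio.h>

    /* Mirror of f__cabsf above: scale by the larger component first. */
    static float safe_cabsf(float re, float im)
    {
        float t;
        if (re < 0.f) re = -re;
        if (im < 0.f) im = -im;
        if (im > re) { t = re; re = im; im = t; }
        if (re + im == re)        /* im negligible; also covers im == 0 */
            return re;
        t = im / re;              /* |t| <= 1, so t*t cannot overflow   */
        return re * sqrtf(1.0f + t * t);
    }

    int main(void)
    {
        float big = 2.0e38f;      /* big*big overflows float            */
        float naive = sqrtf(big * big + big * big);  /* typically inf   */
        printf("naive %g vs scaled %g\n", naive, safe_cabsf(big, big));
        return 0;
    }
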
#ifdef KR_headers
extern double sqrt();
@@ -50,6 +78,16 @@ temp = real*sqrt(1.0 + temp*temp); /*overflow!!*/
return(temp);
}
+ VOID
+#ifdef KR_headers
+r_cnjg(r, z) complex *r, *z;
+#else
+r_cnjg(complex *r, complex *z)
+#endif
+{
+r->r = z->r;
+r->i = - z->i;
+}
VOID
#ifdef KR_headers
@@ -64,6 +102,15 @@ r->i = - z->i;
#ifdef KR_headers
+float r_imag(z) complex *z;
+#else
+float r_imag(complex *z)
+#endif
+{
+return(z->i);
+}
+
+#ifdef KR_headers
double d_imag(z) doublecomplex *z;
#else
double d_imag(doublecomplex *z)
@@ -76,6 +123,18 @@ return(z->i);
#define log10e 0.43429448190325182765
#ifdef KR_headers
+float logf();
+float r_lg10(x) real *x;
+#else
+#undef abs
+
+float r_lg10(real *x)
+#endif
+{
+return( log10e * logf(*x) );
+}
+
+#ifdef KR_headers
double log();
double d_lg10(x) doublereal *x;
#else
@@ -87,6 +146,16 @@ double d_lg10(doublereal *x)
return( log10e * log(*x) );
}
+#ifdef KR_headers
+double r_sign(a,b) real *a, *b;
+#else
+double r_sign(real *a, real *b)
+#endif
+{
+float x;
+x = (*a >= 0.0f ? *a : - *a);
+return( *b >= 0.0f ? x : -x);
+}
#ifdef KR_headers
double d_sign(a,b) doublereal *a, *b;
@@ -128,6 +197,40 @@ return(pow(*ap, *bp) );
#ifdef KR_headers
+double pow_ri(ap, bp) real *ap; integer *bp;
+#else
+double pow_ri(real *ap, integer *bp)
+#endif
+{
+float pow, x;
+integer n;
+unsigned long u;
+
+pow = 1;
+x = *ap;
+n = *bp;
+
+if(n != 0)
+ {
+ if(n < 0)
+ {
+ n = -n;
+ x = 1.0f/x;
+ }
+ for(u = n; ; )
+ {
+ if(u & 01)
+ pow *= x;
+ if(u >>= 1)
+ x *= x;
+ else
+ break;
+ }
+ }
+return(pow);
+}
+
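pow_ri raises a float to an integer power by square-and-multiply: each low bit of the exponent decides whether the current power of x is folded into the result, so only O(log n) multiplications are needed, and a negative exponent inverts the base first. A sketch of the same loop with a worked case (for n = 13 = 0b1101 the result collects x, x^4 and x^8):

    #include <stdio.h>

    static float pow_ri_sketch(float x, long n)
    {
        float pow = 1.f;
        unsigned long u;
        if (n < 0) { n = -n; x = 1.0f / x; }
        if (n != 0)
            for (u = (unsigned long)n; ; ) {
                if (u & 01) pow *= x;   /* low bit set: fold in x^(2^k) */
                if (u >>= 1) x *= x;    /* square for the next bit      */
                else break;
            }
        return pow;
    }

    int main(void)
    {
        printf("2^13 = %g (expect 8192)\n", pow_ri_sketch(2.f, 13));
        return 0;
    }
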
+#ifdef KR_headers
double pow_di(ap, bp) doublereal *ap; integer *bp;
#else
double pow_di(doublereal *ap, integer *bp)
@@ -332,6 +435,17 @@ void s_copy(register char *a, register char *b, ftnlen la, ftnlen lb)
#ifdef KR_headers
+double f__cabsf();
+double c_abs(z) complex *z;
+#else
+double f__cabsf(float, float);
+double c_abs(complex *z)
+#endif
+{
+return( f__cabsf( z->r, z->i ) );
+}
+
+#ifdef KR_headers
double f__cabs();
double z_abs(z) doublecomplex *z;
#else
@@ -345,6 +459,42 @@ return( f__cabs( z->r, z->i ) );
#ifdef KR_headers
extern void sig_die();
+VOID c_div(c, a, b) complex *a, *b, *c;
+#else
+extern void sig_die(char*, int);
+void c_div(complex *c, complex *a, complex *b)
+#endif
+{
+float ratio, den;
+float abr, abi;
+
+if( (abr = b->r) < 0.f)
+ abr = - abr;
+if( (abi = b->i) < 0.f)
+ abi = - abi;
+if( abr <= abi )
+ {
+ /*Let IEEE Infinities handle this ;( */
+ /*if(abi == 0)
+ sig_die("complex division by zero", 1);*/
+ ratio = b->r / b->i ;
+ den = b->i * (1.f + ratio*ratio);
+ c->r = (a->r*ratio + a->i) / den;
+ c->i = (a->i*ratio - a->r) / den;
+ }
+
+else
+ {
+ ratio = b->i / b->r ;
+ den = b->r * (1.f + ratio*ratio);
+ c->r = (a->r + a->i*ratio) / den;
+ c->i = (a->i - a->r*ratio) / den;
+ }
+
+}
+
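c_div uses the Smith-style rescaling: dividing through by the larger-magnitude component of b keeps |ratio| <= 1, so neither b->r*b->r nor b->i*b->i is ever formed directly. A worked check of the |br| <= |bi| branch (numbers chosen by hand):

    #include <stdio.h>

    int main(void)
    {
        /* (4+2i)/(1+i) = 3 - i */
        float ar = 4.f, ai = 2.f, br = 1.f, bi = 1.f;
        float ratio = br / bi;                  /* |br| <= |bi| branch */
        float den = bi * (1.f + ratio * ratio); /* here den = 2        */
        printf("%g%+gi (expect 3-1i)\n",
               (ar * ratio + ai) / den, (ai * ratio - ar) / den);
        return 0;
    }
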
+#ifdef KR_headers
+extern void sig_die();
VOID z_div(c, a, b) doublecomplex *a, *b, *c;
#else
extern void sig_die(char*, int);
@@ -381,6 +531,35 @@ else
#ifdef KR_headers
+float sqrtf(), f__cabsf();
+VOID c_sqrt(r, z) complex *r, *z;
+#else
+#undef abs
+
+extern double f__cabsf(float, float);
+void c_sqrt(complex *r, complex *z)
+#endif
+{
+float mag;
+
+if( (mag = f__cabsf(z->r, z->i)) == 0.f)
+ r->r = r->i = 0.f;
+else if(z->r > 0.0f)
+ {
+ r->r = sqrtf(0.5f * (mag + z->r) );
+ r->i = z->i / r->r / 2.0f;
+ }
+else
+ {
+ r->i = sqrtf(0.5f * (mag - z->r) );
+ if(z->i < 0.0f)
+ r->i = - r->i;
+ r->r = z->i / r->i / 2.0f;
+ }
+}
+
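c_sqrt applies the half-angle identities: when Re z > 0, Re w = sqrt((|z| + Re z)/2) and Im w = Im z/(2 Re w); otherwise the mirrored pair is used, with the sign of Im w taken from Im z. A quick numeric check of the Re z > 0 branch:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* z = 3+4i: |z| = 5, Re w = sqrt((5+3)/2) = 2, Im w = 4/(2*2) = 1 */
        float zr = 3.f, zi = 4.f;
        float mag = hypotf(zr, zi);
        float rr = sqrtf(0.5f * (mag + zr));  /* valid since zr > 0 */
        float ri = zi / rr / 2.0f;
        printf("sqrt(3+4i) = %g%+gi (expect 2+1i)\n", rr, ri);
        return 0;
    }
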
+
+#ifdef KR_headers
double sqrt(), f__cabs();
VOID z_sqrt(r, z) doublecomplex *r, *z;
#else
diff --git a/numpy/linalg/lapack_lite/fortran.py b/numpy/linalg/lapack_lite/fortran.py
index 7be986a8e..3b6ac70f0 100644
--- a/numpy/linalg/lapack_lite/fortran.py
+++ b/numpy/linalg/lapack_lite/fortran.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import re
import itertools
@@ -34,14 +36,19 @@ class LineIterator(object):
object.__init__(self)
self.iterable = iter(iterable)
self.lineno = 0
+
def __iter__(self):
return self
- def next(self):
+
+ def __next__(self):
self.lineno += 1
- line = self.iterable.next()
+ line = next(self.iterable)
line = line.rstrip()
return line
+ next = __next__
+
+
class PushbackIterator(object):
"""PushbackIterator(iterable)
@@ -57,15 +64,18 @@ class PushbackIterator(object):
def __iter__(self):
return self
- def next(self):
+ def __next__(self):
if self.buffer:
return self.buffer.pop()
else:
- return self.iterable.next()
+ return next(self.iterable)
def pushback(self, item):
self.buffer.append(item)
+ next = __next__
+
+
def fortranSourceLines(fo):
"""Return an iterator over statement lines of a Fortran source file.
diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py
index e857866e1..6aa5c3e80 100755
--- a/numpy/linalg/lapack_lite/make_lite.py
+++ b/numpy/linalg/lapack_lite/make_lite.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+from __future__ import division, absolute_import, print_function
import sys, os
import fortran
@@ -110,12 +111,12 @@ class FortranLibrary(object):
def allRoutineNames(self):
"""Return the names of all the routines.
"""
- return self.names_to_routines.keys()
+ return list(self.names_to_routines.keys())
def allRoutines(self):
"""Return all the routines.
"""
- return self.names_to_routines.values()
+ return list(self.names_to_routines.values())
def resolveAllDependencies(self):
"""Try to add routines to the library to satisfy all the dependencies
@@ -125,7 +126,7 @@ class FortranLibrary(object):
"""
done_this = set()
last_todo = set()
- while 1:
+ while True:
todo = set(self.allRoutineNames()) - done_this
if todo == last_todo:
break
@@ -150,14 +151,13 @@ class LapackLibrary(FortranLibrary):
return routine
def allRoutinesByType(self, typename):
- routines = [(r.name,r) for r in self.allRoutines() if r.type == typename]
- routines.sort()
+ routines = sorted((r.name,r) for r in self.allRoutines() if r.type == typename)
return [a[1] for a in routines]
def printRoutineNames(desc, routines):
- print desc
+ print(desc)
for r in routines:
- print '\t%s' % r.name
+ print('\t%s' % r.name)
def getLapackRoutines(wrapped_routines, ignores, lapack_dir):
blas_src_dir = os.path.join(lapack_dir, 'BLAS', 'SRC')
@@ -235,8 +235,8 @@ def scrubF2CSource(c_file):
def main():
if len(sys.argv) != 4:
- print 'Usage: %s wrapped_routines_file lapack_dir output_dir' % \
- (sys.argv[0],)
+ print('Usage: %s wrapped_routines_file lapack_dir output_dir' % \
+ (sys.argv[0],))
return
wrapped_routines_file = sys.argv[1]
lapack_src_dir = sys.argv[2]
@@ -248,7 +248,7 @@ def main():
dumpRoutineNames(library, output_dir)
for typename in ['blas', 'dlapack', 'zlapack']:
- print 'creating %s_lite.c ...' % typename
+ print('creating %s_lite.c ...' % typename)
routines = library.allRoutinesByType(typename)
fortran_file = os.path.join(output_dir, typename+'_lite.f')
c_file = fortran_file[:-2] + '.c'
@@ -256,7 +256,7 @@ def main():
try:
runF2C(fortran_file, output_dir)
except F2CError:
- print 'f2c failed on %s' % fortran_file
+ print('f2c failed on %s' % fortran_file)
break
scrubF2CSource(c_file)
diff --git a/numpy/linalg/python_xerbla.c b/numpy/linalg/lapack_lite/python_xerbla.c
index 4e5a68413..bc5d41f58 100644
--- a/numpy/linalg/python_xerbla.c
+++ b/numpy/linalg/lapack_lite/python_xerbla.c
@@ -26,12 +26,16 @@ int xerbla_(char *srname, integer *info)
6 for name, 4 for param. num. */
int len = 0; /* length of subroutine name*/
+ PyGILState_STATE save;
+
while( len<6 && srname[len]!='\0' )
len++;
while( len && srname[len-1]==' ' )
len--;
- snprintf(buf, sizeof(buf), format, len, srname, *info);
+ PyOS_snprintf(buf, sizeof(buf), format, len, srname, *info);
+ save = PyGILState_Ensure();
PyErr_SetString(PyExc_ValueError, buf);
+ PyGILState_Release(save);
return 0;
}
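The hunk above matters because xerbla_ can be entered from LAPACK code running after the caller released the GIL; setting a Python exception without holding it is undefined behavior. The general bracket it applies looks like this (a sketch of the pattern for any C extension, not the committed function itself):

    #include <Python.h>

    static void report_value_error(const char *msg)
    {
        PyGILState_STATE save = PyGILState_Ensure();  /* take the GIL      */
        PyErr_SetString(PyExc_ValueError, msg);       /* now safe to call  */
        PyGILState_Release(save);                     /* restore GIL state */
    }

The switch from snprintf to PyOS_snprintf in the same hunk trades the C99 function for CPython's portable wrapper, which behaves consistently on platforms whose native snprintf predates the standard.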
diff --git a/numpy/linalg/zlapack_lite.c b/numpy/linalg/lapack_lite/zlapack_lite.c
index 4549f68b5..1b43c6270 100644
--- a/numpy/linalg/zlapack_lite.c
+++ b/numpy/linalg/lapack_lite/zlapack_lite.c
@@ -68,7 +68,7 @@ static doublereal c_b2210 = .5;
if (*n <= 0) {
return 0;
}
- if ((*incx == 1 && *incy == 1)) {
+ if (*incx == 1 && *incy == 1) {
goto L20;
}
@@ -218,7 +218,7 @@ L20:
/* Parameter adjustments */
--scale;
v_dim1 = *ldv;
- v_offset = 1 + v_dim1 * 1;
+ v_offset = 1 + v_dim1;
v -= v_offset;
/* Function Body */
@@ -226,16 +226,16 @@ L20:
leftv = lsame_(side, "L");
*info = 0;
- if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S"))
- && ! lsame_(job, "B"))) {
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
*info = -1;
- } else if ((! rightv && ! leftv)) {
+ } else if (! rightv && ! leftv) {
*info = -2;
} else if (*n < 0) {
*info = -3;
- } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
*info = -4;
- } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
*info = -5;
} else if (*m < 0) {
*info = -7;
@@ -266,7 +266,7 @@ L20:
/* Backward balance */
- if (lsame_(job, "S") || lsame_(job, "B")) {
+ if ((lsame_(job, "S")) || (lsame_(job, "B"))) {
if (rightv) {
i__1 = *ihi;
@@ -296,12 +296,12 @@ L20:
*/
L30:
- if (lsame_(job, "P") || lsame_(job, "B")) {
+ if ((lsame_(job, "P")) || (lsame_(job, "B"))) {
if (rightv) {
i__1 = *n;
for (ii = 1; ii <= i__1; ++ii) {
i__ = ii;
- if ((i__ >= *ilo && i__ <= *ihi)) {
+ if (i__ >= *ilo && i__ <= *ihi) {
goto L40;
}
if (i__ < *ilo) {
@@ -321,7 +321,7 @@ L40:
i__1 = *n;
for (ii = 1; ii <= i__1; ++ii) {
i__ = ii;
- if ((i__ >= *ilo && i__ <= *ihi)) {
+ if (i__ >= *ilo && i__ <= *ihi) {
goto L50;
}
if (i__ < *ilo) {
@@ -471,14 +471,14 @@ L50:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--scale;
/* Function Body */
*info = 0;
- if ((((! lsame_(job, "N") && ! lsame_(job, "P")) && ! lsame_(job, "S"))
- && ! lsame_(job, "B"))) {
+ if (! lsame_(job, "N") && ! lsame_(job, "P") && ! lsame_(job, "S")
+ && ! lsame_(job, "B")) {
*info = -1;
} else if (*n < 0) {
*info = -2;
@@ -550,7 +550,7 @@ L50:
goto L60;
}
i__2 = j + i__ * a_dim1;
- if (a[i__2].r != 0. || d_imag(&a[j + i__ * a_dim1]) != 0.) {
+ if ((a[i__2].r != 0.) || (d_imag(&a[j + i__ * a_dim1]) != 0.)) {
goto L70;
}
L60:
@@ -581,7 +581,7 @@ L90:
goto L100;
}
i__3 = i__ + j * a_dim1;
- if (a[i__3].r != 0. || d_imag(&a[i__ + j * a_dim1]) != 0.) {
+ if ((a[i__3].r != 0.) || (d_imag(&a[i__ + j * a_dim1]) != 0.)) {
goto L110;
}
L100:
@@ -646,7 +646,7 @@ L150:
/* Guard against zero C or R due to underflow. */
- if (c__ == 0. || r__ == 0.) {
+ if ((c__ == 0.) || (r__ == 0.)) {
goto L200;
}
g = r__ / 8.;
@@ -657,7 +657,8 @@ L160:
d__1 = max(f,c__);
/* Computing MIN */
d__2 = min(r__,g);
- if (c__ >= g || max(d__1,ca) >= sfmax2 || min(d__2,ra) <= sfmin2) {
+ if (((c__ >= g) || (max(d__1,ca) >= sfmax2)) || (min(d__2,ra) <=
+ sfmin2)) {
goto L170;
}
f *= 8.;
@@ -673,7 +674,8 @@ L170:
L180:
/* Computing MIN */
d__1 = min(f,c__), d__1 = min(d__1,g);
- if (g < r__ || max(r__,ra) >= sfmax2 || min(d__1,ca) <= sfmin2) {
+ if (((g < r__) || (max(r__,ra) >= sfmax2)) || (min(d__1,ca) <= sfmin2)
+ ) {
goto L190;
}
f /= 8.;
@@ -690,12 +692,12 @@ L190:
if (c__ + r__ >= s * .95) {
goto L200;
}
- if ((f < 1. && scale[i__] < 1.)) {
+ if (f < 1. && scale[i__] < 1.) {
if (f * scale[i__] <= sfmin1) {
goto L200;
}
}
- if ((f > 1. && scale[i__] > 1.)) {
+ if (f > 1. && scale[i__] > 1.) {
if (scale[i__] >= sfmax1 / f) {
goto L200;
}
@@ -871,7 +873,7 @@ L210:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--d__;
--e;
@@ -1210,7 +1212,7 @@ L210:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--d__;
--e;
@@ -1237,7 +1239,7 @@ L210:
} else /* if(complicated condition) */ {
/* Computing MAX */
i__1 = max(1,*m);
- if ((*lwork < max(i__1,*n) && ! lquery)) {
+ if (*lwork < max(i__1,*n) && ! lquery) {
*info = -10;
}
}
@@ -1261,7 +1263,7 @@ L210:
ldwrkx = *m;
ldwrky = *n;
- if ((nb > 1 && nb < minmn)) {
+ if (nb > 1 && nb < minmn) {
/*
Set the crossover point NX.
@@ -1542,14 +1544,14 @@ L210:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--w;
vl_dim1 = *ldvl;
- vl_offset = 1 + vl_dim1 * 1;
+ vl_offset = 1 + vl_dim1;
vl -= vl_offset;
vr_dim1 = *ldvr;
- vr_offset = 1 + vr_dim1 * 1;
+ vr_offset = 1 + vr_dim1;
vr -= vr_offset;
--work;
--rwork;
@@ -1559,17 +1561,17 @@ L210:
lquery = *lwork == -1;
wantvl = lsame_(jobvl, "V");
wantvr = lsame_(jobvr, "V");
- if ((! wantvl && ! lsame_(jobvl, "N"))) {
+ if (! wantvl && ! lsame_(jobvl, "N")) {
*info = -1;
- } else if ((! wantvr && ! lsame_(jobvr, "N"))) {
+ } else if (! wantvr && ! lsame_(jobvr, "N")) {
*info = -2;
} else if (*n < 0) {
*info = -3;
} else if (*lda < max(1,*n)) {
*info = -5;
- } else if (*ldvl < 1 || (wantvl && *ldvl < *n)) {
+ } else if ((*ldvl < 1) || (wantvl && *ldvl < *n)) {
*info = -8;
- } else if (*ldvr < 1 || (wantvr && *ldvr < *n)) {
+ } else if ((*ldvr < 1) || (wantvr && *ldvr < *n)) {
*info = -10;
}
@@ -1587,10 +1589,10 @@ L210:
*/
minwrk = 1;
- if ((*info == 0 && (*lwork >= 1 || lquery))) {
+ if (*info == 0 && ((*lwork >= 1) || (lquery))) {
maxwrk = *n + *n * ilaenv_(&c__1, "ZGEHRD", " ", n, &c__1, n, &c__0, (
ftnlen)6, (ftnlen)1);
- if ((! wantvl && ! wantvr)) {
+ if (! wantvl && ! wantvr) {
/* Computing MAX */
i__1 = 1, i__2 = (*n) << (1);
minwrk = max(i__1,i__2);
@@ -1639,7 +1641,7 @@ L210:
}
work[1].r = (doublereal) maxwrk, work[1].i = 0.;
}
- if ((*lwork < minwrk && ! lquery)) {
+ if (*lwork < minwrk && ! lquery) {
*info = -12;
}
if (*info != 0) {
@@ -1669,7 +1671,7 @@ L210:
anrm = zlange_("M", n, n, &a[a_offset], lda, dum);
scalea = FALSE_;
- if ((anrm > 0. && anrm < smlnum)) {
+ if (anrm > 0. && anrm < smlnum) {
scalea = TRUE_;
cscale = smlnum;
} else if (anrm > bignum) {
@@ -1797,7 +1799,7 @@ L210:
goto L50;
}
- if (wantvl || wantvr) {
+ if ((wantvl) || (wantvr)) {
/*
Compute left and/or right eigenvectors
@@ -2028,7 +2030,7 @@ L50:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -2037,9 +2039,9 @@ L50:
*info = 0;
if (*n < 0) {
*info = -1;
- } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
*info = -2;
- } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
*info = -3;
} else if (*lda < max(1,*n)) {
*info = -5;
@@ -2224,7 +2226,7 @@ L50:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -2240,13 +2242,13 @@ L50:
lquery = *lwork == -1;
if (*n < 0) {
*info = -1;
- } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
*info = -2;
- } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
*info = -3;
} else if (*lda < max(1,*n)) {
*info = -5;
- } else if ((*lwork < max(1,*n) && ! lquery)) {
+ } else if (*lwork < max(1,*n) && ! lquery) {
*info = -8;
}
if (*info != 0) {
@@ -2282,7 +2284,7 @@ L50:
nbmin = 2;
iws = 1;
- if ((nb > 1 && nb < nh)) {
+ if (nb > 1 && nb < nh) {
/*
Determine when to cross over from blocked to unblocked code
@@ -2320,7 +2322,7 @@ L50:
}
ldwork = *n;
- if (nb < nbmin || nb >= nh) {
+ if ((nb < nbmin) || (nb >= nh)) {
/* Use unblocked code below */
@@ -2472,7 +2474,7 @@ L50:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -2634,7 +2636,7 @@ L50:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -2652,7 +2654,7 @@ L50:
*info = -2;
} else if (*lda < max(1,*m)) {
*info = -4;
- } else if ((*lwork < max(1,*m) && ! lquery)) {
+ } else if (*lwork < max(1,*m) && ! lquery) {
*info = -7;
}
if (*info != 0) {
@@ -2674,7 +2676,7 @@ L50:
nbmin = 2;
nx = 0;
iws = *m;
- if ((nb > 1 && nb < k)) {
+ if (nb > 1 && nb < k) {
/*
Determine when to cross over from blocked to unblocked code.
@@ -2706,7 +2708,7 @@ L50:
}
}
- if (((nb >= nbmin && nb < k) && nx < k)) {
+ if (nb >= nbmin && nb < k && nx < k) {
/* Use blocked code initially */
@@ -2966,10 +2968,10 @@ L50:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
+ b_offset = 1 + b_dim1;
b -= b_offset;
--s;
--work;
@@ -3011,7 +3013,7 @@ L50:
if (*info == 0) {
maxwrk = 0;
mm = *m;
- if ((*m >= *n && *m >= mnthr)) {
+ if (*m >= *n && *m >= mnthr) {
/* Path 1a - overdetermined, with many more rows than columns. */
@@ -3117,7 +3119,7 @@ L50:
d__1 = (doublereal) maxwrk;
z__1.r = d__1, z__1.i = 0.;
work[1].r = z__1.r, work[1].i = z__1.i;
- if ((*lwork < minwrk && ! lquery)) {
+ if (*lwork < minwrk && ! lquery) {
*info = -12;
}
}
@@ -3132,7 +3134,7 @@ L50:
/* Quick return if possible. */
- if (*m == 0 || *n == 0) {
+ if ((*m == 0) || (*n == 0)) {
*rank = 0;
return 0;
}
@@ -3149,7 +3151,7 @@ L50:
anrm = zlange_("M", m, n, &a[a_offset], lda, &rwork[1]);
iascl = 0;
- if ((anrm > 0. && anrm < smlnum)) {
+ if (anrm > 0. && anrm < smlnum) {
/* Scale matrix norm up to SMLNUM */
@@ -3179,7 +3181,7 @@ L50:
bnrm = zlange_("M", m, nrhs, &b[b_offset], ldb, &rwork[1]);
ibscl = 0;
- if ((bnrm > 0. && bnrm < smlnum)) {
+ if (bnrm > 0. && bnrm < smlnum) {
/* Scale matrix norm up to SMLNUM. */
@@ -3290,8 +3292,8 @@ L50:
/* Computing MAX */
i__1 = *m, i__2 = ((*m) << (1)) - 4, i__1 = max(i__1,i__2), i__1 =
max(i__1,*nrhs), i__2 = *n - *m * 3;
- if ((*n >= mnthr && *lwork >= ((*m) << (2)) + *m * *m + max(i__1,i__2)
- )) {
+ if (*n >= mnthr && *lwork >= ((*m) << (2)) + *m * *m + max(i__1,i__2))
+ {
/*
Path 2a - underdetermined, with many more columns than rows
@@ -3549,7 +3551,7 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -3709,7 +3711,7 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -3727,7 +3729,7 @@ L10:
*info = -2;
} else if (*lda < max(1,*m)) {
*info = -4;
- } else if ((*lwork < max(1,*n) && ! lquery)) {
+ } else if (*lwork < max(1,*n) && ! lquery) {
*info = -7;
}
if (*info != 0) {
@@ -3749,7 +3751,7 @@ L10:
nbmin = 2;
nx = 0;
iws = *n;
- if ((nb > 1 && nb < k)) {
+ if (nb > 1 && nb < k) {
/*
Determine when to cross over from blocked to unblocked code.
@@ -3781,7 +3783,7 @@ L10:
}
}
- if (((nb >= nbmin && nb < k) && nx < k)) {
+ if (nb >= nbmin && nb < k && nx < k) {
/* Use blocked code initially */
@@ -4062,14 +4064,14 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--s;
u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
+ u_offset = 1 + u_dim1;
u -= u_offset;
vt_dim1 = *ldvt;
- vt_offset = 1 + vt_dim1 * 1;
+ vt_offset = 1 + vt_dim1;
vt -= vt_offset;
--work;
--rwork;
@@ -4082,14 +4084,14 @@ L10:
mnthr2 = (integer) (minmn * 5. / 3.);
wntqa = lsame_(jobz, "A");
wntqs = lsame_(jobz, "S");
- wntqas = wntqa || wntqs;
+ wntqas = (wntqa) || (wntqs);
wntqo = lsame_(jobz, "O");
wntqn = lsame_(jobz, "N");
minwrk = 1;
maxwrk = 1;
lquery = *lwork == -1;
- if (! (wntqa || wntqs || wntqo || wntqn)) {
+ if (! ((((wntqa) || (wntqs)) || (wntqo)) || (wntqn))) {
*info = -1;
} else if (*m < 0) {
*info = -2;
@@ -4097,11 +4099,11 @@ L10:
*info = -3;
} else if (*lda < max(1,*m)) {
*info = -5;
- } else if (*ldu < 1 || (wntqas && *ldu < *m) || ((wntqo && *m < *n) && *
+ } else if (((*ldu < 1) || (wntqas && *ldu < *m)) || (wntqo && *m < *n && *
ldu < *m)) {
*info = -8;
- } else if (*ldvt < 1 || (wntqa && *ldvt < *n) || (wntqs && *ldvt < minmn)
- || ((wntqo && *m >= *n) && *ldvt < *n)) {
+ } else if ((((*ldvt < 1) || (wntqa && *ldvt < *n)) || (wntqs && *ldvt <
+ minmn)) || (wntqo && *m >= *n && *ldvt < *n)) {
*info = -10;
}
@@ -4115,7 +4117,7 @@ L10:
immediately following subroutine, as returned by ILAENV.)
*/
- if (((*info == 0 && *m > 0) && *n > 0)) {
+ if (*info == 0 && *m > 0 && *n > 0) {
if (*m >= *n) {
/*
@@ -4502,7 +4504,7 @@ L10:
work[1].r = (doublereal) maxwrk, work[1].i = 0.;
}
- if ((*lwork < minwrk && ! lquery)) {
+ if (*lwork < minwrk && ! lquery) {
*info = -13;
}
if (*info != 0) {
@@ -4515,7 +4517,7 @@ L10:
/* Quick return if possible */
- if (*m == 0 || *n == 0) {
+ if ((*m == 0) || (*n == 0)) {
if (*lwork >= 1) {
work[1].r = 1., work[1].i = 0.;
}
@@ -4532,7 +4534,7 @@ L10:
anrm = zlange_("M", m, n, &a[a_offset], lda, dum);
iscl = 0;
- if ((anrm > 0. && anrm < smlnum)) {
+ if (anrm > 0. && anrm < smlnum) {
iscl = 1;
zlascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
ierr);
@@ -6420,11 +6422,11 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--ipiv;
b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
+ b_offset = 1 + b_dim1;
b -= b_offset;
/* Function Body */
@@ -6539,7 +6541,7 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--ipiv;
@@ -6560,7 +6562,7 @@ L10:
/* Quick return if possible */
- if (*m == 0 || *n == 0) {
+ if ((*m == 0) || (*n == 0)) {
return 0;
}
@@ -6573,7 +6575,7 @@ L10:
jp = j - 1 + izamax_(&i__2, &a[j + j * a_dim1], &c__1);
ipiv[j] = jp;
i__2 = jp + j * a_dim1;
- if (a[i__2].r != 0. || a[i__2].i != 0.) {
+ if ((a[i__2].r != 0.) || (a[i__2].i != 0.)) {
/* Apply the interchange to columns 1:N. */
@@ -6694,7 +6696,7 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--ipiv;
@@ -6715,7 +6717,7 @@ L10:
/* Quick return if possible */
- if (*m == 0 || *n == 0) {
+ if ((*m == 0) || (*n == 0)) {
return 0;
}
@@ -6723,7 +6725,7 @@ L10:
nb = ilaenv_(&c__1, "ZGETRF", " ", m, n, &c_n1, &c_n1, (ftnlen)6, (ftnlen)
1);
- if (nb <= 1 || nb >= min(*m,*n)) {
+ if ((nb <= 1) || (nb >= min(*m,*n))) {
/* Use unblocked code. */
@@ -6749,7 +6751,7 @@ L10:
/* Adjust INFO and the pivot indices. */
- if ((*info == 0 && iinfo > 0)) {
+ if (*info == 0 && iinfo > 0) {
*info = iinfo + j - 1;
}
/* Computing MIN */
@@ -6882,18 +6884,18 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--ipiv;
b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
+ b_offset = 1 + b_dim1;
b -= b_offset;
/* Function Body */
*info = 0;
notran = lsame_(trans, "N");
- if (((! notran && ! lsame_(trans, "T")) && ! lsame_(
- trans, "C"))) {
+ if (! notran && ! lsame_(trans, "T") && ! lsame_(
+ trans, "C")) {
*info = -1;
} else if (*n < 0) {
*info = -2;
@@ -6912,7 +6914,7 @@ L10:
/* Quick return if possible */
- if (*n == 0 || *nrhs == 0) {
+ if ((*n == 0) || (*nrhs == 0)) {
return 0;
}
@@ -7138,7 +7140,7 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--w;
--work;
@@ -7148,7 +7150,7 @@ L10:
/* Function Body */
wantz = lsame_(jobz, "V");
lower = lsame_(uplo, "L");
- lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+ lquery = ((*lwork == -1) || (*lrwork == -1)) || (*liwork == -1);
*info = 0;
if (*n <= 1) {
@@ -7174,19 +7176,19 @@ L10:
lropt = lrwmin;
liopt = liwmin;
}
- if (! (wantz || lsame_(jobz, "N"))) {
+ if (! ((wantz) || (lsame_(jobz, "N")))) {
*info = -1;
- } else if (! (lower || lsame_(uplo, "U"))) {
+ } else if (! ((lower) || (lsame_(uplo, "U")))) {
*info = -2;
} else if (*n < 0) {
*info = -3;
} else if (*lda < max(1,*n)) {
*info = -5;
- } else if ((*lwork < lwmin && ! lquery)) {
+ } else if (*lwork < lwmin && ! lquery) {
*info = -8;
- } else if ((*lrwork < lrwmin && ! lquery)) {
+ } else if (*lrwork < lrwmin && ! lquery) {
*info = -10;
- } else if ((*liwork < liwmin && ! lquery)) {
+ } else if (*liwork < liwmin && ! lquery) {
*info = -12;
}
@@ -7233,7 +7235,7 @@ L10:
anrm = zlanhe_("M", uplo, n, &a[a_offset], lda, &rwork[1]);
iscale = 0;
- if ((anrm > 0. && anrm < rmin)) {
+ if (anrm > 0. && anrm < rmin) {
iscale = 1;
sigma = rmin / anrm;
} else if (anrm > rmax) {
@@ -7454,7 +7456,7 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--d__;
--e;
@@ -7463,7 +7465,7 @@ L10:
/* Function Body */
*info = 0;
upper = lsame_(uplo, "U");
- if ((! upper && ! lsame_(uplo, "L"))) {
+ if (! upper && ! lsame_(uplo, "L")) {
*info = -1;
} else if (*n < 0) {
*info = -2;
@@ -7503,7 +7505,7 @@ L10:
i__1 = i__;
e[i__1] = alpha.r;
- if (taui.r != 0. || taui.i != 0.) {
+ if ((taui.r != 0.) || (taui.i != 0.)) {
/* Apply H(i) from both sides to A(1:i,1:i) */
@@ -7582,7 +7584,7 @@ L10:
i__2 = i__;
e[i__2] = alpha.r;
- if (taui.r != 0. || taui.i != 0.) {
+ if ((taui.r != 0.) || (taui.i != 0.)) {
/* Apply H(i) from both sides to A(i+1:n,i+1:n) */
@@ -7801,7 +7803,7 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--d__;
--e;
@@ -7812,13 +7814,13 @@ L10:
*info = 0;
upper = lsame_(uplo, "U");
lquery = *lwork == -1;
- if ((! upper && ! lsame_(uplo, "L"))) {
+ if (! upper && ! lsame_(uplo, "L")) {
*info = -1;
} else if (*n < 0) {
*info = -2;
} else if (*lda < max(1,*n)) {
*info = -4;
- } else if ((*lwork < 1 && ! lquery)) {
+ } else if (*lwork < 1 && ! lquery) {
*info = -9;
}
@@ -7849,7 +7851,7 @@ L10:
nx = *n;
iws = 1;
- if ((nb > 1 && nb < *n)) {
+ if (nb > 1 && nb < *n) {
/*
Determine when to cross over from blocked to unblocked code
@@ -8175,38 +8177,38 @@ L10:
/* Parameter adjustments */
h_dim1 = *ldh;
- h_offset = 1 + h_dim1 * 1;
+ h_offset = 1 + h_dim1;
h__ -= h_offset;
--w;
z_dim1 = *ldz;
- z_offset = 1 + z_dim1 * 1;
+ z_offset = 1 + z_dim1;
z__ -= z_offset;
--work;
/* Function Body */
wantt = lsame_(job, "S");
initz = lsame_(compz, "I");
- wantz = initz || lsame_(compz, "V");
+ wantz = (initz) || (lsame_(compz, "V"));
*info = 0;
i__1 = max(1,*n);
work[1].r = (doublereal) i__1, work[1].i = 0.;
lquery = *lwork == -1;
- if ((! lsame_(job, "E") && ! wantt)) {
+ if (! lsame_(job, "E") && ! wantt) {
*info = -1;
- } else if ((! lsame_(compz, "N") && ! wantz)) {
+ } else if (! lsame_(compz, "N") && ! wantz) {
*info = -2;
} else if (*n < 0) {
*info = -3;
- } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
*info = -4;
- } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
*info = -5;
} else if (*ldh < max(1,*n)) {
*info = -7;
- } else if (*ldz < 1 || (wantz && *ldz < max(1,*n))) {
+ } else if ((*ldz < 1) || (wantz && *ldz < max(1,*n))) {
*info = -10;
- } else if ((*lwork < max(1,*n) && ! lquery)) {
+ } else if (*lwork < max(1,*n) && ! lquery) {
*info = -12;
}
if (*info != 0) {
@@ -8334,7 +8336,7 @@ L10:
s_cat(ch__1, a__1, i__4, &c__2, (ftnlen)2);
maxb = ilaenv_(&c__8, "ZHSEQR", ch__1, n, ilo, ihi, &c_n1, (ftnlen)6, (
ftnlen)2);
- if (ns <= 1 || ns > nh || maxb >= nh) {
+ if (((ns <= 1) || (ns > nh)) || (maxb >= nh)) {
/* Use the standard double-shift algorithm */
@@ -8437,7 +8439,7 @@ L80:
i2 = i__;
}
- if (its == 20 || its == 30) {
+ if ((its == 20) || (its == 30)) {
/* Exceptional shifts. */
@@ -8812,21 +8814,21 @@ L180:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--d__;
--e;
--tauq;
--taup;
x_dim1 = *ldx;
- x_offset = 1 + x_dim1 * 1;
+ x_offset = 1 + x_dim1;
x -= x_offset;
y_dim1 = *ldy;
- y_offset = 1 + y_dim1 * 1;
+ y_offset = 1 + y_dim1;
y -= y_offset;
/* Function Body */
- if (*m <= 0 || *n <= 0) {
+ if ((*m <= 0) || (*n <= 0)) {
return 0;
}
@@ -9263,10 +9265,10 @@ L180:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
+ b_offset = 1 + b_dim1;
b -= b_offset;
/* Function Body */
@@ -9375,10 +9377,10 @@ L180:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
+ b_offset = 1 + b_dim1;
b -= b_offset;
/* Function Body */
@@ -9503,18 +9505,18 @@ L180:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
+ b_offset = 1 + b_dim1;
b -= b_offset;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--rwork;
/* Function Body */
- if (*m == 0 || *n == 0) {
+ if ((*m == 0) || (*n == 0)) {
return 0;
}
@@ -9751,10 +9753,10 @@ L180:
--d__;
--e;
q_dim1 = *ldq;
- q_offset = 1 + q_dim1 * 1;
+ q_offset = 1 + q_dim1;
q -= q_offset;
qstore_dim1 = *ldqs;
- qstore_offset = 1 + qstore_dim1 * 1;
+ qstore_offset = 1 + qstore_dim1;
qstore -= qstore_offset;
--rwork;
--iwork;
@@ -10158,7 +10160,7 @@ L80:
/* Parameter adjustments */
--d__;
q_dim1 = *ldq;
- q_offset = 1 + q_dim1 * 1;
+ q_offset = 1 + q_dim1;
q -= q_offset;
--indxq;
--qstore;
@@ -10182,7 +10184,7 @@ L80:
*/
if (*n < 0) {
*info = -1;
- } else if (min(1,*n) > *cutpnt || *n < *cutpnt) {
+ } else if ((min(1,*n) > *cutpnt) || (*n < *cutpnt)) {
*info = -2;
} else if (*qsiz < *n) {
*info = -3;
@@ -10451,13 +10453,13 @@ L80:
/* Parameter adjustments */
q_dim1 = *ldq;
- q_offset = 1 + q_dim1 * 1;
+ q_offset = 1 + q_dim1;
q -= q_offset;
--d__;
--z__;
--dlamda;
q2_dim1 = *ldq2;
- q2_offset = 1 + q2_dim1 * 1;
+ q2_offset = 1 + q2_dim1;
q2 -= q2_offset;
--w;
--indxp;
@@ -10476,7 +10478,7 @@ L80:
*info = -3;
} else if (*ldq < max(1,*n)) {
*info = -5;
- } else if (*cutpnt < min(1,*n) || *cutpnt > *n) {
+ } else if ((*cutpnt < min(1,*n)) || (*cutpnt > *n)) {
*info = -8;
} else if (*ldq2 < max(1,*n)) {
*info = -12;
@@ -10839,11 +10841,11 @@ L100:
/* Parameter adjustments */
h_dim1 = *ldh;
- h_offset = 1 + h_dim1 * 1;
+ h_offset = 1 + h_dim1;
h__ -= h_offset;
--w;
z_dim1 = *ldz;
- z_offset = 1 + z_dim1 * 1;
+ z_offset = 1 + z_dim1;
z__ -= z_offset;
/* Function Body */
@@ -10960,7 +10962,7 @@ L30:
i2 = i__;
}
- if (its == 10 || its == 20) {
+ if ((its == 10) || (its == 20)) {
/* Exceptional shift. */
@@ -10980,7 +10982,7 @@ L30:
d__1 = h__[i__3].r;
z__1.r = d__1 * h__[i__2].r, z__1.i = d__1 * h__[i__2].i;
u.r = z__1.r, u.i = z__1.i;
- if (u.r != 0. || u.i != 0.) {
+ if ((u.r != 0.) || (u.i != 0.)) {
i__2 = i__ - 1 + (i__ - 1) * h_dim1;
z__2.r = h__[i__2].r - t.r, z__2.i = h__[i__2].i - t.i;
z__1.r = z__2.r * .5, z__1.i = z__2.i * .5;
@@ -11177,7 +11179,7 @@ L50:
}
}
- if ((k == m && m > l)) {
+ if (k == m && m > l) {
/*
If the QR step was started at row M > L because two
@@ -11410,13 +11412,13 @@ L130:
/* Parameter adjustments */
--tau;
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
t_dim1 = *ldt;
- t_offset = 1 + t_dim1 * 1;
+ t_offset = 1 + t_dim1;
t -= t_offset;
y_dim1 = *ldy;
- y_offset = 1 + y_dim1 * 1;
+ y_offset = 1 + y_dim1;
y -= y_offset;
/* Function Body */
@@ -11757,23 +11759,23 @@ L130:
/* Parameter adjustments */
b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
+ b_offset = 1 + b_dim1;
b -= b_offset;
bx_dim1 = *ldbx;
- bx_offset = 1 + bx_dim1 * 1;
+ bx_offset = 1 + bx_dim1;
bx -= bx_offset;
--perm;
givcol_dim1 = *ldgcol;
- givcol_offset = 1 + givcol_dim1 * 1;
+ givcol_offset = 1 + givcol_dim1;
givcol -= givcol_offset;
difr_dim1 = *ldgnum;
- difr_offset = 1 + difr_dim1 * 1;
+ difr_offset = 1 + difr_dim1;
difr -= difr_offset;
poles_dim1 = *ldgnum;
- poles_offset = 1 + poles_dim1 * 1;
+ poles_offset = 1 + poles_dim1;
poles -= poles_offset;
givnum_dim1 = *ldgnum;
- givnum_offset = 1 + givnum_dim1 * 1;
+ givnum_offset = 1 + givnum_dim1;
givnum -= givnum_offset;
--difl;
--z__;
@@ -11782,13 +11784,13 @@ L130:
/* Function Body */
*info = 0;
- if (*icompq < 0 || *icompq > 1) {
+ if ((*icompq < 0) || (*icompq > 1)) {
*info = -1;
} else if (*nl < 1) {
*info = -2;
} else if (*nr < 1) {
*info = -3;
- } else if (*sqre < 0 || *sqre > 1) {
+ } else if ((*sqre < 0) || (*sqre > 1)) {
*info = -4;
}
@@ -11865,7 +11867,8 @@ L130:
difrj = -difr[j + difr_dim1];
dsigjp = -poles[j + 1 + ((poles_dim1) << (1))];
}
- if (z__[j] == 0. || poles[j + ((poles_dim1) << (1))] == 0.) {
+ if ((z__[j] == 0.) || (poles[j + ((poles_dim1) << (1))] == 0.)
+ ) {
rwork[j] = 0.;
} else {
rwork[j] = -poles[j + ((poles_dim1) << (1))] * z__[j] /
@@ -11873,8 +11876,8 @@ L130:
}
i__2 = j - 1;
for (i__ = 1; i__ <= i__2; ++i__) {
- if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))]
- == 0.) {
+ if ((z__[i__] == 0.) || (poles[i__ + ((poles_dim1) << (1))
+ ] == 0.)) {
rwork[i__] = 0.;
} else {
rwork[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
@@ -11886,8 +11889,8 @@ L130:
}
i__2 = *k;
for (i__ = j + 1; i__ <= i__2; ++i__) {
- if (z__[i__] == 0. || poles[i__ + ((poles_dim1) << (1))]
- == 0.) {
+ if ((z__[i__] == 0.) || (poles[i__ + ((poles_dim1) << (1))
+ ] == 0.)) {
rwork[i__] = 0.;
} else {
rwork[i__] = poles[i__ + ((poles_dim1) << (1))] * z__[
@@ -12288,39 +12291,39 @@ L130:
/* Parameter adjustments */
b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
+ b_offset = 1 + b_dim1;
b -= b_offset;
bx_dim1 = *ldbx;
- bx_offset = 1 + bx_dim1 * 1;
+ bx_offset = 1 + bx_dim1;
bx -= bx_offset;
givnum_dim1 = *ldu;
- givnum_offset = 1 + givnum_dim1 * 1;
+ givnum_offset = 1 + givnum_dim1;
givnum -= givnum_offset;
poles_dim1 = *ldu;
- poles_offset = 1 + poles_dim1 * 1;
+ poles_offset = 1 + poles_dim1;
poles -= poles_offset;
z_dim1 = *ldu;
- z_offset = 1 + z_dim1 * 1;
+ z_offset = 1 + z_dim1;
z__ -= z_offset;
difr_dim1 = *ldu;
- difr_offset = 1 + difr_dim1 * 1;
+ difr_offset = 1 + difr_dim1;
difr -= difr_offset;
difl_dim1 = *ldu;
- difl_offset = 1 + difl_dim1 * 1;
+ difl_offset = 1 + difl_dim1;
difl -= difl_offset;
vt_dim1 = *ldu;
- vt_offset = 1 + vt_dim1 * 1;
+ vt_offset = 1 + vt_dim1;
vt -= vt_offset;
u_dim1 = *ldu;
- u_offset = 1 + u_dim1 * 1;
+ u_offset = 1 + u_dim1;
u -= u_offset;
--k;
--givptr;
perm_dim1 = *ldgcol;
- perm_offset = 1 + perm_dim1 * 1;
+ perm_offset = 1 + perm_dim1;
perm -= perm_offset;
givcol_dim1 = *ldgcol;
- givcol_offset = 1 + givcol_dim1 * 1;
+ givcol_offset = 1 + givcol_dim1;
givcol -= givcol_offset;
--c__;
--s;
@@ -12330,7 +12333,7 @@ L130:
/* Function Body */
*info = 0;
- if (*icompq < 0 || *icompq > 1) {
+ if ((*icompq < 0) || (*icompq > 1)) {
*info = -1;
} else if (*smlsiz < 3) {
*info = -2;
@@ -12945,7 +12948,7 @@ L330:
--d__;
--e;
b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
+ b_offset = 1 + b_dim1;
b -= b_offset;
--work;
--rwork;
@@ -12958,7 +12961,7 @@ L330:
*info = -3;
} else if (*nrhs < 1) {
*info = -4;
- } else if (*ldb < 1 || *ldb < *n) {
+ } else if ((*ldb < 1) || (*ldb < *n)) {
*info = -8;
}
if (*info != 0) {
@@ -12971,7 +12974,7 @@ L330:
/* Set up the tolerance. */
- if (*rcond <= 0. || *rcond >= 1.) {
+ if ((*rcond <= 0.) || (*rcond >= 1.)) {
*rcond = eps;
}
@@ -13236,7 +13239,7 @@ L330:
i__1 = nm1;
for (i__ = 1; i__ <= i__1; ++i__) {
- if ((d__1 = e[i__], abs(d__1)) < eps || i__ == nm1) {
+ if (((d__1 = e[i__], abs(d__1)) < eps) || (i__ == nm1)) {
++nsub;
iwork[nsub] = st;
@@ -13586,7 +13589,7 @@ doublereal zlange_(char *norm, integer *m, integer *n, doublecomplex *a,
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--work;
@@ -13609,8 +13612,8 @@ doublereal zlange_(char *norm, integer *m, integer *n, doublecomplex *a,
}
/* L20: */
}
- } else if (lsame_(norm, "O") || *(unsigned char *)
- norm == '1') {
+ } else if ((lsame_(norm, "O")) || (*(unsigned char *
+ )norm == '1')) {
/* Find norm1(A). */
@@ -13652,7 +13655,8 @@ doublereal zlange_(char *norm, integer *m, integer *n, doublecomplex *a,
value = max(d__1,d__2);
/* L80: */
}
- } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
/* Find normF(A). */
@@ -13764,7 +13768,7 @@ doublereal zlanhe_(char *norm, char *uplo, integer *n, doublecomplex *a,
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--work;
@@ -13809,7 +13813,8 @@ doublereal zlanhe_(char *norm, char *uplo, integer *n, doublecomplex *a,
/* L40: */
}
}
- } else if (lsame_(norm, "I") || lsame_(norm, "O") || *(unsigned char *)norm == '1') {
+ } else if (((lsame_(norm, "I")) || (lsame_(norm,
+ "O"))) || (*(unsigned char *)norm == '1')) {
/* Find normI(A) ( = norm1(A), since A is hermitian). */
@@ -13857,7 +13862,8 @@ doublereal zlanhe_(char *norm, char *uplo, integer *n, doublecomplex *a,
/* L100: */
}
}
- } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
/* Find normF(A). */
@@ -13987,7 +13993,7 @@ doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda,
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--work;
@@ -14012,8 +14018,8 @@ doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda,
}
/* L20: */
}
- } else if (lsame_(norm, "O") || *(unsigned char *)
- norm == '1') {
+ } else if ((lsame_(norm, "O")) || (*(unsigned char *
+ )norm == '1')) {
/* Find norm1(A). */
@@ -14059,7 +14065,8 @@ doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda,
value = max(d__1,d__2);
/* L80: */
}
- } else if (lsame_(norm, "F") || lsame_(norm, "E")) {
+ } else if ((lsame_(norm, "F")) || (lsame_(norm,
+ "E"))) {
/* Find normF(A). */
@@ -14158,18 +14165,18 @@ doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda,
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
b_dim1 = *ldb;
- b_offset = 1 + b_dim1 * 1;
+ b_offset = 1 + b_dim1;
b -= b_offset;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--rwork;
/* Function Body */
- if (*m == 0 || *n == 0) {
+ if ((*m == 0) || (*n == 0)) {
return 0;
}
@@ -14315,7 +14322,7 @@ doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda,
/* Parameter adjustments */
--v;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--work;
@@ -14324,7 +14331,7 @@ doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda,
/* Form H * C */
- if (tau->r != 0. || tau->i != 0.) {
+ if ((tau->r != 0.) || (tau->i != 0.)) {
/* w := C' * v */
@@ -14341,7 +14348,7 @@ doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda,
/* Form C * H */
- if (tau->r != 0. || tau->i != 0.) {
+ if ((tau->r != 0.) || (tau->i != 0.)) {
/* w := C * v */
@@ -14475,20 +14482,20 @@ doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda,
/* Parameter adjustments */
v_dim1 = *ldv;
- v_offset = 1 + v_dim1 * 1;
+ v_offset = 1 + v_dim1;
v -= v_offset;
t_dim1 = *ldt;
- t_offset = 1 + t_dim1 * 1;
+ t_offset = 1 + t_dim1;
t -= t_offset;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
work_dim1 = *ldwork;
- work_offset = 1 + work_dim1 * 1;
+ work_offset = 1 + work_dim1;
work -= work_offset;
/* Function Body */
- if (*m <= 0 || *n <= 0) {
+ if ((*m <= 0) || (*n <= 0)) {
return 0;
}
@@ -15235,7 +15242,7 @@ doublereal zlanhs_(char *norm, integer *n, doublecomplex *a, integer *lda,
alphr = alpha->r;
alphi = d_imag(alpha);
- if ((xnorm == 0. && alphi == 0.)) {
+ if (xnorm == 0. && alphi == 0.) {
/* H = I */
@@ -15435,11 +15442,11 @@ L10:
/* Parameter adjustments */
v_dim1 = *ldv;
- v_offset = 1 + v_dim1 * 1;
+ v_offset = 1 + v_dim1;
v -= v_offset;
--tau;
t_dim1 = *ldt;
- t_offset = 1 + t_dim1 * 1;
+ t_offset = 1 + t_dim1;
t -= t_offset;
/* Function Body */
@@ -15451,7 +15458,7 @@ L10:
i__1 = *k;
for (i__ = 1; i__ <= i__1; ++i__) {
i__2 = i__;
- if ((tau[i__2].r == 0. && tau[i__2].i == 0.)) {
+ if (tau[i__2].r == 0. && tau[i__2].i == 0.) {
/* H(i) = I */
@@ -15517,7 +15524,7 @@ L10:
} else {
for (i__ = *k; i__ >= 1; --i__) {
i__1 = i__;
- if ((tau[i__1].r == 0. && tau[i__1].i == 0.)) {
+ if (tau[i__1].r == 0. && tau[i__1].i == 0.) {
/* H(i) = I */
@@ -15686,12 +15693,12 @@ L10:
/* Parameter adjustments */
--v;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--work;
/* Function Body */
- if ((tau->r == 0. && tau->i == 0.)) {
+ if (tau->r == 0. && tau->i == 0.) {
return 0;
}
if (lsame_(side, "L")) {
@@ -17740,7 +17747,7 @@ L410:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
/* Function Body */
@@ -17770,25 +17777,25 @@ L410:
*info = -4;
} else if (*m < 0) {
*info = -6;
- } else if (*n < 0 || (itype == 4 && *n != *m) || (itype == 5 && *n != *m))
- {
+ } else if (((*n < 0) || (itype == 4 && *n != *m)) || (itype == 5 && *n !=
+ *m)) {
*info = -7;
- } else if ((itype <= 3 && *lda < max(1,*m))) {
+ } else if (itype <= 3 && *lda < max(1,*m)) {
*info = -9;
} else if (itype >= 4) {
/* Computing MAX */
i__1 = *m - 1;
- if (*kl < 0 || *kl > max(i__1,0)) {
+ if ((*kl < 0) || (*kl > max(i__1,0))) {
*info = -2;
} else /* if(complicated condition) */ {
/* Computing MAX */
i__1 = *n - 1;
- if (*ku < 0 || *ku > max(i__1,0) || ((itype == 4 || itype == 5) &&
- *kl != *ku)) {
+ if (((*ku < 0) || (*ku > max(i__1,0))) || (((itype == 4) || (
+ itype == 5)) && *kl != *ku)) {
*info = -3;
- } else if ((itype == 4 && *lda < *kl + 1) || (itype == 5 && *lda <
- *ku + 1) || (itype == 6 && *lda < ((*kl) << (1)) + *ku +
- 1)) {
+ } else if (((itype == 4 && *lda < *kl + 1) || (itype == 5 && *lda
+ < *ku + 1)) || (itype == 6 && *lda < ((*kl) << (1)) + *ku
+ + 1)) {
*info = -9;
}
}
@@ -17802,7 +17809,7 @@ L410:
/* Quick return if possible */
- if (*n == 0 || *m == 0) {
+ if ((*n == 0) || (*m == 0)) {
return 0;
}
@@ -17817,7 +17824,7 @@ L410:
L10:
cfrom1 = cfromc * smlnum;
cto1 = ctoc / bignum;
- if ((abs(cfrom1) > abs(ctoc) && ctoc != 0.)) {
+ if (abs(cfrom1) > abs(ctoc) && ctoc != 0.) {
mul = smlnum;
done = FALSE_;
cfromc = cfrom1;
@@ -18041,7 +18048,7 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
/* Function Body */
@@ -18245,18 +18252,18 @@ L10:
--c__;
--s;
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
/* Function Body */
info = 0;
- if (! (lsame_(side, "L") || lsame_(side, "R"))) {
+ if (! ((lsame_(side, "L")) || (lsame_(side, "R")))) {
info = 1;
- } else if (! (lsame_(pivot, "V") || lsame_(pivot,
- "T") || lsame_(pivot, "B"))) {
+ } else if (! (((lsame_(pivot, "V")) || (lsame_(
+ pivot, "T"))) || (lsame_(pivot, "B")))) {
info = 2;
- } else if (! (lsame_(direct, "F") || lsame_(direct,
- "B"))) {
+ } else if (! ((lsame_(direct, "F")) || (lsame_(
+ direct, "B")))) {
info = 3;
} else if (*m < 0) {
info = 4;
@@ -18272,7 +18279,7 @@ L10:
/* Quick return if possible */
- if (*m == 0 || *n == 0) {
+ if ((*m == 0) || (*n == 0)) {
return 0;
}
if (lsame_(side, "L")) {
@@ -18285,7 +18292,7 @@ L10:
for (j = 1; j <= i__1; ++j) {
ctemp = c__[j];
stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__2 = *n;
for (i__ = 1; i__ <= i__2; ++i__) {
i__3 = j + 1 + i__ * a_dim1;
@@ -18315,7 +18322,7 @@ L10:
for (j = *m - 1; j >= 1; --j) {
ctemp = c__[j];
stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__1 = *n;
for (i__ = 1; i__ <= i__1; ++i__) {
i__2 = j + 1 + i__ * a_dim1;
@@ -18348,7 +18355,7 @@ L10:
for (j = 2; j <= i__1; ++j) {
ctemp = c__[j - 1];
stemp = s[j - 1];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__2 = *n;
for (i__ = 1; i__ <= i__2; ++i__) {
i__3 = j + i__ * a_dim1;
@@ -18378,7 +18385,7 @@ L10:
for (j = *m; j >= 2; --j) {
ctemp = c__[j - 1];
stemp = s[j - 1];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__1 = *n;
for (i__ = 1; i__ <= i__1; ++i__) {
i__2 = j + i__ * a_dim1;
@@ -18411,7 +18418,7 @@ L10:
for (j = 1; j <= i__1; ++j) {
ctemp = c__[j];
stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__2 = *n;
for (i__ = 1; i__ <= i__2; ++i__) {
i__3 = j + i__ * a_dim1;
@@ -18441,7 +18448,7 @@ L10:
for (j = *m - 1; j >= 1; --j) {
ctemp = c__[j];
stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__1 = *n;
for (i__ = 1; i__ <= i__1; ++i__) {
i__2 = j + i__ * a_dim1;
@@ -18479,7 +18486,7 @@ L10:
for (j = 1; j <= i__1; ++j) {
ctemp = c__[j];
stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__2 = *m;
for (i__ = 1; i__ <= i__2; ++i__) {
i__3 = i__ + (j + 1) * a_dim1;
@@ -18509,7 +18516,7 @@ L10:
for (j = *n - 1; j >= 1; --j) {
ctemp = c__[j];
stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__1 = *m;
for (i__ = 1; i__ <= i__1; ++i__) {
i__2 = i__ + (j + 1) * a_dim1;
@@ -18542,7 +18549,7 @@ L10:
for (j = 2; j <= i__1; ++j) {
ctemp = c__[j - 1];
stemp = s[j - 1];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__2 = *m;
for (i__ = 1; i__ <= i__2; ++i__) {
i__3 = i__ + j * a_dim1;
@@ -18572,7 +18579,7 @@ L10:
for (j = *n; j >= 2; --j) {
ctemp = c__[j - 1];
stemp = s[j - 1];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__1 = *m;
for (i__ = 1; i__ <= i__1; ++i__) {
i__2 = i__ + j * a_dim1;
@@ -18605,7 +18612,7 @@ L10:
for (j = 1; j <= i__1; ++j) {
ctemp = c__[j];
stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__2 = *m;
for (i__ = 1; i__ <= i__2; ++i__) {
i__3 = i__ + j * a_dim1;
@@ -18635,7 +18642,7 @@ L10:
for (j = *n - 1; j >= 1; --j) {
ctemp = c__[j];
stemp = s[j];
- if (ctemp != 1. || stemp != 0.) {
+ if ((ctemp != 1.) || (stemp != 0.)) {
i__1 = *m;
for (i__ = 1; i__ <= i__1; ++i__) {
i__2 = i__ + j * a_dim1;
@@ -18856,7 +18863,7 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--ipiv;
@@ -19100,12 +19107,12 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--e;
--tau;
w_dim1 = *ldw;
- w_offset = 1 + w_dim1 * 1;
+ w_offset = 1 + w_dim1;
w -= w_offset;
/* Function Body */
@@ -19547,7 +19554,7 @@ L10:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--x;
--cnorm;
@@ -19560,15 +19567,15 @@ L10:
/* Test the input parameters. */
- if ((! upper && ! lsame_(uplo, "L"))) {
+ if (! upper && ! lsame_(uplo, "L")) {
*info = -1;
- } else if (((! notran && ! lsame_(trans, "T")) && !
- lsame_(trans, "C"))) {
+ } else if (! notran && ! lsame_(trans, "T") && !
+ lsame_(trans, "C")) {
*info = -2;
- } else if ((! nounit && ! lsame_(diag, "U"))) {
+ } else if (! nounit && ! lsame_(diag, "U")) {
*info = -3;
- } else if ((! lsame_(normin, "Y") && ! lsame_(
- normin, "N"))) {
+ } else if (! lsame_(normin, "Y") && ! lsame_(normin,
+ "N")) {
*info = -4;
} else if (*n < 0) {
*info = -5;
@@ -20104,7 +20111,7 @@ L110:
}
csumj.r = 0., csumj.i = 0.;
- if ((uscal.r == 1. && uscal.i == 0.)) {
+ if (uscal.r == 1. && uscal.i == 0.) {
/*
If the scaling needed for A in the dot product is 1,
@@ -20162,7 +20169,7 @@ L110:
}
z__1.r = tscal, z__1.i = 0.;
- if ((uscal.r == z__1.r && uscal.i == z__1.i)) {
+ if (uscal.r == z__1.r && uscal.i == z__1.i) {
/*
Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j)
@@ -20319,7 +20326,7 @@ L160:
}
csumj.r = 0., csumj.i = 0.;
- if ((uscal.r == 1. && uscal.i == 0.)) {
+ if (uscal.r == 1. && uscal.i == 0.) {
/*
If the scaling needed for A in the dot product is 1,
@@ -20377,7 +20384,7 @@ L160:
}
z__1.r = tscal, z__1.i = 0.;
- if ((uscal.r == z__1.r && uscal.i == z__1.i)) {
+ if (uscal.r == z__1.r && uscal.i == z__1.i) {
/*
Compute x(j) := ( x(j) - CSUMJ ) / A(j,j) if 1/A(j,j)
@@ -20497,6 +20504,363 @@ L210:
} /* zlatrs_ */
+/* Subroutine */ int zlauu2_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3;
+ doublereal d__1;
+ doublecomplex z__1;
+
+ /* Local variables */
+ static integer i__;
+ static doublereal aii;
+ extern logical lsame_(char *, char *);
+ extern /* Double Complex */ VOID zdotc_(doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *);
+ extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *, doublecomplex *, doublecomplex *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int xerbla_(char *, integer *), zdscal_(
+ integer *, doublereal *, doublecomplex *, integer *), zlacgv_(
+ integer *, doublecomplex *, integer *);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZLAUU2 computes the product U * U' or L' * L, where the triangular
+ factor U or L is stored in the upper or lower triangular part of
+ the array A.
+
+ If UPLO = 'U' or 'u' then the upper triangle of the result is stored,
+ overwriting the factor U in A.
+ If UPLO = 'L' or 'l' then the lower triangle of the result is stored,
+ overwriting the factor L in A.
+
+ This is the unblocked form of the algorithm, calling Level 2 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the triangular factor stored in the array A
+ is upper or lower triangular:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the triangular factor U or L. N >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the triangular factor U or L.
+ On exit, if UPLO = 'U', the upper triangle of A is
+ overwritten with the upper triangle of the product U * U';
+ if UPLO = 'L', the lower triangle of A is overwritten with
+ the lower triangle of the product L' * L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLAUU2", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute the product U * U'. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ aii = a[i__2].r;
+ if (i__ < *n) {
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = *n - i__;
+ zdotc_(&z__1, &i__3, &a[i__ + (i__ + 1) * a_dim1], lda, &a[
+ i__ + (i__ + 1) * a_dim1], lda);
+ d__1 = aii * aii + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ i__2 = *n - i__;
+ zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ i__2 = i__ - 1;
+ i__3 = *n - i__;
+ z__1.r = aii, z__1.i = 0.;
+ zgemv_("No transpose", &i__2, &i__3, &c_b60, &a[(i__ + 1) *
+ a_dim1 + 1], lda, &a[i__ + (i__ + 1) * a_dim1], lda, &
+ z__1, &a[i__ * a_dim1 + 1], &c__1);
+ i__2 = *n - i__;
+ zlacgv_(&i__2, &a[i__ + (i__ + 1) * a_dim1], lda);
+ } else {
+ zdscal_(&i__, &aii, &a[i__ * a_dim1 + 1], &c__1);
+ }
+/* L10: */
+ }
+
+ } else {
+
+/* Compute the product L' * L. */
+
+ i__1 = *n;
+ for (i__ = 1; i__ <= i__1; ++i__) {
+ i__2 = i__ + i__ * a_dim1;
+ aii = a[i__2].r;
+ if (i__ < *n) {
+ i__2 = i__ + i__ * a_dim1;
+ i__3 = *n - i__;
+ zdotc_(&z__1, &i__3, &a[i__ + 1 + i__ * a_dim1], &c__1, &a[
+ i__ + 1 + i__ * a_dim1], &c__1);
+ d__1 = aii * aii + z__1.r;
+ a[i__2].r = d__1, a[i__2].i = 0.;
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+ i__2 = *n - i__;
+ i__3 = i__ - 1;
+ z__1.r = aii, z__1.i = 0.;
+ zgemv_("Conjugate transpose", &i__2, &i__3, &c_b60, &a[i__ +
+ 1 + a_dim1], lda, &a[i__ + 1 + i__ * a_dim1], &c__1, &
+ z__1, &a[i__ + a_dim1], lda);
+ i__2 = i__ - 1;
+ zlacgv_(&i__2, &a[i__ + a_dim1], lda);
+ } else {
+ zdscal_(&i__, &aii, &a[i__ + a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of ZLAUU2 */
+
+} /* zlauu2_ */
+
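+/*
+   Worked 2 x 2 example (a sketch for exposition; not from the upstream
+   LAPACK sources): for an upper factor U, ZLAUU2 overwrites the upper
+   triangle of A with U * U', i.e.
+
+       [ u11 u12 ] [ conj(u11)     0     ]   [ |u11|^2 + |u12|^2  u12*conj(u22) ]
+       [  0  u22 ] [ conj(u12) conj(u22) ] = [         .             |u22|^2    ]
+
+   The loop above builds this one column at a time: zdotc_ accumulates
+   the |u(i,i+1:n)|^2 row term into the diagonal, zgemv_ updates column
+   i above the diagonal, and zdscal_ handles the last column. Only
+   a[i__2].r is read for the diagonal, since the diagonal of a Cholesky
+   factor is real.
+*/
+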
+/* Subroutine */ int zlauum_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2, i__3, i__4;
+
+ /* Local variables */
+ static integer i__, ib, nb;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
+ integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *, doublecomplex *, doublecomplex *,
+ integer *), zherk_(char *, char *, integer *,
+ integer *, doublereal *, doublecomplex *, integer *, doublereal *,
+ doublecomplex *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int ztrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ zlauu2_(char *, integer *, doublecomplex *, integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+
+
+/*
+ -- LAPACK auxiliary routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZLAUUM computes the product U * U' or L' * L, where the triangular
+ factor U or L is stored in the upper or lower triangular part of
+ the array A.
+
+ If UPLO = 'U' or 'u' then the upper triangle of the result is stored,
+ overwriting the factor U in A.
+ If UPLO = 'L' or 'l' then the lower triangle of the result is stored,
+ overwriting the factor L in A.
+
+ This is the blocked form of the algorithm, calling Level 3 BLAS.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the triangular factor stored in the array A
+ is upper or lower triangular:
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ N (input) INTEGER
+ The order of the triangular factor U or L. N >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the triangular factor U or L.
+ On exit, if UPLO = 'U', the upper triangle of A is
+ overwritten with the upper triangle of the product U * U';
+ if UPLO = 'L', the lower triangle of A is overwritten with
+ the lower triangle of the product L' * L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZLAUUM", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Determine the block size for this environment. */
+
+ nb = ilaenv_(&c__1, "ZLAUUM", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)1);
+
+ if ((nb <= 1) || (nb >= *n)) {
+
+/* Use unblocked code */
+
+ zlauu2_(uplo, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute the product U * U'. */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - i__ + 1;
+ ib = min(i__3,i__4);
+ i__3 = i__ - 1;
+ ztrmm_("Right", "Upper", "Conjugate transpose", "Non-unit", &
+ i__3, &ib, &c_b60, &a[i__ + i__ * a_dim1], lda, &a[
+ i__ * a_dim1 + 1], lda);
+ zlauu2_("Upper", &ib, &a[i__ + i__ * a_dim1], lda, info);
+ if (i__ + ib <= *n) {
+ i__3 = i__ - 1;
+ i__4 = *n - i__ - ib + 1;
+ zgemm_("No transpose", "Conjugate transpose", &i__3, &ib,
+ &i__4, &c_b60, &a[(i__ + ib) * a_dim1 + 1], lda, &
+ a[i__ + (i__ + ib) * a_dim1], lda, &c_b60, &a[i__
+ * a_dim1 + 1], lda);
+ i__3 = *n - i__ - ib + 1;
+ zherk_("Upper", "No transpose", &ib, &i__3, &c_b1015, &a[
+ i__ + (i__ + ib) * a_dim1], lda, &c_b1015, &a[i__
+ + i__ * a_dim1], lda);
+ }
+/* L10: */
+ }
+ } else {
+
+/* Compute the product L' * L. */
+
+ i__2 = *n;
+ i__1 = nb;
+ for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) {
+/* Computing MIN */
+ i__3 = nb, i__4 = *n - i__ + 1;
+ ib = min(i__3,i__4);
+ i__3 = i__ - 1;
+ ztrmm_("Left", "Lower", "Conjugate transpose", "Non-unit", &
+ ib, &i__3, &c_b60, &a[i__ + i__ * a_dim1], lda, &a[
+ i__ + a_dim1], lda);
+ zlauu2_("Lower", &ib, &a[i__ + i__ * a_dim1], lda, info);
+ if (i__ + ib <= *n) {
+ i__3 = i__ - 1;
+ i__4 = *n - i__ - ib + 1;
+ zgemm_("Conjugate transpose", "No transpose", &ib, &i__3,
+ &i__4, &c_b60, &a[i__ + ib + i__ * a_dim1], lda, &
+ a[i__ + ib + a_dim1], lda, &c_b60, &a[i__ +
+ a_dim1], lda);
+ i__3 = *n - i__ - ib + 1;
+ zherk_("Lower", "Conjugate transpose", &ib, &i__3, &
+ c_b1015, &a[i__ + ib + i__ * a_dim1], lda, &
+ c_b1015, &a[i__ + i__ * a_dim1], lda);
+ }
+/* L20: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZLAUUM */
+
+} /* zlauum_ */
+
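+/*
+   Usage sketch (assuming the f2c calling conventions used throughout
+   this file; illustrative only): forming U * U' in place for a
+   column-major n x n upper factor held in a is a single call,
+
+       integer n = 100, lda = 100, info = 0;
+       zlauum_("U", &n, a, &lda, &info);
+
+   after which the upper triangle of a holds U * U' and info == 0 on
+   success. ZLAUUM itself only blocks the work: ilaenv_ picks nb, and
+   for nb <= 1 or nb >= n it falls back to the unblocked zlauu2_ above;
+   otherwise each nb-column panel is handled by ztrmm_, zlauu2_ on the
+   diagonal block, and zgemm_/zherk_ for the trailing update.
+*/
+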
/* Subroutine */ int zpotf2_(char *uplo, integer *n, doublecomplex *a,
integer *lda, integer *info)
{
@@ -20585,13 +20949,13 @@ L210:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
/* Function Body */
*info = 0;
upper = lsame_(uplo, "U");
- if ((! upper && ! lsame_(uplo, "L"))) {
+ if (! upper && ! lsame_(uplo, "L")) {
*info = -1;
} else if (*n < 0) {
*info = -2;
@@ -20795,13 +21159,13 @@ L40:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
/* Function Body */
*info = 0;
upper = lsame_(uplo, "U");
- if ((! upper && ! lsame_(uplo, "L"))) {
+ if (! upper && ! lsame_(uplo, "L")) {
*info = -1;
} else if (*n < 0) {
*info = -2;
@@ -20824,7 +21188,7 @@ L40:
nb = ilaenv_(&c__1, "ZPOTRF", uplo, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
ftnlen)1);
- if (nb <= 1 || nb >= *n) {
+ if ((nb <= 1) || (nb >= *n)) {
/* Use unblocked code. */
@@ -20931,6 +21295,249 @@ L40:
} /* zpotrf_ */
+/* Subroutine */ int zpotri_(char *uplo, integer *n, doublecomplex *a,
+ integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int xerbla_(char *, integer *), zlauum_(
+ char *, integer *, doublecomplex *, integer *, integer *),
+ ztrtri_(char *, char *, integer *, doublecomplex *, integer *,
+ integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ March 31, 1993
+
+
+ Purpose
+ =======
+
+ ZPOTRI computes the inverse of a complex Hermitian positive definite
+ matrix A using the Cholesky factorization A = U**H*U or A = L*L**H
+ computed by ZPOTRF.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the triangular factor U or L from the Cholesky
+ factorization A = U**H*U or A = L*L**H, as computed by
+ ZPOTRF.
+ On exit, the upper or lower triangle of the (Hermitian)
+ inverse of A, overwriting the input factor U or L.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, the (i,i) element of the factor U or L is
+ zero, and the inverse could not be computed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ if (! lsame_(uplo, "U") && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*lda < max(1,*n)) {
+ *info = -4;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPOTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Invert the triangular Cholesky factor U or L. */
+
+ ztrtri_(uplo, "Non-unit", n, &a[a_offset], lda, info);
+ if (*info > 0) {
+ return 0;
+ }
+
+/* Form inv(U)*inv(U)' or inv(L)'*inv(L). */
+
+ zlauum_(uplo, n, &a[a_offset], lda, info);
+
+ return 0;
+
+/* End of ZPOTRI */
+
+} /* zpotri_ */
+
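+/*
+   Call-sequence sketch (f2c conventions as in the rest of this file;
+   illustrative only): inverting a Hermitian positive definite matrix
+   whose upper triangle is stored column-major in a,
+
+       integer n = 100, lda = 100, info = 0;
+       zpotrf_("U", &n, a, &lda, &info);
+       if (info == 0) {
+           zpotri_("U", &n, a, &lda, &info);
+       }
+
+   zpotrf_ leaves the factor U of A = U**H*U in a; on success the
+   zpotri_ call replaces it with the upper triangle of inv(A). As the
+   body above shows, ZPOTRI is just ztrtri_ (invert the factor)
+   followed by zlauum_ (multiply inv(U) by its conjugate transpose).
+*/
+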
+/* Subroutine */ int zpotrs_(char *uplo, integer *n, integer *nrhs,
+ doublecomplex *a, integer *lda, doublecomplex *b, integer *ldb,
+ integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, b_dim1, b_offset, i__1;
+
+ /* Local variables */
+ extern logical lsame_(char *, char *);
+ static logical upper;
+ extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ xerbla_(char *, integer *);
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZPOTRS solves a system of linear equations A*X = B with a Hermitian
+ positive definite matrix A using the Cholesky factorization
+ A = U**H*U or A = L*L**H computed by ZPOTRF.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': Upper triangle of A is stored;
+ = 'L': Lower triangle of A is stored.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ NRHS (input) INTEGER
+ The number of right hand sides, i.e., the number of columns
+ of the matrix B. NRHS >= 0.
+
+ A (input) COMPLEX*16 array, dimension (LDA,N)
+ The triangular factor U or L from the Cholesky factorization
+ A = U**H*U or A = L*L**H, as computed by ZPOTRF.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ B (input/output) COMPLEX*16 array, dimension (LDB,NRHS)
+ On entry, the right hand side matrix B.
+ On exit, the solution matrix X.
+
+ LDB (input) INTEGER
+ The leading dimension of the array B. LDB >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = *ldb;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (*n < 0) {
+ *info = -2;
+ } else if (*nrhs < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ } else if (*ldb < max(1,*n)) {
+ *info = -7;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZPOTRS", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if ((*n == 0) || (*nrhs == 0)) {
+ return 0;
+ }
+
+ if (upper) {
+
+/*
+ Solve A*X = B where A = U'*U.
+
+ Solve U'*X = B, overwriting B with X.
+*/
+
+ ztrsm_("Left", "Upper", "Conjugate transpose", "Non-unit", n, nrhs, &
+ c_b60, &a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve U*X = B, overwriting B with X. */
+
+ ztrsm_("Left", "Upper", "No transpose", "Non-unit", n, nrhs, &c_b60, &
+ a[a_offset], lda, &b[b_offset], ldb);
+ } else {
+
+/*
+ Solve A*X = B where A = L*L'.
+
+ Solve L*X = B, overwriting B with X.
+*/
+
+ ztrsm_("Left", "Lower", "No transpose", "Non-unit", n, nrhs, &c_b60, &
+ a[a_offset], lda, &b[b_offset], ldb);
+
+/* Solve L'*X = B, overwriting B with X. */
+
+ ztrsm_("Left", "Lower", "Conjugate transpose", "Non-unit", n, nrhs, &
+ c_b60, &a[a_offset], lda, &b[b_offset], ldb);
+ }
+
+ return 0;
+
+/* End of ZPOTRS */
+
+} /* zpotrs_ */
+
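+/*
+   Usage sketch (illustrative; f2c conventions as above): the
+   factor-once, solve-many pattern for A * X = B with nrhs right-hand
+   sides stored column-major in b,
+
+       integer n = 100, nrhs = 3, lda = 100, ldb = 100, info = 0;
+       zpotrf_("U", &n, a, &lda, &info);
+       if (info == 0) {
+           zpotrs_("U", &n, &nrhs, a, &lda, b, &ldb, &info);
+       }
+
+   Each solve costs two triangular solves with ztrsm_: U'*Y = B and
+   then U*X = Y (or L*Y = B, then L'*X = Y in the lower case), so the
+   O(n^3) factorization is reused across right-hand sides.
+*/
+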
/* Subroutine */ int zstedc_(char *compz, integer *n, doublereal *d__,
doublereal *e, doublecomplex *z__, integer *ldz, doublecomplex *work,
integer *lwork, doublereal *rwork, integer *lrwork, integer *iwork,
@@ -21112,7 +21719,7 @@ L40:
--d__;
--e;
z_dim1 = *ldz;
- z_offset = 1 + z_dim1 * 1;
+ z_offset = 1 + z_dim1;
z__ -= z_offset;
--work;
--rwork;
@@ -21120,7 +21727,7 @@ L40:
/* Function Body */
*info = 0;
- lquery = *lwork == -1 || *lrwork == -1 || *liwork == -1;
+ lquery = ((*lwork == -1) || (*lrwork == -1)) || (*liwork == -1);
if (lsame_(compz, "N")) {
icompz = 0;
@@ -21131,7 +21738,7 @@ L40:
} else {
icompz = -1;
}
- if (*n <= 1 || icompz <= 0) {
+ if ((*n <= 1) || (icompz <= 0)) {
lwmin = 1;
liwmin = 1;
lrwmin = 1;
@@ -21161,13 +21768,13 @@ L40:
*info = -1;
} else if (*n < 0) {
*info = -2;
- } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) {
+ } else if ((*ldz < 1) || (icompz > 0 && *ldz < max(1,*n))) {
*info = -6;
- } else if ((*lwork < lwmin && ! lquery)) {
+ } else if (*lwork < lwmin && ! lquery) {
*info = -8;
- } else if ((*lrwork < lrwmin && ! lquery)) {
+ } else if (*lrwork < lrwmin && ! lquery) {
*info = -10;
- } else if ((*liwork < liwmin && ! lquery)) {
+ } else if (*liwork < liwmin && ! lquery) {
*info = -12;
}
@@ -21517,7 +22124,7 @@ L40:
--d__;
--e;
z_dim1 = *ldz;
- z_offset = 1 + z_dim1 * 1;
+ z_offset = 1 + z_dim1;
z__ -= z_offset;
--work;
@@ -21537,7 +22144,7 @@ L40:
*info = -1;
} else if (*n < 0) {
*info = -2;
- } else if (*ldz < 1 || (icompz > 0 && *ldz < max(1,*n))) {
+ } else if ((*ldz < 1) || (icompz > 0 && *ldz < max(1,*n))) {
*info = -6;
}
if (*info != 0) {
@@ -22169,21 +22776,21 @@ L160:
/* Parameter adjustments */
--select;
t_dim1 = *ldt;
- t_offset = 1 + t_dim1 * 1;
+ t_offset = 1 + t_dim1;
t -= t_offset;
vl_dim1 = *ldvl;
- vl_offset = 1 + vl_dim1 * 1;
+ vl_offset = 1 + vl_dim1;
vl -= vl_offset;
vr_dim1 = *ldvr;
- vr_offset = 1 + vr_dim1 * 1;
+ vr_offset = 1 + vr_dim1;
vr -= vr_offset;
--work;
--rwork;
/* Function Body */
bothv = lsame_(side, "B");
- rightv = lsame_(side, "R") || bothv;
- leftv = lsame_(side, "L") || bothv;
+ rightv = (lsame_(side, "R")) || (bothv);
+ leftv = (lsame_(side, "L")) || (bothv);
allv = lsame_(howmny, "A");
over = lsame_(howmny, "B");
@@ -22208,17 +22815,17 @@ L160:
}
*info = 0;
- if ((! rightv && ! leftv)) {
+ if (! rightv && ! leftv) {
*info = -1;
- } else if (((! allv && ! over) && ! somev)) {
+ } else if (! allv && ! over && ! somev) {
*info = -2;
} else if (*n < 0) {
*info = -4;
} else if (*ldt < max(1,*n)) {
*info = -6;
- } else if (*ldvl < 1 || (leftv && *ldvl < *n)) {
+ } else if ((*ldvl < 1) || (leftv && *ldvl < *n)) {
*info = -8;
- } else if (*ldvr < 1 || (rightv && *ldvr < *n)) {
+ } else if ((*ldvr < 1) || (rightv && *ldvr < *n)) {
*info = -10;
} else if (*mm < *m) {
*info = -11;
@@ -22498,6 +23105,387 @@ L130:
} /* ztrevc_ */
+/* Subroutine */ int ztrti2_(char *uplo, char *diag, integer *n,
+ doublecomplex *a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ integer a_dim1, a_offset, i__1, i__2;
+ doublecomplex z__1;
+
+ /* Builtin functions */
+ void z_div(doublecomplex *, doublecomplex *, doublecomplex *);
+
+ /* Local variables */
+ static integer j;
+ static doublecomplex ajj;
+ extern logical lsame_(char *, char *);
+ extern /* Subroutine */ int zscal_(integer *, doublecomplex *,
+ doublecomplex *, integer *);
+ static logical upper;
+ extern /* Subroutine */ int ztrmv_(char *, char *, char *, integer *,
+ doublecomplex *, integer *, doublecomplex *, integer *), xerbla_(char *, integer *);
+ static logical nounit;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZTRTI2 computes the inverse of a complex upper or lower triangular
+ matrix.
+
+ This is the Level 2 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ Specifies whether the matrix A is upper or lower triangular.
+ = 'U': Upper triangular
+ = 'L': Lower triangular
+
+ DIAG (input) CHARACTER*1
+ Specifies whether or not the matrix A is unit triangular.
+ = 'N': Non-unit triangular
+ = 'U': Unit triangular
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the triangular matrix A. If UPLO = 'U', the
+ leading n by n upper triangular part of the array A contains
+ the upper triangular matrix, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading n by n lower triangular part of the array A contains
+ the lower triangular matrix, and the strictly upper
+ triangular part of A is not referenced. If DIAG = 'U', the
+ diagonal elements of A are also not referenced and are
+ assumed to be 1.
+
+ On exit, the (triangular) inverse of the original matrix, in
+ the same storage format.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -k, the k-th argument had an illegal value
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTRTI2", &i__1);
+ return 0;
+ }
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix. */
+
+ i__1 = *n;
+ for (j = 1; j <= i__1; ++j) {
+ if (nounit) {
+ i__2 = j + j * a_dim1;
+ z_div(&z__1, &c_b60, &a[j + j * a_dim1]);
+ a[i__2].r = z__1.r, a[i__2].i = z__1.i;
+ i__2 = j + j * a_dim1;
+ z__1.r = -a[i__2].r, z__1.i = -a[i__2].i;
+ ajj.r = z__1.r, ajj.i = z__1.i;
+ } else {
+ z__1.r = -1., z__1.i = -0.;
+ ajj.r = z__1.r, ajj.i = z__1.i;
+ }
+
+/* Compute elements 1:j-1 of j-th column. */
+
+ i__2 = j - 1;
+ ztrmv_("Upper", "No transpose", diag, &i__2, &a[a_offset], lda, &
+ a[j * a_dim1 + 1], &c__1);
+ i__2 = j - 1;
+ zscal_(&i__2, &ajj, &a[j * a_dim1 + 1], &c__1);
+/* L10: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix. */
+
+ for (j = *n; j >= 1; --j) {
+ if (nounit) {
+ i__1 = j + j * a_dim1;
+ z_div(&z__1, &c_b60, &a[j + j * a_dim1]);
+ a[i__1].r = z__1.r, a[i__1].i = z__1.i;
+ i__1 = j + j * a_dim1;
+ z__1.r = -a[i__1].r, z__1.i = -a[i__1].i;
+ ajj.r = z__1.r, ajj.i = z__1.i;
+ } else {
+ z__1.r = -1., z__1.i = -0.;
+ ajj.r = z__1.r, ajj.i = z__1.i;
+ }
+ if (j < *n) {
+
+/* Compute elements j+1:n of j-th column. */
+
+ i__1 = *n - j;
+ ztrmv_("Lower", "No transpose", diag, &i__1, &a[j + 1 + (j +
+ 1) * a_dim1], lda, &a[j + 1 + j * a_dim1], &c__1);
+ i__1 = *n - j;
+ zscal_(&i__1, &ajj, &a[j + 1 + j * a_dim1], &c__1);
+ }
+/* L20: */
+ }
+ }
+
+ return 0;
+
+/* End of ZTRTI2 */
+
+} /* ztrti2_ */
+
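+/*
+   Worked 2 x 2 example (for exposition only): with UPLO = 'U' and
+   DIAG = 'N',
+
+       inv [ a  b ]  =  [ 1/a  -b/(a*c) ]
+           [ 0  c ]     [  0      1/c   ]
+
+   which matches the column sweep above: z_div inverts the diagonal
+   entry, ztrmv_ multiplies column j by the already-inverted leading
+   block, and zscal_ scales it by ajj = -1/a(j,j).
+*/
+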
+/* Subroutine */ int ztrtri_(char *uplo, char *diag, integer *n,
+ doublecomplex *a, integer *lda, integer *info)
+{
+ /* System generated locals */
+ address a__1[2];
+ integer a_dim1, a_offset, i__1, i__2, i__3[2], i__4, i__5;
+ doublecomplex z__1;
+ char ch__1[2];
+
+ /* Builtin functions */
+ /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen);
+
+ /* Local variables */
+ static integer j, jb, nb, nn;
+ extern logical lsame_(char *, char *);
+ static logical upper;
+ extern /* Subroutine */ int ztrmm_(char *, char *, char *, char *,
+ integer *, integer *, doublecomplex *, doublecomplex *, integer *,
+ doublecomplex *, integer *),
+ ztrsm_(char *, char *, char *, char *, integer *, integer *,
+ doublecomplex *, doublecomplex *, integer *, doublecomplex *,
+ integer *), ztrti2_(char *, char *
+ , integer *, doublecomplex *, integer *, integer *), xerbla_(char *, integer *);
+ extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
+ integer *, integer *, ftnlen, ftnlen);
+ static logical nounit;
+
+
+/*
+ -- LAPACK routine (version 3.0) --
+ Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
+ Courant Institute, Argonne National Lab, and Rice University
+ September 30, 1994
+
+
+ Purpose
+ =======
+
+ ZTRTRI computes the inverse of a complex upper or lower triangular
+ matrix A.
+
+ This is the Level 3 BLAS version of the algorithm.
+
+ Arguments
+ =========
+
+ UPLO (input) CHARACTER*1
+ = 'U': A is upper triangular;
+ = 'L': A is lower triangular.
+
+ DIAG (input) CHARACTER*1
+ = 'N': A is non-unit triangular;
+ = 'U': A is unit triangular.
+
+ N (input) INTEGER
+ The order of the matrix A. N >= 0.
+
+ A (input/output) COMPLEX*16 array, dimension (LDA,N)
+ On entry, the triangular matrix A. If UPLO = 'U', the
+ leading N-by-N upper triangular part of the array A contains
+ the upper triangular matrix, and the strictly lower
+ triangular part of A is not referenced. If UPLO = 'L', the
+ leading N-by-N lower triangular part of the array A contains
+ the lower triangular matrix, and the strictly upper
+ triangular part of A is not referenced. If DIAG = 'U', the
+ diagonal elements of A are also not referenced and are
+ assumed to be 1.
+ On exit, the (triangular) inverse of the original matrix, in
+ the same storage format.
+
+ LDA (input) INTEGER
+ The leading dimension of the array A. LDA >= max(1,N).
+
+ INFO (output) INTEGER
+ = 0: successful exit
+ < 0: if INFO = -i, the i-th argument had an illegal value
+ > 0: if INFO = i, A(i,i) is exactly zero. The triangular
+ matrix is singular and its inverse cannot be computed.
+
+ =====================================================================
+
+
+ Test the input parameters.
+*/
+
+ /* Parameter adjustments */
+ a_dim1 = *lda;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+
+ /* Function Body */
+ *info = 0;
+ upper = lsame_(uplo, "U");
+ nounit = lsame_(diag, "N");
+ if (! upper && ! lsame_(uplo, "L")) {
+ *info = -1;
+ } else if (! nounit && ! lsame_(diag, "U")) {
+ *info = -2;
+ } else if (*n < 0) {
+ *info = -3;
+ } else if (*lda < max(1,*n)) {
+ *info = -5;
+ }
+ if (*info != 0) {
+ i__1 = -(*info);
+ xerbla_("ZTRTRI", &i__1);
+ return 0;
+ }
+
+/* Quick return if possible */
+
+ if (*n == 0) {
+ return 0;
+ }
+
+/* Check for singularity if non-unit. */
+
+ if (nounit) {
+ i__1 = *n;
+ for (*info = 1; *info <= i__1; ++(*info)) {
+ i__2 = *info + *info * a_dim1;
+ if (a[i__2].r == 0. && a[i__2].i == 0.) {
+ return 0;
+ }
+/* L10: */
+ }
+ *info = 0;
+ }
+
+/*
+ Determine the block size for this environment.
+
+ Writing concatenation
+*/
+ i__3[0] = 1, a__1[0] = uplo;
+ i__3[1] = 1, a__1[1] = diag;
+ s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
+ nb = ilaenv_(&c__1, "ZTRTRI", ch__1, n, &c_n1, &c_n1, &c_n1, (ftnlen)6, (
+ ftnlen)2);
+ if ((nb <= 1) || (nb >= *n)) {
+
+/* Use unblocked code */
+
+ ztrti2_(uplo, diag, n, &a[a_offset], lda, info);
+ } else {
+
+/* Use blocked code */
+
+ if (upper) {
+
+/* Compute inverse of upper triangular matrix */
+
+ i__1 = *n;
+ i__2 = nb;
+ for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) {
+/* Computing MIN */
+ i__4 = nb, i__5 = *n - j + 1;
+ jb = min(i__4,i__5);
+
+/* Compute rows 1:j-1 of current block column */
+
+ i__4 = j - 1;
+ ztrmm_("Left", "Upper", "No transpose", diag, &i__4, &jb, &
+ c_b60, &a[a_offset], lda, &a[j * a_dim1 + 1], lda);
+ i__4 = j - 1;
+ z__1.r = -1., z__1.i = -0.;
+ ztrsm_("Right", "Upper", "No transpose", diag, &i__4, &jb, &
+ z__1, &a[j + j * a_dim1], lda, &a[j * a_dim1 + 1],
+ lda);
+
+/* Compute inverse of current diagonal block */
+
+ ztrti2_("Upper", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L20: */
+ }
+ } else {
+
+/* Compute inverse of lower triangular matrix */
+
+ nn = (*n - 1) / nb * nb + 1;
+ i__2 = -nb;
+ for (j = nn; i__2 < 0 ? j >= 1 : j <= 1; j += i__2) {
+/* Computing MIN */
+ i__1 = nb, i__4 = *n - j + 1;
+ jb = min(i__1,i__4);
+ if (j + jb <= *n) {
+
+/* Compute rows j+jb:n of current block column */
+
+ i__1 = *n - j - jb + 1;
+ ztrmm_("Left", "Lower", "No transpose", diag, &i__1, &jb,
+ &c_b60, &a[j + jb + (j + jb) * a_dim1], lda, &a[j
+ + jb + j * a_dim1], lda);
+ i__1 = *n - j - jb + 1;
+ z__1.r = -1., z__1.i = -0.;
+ ztrsm_("Right", "Lower", "No transpose", diag, &i__1, &jb,
+ &z__1, &a[j + j * a_dim1], lda, &a[j + jb + j *
+ a_dim1], lda);
+ }
+
+/* Compute inverse of current diagonal block */
+
+ ztrti2_("Lower", diag, &jb, &a[j + j * a_dim1], lda, info);
+/* L30: */
+ }
+ }
+ }
+
+ return 0;
+
+/* End of ZTRTRI */
+
+} /* ztrtri_ */
+
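+/*
+   Usage sketch (illustrative; f2c conventions as above): inverting a
+   non-unit upper triangular matrix in place,
+
+       integer n = 100, lda = 100, info = 0;
+       ztrtri_("U", "N", &n, a, &lda, &info);
+
+   with info > 0 reporting the index of an exactly-zero diagonal entry,
+   per the singularity check above. Like ZLAUUM, the blocked code
+   reduces each nb x nb diagonal block to the Level 2 ztrti2_ and
+   updates the off-diagonal panels with ztrmm_/ztrsm_.
+*/
+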
/* Subroutine */ int zung2r_(integer *m, integer *n, integer *k,
doublecomplex *a, integer *lda, doublecomplex *tau, doublecomplex *
work, integer *info)
@@ -22573,7 +23561,7 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -22582,9 +23570,9 @@ L130:
*info = 0;
if (*m < 0) {
*info = -1;
- } else if (*n < 0 || *n > *m) {
+ } else if ((*n < 0) || (*n > *m)) {
*info = -2;
- } else if (*k < 0 || *k > *n) {
+ } else if ((*k < 0) || (*k > *n)) {
*info = -3;
} else if (*lda < max(1,*m)) {
*info = -5;
@@ -22774,7 +23762,7 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -22784,18 +23772,18 @@ L130:
wantq = lsame_(vect, "Q");
mn = min(*m,*n);
lquery = *lwork == -1;
- if ((! wantq && ! lsame_(vect, "P"))) {
+ if (! wantq && ! lsame_(vect, "P")) {
*info = -1;
} else if (*m < 0) {
*info = -2;
- } else if (*n < 0 || (wantq && (*n > *m || *n < min(*m,*k))) || (! wantq
- && (*m > *n || *m < min(*n,*k)))) {
+ } else if (((*n < 0) || (wantq && ((*n > *m) || (*n < min(*m,*k))))) || (!
+ wantq && ((*m > *n) || (*m < min(*n,*k))))) {
*info = -3;
} else if (*k < 0) {
*info = -4;
} else if (*lda < max(1,*m)) {
*info = -6;
- } else if ((*lwork < max(1,mn) && ! lquery)) {
+ } else if (*lwork < max(1,mn) && ! lquery) {
*info = -9;
}
@@ -22821,7 +23809,7 @@ L130:
/* Quick return if possible */
- if (*m == 0 || *n == 0) {
+ if ((*m == 0) || (*n == 0)) {
work[1].r = 1., work[1].i = 0.;
return 0;
}
@@ -23029,7 +24017,7 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -23040,13 +24028,13 @@ L130:
lquery = *lwork == -1;
if (*n < 0) {
*info = -1;
- } else if (*ilo < 1 || *ilo > max(1,*n)) {
+ } else if ((*ilo < 1) || (*ilo > max(1,*n))) {
*info = -2;
- } else if (*ihi < min(*ilo,*n) || *ihi > *n) {
+ } else if ((*ihi < min(*ilo,*n)) || (*ihi > *n)) {
*info = -3;
} else if (*lda < max(1,*n)) {
*info = -5;
- } else if ((*lwork < max(1,nh) && ! lquery)) {
+ } else if (*lwork < max(1,nh) && ! lquery) {
*info = -8;
}
@@ -23217,7 +24205,7 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -23228,7 +24216,7 @@ L130:
*info = -1;
} else if (*n < *m) {
*info = -2;
- } else if (*k < 0 || *k > *m) {
+ } else if ((*k < 0) || (*k > *m)) {
*info = -3;
} else if (*lda < max(1,*m)) {
*info = -5;
@@ -23257,7 +24245,7 @@ L130:
a[i__3].r = 0., a[i__3].i = 0.;
/* L10: */
}
- if ((j > *k && j <= *m)) {
+ if (j > *k && j <= *m) {
i__2 = j + j * a_dim1;
a[i__2].r = 1., a[i__2].i = 0.;
}
@@ -23404,7 +24392,7 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -23419,11 +24407,11 @@ L130:
*info = -1;
} else if (*n < *m) {
*info = -2;
- } else if (*k < 0 || *k > *m) {
+ } else if ((*k < 0) || (*k > *m)) {
*info = -3;
} else if (*lda < max(1,*m)) {
*info = -5;
- } else if ((*lwork < max(1,*m) && ! lquery)) {
+ } else if (*lwork < max(1,*m) && ! lquery) {
*info = -8;
}
if (*info != 0) {
@@ -23444,7 +24432,7 @@ L130:
nbmin = 2;
nx = 0;
iws = *m;
- if ((nb > 1 && nb < *k)) {
+ if (nb > 1 && nb < *k) {
/*
Determine when to cross over from blocked to unblocked code.
@@ -23476,7 +24464,7 @@ L130:
}
}
- if (((nb >= nbmin && nb < *k) && nx < *k)) {
+ if (nb >= nbmin && nb < *k && nx < *k) {
/*
Use blocked code after the last block.
@@ -23669,7 +24657,7 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
--work;
@@ -23682,13 +24670,13 @@ L130:
lquery = *lwork == -1;
if (*m < 0) {
*info = -1;
- } else if (*n < 0 || *n > *m) {
+ } else if ((*n < 0) || (*n > *m)) {
*info = -2;
- } else if (*k < 0 || *k > *n) {
+ } else if ((*k < 0) || (*k > *n)) {
*info = -3;
} else if (*lda < max(1,*m)) {
*info = -5;
- } else if ((*lwork < max(1,*n) && ! lquery)) {
+ } else if (*lwork < max(1,*n) && ! lquery) {
*info = -8;
}
if (*info != 0) {
@@ -23709,7 +24697,7 @@ L130:
nbmin = 2;
nx = 0;
iws = *n;
- if ((nb > 1 && nb < *k)) {
+ if (nb > 1 && nb < *k) {
/*
Determine when to cross over from blocked to unblocked code.
@@ -23741,7 +24729,7 @@ L130:
}
}
- if (((nb >= nbmin && nb < *k) && nx < *k)) {
+ if (nb >= nbmin && nb < *k && nx < *k) {
/*
Use blocked code after the last block.
@@ -23950,11 +24938,11 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--work;
@@ -23970,15 +24958,15 @@ L130:
} else {
nq = *n;
}
- if ((! left && ! lsame_(side, "R"))) {
+ if (! left && ! lsame_(side, "R")) {
*info = -1;
- } else if ((! notran && ! lsame_(trans, "C"))) {
+ } else if (! notran && ! lsame_(trans, "C")) {
*info = -2;
} else if (*m < 0) {
*info = -3;
} else if (*n < 0) {
*info = -4;
- } else if (*k < 0 || *k > nq) {
+ } else if ((*k < 0) || (*k > nq)) {
*info = -5;
} else if (*lda < max(1,nq)) {
*info = -7;
@@ -23993,7 +24981,7 @@ L130:
/* Quick return if possible */
- if (*m == 0 || *n == 0 || *k == 0) {
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
return 0;
}
@@ -24165,11 +25153,11 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--work;
@@ -24185,15 +25173,15 @@ L130:
} else {
nq = *n;
}
- if ((! left && ! lsame_(side, "R"))) {
+ if (! left && ! lsame_(side, "R")) {
*info = -1;
- } else if ((! notran && ! lsame_(trans, "C"))) {
+ } else if (! notran && ! lsame_(trans, "C")) {
*info = -2;
} else if (*m < 0) {
*info = -3;
} else if (*n < 0) {
*info = -4;
- } else if (*k < 0 || *k > nq) {
+ } else if ((*k < 0) || (*k > nq)) {
*info = -5;
} else if (*lda < max(1,nq)) {
*info = -7;
@@ -24208,7 +25196,7 @@ L130:
/* Quick return if possible */
- if (*m == 0 || *n == 0 || *k == 0) {
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
return 0;
}
@@ -24423,11 +25411,11 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--work;
@@ -24447,11 +25435,11 @@ L130:
nq = *n;
nw = *m;
}
- if ((! applyq && ! lsame_(vect, "P"))) {
+ if (! applyq && ! lsame_(vect, "P")) {
*info = -1;
- } else if ((! left && ! lsame_(side, "R"))) {
+ } else if (! left && ! lsame_(side, "R")) {
*info = -2;
- } else if ((! notran && ! lsame_(trans, "C"))) {
+ } else if (! notran && ! lsame_(trans, "C")) {
*info = -3;
} else if (*m < 0) {
*info = -4;
@@ -24467,7 +25455,7 @@ L130:
*info = -8;
} else if (*ldc < max(1,*m)) {
*info = -11;
- } else if ((*lwork < max(1,nw) && ! lquery)) {
+ } else if (*lwork < max(1,nw) && ! lquery) {
*info = -13;
}
}
@@ -24528,7 +25516,7 @@ L130:
/* Quick return if possible */
work[1].r = 1., work[1].i = 0.;
- if (*m == 0 || *n == 0) {
+ if ((*m == 0) || (*n == 0)) {
return 0;
}
@@ -24716,11 +25704,11 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--work;
@@ -24736,15 +25724,15 @@ L130:
} else {
nq = *n;
}
- if ((! left && ! lsame_(side, "R"))) {
+ if (! left && ! lsame_(side, "R")) {
*info = -1;
- } else if ((! notran && ! lsame_(trans, "C"))) {
+ } else if (! notran && ! lsame_(trans, "C")) {
*info = -2;
} else if (*m < 0) {
*info = -3;
} else if (*n < 0) {
*info = -4;
- } else if (*k < 0 || *k > nq) {
+ } else if ((*k < 0) || (*k > nq)) {
*info = -5;
} else if (*lda < max(1,*k)) {
*info = -7;
@@ -24759,7 +25747,7 @@ L130:
/* Quick return if possible */
- if (*m == 0 || *n == 0 || *k == 0) {
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
return 0;
}
@@ -24968,11 +25956,11 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--work;
@@ -24991,21 +25979,21 @@ L130:
nq = *n;
nw = *m;
}
- if ((! left && ! lsame_(side, "R"))) {
+ if (! left && ! lsame_(side, "R")) {
*info = -1;
- } else if ((! notran && ! lsame_(trans, "C"))) {
+ } else if (! notran && ! lsame_(trans, "C")) {
*info = -2;
} else if (*m < 0) {
*info = -3;
} else if (*n < 0) {
*info = -4;
- } else if (*k < 0 || *k > nq) {
+ } else if ((*k < 0) || (*k > nq)) {
*info = -5;
} else if (*lda < max(1,*k)) {
*info = -7;
} else if (*ldc < max(1,*m)) {
*info = -10;
- } else if ((*lwork < max(1,nw) && ! lquery)) {
+ } else if (*lwork < max(1,nw) && ! lquery) {
*info = -12;
}
@@ -25038,14 +26026,14 @@ L130:
/* Quick return if possible */
- if (*m == 0 || *n == 0 || *k == 0) {
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
work[1].r = 1., work[1].i = 0.;
return 0;
}
nbmin = 2;
ldwork = nw;
- if ((nb > 1 && nb < *k)) {
+ if (nb > 1 && nb < *k) {
iws = nw * nb;
if (*lwork < iws) {
nb = *lwork / ldwork;
@@ -25064,7 +26052,7 @@ L130:
iws = nw;
}
- if (nb < nbmin || nb >= *k) {
+ if ((nb < nbmin) || (nb >= *k)) {
/* Use unblocked code */
@@ -25278,11 +26266,11 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--work;
@@ -25301,21 +26289,21 @@ L130:
nq = *n;
nw = *m;
}
- if ((! left && ! lsame_(side, "R"))) {
+ if (! left && ! lsame_(side, "R")) {
*info = -1;
- } else if ((! notran && ! lsame_(trans, "C"))) {
+ } else if (! notran && ! lsame_(trans, "C")) {
*info = -2;
} else if (*m < 0) {
*info = -3;
} else if (*n < 0) {
*info = -4;
- } else if (*k < 0 || *k > nq) {
+ } else if ((*k < 0) || (*k > nq)) {
*info = -5;
} else if (*lda < max(1,nq)) {
*info = -7;
} else if (*ldc < max(1,*m)) {
*info = -10;
- } else if ((*lwork < max(1,nw) && ! lquery)) {
+ } else if (*lwork < max(1,nw) && ! lquery) {
*info = -12;
}
@@ -25348,14 +26336,14 @@ L130:
/* Quick return if possible */
- if (*m == 0 || *n == 0 || *k == 0) {
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
work[1].r = 1., work[1].i = 0.;
return 0;
}
nbmin = 2;
ldwork = nw;
- if ((nb > 1 && nb < *k)) {
+ if (nb > 1 && nb < *k) {
iws = nw * nb;
if (*lwork < iws) {
nb = *lwork / ldwork;
@@ -25374,7 +26362,7 @@ L130:
iws = nw;
}
- if (nb < nbmin || nb >= *k) {
+ if ((nb < nbmin) || (nb >= *k)) {
/* Use unblocked code */
@@ -25578,11 +26566,11 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--work;
@@ -25601,21 +26589,21 @@ L130:
nq = *n;
nw = *m;
}
- if ((! left && ! lsame_(side, "R"))) {
+ if (! left && ! lsame_(side, "R")) {
*info = -1;
- } else if ((! notran && ! lsame_(trans, "C"))) {
+ } else if (! notran && ! lsame_(trans, "C")) {
*info = -2;
} else if (*m < 0) {
*info = -3;
} else if (*n < 0) {
*info = -4;
- } else if (*k < 0 || *k > nq) {
+ } else if ((*k < 0) || (*k > nq)) {
*info = -5;
} else if (*lda < max(1,nq)) {
*info = -7;
} else if (*ldc < max(1,*m)) {
*info = -10;
- } else if ((*lwork < max(1,nw) && ! lquery)) {
+ } else if (*lwork < max(1,nw) && ! lquery) {
*info = -12;
}
@@ -25648,14 +26636,14 @@ L130:
/* Quick return if possible */
- if (*m == 0 || *n == 0 || *k == 0) {
+ if (((*m == 0) || (*n == 0)) || (*k == 0)) {
work[1].r = 1., work[1].i = 0.;
return 0;
}
nbmin = 2;
ldwork = nw;
- if ((nb > 1 && nb < *k)) {
+ if (nb > 1 && nb < *k) {
iws = nw * nb;
if (*lwork < iws) {
nb = *lwork / ldwork;
@@ -25674,7 +26662,7 @@ L130:
iws = nw;
}
- if (nb < nbmin || nb >= *k) {
+ if ((nb < nbmin) || (nb >= *k)) {
/* Use unblocked code */
@@ -25876,11 +26864,11 @@ L130:
/* Parameter adjustments */
a_dim1 = *lda;
- a_offset = 1 + a_dim1 * 1;
+ a_offset = 1 + a_dim1;
a -= a_offset;
--tau;
c_dim1 = *ldc;
- c_offset = 1 + c_dim1 * 1;
+ c_offset = 1 + c_dim1;
c__ -= c_offset;
--work;
@@ -25899,12 +26887,12 @@ L130:
nq = *n;
nw = *m;
}
- if ((! left && ! lsame_(side, "R"))) {
+ if (! left && ! lsame_(side, "R")) {
*info = -1;
- } else if ((! upper && ! lsame_(uplo, "L"))) {
+ } else if (! upper && ! lsame_(uplo, "L")) {
*info = -2;
- } else if ((! lsame_(trans, "N") && ! lsame_(trans,
- "C"))) {
+ } else if (! lsame_(trans, "N") && ! lsame_(trans,
+ "C")) {
*info = -3;
} else if (*m < 0) {
*info = -4;
@@ -25914,7 +26902,7 @@ L130:
*info = -7;
} else if (*ldc < max(1,*m)) {
*info = -10;
- } else if ((*lwork < max(1,nw) && ! lquery)) {
+ } else if (*lwork < max(1,nw) && ! lquery) {
*info = -12;
}
@@ -25974,7 +26962,7 @@ L130:
/* Quick return if possible */
- if (*m == 0 || *n == 0 || nq == 1) {
+ if (((*m == 0) || (*n == 0)) || (nq == 1)) {
work[1].r = 1., work[1].i = 0.;
return 0;
}
diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c
index 3e1790a18..2f142f105 100644
--- a/numpy/linalg/lapack_litemodule.c
+++ b/numpy/linalg/lapack_litemodule.c
@@ -93,6 +93,8 @@ extern int FNAME(zungqr)(int *m, int *n, int *k, f2c_doublecomplex a[],
int *lda, f2c_doublecomplex tau[],
f2c_doublecomplex work[], int *lwork, int *info);
+extern int FNAME(xerbla)(char *srname, int *info);
+
static PyObject *LapackError;
#define TRY(E) if (!(E)) return NULL
@@ -171,6 +173,9 @@ lapack_lite_dgeev(PyObject *NPY_UNUSED(self), PyObject *args)
FNAME(dgeev)(&jobvl,&jobvr,&n,DDATA(a),&lda,DDATA(wr),DDATA(wi),
DDATA(vl),&ldvl,DDATA(vr),&ldvr,DDATA(work),&lwork,
&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i,s:i}","dgeev_",
lapack_lite_status__,"jobvl",jobvl,"jobvr",jobvr,
@@ -254,6 +259,9 @@ lapack_lite_dsyevd(PyObject *NPY_UNUSED(self), PyObject *args)
lapack_lite_status__ = \
FNAME(dsyevd)(&jobz,&uplo,&n,DDATA(a),&lda,DDATA(w),DDATA(work),
&lwork,IDATA(iwork),&liwork,&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i}","dsyevd_",
lapack_lite_status__,"jobz",jobz,"uplo",uplo,
@@ -341,6 +349,9 @@ lapack_lite_zheevd(PyObject *NPY_UNUSED(self), PyObject *args)
lapack_lite_status__ = \
FNAME(zheevd)(&jobz,&uplo,&n,ZDATA(a),&lda,DDATA(w),ZDATA(work),
&lwork,DDATA(rwork),&lrwork,IDATA(iwork),&liwork,&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i,s:i}","zheevd_",
lapack_lite_status__,"jobz",jobz,"uplo",uplo,"n",n,
@@ -380,6 +391,9 @@ lapack_lite_dgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
FNAME(dgelsd)(&m,&n,&nrhs,DDATA(a),&lda,DDATA(b),&ldb,
DDATA(s),&rcond,&rank,DDATA(work),&lwork,
IDATA(iwork),&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:d,s:i,s:i,s:i}","dgelsd_",
lapack_lite_status__,"m",m,"n",n,"nrhs",nrhs,
@@ -407,6 +421,9 @@ lapack_lite_dgesv(PyObject *NPY_UNUSED(self), PyObject *args)
lapack_lite_status__ = \
FNAME(dgesv)(&n,&nrhs,DDATA(a),&lda,IDATA(ipiv),DDATA(b),&ldb,&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","dgesv_",
lapack_lite_status__,"n",n,"nrhs",nrhs,"lda",lda,
@@ -446,6 +463,9 @@ lapack_lite_dgesdd(PyObject *NPY_UNUSED(self), PyObject *args)
FNAME(dgesdd)(&jobz,&m,&n,DDATA(a),&lda,DDATA(s),DDATA(u),&ldu,
DDATA(vt),&ldvt,DDATA(work),&lwork,IDATA(iwork),
&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
if (info == 0 && lwork == -1) {
/* We need to check the result because
@@ -496,6 +516,9 @@ lapack_lite_dgetrf(PyObject *NPY_UNUSED(self), PyObject *args)
lapack_lite_status__ = \
FNAME(dgetrf)(&m,&n,DDATA(a),&lda,IDATA(ipiv),&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i}","dgetrf_",lapack_lite_status__,
"m",m,"n",n,"lda",lda,"info",info);
@@ -516,6 +539,9 @@ lapack_lite_dpotrf(PyObject *NPY_UNUSED(self), PyObject *args)
lapack_lite_status__ = \
FNAME(dpotrf)(&uplo,&n,DDATA(a),&lda,&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i,s:i,s:i}","dpotrf_",lapack_lite_status__,
"n",n,"lda",lda,"info",info);
@@ -540,6 +566,9 @@ lapack_lite_dgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
lapack_lite_status__ = \
FNAME(dgeqrf)(&m, &n, DDATA(a), &lda, DDATA(tau),
DDATA(work), &lwork, &info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","dgeqrf_",
lapack_lite_status__,"m",m,"n",n,"lda",lda,
@@ -562,6 +591,9 @@ lapack_lite_dorgqr(PyObject *NPY_UNUSED(self), PyObject *args)
TRY(check_object(work,NPY_DOUBLE,"work","NPY_DOUBLE","dorgqr"));
lapack_lite_status__ = \
FNAME(dorgqr)(&m, &n, &k, DDATA(a), &lda, DDATA(tau), DDATA(work), &lwork, &info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i}","dorgqr_",lapack_lite_status__,
"info",info);
@@ -601,6 +633,9 @@ lapack_lite_zgeev(PyObject *NPY_UNUSED(self), PyObject *args)
FNAME(zgeev)(&jobvl,&jobvr,&n,ZDATA(a),&lda,ZDATA(w),ZDATA(vl),
&ldvl,ZDATA(vr),&ldvr,ZDATA(work),&lwork,
DDATA(rwork),&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:c,s:c,s:i,s:i,s:i,s:i,s:i,s:i}","zgeev_",
lapack_lite_status__,"jobvl",jobvl,"jobvr",jobvr,
@@ -641,6 +676,9 @@ lapack_lite_zgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
lapack_lite_status__ = \
FNAME(zgelsd)(&m,&n,&nrhs,ZDATA(a),&lda,ZDATA(b),&ldb,DDATA(s),&rcond,
&rank,ZDATA(work),&lwork,DDATA(rwork),IDATA(iwork),&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i}","zgelsd_",
lapack_lite_status__,"m",m,"n",n,"nrhs",nrhs,"lda",lda,
@@ -667,6 +705,9 @@ lapack_lite_zgesv(PyObject *NPY_UNUSED(self), PyObject *args)
lapack_lite_status__ = \
FNAME(zgesv)(&n,&nrhs,ZDATA(a),&lda,IDATA(ipiv),ZDATA(b),&ldb,&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","zgesv_",
lapack_lite_status__,"n",n,"nrhs",nrhs,"lda",lda,
@@ -708,6 +749,9 @@ lapack_lite_zgesdd(PyObject *NPY_UNUSED(self), PyObject *args)
FNAME(zgesdd)(&jobz,&m,&n,ZDATA(a),&lda,DDATA(s),ZDATA(u),&ldu,
ZDATA(vt),&ldvt,ZDATA(work),&lwork,DDATA(rwork),
IDATA(iwork),&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:c,s:i,s:i,s:i,s:i,s:i,s:i,s:i}","zgesdd_",
lapack_lite_status__,"jobz",jobz,"m",m,"n",n,
@@ -732,6 +776,9 @@ lapack_lite_zgetrf(PyObject *NPY_UNUSED(self), PyObject *args)
lapack_lite_status__ = \
FNAME(zgetrf)(&m,&n,ZDATA(a),&lda,IDATA(ipiv),&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i}","zgetrf_",
lapack_lite_status__,"m",m,"n",n,"lda",lda,"info",info);
@@ -751,6 +798,9 @@ lapack_lite_zpotrf(PyObject *NPY_UNUSED(self), PyObject *args)
TRY(check_object(a,NPY_CDOUBLE,"a","NPY_CDOUBLE","zpotrf"));
lapack_lite_status__ = \
FNAME(zpotrf)(&uplo,&n,ZDATA(a),&lda,&info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i,s:i,s:i}","zpotrf_",
lapack_lite_status__,"n",n,"lda",lda,"info",info);
@@ -774,6 +824,9 @@ lapack_lite_zgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
lapack_lite_status__ = \
FNAME(zgeqrf)(&m, &n, ZDATA(a), &lda, ZDATA(tau), ZDATA(work), &lwork, &info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","zgeqrf_",lapack_lite_status__,"m",m,"n",n,"lda",lda,"lwork",lwork,"info",info);
}
@@ -797,12 +850,32 @@ lapack_lite_zungqr(PyObject *NPY_UNUSED(self), PyObject *args)
lapack_lite_status__ = \
FNAME(zungqr)(&m, &n, &k, ZDATA(a), &lda, ZDATA(tau), ZDATA(work),
&lwork, &info);
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
return Py_BuildValue("{s:i,s:i}","zungqr_",lapack_lite_status__,
"info",info);
}
+static PyObject *
+lapack_lite_xerbla(PyObject *NPY_UNUSED(self), PyObject *args)
+{
+ int info = -1;
+
+ NPY_BEGIN_THREADS_DEF;
+ NPY_BEGIN_THREADS;
+ FNAME(xerbla)("test", &info);
+ NPY_END_THREADS;
+
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
#define STR(x) #x
#define lameth(name) {STR(name), lapack_lite_##name, METH_VARARGS, NULL}
@@ -825,6 +898,7 @@ static struct PyMethodDef lapack_lite_module_methods[] = {
lameth(zpotrf),
lameth(zgeqrf),
lameth(zungqr),
+ lameth(xerbla),
{ NULL,NULL,0, NULL}
};
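
Every wrapper above follows the same pattern: call the LAPACK routine, then
return NULL if PyErr_Occurred() reports a pending exception. The exception is
set by the bundled python_xerbla.c (added to the build in setup.py below, but
not shown in this diff), which overrides Fortran's xerbla_ so that an illegal
argument raises a Python error instead of printing to stderr and aborting the
process. A minimal sketch of how this surfaces from Python, assuming the
patched build; the exact message text comes from python_xerbla.c and is an
assumption here:

    import numpy.linalg.lapack_lite as lapack_lite

    try:
        # the xerbla wrapper added above exists purely to exercise this path
        lapack_lite.xerbla()
    except ValueError as err:
        print(err)  # e.g. "On entry to test parameter number -1 had an illegal value"
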
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 01824056e..851db6e8c 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -8,19 +8,25 @@ version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
+from __future__ import division, absolute_import, print_function
+
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
-from numpy.core import array, asarray, zeros, empty, transpose, \
- intc, single, double, csingle, cdouble, inexact, complexfloating, \
- newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \
- maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \
- isfinite, size, finfo, absolute, log, exp
-from numpy.lib import triu
-from numpy.linalg import lapack_lite
+import warnings
+
+from numpy.core import (
+ array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
+ csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
+ add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
+ finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product,
+ broadcast
+ )
+from numpy.lib import triu, asfarray
+from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
@@ -63,6 +69,40 @@ class LinAlgError(Exception):
"""
pass
+# Dealing with errors in _umath_linalg
+
+_linalg_error_extobj = None
+
+def _determine_error_states():
+ global _linalg_error_extobj
+ errobj = geterrobj()
+ bufsize = errobj[0]
+
+ with errstate(invalid='call', over='ignore',
+ divide='ignore', under='ignore'):
+ invalid_call_errmask = geterrobj()[1]
+
+ _linalg_error_extobj = [bufsize, invalid_call_errmask, None]
+
+_determine_error_states()
+
+def _raise_linalgerror_singular(err, flag):
+ raise LinAlgError("Singular matrix")
+
+def _raise_linalgerror_nonposdef(err, flag):
+ raise LinAlgError("Matrix is not positive definite")
+
+def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
+ raise LinAlgError("Eigenvalues did not converge")
+
+def _raise_linalgerror_svd_nonconvergence(err, flag):
+ raise LinAlgError("SVD did not converge")
+
+def get_linalg_error_extobj(callback):
+ extobj = list(_linalg_error_extobj)
+ extobj[2] = callback
+ return extobj
+
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
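
The block above wires numpy's ufunc error machinery to LinAlgError:
_determine_error_states() captures, once at import time, the error mask
corresponding to invalid='call' (all other categories ignored), and
get_linalg_error_extobj() places a per-call callback in the extobj's third
slot. A sketch of the resulting call pattern, using only names defined in this
diff (a and b assumed to be float64 arrays of compatible shapes):

    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    x = _umath_linalg.solve(a, b, signature='dd->d', extobj=extobj)
    # A singular input makes the gufunc core signal an invalid FPE, the
    # callback fires, and LinAlgError("Singular matrix") propagates.
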
@@ -120,6 +160,7 @@ def _commonType(*arrays):
t = double
return t, result_type
+
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
@@ -154,19 +195,30 @@ def _assertRank2(*arrays):
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
+def _assertRankAtLeast2(*arrays):
+ for a in arrays:
+ if len(a.shape) < 2:
+ raise LinAlgError('%d-dimensional array given. Array must be '
+ 'at least two-dimensional' % len(a.shape))
+
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
+def _assertNdSquareness(*arrays):
+ for a in arrays:
+ if max(a.shape[-2:]) != min(a.shape[-2:]):
+ raise LinAlgError('Last 2 dimensions of the array must be square')
+
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
-def _assertNonEmpty(*arrays):
+def _assertNoEmpty2d(*arrays):
for a in arrays:
- if size(a) == 0:
+ if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
@@ -224,7 +276,7 @@ def tensorsolve(a, b, axes=None):
an = a.ndim
if axes is not None:
- allaxes = range(0, an)
+ allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
@@ -250,14 +302,14 @@ def solve(a, b):
Parameters
----------
- a : (M, M) array_like
+ a : (..., M, M) array_like
Coefficient matrix.
- b : {(M,), (M, N)}, array_like
+ b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
- x : {(M,), (M, N)} ndarray
+ x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
@@ -267,15 +319,10 @@ def solve(a, b):
Notes
-----
- `solve` is a wrapper for the LAPACK routines `dgesv`_ and
- `zgesv`_, the former being used if `a` is real-valued, the latter if
- it is complex-valued. The solution to the system of linear equations
- is computed using an LU decomposition [1]_ with partial pivoting and
- row interchanges.
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
- .. _dgesv: http://www.netlib.org/lapack/double/dgesv.f
-
- .. _zgesv: http://www.netlib.org/lapack/complex16/zgesv.f
+ The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
@@ -304,32 +351,35 @@ def solve(a, b):
"""
a, _ = _makearray(a)
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
b, wrap = _makearray(b)
- one_eq = len(b.shape) == 1
- if one_eq:
- b = b[:, newaxis]
- _assertRank2(a, b)
- _assertSquareness(a)
- n_eq = a.shape[0]
- n_rhs = b.shape[1]
- if n_eq != b.shape[0]:
- raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
-# lapack_routine = _findLapackRoutine('gesv', t)
- if isComplexType(t):
- lapack_routine = lapack_lite.zgesv
- else:
- lapack_routine = lapack_lite.dgesv
- a, b = _fastCopyAndTranspose(t, a, b)
- a, b = _to_native_byte_order(a, b)
- pivots = zeros(n_eq, fortran_int)
- results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
- if results['info'] > 0:
- raise LinAlgError('Singular matrix')
- if one_eq:
- return wrap(b.ravel().astype(result_t))
+
+ # We use the b = (..., M,) logic, only if the number of extra dimensions
+ # match exactly
+ if b.ndim == a.ndim - 1:
+ if a.shape[-1] == 0 and b.shape[-1] == 0:
+            # Legal, but the ufunc cannot handle the 0-sized inner dims;
+            # let the ufunc handle all wrong cases.
+ a = a.reshape(a.shape[:-1])
+ bc = broadcast(a, b)
+ return wrap(empty(bc.shape, dtype=result_t))
+
+ gufunc = _umath_linalg.solve1
else:
- return wrap(b.transpose().astype(result_t))
+ if a.shape[-1] == 0 and b.shape[-2] == 0:
+ a = a.reshape(a.shape[:-1] + (1,))
+ bc = broadcast(a, b)
+ return wrap(empty(bc.shape, dtype=result_t))
+
+ gufunc = _umath_linalg.solve
+
+ signature = 'DD->D' if isComplexType(t) else 'dd->d'
+ extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
+ r = gufunc(a, b, signature=signature, extobj=extobj)
+
+ return wrap(r.astype(result_t))
def tensorinv(a, ind=2):
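
The two branches above pick a gufunc by comparing ndim: b is treated as a
stack of vectors only when it has exactly one dimension fewer than a. A usage
sketch of both paths:

    import numpy as np

    a = np.random.rand(4, 3, 3)   # four 3x3 coefficient matrices
    b1 = np.random.rand(4, 3)     # b1.ndim == a.ndim - 1: solve1 path, x1.shape (4, 3)
    b2 = np.random.rand(4, 3, 2)  # otherwise: solve path, x2.shape (4, 3, 2)
    x1 = np.linalg.solve(a, b1)
    x2 = np.linalg.solve(a, b2)
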
@@ -410,24 +460,29 @@ def inv(a):
Parameters
----------
- a : (M, M) array_like
+ a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
- ainv : (M, M) ndarray or matrix
+ ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
- If `a` is singular or not square.
+ If `a` is not square or inversion fails.
+
+ Notes
+ -----
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
Examples
--------
- >>> from numpy import linalg as LA
+ >>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
- >>> ainv = LA.inv(a)
+ >>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
@@ -435,14 +490,34 @@ def inv(a):
If a is a matrix object, then the return value is a matrix as well:
- >>> ainv = LA.inv(np.matrix(a))
+ >>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
+ Inverses of several matrices can be computed at once:
+
+ >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
+ >>> inv(a)
+    array([[[-2.  ,  1.  ],
+            [ 1.5 , -0.5 ]],
+           [[-1.25,  0.75],
+            [ 0.75, -0.25]]])
+
"""
a, wrap = _makearray(a)
- return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
+ t, result_t = _commonType(a)
+
+ if a.shape[-1] == 0:
+ # The inner array is 0x0, the ufunc cannot handle this case
+ return wrap(empty_like(a, dtype=result_t))
+
+ signature = 'D->D' if isComplexType(t) else 'd->d'
+ extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
+ ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
+ return wrap(ainv.astype(result_t))
# Cholesky decomposition
@@ -459,15 +534,15 @@ def cholesky(a):
Parameters
----------
- a : (M, M) array_like
+ a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
- L : {(M, M) ndarray, (M, M) matrix}
- Lower-triangular Cholesky factor of `a`. Returns a matrix object
- if `a` is a matrix object.
+ L : (..., M, M) array_like
+        Lower-triangular Cholesky factor of `a`. Returns a
+        matrix object if `a` is a matrix object.
Raises
------
@@ -477,6 +552,9 @@ def cholesky(a):
Notes
-----
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
@@ -514,30 +592,18 @@ def cholesky(a):
[ 0.+2.j, 1.+0.j]])
"""
+ extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
+ gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
- _assertRank2(a)
- _assertSquareness(a)
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
t, result_t = _commonType(a)
- a = _fastCopyAndTranspose(t, a)
- a = _to_native_byte_order(a)
- m = a.shape[0]
- n = a.shape[1]
- if isComplexType(t):
- lapack_routine = lapack_lite.zpotrf
- else:
- lapack_routine = lapack_lite.dpotrf
- results = lapack_routine(_L, n, a, m, 0)
- if results['info'] > 0:
- raise LinAlgError('Matrix is not positive definite - '
- 'Cholesky decomposition cannot be computed')
- s = triu(a, k=0).transpose()
- if (s.dtype != result_t):
- s = s.astype(result_t)
- return wrap(s)
+ signature = 'D->D' if isComplexType(t) else 'd->d'
+ return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t))
 # QR decomposition
-def qr(a, mode='full'):
+def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
@@ -546,24 +612,42 @@ def qr(a, mode='full'):
Parameters
----------
- a : array_like
- Matrix to be factored, of shape (M, N).
- mode : {'full', 'r', 'economic'}, optional
- Specifies the values to be returned. 'full' is the default.
- Economic mode is slightly faster then 'r' mode if only `r` is needed.
+ a : array_like, shape (M, N)
+ Matrix to be factored.
+ mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
+ If K = min(M, N), then
+
+ 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
+ 'complete' : returns q, r with dimensions (M, M), (M, N)
+ 'r' : returns r only with dimensions (K, N)
+ 'raw' : returns h, tau with dimensions (N, M), (K,)
+ 'full' : alias of 'reduced', deprecated
+ 'economic' : returns h from 'raw', deprecated.
+
+        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
+        see the notes for more information. The default is 'reduced'; to
+        maintain backward compatibility with earlier versions of numpy, both
+        it and the old default 'full' can be omitted. Note that the array h
+ returned in 'raw' mode is transposed for calling Fortran. The
+ 'economic' mode is deprecated. The modes 'full' and 'economic' may
+ be passed using only the first letter for backwards compatibility,
+ but all others must be spelled out. See the Notes for more
+ explanation.
+
Returns
-------
q : ndarray of float or complex, optional
- The orthonormal matrix, of shape (M, K). Only returned if
- ``mode='full'``.
+ A matrix with orthonormal columns. When mode = 'complete' the
+ result is an orthogonal/unitary matrix depending on whether or not
+ a is real/complex. The determinant may be either +/- 1 in that
+ case.
r : ndarray of float or complex, optional
- The upper-triangular matrix, of shape (K, N) with K = min(M, N).
- Only returned when ``mode='full'`` or ``mode='r'``.
- a2 : ndarray of float or complex, optional
- Array of shape (M, N), only returned when ``mode='economic``'.
- The diagonal and the upper triangle of `a2` contains `r`, while
- the rest of the matrix is undefined.
+ The upper-triangular matrix.
+ (h, tau) : ndarrays of np.double or np.cdouble, optional
+ The array h contains the Householder reflectors that generate q
+ along with r. The tau array contains scaling factors for the
+ reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
@@ -578,8 +662,20 @@ def qr(a, mode='full'):
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
- Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
- all the return values will be matrices too.
+ Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
+ `a` is of type `matrix`, all the return values will be matrices too.
+
+ New 'reduced', 'complete', and 'raw' options for mode were added in
+ Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
+ addition the options 'full' and 'economic' were deprecated. Because
+ 'full' was the previous default and 'reduced' is the new default,
+ backward compatibility can be maintained by letting `mode` default.
+ The 'raw' option was added so that LAPACK routines that can multiply
+ arrays by q using the Householder reflectors can be used. Note that in
+ this case the returned arrays are of type np.double or np.cdouble and
+ the h array is transposed to be FORTRAN compatible. No routines using
+ the 'raw' return are currently exposed by numpy, but some are available
+ in lapack_lite and just await the necessary work.
Examples
--------
@@ -624,9 +720,23 @@ def qr(a, mode='full'):
array([ 1.1e-16, 1.0e+00])
"""
+ if mode not in ('reduced', 'complete', 'r', 'raw'):
+ if mode in ('f', 'full'):
+ msg = "".join((
+ "The 'full' option is deprecated in favor of 'reduced'.\n",
+ "For backward compatibility let mode default."))
+ warnings.warn(msg, DeprecationWarning)
+ mode = 'reduced'
+ elif mode in ('e', 'economic'):
+ msg = "The 'economic' option is deprecated.",
+ warnings.warn(msg, DeprecationWarning)
+ mode = 'economic'
+ else:
+ raise ValueError("Unrecognized mode '%s'" % mode)
+
a, wrap = _makearray(a)
_assertRank2(a)
- _assertNonEmpty(a)
+ _assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
@@ -651,26 +761,30 @@ def qr(a, mode='full'):
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
-
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
- # economic mode. Isn't actually economic.
- if mode[0] == 'e':
- if t != result_t :
- a = a.astype(result_t)
- return a.T
+ # handle modes that don't return q
+ if mode == 'r':
+ r = _fastCopyAndTranspose(result_t, a[:,:mn])
+ return wrap(triu(r))
- # generate r
- r = _fastCopyAndTranspose(result_t, a[:,:mn])
- for i in range(mn):
- r[i,:i].fill(0.0)
+ if mode == 'raw':
+ return a, tau
- # 'r'-mode, that is, calculate only r
- if mode[0] == 'r':
- return r
+ if mode == 'economic':
+ if t != result_t :
+ a = a.astype(result_t)
+ return wrap(a.T)
- # from here on: build orthonormal matrix q from a
+ # generate q from a
+ if mode == 'complete' and m > n:
+ mc = m
+ q = empty((m, m), t)
+ else:
+ mc = mn
+ q = empty((n, m), t)
+ q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
@@ -682,20 +796,21 @@ def qr(a, mode='full'):
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
- results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)
+ results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
- results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)
+ results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
- q = _fastCopyAndTranspose(result_t, a[:mn,:])
+ q = _fastCopyAndTranspose(result_t, q[:mc])
+ r = _fastCopyAndTranspose(result_t, a[:,:mc])
- return wrap(q), wrap(r)
+ return wrap(q), wrap(triu(r))
# Eigenvalues
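
A sketch of what each surviving qr mode returns for a tall matrix, following
the dispatch above:

    import numpy as np

    a = np.random.rand(5, 3)                   # M=5, N=3, K=3
    q, r = np.linalg.qr(a)                     # 'reduced': q (5, 3), r (3, 3)
    q2, r2 = np.linalg.qr(a, mode='complete')  # q2 (5, 5), r2 (5, 3)
    r3 = np.linalg.qr(a, mode='r')             # r only, (3, 3)
    h, tau = np.linalg.qr(a, mode='raw')       # h (3, 5), transposed for LAPACK; tau (3,)
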
@@ -710,12 +825,12 @@ def eigvals(a):
Parameters
----------
- a : (M, M) array_like
+ a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
- w : (M,) ndarray
+ w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
@@ -733,9 +848,11 @@ def eigvals(a):
Notes
-----
- This is a simple interface to the LAPACK routines dgeev and zgeev
- that sets those routines' flags to return only the eigenvalues of
- general real and complex arrays, respectively.
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ This is implemented using the _geev LAPACK routines which compute
+ the eigenvalues and eigenvectors of general square arrays.
Examples
--------
@@ -764,49 +881,25 @@ def eigvals(a):
"""
a, wrap = _makearray(a)
- _assertRank2(a)
- _assertSquareness(a)
+ _assertNoEmpty2d(a)
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
- real_t = _linalgRealType(t)
- a = _fastCopyAndTranspose(t, a)
- a = _to_native_byte_order(a)
- n = a.shape[0]
- dummy = zeros((1,), t)
- if isComplexType(t):
- lapack_routine = lapack_lite.zgeev
- w = zeros((n,), t)
- rwork = zeros((n,), real_t)
- lwork = 1
- work = zeros((lwork,), t)
- results = lapack_routine(_N, _N, n, a, n, w,
- dummy, 1, dummy, 1, work, -1, rwork, 0)
- lwork = int(abs(work[0]))
- work = zeros((lwork,), t)
- results = lapack_routine(_N, _N, n, a, n, w,
- dummy, 1, dummy, 1, work, lwork, rwork, 0)
- else:
- lapack_routine = lapack_lite.dgeev
- wr = zeros((n,), t)
- wi = zeros((n,), t)
- lwork = 1
- work = zeros((lwork,), t)
- results = lapack_routine(_N, _N, n, a, n, wr, wi,
- dummy, 1, dummy, 1, work, -1, 0)
- lwork = int(work[0])
- work = zeros((lwork,), t)
- results = lapack_routine(_N, _N, n, a, n, wr, wi,
- dummy, 1, dummy, 1, work, lwork, 0)
- if all(wi == 0.):
- w = wr
+
+ extobj = get_linalg_error_extobj(
+ _raise_linalgerror_eigenvalues_nonconvergence)
+ signature = 'D->D' if isComplexType(t) else 'd->D'
+ w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
+
+ if not isComplexType(t):
+ if all(w.imag == 0):
+ w = w.real
result_t = _realType(result_t)
else:
- w = wr+1j*wi
result_t = _complexType(result_t)
- if results['info'] > 0:
- raise LinAlgError('Eigenvalues did not converge')
- return w.astype(result_t)
+ return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
@@ -816,16 +909,16 @@ def eigvalsh(a, UPLO='L'):
Parameters
----------
- a : (M, M) array_like
+ a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
- Specifies whether the calculation is done with the lower triangular
- part of `a` ('L', default) or the upper triangular part ('U').
+        Same as `lower`, with 'L' for lower and 'U' for upper triangular.
+ Deprecated.
Returns
-------
- w : (M,) ndarray
+ w : (..., M,) ndarray
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
@@ -843,9 +936,10 @@ def eigvalsh(a, UPLO='L'):
Notes
-----
- This is a simple interface to the LAPACK routines dsyevd and zheevd
- that sets those routines' flags to return only the eigenvalues of
- real symmetric and complex Hermitian arrays, respectively.
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+    The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
@@ -855,45 +949,21 @@ def eigvalsh(a, UPLO='L'):
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
- UPLO = asbytes(UPLO)
+
+ extobj = get_linalg_error_extobj(
+ _raise_linalgerror_eigenvalues_nonconvergence)
+ if UPLO == 'L':
+ gufunc = _umath_linalg.eigvalsh_lo
+ else:
+ gufunc = _umath_linalg.eigvalsh_up
+
a, wrap = _makearray(a)
- _assertRank2(a)
- _assertSquareness(a)
+ _assertNoEmpty2d(a)
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
t, result_t = _commonType(a)
- real_t = _linalgRealType(t)
- a = _fastCopyAndTranspose(t, a)
- a = _to_native_byte_order(a)
- n = a.shape[0]
- liwork = 5*n+3
- iwork = zeros((liwork,), fortran_int)
- if isComplexType(t):
- lapack_routine = lapack_lite.zheevd
- w = zeros((n,), real_t)
- lwork = 1
- work = zeros((lwork,), t)
- lrwork = 1
- rwork = zeros((lrwork,), real_t)
- results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
- rwork, -1, iwork, liwork, 0)
- lwork = int(abs(work[0]))
- work = zeros((lwork,), t)
- lrwork = int(rwork[0])
- rwork = zeros((lrwork,), real_t)
- results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
- rwork, lrwork, iwork, liwork, 0)
- else:
- lapack_routine = lapack_lite.dsyevd
- w = zeros((n,), t)
- lwork = 1
- work = zeros((lwork,), t)
- results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
- iwork, liwork, 0)
- lwork = int(work[0])
- work = zeros((lwork,), t)
- results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
- iwork, liwork, 0)
- if results['info'] > 0:
- raise LinAlgError('Eigenvalues did not converge')
+ signature = 'D->d' if isComplexType(t) else 'd->d'
+ w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(result_t)
def _convertarray(a):
@@ -911,17 +981,20 @@ def eig(a):
Parameters
----------
- a : (M, M) array_like
- A square array of real or complex elements.
+ a : (..., M, M) array
+ Matrices for which the eigenvalues and right eigenvectors will
+ be computed
Returns
-------
- w : (M,) ndarray
+ w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
- The eigenvalues are not necessarily ordered, nor are they
- necessarily real for real arrays (though for real arrays
- complex-valued eigenvalues should occur in conjugate pairs).
- v : (M, M) ndarray
+ The eigenvalues are not necessarily ordered. The resulting
+ array will be always be of complex type. When `a` is real
+ the resulting eigenvalues will be real (0 imaginary part) or
+ occur in conjugate pairs
+
+ v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
@@ -940,9 +1013,11 @@ def eig(a):
Notes
-----
- This is a simple interface to the LAPACK routines dgeev and zgeev
- which compute the eigenvalues and eigenvectors of, respectively,
- general real- and complex-valued square arrays.
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ This is implemented using the _geev LAPACK routines which compute
+ the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
@@ -1013,57 +1088,24 @@ def eig(a):
"""
a, wrap = _makearray(a)
- _assertRank2(a)
- _assertSquareness(a)
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
_assertFinite(a)
- a, t, result_t = _convertarray(a) # convert to double or cdouble type
- a = _to_native_byte_order(a)
- real_t = _linalgRealType(t)
- n = a.shape[0]
- dummy = zeros((1,), t)
- if isComplexType(t):
- # Complex routines take different arguments
- lapack_routine = lapack_lite.zgeev
- w = zeros((n,), t)
- v = zeros((n, n), t)
- lwork = 1
- work = zeros((lwork,), t)
- rwork = zeros((2*n,), real_t)
- results = lapack_routine(_N, _V, n, a, n, w,
- dummy, 1, v, n, work, -1, rwork, 0)
- lwork = int(abs(work[0]))
- work = zeros((lwork,), t)
- results = lapack_routine(_N, _V, n, a, n, w,
- dummy, 1, v, n, work, lwork, rwork, 0)
+ t, result_t = _commonType(a)
+
+ extobj = get_linalg_error_extobj(
+ _raise_linalgerror_eigenvalues_nonconvergence)
+ signature = 'D->DD' if isComplexType(t) else 'd->DD'
+ w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
+
+ if not isComplexType(t) and all(w.imag == 0.0):
+ w = w.real
+ vt = vt.real
+ result_t = _realType(result_t)
else:
- lapack_routine = lapack_lite.dgeev
- wr = zeros((n,), t)
- wi = zeros((n,), t)
- vr = zeros((n, n), t)
- lwork = 1
- work = zeros((lwork,), t)
- results = lapack_routine(_N, _V, n, a, n, wr, wi,
- dummy, 1, vr, n, work, -1, 0)
- lwork = int(work[0])
- work = zeros((lwork,), t)
- results = lapack_routine(_N, _V, n, a, n, wr, wi,
- dummy, 1, vr, n, work, lwork, 0)
- if all(wi == 0.0):
- w = wr
- v = vr
- result_t = _realType(result_t)
- else:
- w = wr+1j*wi
- v = array(vr, w.dtype)
- ind = flatnonzero(wi != 0.0) # indices of complex e-vals
- for i in range(len(ind)//2):
- v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]
- v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]
- result_t = _complexType(result_t)
+ result_t = _complexType(result_t)
- if results['info'] > 0:
- raise LinAlgError('Eigenvalues did not converge')
- vt = v.transpose().astype(result_t)
+ vt = vt.astype(result_t)
return w.astype(result_t), wrap(vt)
@@ -1077,17 +1119,18 @@ def eigh(a, UPLO='L'):
Parameters
----------
- a : (M, M) array_like
- A complex Hermitian or real symmetric matrix.
+ A : (..., M, M) array
+ Hermitian/Symmetric matrices whose eigenvalues and
+ eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
- w : (M,) ndarray
+ w : (..., M) ndarray
The eigenvalues, not necessarily ordered.
- v : {(M, M) ndarray, (M, M) matrix}
+ v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
@@ -1105,9 +1148,11 @@ def eigh(a, UPLO='L'):
Notes
-----
- This is a simple interface to the LAPACK routines dsyevd and zheevd,
- which compute the eigenvalues and eigenvectors of real symmetric and
- complex Hermitian arrays, respectively.
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+    The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
+ _heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
@@ -1149,46 +1194,24 @@ def eigh(a, UPLO='L'):
"""
UPLO = asbytes(UPLO)
+
a, wrap = _makearray(a)
- _assertRank2(a)
- _assertSquareness(a)
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
t, result_t = _commonType(a)
- real_t = _linalgRealType(t)
- a = _fastCopyAndTranspose(t, a)
- a = _to_native_byte_order(a)
- n = a.shape[0]
- liwork = 5*n+3
- iwork = zeros((liwork,), fortran_int)
- if isComplexType(t):
- lapack_routine = lapack_lite.zheevd
- w = zeros((n,), real_t)
- lwork = 1
- work = zeros((lwork,), t)
- lrwork = 1
- rwork = zeros((lrwork,), real_t)
- results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
- rwork, -1, iwork, liwork, 0)
- lwork = int(abs(work[0]))
- work = zeros((lwork,), t)
- lrwork = int(rwork[0])
- rwork = zeros((lrwork,), real_t)
- results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
- rwork, lrwork, iwork, liwork, 0)
+
+ extobj = get_linalg_error_extobj(
+ _raise_linalgerror_eigenvalues_nonconvergence)
+ if 'L' == UPLO:
+ gufunc = _umath_linalg.eigh_lo
else:
- lapack_routine = lapack_lite.dsyevd
- w = zeros((n,), t)
- lwork = 1
- work = zeros((lwork,), t)
- results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
- iwork, liwork, 0)
- lwork = int(work[0])
- work = zeros((lwork,), t)
- results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
- iwork, liwork, 0)
- if results['info'] > 0:
- raise LinAlgError('Eigenvalues did not converge')
- at = a.transpose().astype(result_t)
- return w.astype(_realType(result_t)), wrap(at)
+ gufunc = _umath_linalg.eigh_up
+
+ signature = 'D->dD' if isComplexType(t) else 'd->dd'
+ w, vt = gufunc(a, signature=signature, extobj=extobj)
+ w = w.astype(_realType(result_t))
+ vt = vt.astype(result_t)
+ return w, wrap(vt)
# Singular value decomposition
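
With the gufunc backends in place, eigh broadcasts like the other routines; a
small sketch:

    import numpy as np

    a = np.array([[[ 2., -1.],
                   [-1.,  2.]],
                  [[ 3.,  1.],
                   [ 1.,  3.]]])  # a stack of two symmetric matrices
    w, v = np.linalg.eigh(a)      # w.shape (2, 2), real; v.shape (2, 2, 2)
    # for each i and j: np.dot(a[i], v[i][:, j]) == w[i][j] * v[i][:, j]
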
@@ -1202,7 +1225,7 @@ def svd(a, full_matrices=1, compute_uv=1):
Parameters
----------
- a : array_like
+ a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
@@ -1214,15 +1237,14 @@ def svd(a, full_matrices=1, compute_uv=1):
Returns
-------
- u : ndarray
- Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`)
- depending on value of ``full_matrices``.
- s : ndarray
- The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is
- a 1-d array of length min(`M`, `N`).
- v : ndarray
- Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on
- ``full_matrices``.
+ u : { (..., M, M), (..., M, K) } array
+ Unitary matrices. The actual shape depends on the value of
+ ``full_matrices``. Only returned when ``compute_uv`` is True.
+ s : (..., K) array
+ The singular values for every matrix, sorted in descending order.
+ v : { (..., N, N), (..., K, N) } array
+ Unitary matrices. The actual shape depends on the value of
+ ``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
@@ -1231,6 +1253,11 @@ def svd(a, full_matrices=1, compute_uv=1):
Notes
-----
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ The decomposition is performed using LAPACK routine _gesdd
+
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
@@ -1253,7 +1280,7 @@ def svd(a, full_matrices=1, compute_uv=1):
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
- ((9, 6), (6, 6), (6,))
+ ((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
@@ -1270,63 +1297,41 @@ def svd(a, full_matrices=1, compute_uv=1):
"""
a, wrap = _makearray(a)
- _assertRank2(a)
- _assertNonEmpty(a)
- m, n = a.shape
+ _assertNoEmpty2d(a)
+ _assertRankAtLeast2(a)
t, result_t = _commonType(a)
- real_t = _linalgRealType(t)
- a = _fastCopyAndTranspose(t, a)
- a = _to_native_byte_order(a)
- s = zeros((min(n, m),), real_t)
+
+ extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
+
+ m = a.shape[-2]
+ n = a.shape[-1]
if compute_uv:
if full_matrices:
- nu = m
- nvt = n
- option = _A
+ if m < n:
+ gufunc = _umath_linalg.svd_m_f
+ else:
+ gufunc = _umath_linalg.svd_n_f
else:
- nu = min(n, m)
- nvt = min(n, m)
- option = _S
- u = zeros((nu, m), t)
- vt = zeros((n, nvt), t)
- else:
- option = _N
- nu = 1
- nvt = 1
- u = empty((1, 1), t)
- vt = empty((1, 1), t)
+ if m < n:
+ gufunc = _umath_linalg.svd_m_s
+ else:
+ gufunc = _umath_linalg.svd_n_s
- iwork = zeros((8*min(m, n),), fortran_int)
- if isComplexType(t):
- lapack_routine = lapack_lite.zgesdd
- lrwork = min(m,n)*max(5*min(m,n)+7, 2*max(m,n)+2*min(m,n)+1)
- rwork = zeros((lrwork,), real_t)
- lwork = 1
- work = zeros((lwork,), t)
- results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
- work, -1, rwork, iwork, 0)
- lwork = int(abs(work[0]))
- work = zeros((lwork,), t)
- results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
- work, lwork, rwork, iwork, 0)
- else:
- lapack_routine = lapack_lite.dgesdd
- lwork = 1
- work = zeros((lwork,), t)
- results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
- work, -1, iwork, 0)
- lwork = int(work[0])
- work = zeros((lwork,), t)
- results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
- work, lwork, iwork, 0)
- if results['info'] > 0:
- raise LinAlgError('SVD did not converge')
- s = s.astype(_realType(result_t))
- if compute_uv:
- u = u.transpose().astype(result_t)
- vt = vt.transpose().astype(result_t)
+ signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
+ u, s, vt = gufunc(a, signature=signature, extobj=extobj)
+ u = u.astype(result_t)
+ s = s.astype(_realType(result_t))
+ vt = vt.astype(result_t)
return wrap(u), s, wrap(vt)
else:
+ if m < n:
+ gufunc = _umath_linalg.svd_m
+ else:
+ gufunc = _umath_linalg.svd_n
+
+ signature = 'D->d' if isComplexType(t) else 'd->d'
+ s = gufunc(a, signature=signature, extobj=extobj)
+ s = s.astype(_realType(result_t))
return s
def cond(x, p=None):
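
The svd dispatch above selects one of six gufuncs based on compute_uv,
full_matrices, and whether the matrices are wide (m < n) or tall. A shape
sketch for a stack of wide matrices:

    import numpy as np

    a = np.random.rand(7, 4, 6)                      # wide: m=4 < n=6
    u, s, vt = np.linalg.svd(a)                      # svd_m_f: u (7, 4, 4), s (7, 4), vt (7, 6, 6)
    u2, s2, vt2 = np.linalg.svd(a, full_matrices=0)  # svd_m_s: u2 (7, 4, 4), vt2 (7, 4, 6)
    sv = np.linalg.svd(a, compute_uv=0)              # svd_m: singular values only, (7, 4)
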
@@ -1569,7 +1574,7 @@ def pinv(a, rcond=1e-15 ):
"""
a, wrap = _makearray(a)
- _assertNonEmpty(a)
+ _assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
@@ -1596,16 +1601,16 @@ def slogdet(a):
Parameters
----------
- a : array_like
+ a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
- sign : float or complex
+ sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
- logdet : float
+ logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
@@ -1617,6 +1622,9 @@ def slogdet(a):
Notes
-----
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
@@ -1633,6 +1641,17 @@ def slogdet(a):
>>> sign * np.exp(logdet)
-2.0
+ Computing log-determinants for a stack of matrices:
+
+ >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
+ >>> a.shape
+ (3, 2, 2)
+ >>> sign, logdet = np.linalg.slogdet(a)
+ >>> (sign, logdet)
+ (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
+ >>> sign * np.exp(logdet)
+ array([-2., -3., -8.])
+
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
@@ -1642,30 +1661,14 @@ def slogdet(a):
"""
a = asarray(a)
- _assertRank2(a)
- _assertSquareness(a)
+ _assertNoEmpty2d(a)
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
t, result_t = _commonType(a)
- a = _fastCopyAndTranspose(t, a)
- a = _to_native_byte_order(a)
- n = a.shape[0]
- if isComplexType(t):
- lapack_routine = lapack_lite.zgetrf
- else:
- lapack_routine = lapack_lite.dgetrf
- pivots = zeros((n,), fortran_int)
- results = lapack_routine(n, n, a, n, pivots, 0)
- info = results['info']
- if (info < 0):
- raise TypeError("Illegal input to Fortran routine")
- elif (info > 0):
- return (t(0.0), _realType(t)(-Inf))
- sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2)
- d = diagonal(a)
- absd = absolute(d)
- sign *= multiply.reduce(d / absd)
- log(absd, absd)
- logdet = add.reduce(absd, axis=-1)
- return sign, logdet
+ real_t = _realType(result_t)
+ signature = 'D->Dd' if isComplexType(t) else 'd->dd'
+ sign, logdet = _umath_linalg.slogdet(a, signature=signature)
+ return sign.astype(result_t), logdet.astype(real_t)
def det(a):
"""
@@ -1673,12 +1676,12 @@ def det(a):
Parameters
----------
- a : (M, M) array_like
- Input array.
+ a : (..., M, M) array_like
+ Input array to compute determinants for.
Returns
-------
- det : float
+ det : (...) array_like
Determinant of `a`.
See Also
@@ -1688,6 +1691,9 @@ def det(a):
Notes
-----
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
@@ -1699,9 +1705,22 @@ def det(a):
>>> np.linalg.det(a)
-2.0
+ Computing determinants for a stack of matrices:
+
+ >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
+ >>> a.shape
+    (3, 2, 2)
+ >>> np.linalg.det(a)
+ array([-2., -3., -8.])
+
"""
- sign, logdet = slogdet(a)
- return sign * exp(logdet)
+ a = asarray(a)
+ _assertNoEmpty2d(a)
+ _assertRankAtLeast2(a)
+ _assertNdSquareness(a)
+ t, result_t = _commonType(a)
+ signature = 'D->D' if isComplexType(t) else 'd->d'
+ return _umath_linalg.det(a, signature=signature).astype(result_t)
# Linear Least Squares
@@ -1732,9 +1751,9 @@ def lstsq(a, b, rcond=-1):
Returns
-------
- x : {(M,), (M, K)} ndarray
- Least-squares solution. The shape of `x` depends on the shape of
- `b`.
+ x : {(N,), (N, K)} ndarray
+ Least-squares solution. If `b` is two-dimensional,
+ the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
@@ -1865,7 +1884,38 @@ def lstsq(a, b, rcond=-1):
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
-def norm(x, ord=None):
+
+def _multi_svd_norm(x, row_axis, col_axis, op):
+ """Compute the extreme singular values of the 2-D matrices in `x`.
+
+ This is a private utility function used by numpy.linalg.norm().
+
+ Parameters
+ ----------
+ x : ndarray
+ row_axis, col_axis : int
+ The axes of `x` that hold the 2-D matrices.
+ op : callable
+ This should be either numpy.amin or numpy.amax.
+
+ Returns
+ -------
+ result : float or ndarray
+        If `x` is 2-D, the return value is a float.
+ Otherwise, it is an array with ``x.ndim - 2`` dimensions.
+ The return values are either the minimum or maximum of the
+ singular values of the matrices, depending on whether `op`
+ is `numpy.amin` or `numpy.amax`.
+
+ """
+ if row_axis > col_axis:
+ row_axis -= 1
+ y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
+ result = op(svd(y, compute_uv=0), axis=-1)
+ return result
+
+
+def norm(x, ord=None, axis=None):
"""
Matrix or vector norm.
@@ -1875,16 +1925,22 @@ def norm(x, ord=None):
Parameters
----------
- x : {(M,), (M, N)} array_like
- Input array.
+ x : array_like
+ Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
+ axis : {int, 2-tuple of ints, None}, optional
+ If `axis` is an integer, it specifies the axis of `x` along which to
+ compute the vector norms. If `axis` is a 2-tuple, it specifies the
+ axes that hold 2-D matrices, and the matrix norms of these matrices
+ are computed. If `axis` is None then either a vector norm (when `x`
+ is 1-D) or a matrix norm (when `x` is 2-D) is returned.
Returns
-------
- n : float
- Norm of the matrix or vector.
+ n : float or ndarray
+ Norm of the matrix or vector(s).
Notes
-----
@@ -1967,44 +2023,94 @@ def norm(x, ord=None):
>>> LA.norm(a, -3)
nan
+ Using the `axis` argument to compute vector norms:
+
+ >>> c = np.array([[ 1, 2, 3],
+ ... [-1, 1, 4]])
+ >>> LA.norm(c, axis=0)
+ array([ 1.41421356, 2.23606798, 5. ])
+ >>> LA.norm(c, axis=1)
+ array([ 3.74165739, 4.24264069])
+ >>> LA.norm(c, ord=1, axis=1)
+ array([6, 6])
+
+ Using the `axis` argument to compute matrix norms:
+
+ >>> m = np.arange(8).reshape(2,2,2)
+ >>> LA.norm(m, axis=(1,2))
+ array([ 3.74165739, 11.22497216])
+ >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
+ (3.7416573867739413, 11.224972160321824)
+
"""
x = asarray(x)
- if ord is None: # check the default case first and handle it immediately
- return sqrt(add.reduce((x.conj() * x).ravel().real))
+ # Check the default case first and handle it immediately.
+ if ord is None and axis is None:
+ return sqrt(add.reduce((x.conj() * x).real, axis=None))
+
+ # Normalize the `axis` argument to a tuple.
nd = x.ndim
- if nd == 1:
+ if axis is None:
+ axis = tuple(range(nd))
+ elif not isinstance(axis, tuple):
+ axis = (axis,)
+
+ if len(axis) == 1:
if ord == Inf:
- return abs(x).max()
+ return abs(x).max(axis=axis)
elif ord == -Inf:
- return abs(x).min()
+ return abs(x).min(axis=axis)
elif ord == 0:
- return (x != 0).sum() # Zero norm
+ # Zero norm
+ return (x != 0).sum(axis=axis)
elif ord == 1:
- return abs(x).sum() # special case for speedup
- elif ord == 2:
- return sqrt(((x.conj()*x).real).sum()) # special case for speedup
+ # special case for speedup
+ return add.reduce(abs(x), axis=axis)
+ elif ord is None or ord == 2:
+ # special case for speedup
+ s = (x.conj() * x).real
+ return sqrt(add.reduce(s, axis=axis))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
- return ((abs(x)**ord).sum())**(1.0/ord)
- elif nd == 2:
+            if x.dtype.type is longdouble:
+                # Don't apply asfarray to longdouble arrays, because
+                # it will downcast to float64.
+                absx = abs(x)
+            else:
+                # Convert to a float type, so integer arrays give
+                # float results.
+                absx = asfarray(abs(x))
+            return add.reduce(absx**ord, axis=axis)**(1.0/ord)
+ elif len(axis) == 2:
+ row_axis, col_axis = axis
+ if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
+ raise ValueError('Invalid axis %r for an array with shape %r' %
+ (axis, x.shape))
+ if row_axis % nd == col_axis % nd:
+ raise ValueError('Duplicate axes given.')
if ord == 2:
- return svd(x, compute_uv=0).max()
+ return _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
- return svd(x, compute_uv=0).min()
+ return _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
- return abs(x).sum(axis=0).max()
+ if col_axis > row_axis:
+ col_axis -= 1
+ return add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
- return abs(x).sum(axis=1).max()
+ if row_axis > col_axis:
+ row_axis -= 1
+ return add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
- return abs(x).sum(axis=0).min()
+ if col_axis > row_axis:
+ col_axis -= 1
+ return add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
- return abs(x).sum(axis=1).min()
- elif ord in ['fro','f']:
- return sqrt(add.reduce((x.conj() * x).real.ravel()))
+ if row_axis > col_axis:
+ row_axis -= 1
+ return add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
+ elif ord in [None, 'fro', 'f']:
+ return sqrt(add.reduce((x.conj() * x).real, axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
else:
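
The net effect of the axis handling above, sketched:

    import numpy as np

    x = np.arange(24, dtype=float).reshape(2, 3, 4)
    np.linalg.norm(x, axis=1)              # 2-norm of each length-3 column -> shape (2, 4)
    np.linalg.norm(x, axis=(1, 2))         # Frobenius norm of each 3x4 matrix -> shape (2,)
    np.linalg.norm(x, ord=2, axis=(1, 2))  # spectral norm per matrix via _multi_svd_norm
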
diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py
index 1fb7a3acd..1c73c86d3 100644
--- a/numpy/linalg/setup.py
+++ b/numpy/linalg/setup.py
@@ -1,4 +1,6 @@
+from __future__ import division, print_function
+import os
import sys
def configuration(parent_package='',top_path=None):
@@ -9,6 +11,18 @@ def configuration(parent_package='',top_path=None):
config.add_data_dir('tests')
# Configure lapack_lite
+
+ src_dir = 'lapack_lite'
+ lapack_lite_src = [
+ os.path.join(src_dir, 'python_xerbla.c'),
+ os.path.join(src_dir, 'zlapack_lite.c'),
+ os.path.join(src_dir, 'dlapack_lite.c'),
+ os.path.join(src_dir, 'blas_lite.c'),
+ os.path.join(src_dir, 'dlamch.c'),
+ os.path.join(src_dir, 'f2c_lite.c'),
+ os.path.join(src_dir, 'f2c.h'),
+ ]
+
lapack_info = get_info('lapack_opt',0) # and {}
def get_lapack_lite_sources(ext, build_dir):
if not lapack_info:
@@ -22,14 +36,19 @@ def configuration(parent_package='',top_path=None):
config.add_extension('lapack_lite',
sources = [get_lapack_lite_sources],
- depends= ['lapack_litemodule.c',
- 'python_xerbla.c',
- 'zlapack_lite.c', 'dlapack_lite.c',
- 'blas_lite.c', 'dlamch.c',
- 'f2c_lite.c','f2c.h'],
+ depends = ['lapack_litemodule.c'] + lapack_lite_src,
extra_info = lapack_info
)
+ # umath_linalg module
+
+ config.add_extension('_umath_linalg',
+ sources = [get_lapack_lite_sources],
+ depends = ['umath_linalg.c.src'] + lapack_lite_src,
+ extra_info = lapack_info,
+ libraries = ['npymath'],
+ )
+
return config
if __name__ == '__main__':
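
umath_linalg.c.src is a numpy.distutils template that is processed into the C
source for the _umath_linalg extension at build time. A quick post-build smoke
test (a sketch):

    import numpy as np
    from numpy.linalg import lapack_lite, _umath_linalg

    print(_umath_linalg.det(np.eye(3)))  # 1.0
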
diff --git a/numpy/linalg/setupscons.py b/numpy/linalg/setupscons.py
deleted file mode 100644
index fd05ce9af..000000000
--- a/numpy/linalg/setupscons.py
+++ /dev/null
@@ -1,19 +0,0 @@
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- from numpy.distutils.system_info import get_info
- config = Configuration('linalg',parent_package,top_path)
-
- config.add_data_dir('tests')
-
- config.add_sconscript('SConstruct',
- source_files = ['lapack_litemodule.c',
- 'zlapack_lite.c', 'dlapack_lite.c',
- 'blas_lite.c', 'dlamch.c',
- 'f2c_lite.c','f2c.h'])
-
- return config
-
-if __name__ == '__main__':
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/linalg/tests/test_build.py b/numpy/linalg/tests/test_build.py
index 691fd7a6b..27fbd6429 100644
--- a/numpy/linalg/tests/test_build.py
+++ b/numpy/linalg/tests/test_build.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from subprocess import call, PIPE, Popen
import sys
import re
diff --git a/numpy/linalg/tests/test_deprecations.py b/numpy/linalg/tests/test_deprecations.py
new file mode 100644
index 000000000..13d244199
--- /dev/null
+++ b/numpy/linalg/tests/test_deprecations.py
@@ -0,0 +1,24 @@
+"""Test deprecation and future warnings.
+
+"""
+import numpy as np
+from numpy.testing import assert_warns, run_module_suite
+
+
+def test_qr_mode_full_future_warning():
+ """Check mode='full' FutureWarning.
+
+ In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were
+ deprecated. The release date will probably be sometime in the summer
+ of 2013.
+
+ """
+ a = np.eye(2)
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full')
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f')
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic')
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e')
+
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy/linalg/tests/test_gufuncs_linalg.py b/numpy/linalg/tests/test_gufuncs_linalg.py
new file mode 100644
index 000000000..d3299b7b5
--- /dev/null
+++ b/numpy/linalg/tests/test_gufuncs_linalg.py
@@ -0,0 +1,500 @@
+"""
+Test functions for gufuncs_linalg module
+Heavily inspired by (and in part lifted from) test_linalg.
+"""
+from __future__ import division, print_function
+
+################################################################################
+# The following functions are implemented in the module "gufuncs_linalg"
+#
+# category "linalg"
+# - inv (TestInv)
+# - poinv (TestPoinv)
+# - det (TestDet)
+# - slogdet (TestDet)
+# - eig (TestEig)
+# - eigh (TestEigh)
+# - eigvals (TestEigvals)
+# - eigvalsh (TestEigvalsh)
+# - cholesky
+# - solve (TestSolve)
+# - chosolve (TestChosolve)
+# - svd (TestSVD)
+
+# ** unimplemented **
+# - qr
+# - matrix_power
+# - matrix_rank
+# - pinv
+# - lstsq
+# - tensorinv
+# - tensorsolve
+# - norm
+# - cond
+#
+# category "inspired by pdl"
+# - quadratic_form
+# - matrix_multiply3
+# - add3 (TestAdd3)
+# - multiply3 (TestMultiply3)
+# - multiply3_add (TestMultiply3Add)
+# - multiply_add (TestMultiplyAdd)
+# - multiply_add2 (TestMultiplyAdd2)
+# - multiply4 (TestMultiply4)
+# - multiply4_add (TestMultiply4_add)
+#
+# category "others"
+# - convolve
+# - inner1d
+# - innerwt
+# - matrix_multiply
+
+from nose.plugins.skip import Skip, SkipTest
+import numpy as np
+
+from numpy.testing import (TestCase, assert_, assert_equal, assert_raises,
+ assert_array_equal, assert_almost_equal,
+ run_module_suite)
+
+from numpy import array, single, double, csingle, cdouble, dot, identity
+from numpy import multiply, inf
+import numpy.linalg._gufuncs_linalg as gula
+
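+# A minimal illustration (not part of the original test suite) of the
+# broadcasting behaviour exercised below: these gufuncs operate on the last
+# two (core) dimensions and broadcast over the rest, e.g.
+#
+#     stack = np.array([np.eye(2), 2 * np.eye(2)])   # shape (2, 2, 2)
+#     gula.det(stack)                                # -> array([ 1.,  4.])
+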
+old_assert_almost_equal = assert_almost_equal
+
+def assert_almost_equal(a, b, **kw):
+ if a.dtype.type in (single, csingle):
+ decimal = 5
+ else:
+ decimal = 10
+ old_assert_almost_equal(a, b, decimal = decimal, **kw)
+
+
+def assert_valid_eigen_no_broadcast(M, w, v, **kw):
+ lhs = gula.matrix_multiply(M,v)
+ rhs = w*v
+ assert_almost_equal(lhs, rhs, **kw)
+
+
+def assert_valid_eigen_recurse(M, w, v, **kw):
+ """check that w and v are valid eigenvalues/eigenvectors for matrix M
+ broadcast"""
+ if len(M.shape) > 2:
+ for i in range(M.shape[0]):
+ assert_valid_eigen_recurse(M[i], w[i], v[i], **kw)
+ else:
+ if len(M.shape) == 2:
+ assert_valid_eigen_no_broadcast(M, w, v, **kw)
+ else:
+ raise AssertionError('Not enough dimensions')
+
+
+def assert_valid_eigen(M, w, v, **kw):
+ if np.any(np.isnan(M)):
+ raise AssertionError('nan found in matrix')
+ if np.any(np.isnan(w)):
+ raise AssertionError('nan found in eigenvalues')
+ if np.any(np.isnan(v)):
+ raise AssertionError('nan found in eigenvectors')
+
+ assert_valid_eigen_recurse(M, w, v, **kw)
+
+
+def assert_valid_eigenvals_no_broadcast(M, w, **kw):
+ ident = np.eye(M.shape[0], dtype=M.dtype)
+ for i in range(w.shape[0]):
+ assert_almost_equal(gula.det(M - w[i]*ident), 0.0, **kw)
+
+
+def assert_valid_eigenvals_recurse(M, w, **kw):
+ if len(M.shape) > 2:
+ for i in range(M.shape[0]):
+ assert_valid_eigenvals_recurse(M[i], w[i], **kw)
+ else:
+ if len(M.shape) == 2:
+ assert_valid_eigenvals_no_broadcast(M, w, **kw)
+ else:
+ raise AssertionError('Not enough dimensions')
+
+
+def assert_valid_eigenvals(M, w, **kw):
+ if np.any(np.isnan(M)):
+ raise AssertionError('nan found in matrix')
+ if np.any(np.isnan(w)):
+ raise AssertionError('nan found in eigenvalues')
+ assert_valid_eigenvals_recurse(M, w, **kw)
+
+
+class MatrixGenerator(object):
+ def real_matrices(self):
+ a = [[1,2],
+ [3,4]]
+
+ b = [[4,3],
+ [2,1]]
+
+ return a, b
+
+ def real_symmetric_matrices(self):
+ a = [[ 2 ,-1],
+ [-1 , 2]]
+
+ b = [[4,3],
+ [2,1]]
+
+ return a, b
+
+ def complex_matrices(self):
+ a = [[1+2j,2+3j],
+ [3+4j,4+5j]]
+
+ b = [[4+3j,3+2j],
+ [2+1j,1+0j]]
+
+ return a, b
+
+ def complex_hermitian_matrices(self):
+ a = [[2,-1],
+ [-1, 2]]
+
+ b = [[4+3j,3+2j],
+ [2-1j,1+0j]]
+
+ return a, b
+
+ def real_matrices_vector(self):
+ a, b = self.real_matrices()
+ return [a], [b]
+
+ def real_symmetric_matrices_vector(self):
+ a, b = self.real_symmetric_matrices()
+ return [a], [b]
+
+ def complex_matrices_vector(self):
+ a, b = self.complex_matrices()
+ return [a], [b]
+
+ def complex_hermitian_matrices_vector(self):
+ a, b = self.complex_hermitian_matrices()
+ return [a], [b]
+
+
+class GeneralTestCase(MatrixGenerator):
+ def test_single(self):
+ a,b = self.real_matrices()
+ self.do(array(a, dtype=single),
+ array(b, dtype=single))
+
+ def test_double(self):
+ a, b = self.real_matrices()
+ self.do(array(a, dtype=double),
+ array(b, dtype=double))
+
+ def test_csingle(self):
+ a, b = self.complex_matrices()
+ self.do(array(a, dtype=csingle),
+ array(b, dtype=csingle))
+
+ def test_cdouble(self):
+ a, b = self.complex_matrices()
+ self.do(array(a, dtype=cdouble),
+ array(b, dtype=cdouble))
+
+ def test_vector_single(self):
+ a,b = self.real_matrices_vector()
+ self.do(array(a, dtype=single),
+ array(b, dtype=single))
+
+ def test_vector_double(self):
+ a, b = self.real_matrices_vector()
+ self.do(array(a, dtype=double),
+ array(b, dtype=double))
+
+ def test_vector_csingle(self):
+ a, b = self.complex_matrices_vector()
+ self.do(array(a, dtype=csingle),
+ array(b, dtype=csingle))
+
+ def test_vector_cdouble(self):
+ a, b = self.complex_matrices_vector()
+ self.do(array(a, dtype=cdouble),
+ array(b, dtype=cdouble))
+
+
+class HermitianTestCase(MatrixGenerator):
+ def test_single(self):
+ a,b = self.real_symmetric_matrices()
+ self.do(array(a, dtype=single),
+ array(b, dtype=single))
+
+ def test_double(self):
+ a, b = self.real_symmetric_matrices()
+ self.do(array(a, dtype=double),
+ array(b, dtype=double))
+
+ def test_csingle(self):
+ a, b = self.complex_hermitian_matrices()
+ self.do(array(a, dtype=csingle),
+ array(b, dtype=csingle))
+
+ def test_cdouble(self):
+ a, b = self.complex_hermitian_matrices()
+ self.do(array(a, dtype=cdouble),
+ array(b, dtype=cdouble))
+
+ def test_vector_single(self):
+ a,b = self.real_symmetric_matrices_vector()
+ self.do(array(a, dtype=single),
+ array(b, dtype=single))
+
+ def test_vector_double(self):
+ a, b = self.real_symmetric_matrices_vector()
+ self.do(array(a, dtype=double),
+ array(b, dtype=double))
+
+ def test_vector_csingle(self):
+ a, b = self.complex_hermitian_matrices_vector()
+ self.do(array(a, dtype=csingle),
+ array(b, dtype=csingle))
+
+ def test_vector_cdouble(self):
+ a, b = self.complex_hermitian_matrices_vector()
+ self.do(array(a, dtype=cdouble),
+ array(b, dtype=cdouble))
+
+
+class TestMatrixMultiply(GeneralTestCase):
+ def do(self, a, b):
+ res = gula.matrix_multiply(a,b)
+ if a.ndim == 2:
+ assert_almost_equal(res, np.dot(a,b))
+ else:
+ assert_almost_equal(res[0], np.dot(a[0],b[0]))
+
+ def test_column_matrix(self):
+ A = np.arange(2*2).reshape((2,2))
+ B = np.arange(2*1).reshape((2,1))
+ res = gula.matrix_multiply(A,B)
+ assert_almost_equal(res, np.dot(A,B))
+
+class TestInv(GeneralTestCase, TestCase):
+ def do(self, a, b):
+ a_inv = gula.inv(a)
+ ident = identity(a.shape[-1])
+ if 3 == len(a.shape):
+ ident = ident.reshape((1, ident.shape[0], ident.shape[1]))
+ assert_almost_equal(gula.matrix_multiply(a, a_inv), ident)
+
+
+class TestPoinv(HermitianTestCase, TestCase):
+ def do(self, a, b):
+ a_inv = gula.poinv(a)
+ ident = identity(a.shape[-1])
+ if 3 == len(a.shape):
+ ident = ident.reshape((1,ident.shape[0], ident.shape[1]))
+
+ assert_almost_equal(a_inv, gula.inv(a))
+ assert_almost_equal(gula.matrix_multiply(a, a_inv), ident)
+
+
+class TestDet(GeneralTestCase, TestCase):
+ def do(self, a, b):
+ d = gula.det(a)
+ s, ld = gula.slogdet(a)
+ assert_almost_equal(s * np.exp(ld), d)
+
+ if np.csingle == a.dtype.type or np.single == a.dtype.type:
+ cmp_type=np.csingle
+ else:
+ cmp_type=np.cdouble
+
+ ev = gula.eigvals(a.astype(cmp_type))
+ assert_almost_equal(d.astype(cmp_type),
+ multiply.reduce(ev.astype(cmp_type),
+ axis=(ev.ndim-1)))
+ if s != 0:
+ assert_almost_equal(np.abs(s), 1)
+ else:
+ assert_equal(ld, -inf)
+
+ def test_zero(self):
+ assert_equal(gula.det(array([[0.0]], dtype=single)), 0.0)
+ assert_equal(gula.det(array([[0.0]], dtype=double)), 0.0)
+ assert_equal(gula.det(array([[0.0]], dtype=csingle)), 0.0)
+ assert_equal(gula.det(array([[0.0]], dtype=cdouble)), 0.0)
+
+ assert_equal(gula.slogdet(array([[0.0]], dtype=single)), (0.0, -inf))
+ assert_equal(gula.slogdet(array([[0.0]], dtype=double)), (0.0, -inf))
+ assert_equal(gula.slogdet(array([[0.0]], dtype=csingle)), (0.0, -inf))
+ assert_equal(gula.slogdet(array([[0.0]], dtype=cdouble)), (0.0, -inf))
+
+ def test_types(self):
+ for typ in [(single, single),
+ (double, double),
+ (csingle, single),
+ (cdouble, double)]:
+ for x in [ [0], [[0]], [[[0]]] ]:
+ assert_equal(gula.det(array(x, dtype=typ[0])).dtype, typ[0])
+ assert_equal(gula.slogdet(array(x, dtype=typ[0]))[0].dtype, typ[0])
+ assert_equal(gula.slogdet(array(x, dtype=typ[0]))[1].dtype, typ[1])
+
+
+class TestEig(GeneralTestCase, TestCase):
+ def do(self, a, b):
+ evalues, evectors = gula.eig(a)
+ assert_valid_eigenvals(a, evalues)
+ assert_valid_eigen(a, evalues, evectors)
+ ev = gula.eigvals(a)
+ assert_valid_eigenvals(a, evalues)
+ assert_almost_equal(ev, evalues)
+
+
+class TestEigh(HermitianTestCase, TestCase):
+ def do(self, a, b):
+ evalues_lo, evectors_lo = gula.eigh(a, UPLO='L')
+ evalues_up, evectors_up = gula.eigh(a, UPLO='U')
+
+ assert_valid_eigenvals(a, evalues_lo)
+ assert_valid_eigenvals(a, evalues_up)
+ assert_valid_eigen(a, evalues_lo, evectors_lo)
+ assert_valid_eigen(a, evalues_up, evectors_up)
+ assert_almost_equal(evalues_lo, evalues_up)
+ assert_almost_equal(evectors_lo, evectors_up)
+
+ ev_lo = gula.eigvalsh(a, UPLO='L')
+ ev_up = gula.eigvalsh(a, UPLO='U')
+ assert_valid_eigenvals(a, ev_lo)
+ assert_valid_eigenvals(a, ev_up)
+ assert_almost_equal(ev_lo, evalues_lo)
+ assert_almost_equal(ev_up, evalues_up)
+
+
+class TestSolve(GeneralTestCase,TestCase):
+ def do(self, a, b):
+ x = gula.solve(a,b)
+ assert_almost_equal(b, gula.matrix_multiply(a,x))
+
+
+class TestChosolve(HermitianTestCase, TestCase):
+ def do(self, a, b):
+ """
+ inner1d not defined for complex types.
+ todo: implement alternative test
+ """
+ if csingle == a.dtype or cdouble == a.dtype:
+ raise SkipTest
+
+ x_lo = gula.chosolve(a, b, UPLO='L')
+ x_up = gula.chosolve(a, b, UPLO='U')
+ assert_almost_equal(x_lo, x_up)
+ assert_almost_equal(b, gula.matrix_multiply(a,x_lo))
+ assert_almost_equal(b, gula.matrix_multiply(a,x_up))
+
+
+class TestSVD(GeneralTestCase, TestCase):
+ def do(self, a, b):
+ """ still work in progress """
+ raise SkipTest
+ u, s, vt = gula.svd(a, 0)
+ assert_almost_equal(a, dot(multiply(u, s), vt))
+
+"""
+class TestCholesky(HermitianTestCase, TestCase):
+ def do(self, a, b):
+ pass
+"""
+
+################################################################################
+# ufuncs inspired by pdl
+# - add3
+# - multiply3
+# - multiply3_add
+# - multiply_add
+# - multiply_add2
+# - multiply4
+# - multiply4_add
+
+class UfuncTestCase(object):
+ parameter = range(0,10)
+
+ def _check_for_type(self, typ):
+ a = np.array(self.__class__.parameter, dtype=typ)
+ self.do(a)
+
+ def _check_for_type_vector(self, typ):
+ parameter = self.__class__.parameter
+ a = np.array([parameter, parameter], dtype=typ)
+ self.do(a)
+
+ def test_single(self):
+ self._check_for_type(single)
+
+ def test_double(self):
+ self._check_for_type(double)
+
+ def test_csingle(self):
+ self._check_for_type(csingle)
+
+ def test_cdouble(self):
+ self._check_for_type(cdouble)
+
+ def test_single_vector(self):
+ self._check_for_type_vector(single)
+
+ def test_double_vector(self):
+ self._check_for_type_vector(double)
+
+ def test_csingle_vector(self):
+ self._check_for_type_vector(csingle)
+
+ def test_cdouble_vector(self):
+ self._check_for_type_vector(cdouble)
+
+
+class TestAdd3(UfuncTestCase, TestCase):
+ def do(self, a):
+ r = gula.add3(a,a,a)
+ assert_almost_equal(r, a+a+a)
+
+
+class TestMultiply3(UfuncTestCase, TestCase):
+ def do(self, a):
+ r = gula.multiply3(a,a,a)
+ assert_almost_equal(r, a*a*a)
+
+
+class TestMultiply3Add(UfuncTestCase, TestCase):
+ def do(self, a):
+ r = gula.multiply3_add(a,a,a,a)
+ assert_almost_equal(r, a*a*a+a)
+
+
+class TestMultiplyAdd(UfuncTestCase, TestCase):
+ def do(self, a):
+ r = gula.multiply_add(a,a,a)
+ assert_almost_equal(r, a*a+a)
+
+
+class TestMultiplyAdd2(UfuncTestCase, TestCase):
+ def do(self, a):
+ r = gula.multiply_add2(a,a,a,a)
+ assert_almost_equal(r, a*a+a+a)
+
+
+class TestMultiply4(UfuncTestCase, TestCase):
+ def do(self, a):
+ r = gula.multiply4(a,a,a,a)
+ assert_almost_equal(r, a*a*a*a)
+
+
+class TestMultiply4_add(UfuncTestCase, TestCase):
+ def do(self, a):
+ r = gula.multiply4_add(a,a,a,a,a)
+ assert_almost_equal(r, a*a*a*a+a)
+
+
+if __name__ == "__main__":
+ print('testing gufuncs_linalg; gufuncs version: %s' % gula._impl.__version__)
+ run_module_suite()
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 623939863..7f102634e 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -1,11 +1,14 @@
""" Test functions for linalg module
"""
+from __future__ import division, absolute_import, print_function
+
+import os
import sys
import numpy as np
from numpy.testing import (TestCase, assert_, assert_equal, assert_raises,
assert_array_equal, assert_almost_equal,
- run_module_suite)
+ run_module_suite, dec)
from numpy import array, single, double, csingle, cdouble, dot, identity
from numpy import multiply, atleast_2d, inf, asarray, matrix
from numpy import linalg
@@ -25,6 +28,14 @@ def assert_almost_equal(a, b, **kw):
decimal = 12
old_assert_almost_equal(a, b, decimal=decimal, **kw)
+def get_real_dtype(dtype):
+ return {single: single, double: double,
+ csingle: single, cdouble: double}[dtype]
+
+def get_complex_dtype(dtype):
+ return {single: csingle, double: cdouble,
+ csingle: csingle, cdouble: cdouble}[dtype]
+
class LinalgTestCase(object):
def test_single(self):
@@ -63,7 +74,7 @@ class LinalgTestCase(object):
try:
self.do(a, b)
raise AssertionError("%s should fail with empty matrices", self.__name__[5:])
- except linalg.LinAlgError, e:
+ except linalg.LinAlgError as e:
pass
def test_nonarray(self):
@@ -136,43 +147,193 @@ class LinalgNonsquareTestCase(object):
self.do(a, b)
-class TestSolve(LinalgTestCase, TestCase):
+def _generalized_testcase(new_cls_name, old_cls):
+ def get_method(old_name, new_name):
+ def method(self):
+ base = old_cls()
+ def do(a, b):
+ a = np.array([a, a, a])
+ b = np.array([b, b, b])
+ self.do(a, b)
+ base.do = do
+ getattr(base, old_name)()
+ method.__name__ = new_name
+ return method
+
+ dct = dict()
+ for old_name in dir(old_cls):
+ if old_name.startswith('test_'):
+ new_name = old_name + '_generalized'
+ dct[new_name] = get_method(old_name, new_name)
+
+ return type(new_cls_name, (object,), dct)
+
+LinalgGeneralizedTestCase = _generalized_testcase(
+ 'LinalgGeneralizedTestCase', LinalgTestCase)
+LinalgGeneralizedNonsquareTestCase = _generalized_testcase(
+ 'LinalgGeneralizedNonsquareTestCase', LinalgNonsquareTestCase)
+
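+# The factory above is roughly equivalent to writing, by hand, one wrapper
+# per test_* method of the wrapped class, e.g. (sketch only):
+#
+#     def test_single_generalized(self):
+#         base = LinalgTestCase()
+#         base.do = lambda a, b: self.do(np.array([a] * 3), np.array([b] * 3))
+#         base.test_single()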
+
+def dot_generalized(a, b):
+ a = asarray(a)
+ if a.ndim == 3:
+ return np.array([dot(ax, bx) for ax, bx in zip(a, b)])
+ elif a.ndim > 3:
+ raise ValueError("Not implemented...")
+ return dot(a, b)
+
+def identity_like_generalized(a):
+ a = asarray(a)
+ if a.ndim == 3:
+ return np.array([identity(a.shape[-2]) for ax in a])
+ elif a.ndim > 3:
+ raise ValueError("Not implemented...")
+ return identity(a.shape[0])
+
+
+class TestSolve(LinalgTestCase, LinalgGeneralizedTestCase, TestCase):
def do(self, a, b):
x = linalg.solve(a, b)
- assert_almost_equal(b, dot(a, x))
+ assert_almost_equal(b, dot_generalized(a, x))
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
+ def test_types(self):
+ def check(dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ assert_equal(linalg.solve(x, x).dtype, dtype)
+ for dtype in [single, double, csingle, cdouble]:
+ yield check, dtype
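+        # (nose-style generator test: each yielded (check, dtype) pair is
+        # collected and run as a separate test case)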
-class TestInv(LinalgTestCase, TestCase):
+ def test_0_size(self):
+ class ArraySubclass(np.ndarray):
+ pass
+ # Test system of 0x0 matrices
+ a = np.arange(8).reshape(2, 2, 2)
+ b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
+
+ expected = linalg.solve(a, b)[:,0:0,:]
+ result = linalg.solve(a[:,0:0,0:0], b[:,0:0,:])
+ assert_array_equal(result, expected)
+ assert_(isinstance(result, ArraySubclass))
+
+ # Test errors for non-square and only b's dimension being 0
+ assert_raises(linalg.LinAlgError, linalg.solve, a[:,0:0,0:1], b)
+ assert_raises(ValueError, linalg.solve, a, b[:,0:0,:])
+
+ # Test broadcasting error
+ b = np.arange(6).reshape(1, 3, 2) # broadcasting error
+ assert_raises(ValueError, linalg.solve, a, b)
+ assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
+
+ # Test zero "single equations" with 0x0 matrices.
+ b = np.arange(2).reshape(1, 2).view(ArraySubclass)
+ expected = linalg.solve(a, b)[:,0:0]
+ result = linalg.solve(a[:,0:0,0:0], b[:,0:0])
+ assert_array_equal(result, expected)
+ assert_(isinstance(result, ArraySubclass))
+
+ b = np.arange(3).reshape(1, 3)
+ assert_raises(ValueError, linalg.solve, a, b)
+ assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
+ assert_raises(ValueError, linalg.solve, a[:,0:0,0:0], b)
+
+
+class TestInv(LinalgTestCase, LinalgGeneralizedTestCase, TestCase):
def do(self, a, b):
a_inv = linalg.inv(a)
- assert_almost_equal(dot(a, a_inv), identity(asarray(a).shape[0]))
+ assert_almost_equal(dot_generalized(a, a_inv),
+ identity_like_generalized(a))
assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix)))
+ def test_types(self):
+ def check(dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ assert_equal(linalg.inv(x).dtype, dtype)
+ for dtype in [single, double, csingle, cdouble]:
+ yield check, dtype
+
+ def test_0_size(self):
+ # Check that all kinds of 0-sized arrays work
+ class ArraySubclass(np.ndarray):
+ pass
+ a = np.zeros((0,1,1), dtype=np.int_).view(ArraySubclass)
+ res = linalg.inv(a)
+ assert_(res.dtype.type is np.float64)
+ assert_equal(a.shape, res.shape)
+        assert_(isinstance(res, ArraySubclass))
+
+ a = np.zeros((0,0), dtype=np.complex64).view(ArraySubclass)
+ res = linalg.inv(a)
+ assert_(res.dtype.type is np.complex64)
+ assert_equal(a.shape, res.shape)
-class TestEigvals(LinalgTestCase, TestCase):
+
+class TestEigvals(LinalgTestCase, LinalgGeneralizedTestCase, TestCase):
def do(self, a, b):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
+ def test_types(self):
+ def check(dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ assert_equal(linalg.eigvals(x).dtype, dtype)
+ x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
+ assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
+ for dtype in [single, double, csingle, cdouble]:
+ yield check, dtype
+
-class TestEig(LinalgTestCase, TestCase):
+class TestEig(LinalgTestCase, LinalgGeneralizedTestCase, TestCase):
def do(self, a, b):
evalues, evectors = linalg.eig(a)
- assert_almost_equal(dot(a, evectors), multiply(evectors, evalues))
+ if evectors.ndim == 3:
+ assert_almost_equal(dot_generalized(a, evectors), evectors * evalues[:,None,:])
+ else:
+ assert_almost_equal(dot(a, evectors), multiply(evectors, evalues))
assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix)))
+ def test_types(self):
+ def check(dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ w, v = np.linalg.eig(x)
+ assert_equal(w.dtype, dtype)
+ assert_equal(v.dtype, dtype)
+
+ x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
+ w, v = np.linalg.eig(x)
+ assert_equal(w.dtype, get_complex_dtype(dtype))
+ assert_equal(v.dtype, get_complex_dtype(dtype))
-class TestSVD(LinalgTestCase, TestCase):
+ for dtype in [single, double, csingle, cdouble]:
+            yield check, dtype
+
+
+class TestSVD(LinalgTestCase, LinalgGeneralizedTestCase, TestCase):
def do(self, a, b):
u, s, vt = linalg.svd(a, 0)
- assert_almost_equal(a, dot(multiply(u, s), vt))
+ if u.ndim == 3:
+ assert_almost_equal(a, dot_generalized(u * s[:,None,:], vt))
+ else:
+ assert_almost_equal(a, dot(multiply(u, s), vt))
assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))
+ def test_types(self):
+ def check(dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ u, s, vh = linalg.svd(x)
+ assert_equal(u.dtype, dtype)
+ assert_equal(s.dtype, get_real_dtype(dtype))
+ assert_equal(vh.dtype, dtype)
+ s = linalg.svd(x, compute_uv=False)
+ assert_equal(s.dtype, get_real_dtype(dtype))
-class TestCondSVD(LinalgTestCase, TestCase):
+ for dtype in [single, double, csingle, cdouble]:
+ yield check, dtype
+
+
+class TestCondSVD(LinalgTestCase, LinalgGeneralizedTestCase, TestCase):
def do(self, a, b):
c = asarray(a) # a might be a matrix
s = linalg.svd(c, compute_uv=False)
@@ -199,7 +360,7 @@ class TestPinv(LinalgTestCase, TestCase):
assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
-class TestDet(LinalgTestCase, TestCase):
+class TestDet(LinalgTestCase, LinalgGeneralizedTestCase, TestCase):
def do(self, a, b):
d = linalg.det(a)
(s, ld) = linalg.slogdet(a)
@@ -208,12 +369,14 @@ class TestDet(LinalgTestCase, TestCase):
else:
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
- assert_almost_equal(d, multiply.reduce(ev))
- assert_almost_equal(s * np.exp(ld), multiply.reduce(ev))
- if s != 0:
- assert_almost_equal(np.abs(s), 1)
- else:
- assert_equal(ld, -inf)
+ assert_almost_equal(d, multiply.reduce(ev, axis=-1))
+ assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
+
+ s = np.atleast_1d(s)
+ ld = np.atleast_1d(ld)
+ m = (s != 0)
+ assert_almost_equal(np.abs(s[m]), 1)
+ assert_equal(ld[~m], -inf)
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
@@ -228,6 +391,13 @@ class TestDet(LinalgTestCase, TestCase):
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
+ def test_types(self):
+ def check(dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+            assert_equal(np.linalg.det(x).dtype, dtype)
+ for dtype in [single, double, csingle, cdouble]:
+ yield check, dtype
+
class TestLstsq(LinalgTestCase, LinalgNonsquareTestCase, TestCase):
def do(self, a, b):
@@ -267,10 +437,10 @@ class TestMatrixPower(object):
large[0,:] = t
def test_large_power(self):
- assert_equal(matrix_power(self.R90,2L**100+2**10+2**5+1),self.R90)
+ assert_equal(matrix_power(self.R90,2**100+2**10+2**5+1),self.R90)
def test_large_power_trailing_zero(self):
- assert_equal(matrix_power(self.R90,2L**100+2**10+2**5),identity(2))
+ assert_equal(matrix_power(self.R90,2**100+2**10+2**5),identity(2))
def testip_zero(self):
def tz(M):
@@ -318,86 +488,112 @@ class TestBoolPower(TestCase):
class HermitianTestCase(object):
def test_single(self):
a = array([[1.,2.], [2.,1.]], dtype=single)
- self.do(a)
+ self.do(a, None)
def test_double(self):
a = array([[1.,2.], [2.,1.]], dtype=double)
- self.do(a)
+ self.do(a, None)
def test_csingle(self):
a = array([[1.,2+3j], [2-3j,1]], dtype=csingle)
- self.do(a)
+ self.do(a, None)
def test_cdouble(self):
a = array([[1.,2+3j], [2-3j,1]], dtype=cdouble)
- self.do(a)
+ self.do(a, None)
def test_empty(self):
a = atleast_2d(array([], dtype = double))
- assert_raises(linalg.LinAlgError, self.do, a)
+ assert_raises(linalg.LinAlgError, self.do, a, None)
def test_nonarray(self):
a = [[1,2], [2,1]]
- self.do(a)
+ self.do(a, None)
def test_matrix_b_only(self):
"""Check that matrix type is preserved."""
a = array([[1.,2.], [2.,1.]])
- self.do(a)
+ self.do(a, None)
def test_matrix_a_and_b(self):
"""Check that matrix type is preserved."""
a = matrix([[1.,2.], [2.,1.]])
- self.do(a)
+ self.do(a, None)
+
+HermitianGeneralizedTestCase = _generalized_testcase(
+ 'HermitianGeneralizedTestCase', HermitianTestCase)
-class TestEigvalsh(HermitianTestCase, TestCase):
- def do(self, a):
+class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase, TestCase):
+ def do(self, a, b):
# note that eigenvalue arrays must be sorted since
# their order isn't guaranteed.
ev = linalg.eigvalsh(a)
evalues, evectors = linalg.eig(a)
- ev.sort()
- evalues.sort()
+ ev.sort(axis=-1)
+ evalues.sort(axis=-1)
assert_almost_equal(ev, evalues)
+ def test_types(self):
+ def check(dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+            assert_equal(np.linalg.eigvalsh(x).dtype, get_real_dtype(dtype))
+ for dtype in [single, double, csingle, cdouble]:
+ yield check, dtype
-class TestEigh(HermitianTestCase, TestCase):
- def do(self, a):
+
+class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase, TestCase):
+ def do(self, a, b):
# note that eigenvalue arrays must be sorted since
# their order isn't guaranteed.
ev, evc = linalg.eigh(a)
evalues, evectors = linalg.eig(a)
- ev.sort()
- evalues.sort()
+ ev.sort(axis=-1)
+ evalues.sort(axis=-1)
assert_almost_equal(ev, evalues)
+ def test_types(self):
+ def check(dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+            w, v = np.linalg.eigh(x)
+            assert_equal(w.dtype, get_real_dtype(dtype))
+            assert_equal(v.dtype, dtype)
+ for dtype in [single, double, csingle, cdouble]:
+ yield check, dtype
+
class _TestNorm(TestCase):
+
dt = None
dec = None
+
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
def test_vector(self):
- a = [1.0,2.0,3.0,4.0]
- b = [-1.0,-2.0,-3.0,-4.0]
- c = [-1.0, 2.0,-3.0, 4.0]
+ a = [1, 2, 3, 4]
+ b = [-1, -2, -3, -4]
+ c = [-1, 2, -3, 4]
def _test(v):
- np.testing.assert_almost_equal(norm(v), 30**0.5, decimal=self.dec)
- np.testing.assert_almost_equal(norm(v,inf), 4.0, decimal=self.dec)
- np.testing.assert_almost_equal(norm(v,-inf), 1.0, decimal=self.dec)
- np.testing.assert_almost_equal(norm(v,1), 10.0, decimal=self.dec)
- np.testing.assert_almost_equal(norm(v,-1), 12.0/25,
- decimal=self.dec)
- np.testing.assert_almost_equal(norm(v,2), 30**0.5,
- decimal=self.dec)
- np.testing.assert_almost_equal(norm(v,-2), ((205./144)**-0.5),
- decimal=self.dec)
- np.testing.assert_almost_equal(norm(v,0), 4, decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v), 30**0.5,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, inf), 4.0,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, -inf), 1.0,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, 1), 10.0,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, -1), 12.0/25,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, 2), 30**0.5,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, -2), ((205./144)**-0.5),
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, 0), 4,
+ decimal=self.dec)
for v in (a, b, c,):
_test(v)
@@ -407,25 +603,82 @@ class _TestNorm(TestCase):
_test(v)
def test_matrix(self):
- A = matrix([[1.,3.],[5.,7.]], dtype=self.dt)
- A = matrix([[1.,3.],[5.,7.]], dtype=self.dt)
+ A = matrix([[1, 3], [5, 7]], dtype=self.dt)
assert_almost_equal(norm(A), 84**0.5)
- assert_almost_equal(norm(A,'fro'), 84**0.5)
- assert_almost_equal(norm(A,inf), 12.0)
- assert_almost_equal(norm(A,-inf), 4.0)
- assert_almost_equal(norm(A,1), 10.0)
- assert_almost_equal(norm(A,-1), 6.0)
- assert_almost_equal(norm(A,2), 9.1231056256176615)
- assert_almost_equal(norm(A,-2), 0.87689437438234041)
+ assert_almost_equal(norm(A, 'fro'), 84**0.5)
+ assert_almost_equal(norm(A, inf), 12.0)
+ assert_almost_equal(norm(A, -inf), 4.0)
+ assert_almost_equal(norm(A, 1), 10.0)
+ assert_almost_equal(norm(A, -1), 6.0)
+ assert_almost_equal(norm(A, 2), 9.1231056256176615)
+ assert_almost_equal(norm(A, -2), 0.87689437438234041)
self.assertRaises(ValueError, norm, A, 'nofro')
self.assertRaises(ValueError, norm, A, -3)
self.assertRaises(ValueError, norm, A, 0)
+ def test_axis(self):
+ # Vector norms.
+ # Compare the use of `axis` with computing the norm of each row
+ # or column separately.
+ A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
+ for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
+ expected0 = [norm(A[:,k], ord=order) for k in range(A.shape[1])]
+ assert_almost_equal(norm(A, ord=order, axis=0), expected0)
+ expected1 = [norm(A[k,:], ord=order) for k in range(A.shape[0])]
+ assert_almost_equal(norm(A, ord=order, axis=1), expected1)
+
+ # Matrix norms.
+ B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
+
+ for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
+ assert_almost_equal(norm(A, ord=order), norm(A, ord=order,
+ axis=(0, 1)))
+
+ n = norm(B, ord=order, axis=(1, 2))
+ expected = [norm(B[k], ord=order) for k in range(B.shape[0])]
+ assert_almost_equal(n, expected)
+
+ n = norm(B, ord=order, axis=(2, 1))
+ expected = [norm(B[k].T, ord=order) for k in range(B.shape[0])]
+ assert_almost_equal(n, expected)
+
+ n = norm(B, ord=order, axis=(0, 2))
+ expected = [norm(B[:,k,:], ord=order) for k in range(B.shape[1])]
+ assert_almost_equal(n, expected)
+
+ n = norm(B, ord=order, axis=(0, 1))
+ expected = [norm(B[:,:,k], ord=order) for k in range(B.shape[2])]
+ assert_almost_equal(n, expected)
+
+ def test_bad_args(self):
+ # Check that bad arguments raise the appropriate exceptions.
+
+ A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
+ B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
+
+ # Using `axis=<integer>` or passing in a 1-D array implies vector
+ # norms are being computed, so also using `ord='fro'` raises a
+ # ValueError.
+ self.assertRaises(ValueError, norm, A, 'fro', 0)
+ self.assertRaises(ValueError, norm, [3, 4], 'fro', None)
+
+ # Similarly, norm should raise an exception when ord is any finite
+ # number other than 1, 2, -1 or -2 when computing matrix norms.
+ for order in [0, 3]:
+ self.assertRaises(ValueError, norm, A, order, None)
+ self.assertRaises(ValueError, norm, A, order, (0, 1))
+ self.assertRaises(ValueError, norm, B, order, (1, 2))
+
+ # Invalid axis
+ self.assertRaises(ValueError, norm, B, None, 3)
+ self.assertRaises(ValueError, norm, B, None, (2, 3))
+ self.assertRaises(ValueError, norm, B, None, (0, 1, 2))
+
class TestNormDouble(_TestNorm):
dt = np.double
- dec= 12
+ dec = 12
class TestNormSingle(_TestNorm):
@@ -433,6 +686,11 @@ class TestNormSingle(_TestNorm):
dec = 6
+class TestNormInt64(_TestNorm):
+ dt = np.int64
+ dec = 12
+
+
class TestMatrixRank(object):
def test_matrix_rank(self):
# Full rank matrix
@@ -467,11 +725,115 @@ def test_reduced_rank():
class TestQR(TestCase):
+
+ def check_qr(self, a):
+ # This test expects the argument `a` to be an ndarray or
+ # a subclass of an ndarray of inexact type.
+ a_type = type(a)
+ a_dtype = a.dtype
+ m, n = a.shape
+ k = min(m, n)
+
+ # mode == 'complete'
+ q, r = linalg.qr(a, mode='complete')
+ assert_(q.dtype == a_dtype)
+ assert_(r.dtype == a_dtype)
+ assert_(isinstance(q, a_type))
+ assert_(isinstance(r, a_type))
+ assert_(q.shape == (m, m))
+ assert_(r.shape == (m, n))
+ assert_almost_equal(dot(q, r), a)
+ assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
+ assert_almost_equal(np.triu(r), r)
+
+
+ # mode == 'reduced'
+ q1, r1 = linalg.qr(a, mode='reduced')
+ assert_(q1.dtype == a_dtype)
+ assert_(r1.dtype == a_dtype)
+ assert_(isinstance(q1, a_type))
+ assert_(isinstance(r1, a_type))
+ assert_(q1.shape == (m, k))
+ assert_(r1.shape == (k, n))
+ assert_almost_equal(dot(q1, r1), a)
+ assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
+ assert_almost_equal(np.triu(r1), r1)
+
+ # mode == 'r'
+ r2 = linalg.qr(a, mode='r')
+ assert_(r2.dtype == a_dtype)
+ assert_(isinstance(r2, a_type))
+ assert_almost_equal(r2, r1)
+
def test_qr_empty(self):
a = np.zeros((0,2))
self.assertRaises(linalg.LinAlgError, linalg.qr, a)
+ def test_mode_raw(self):
+ a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
+ b = a.astype(np.single)
+
+ # m > n
+ h1, tau1 = (
+ array([[-5.91607978, 0.43377175, 0.72295291],
+ [-7.43735744, 0.82807867, 0.89262383]]),
+ array([ 1.16903085, 1.113104 ])
+ )
+        # m < n
+ h2, tau2 = (
+ array([[-2.23606798, 0.61803399],
+ [-4.91934955, -0.89442719],
+ [-7.60263112, -1.78885438]]),
+ array([ 1.4472136, 0. ])
+ )
+
+ # Test double
+ h, tau = linalg.qr(a, mode='raw')
+ assert_(h.dtype == np.double)
+ assert_(tau.dtype == np.double)
+ old_assert_almost_equal(h, h1, decimal=8)
+ old_assert_almost_equal(tau, tau1, decimal=8)
+
+ h, tau = linalg.qr(a.T, mode='raw')
+ assert_(h.dtype == np.double)
+ assert_(tau.dtype == np.double)
+ old_assert_almost_equal(h, h2, decimal=8)
+ old_assert_almost_equal(tau, tau2, decimal=8)
+
+ # Test single
+ h, tau = linalg.qr(b, mode='raw')
+ assert_(h.dtype == np.double)
+ assert_(tau.dtype == np.double)
+ old_assert_almost_equal(h, h1, decimal=8)
+ old_assert_almost_equal(tau, tau1, decimal=8)
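+        # ('raw' is expected to return the LAPACK-style geqrf factorization:
+        # h holds the Householder reflectors together with r, and tau holds
+        # the reflector scaling factors; h comes back transposed, which is
+        # why h1 and h2 above have shape (n, m) rather than (m, n).)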
+
+
+ def test_mode_all_but_economic(self):
+ a = array([[1, 2], [3, 4]])
+ b = array([[1, 2], [3, 4], [5, 6]])
+ for dt in "fd":
+ m1 = a.astype(dt)
+ m2 = b.astype(dt)
+ self.check_qr(m1)
+ self.check_qr(m2)
+ self.check_qr(m2.T)
+ self.check_qr(matrix(m1))
+ for dt in "fd":
+ m1 = 1 + 1j * a.astype(dt)
+ m2 = 1 + 1j * b.astype(dt)
+ self.check_qr(m1)
+ self.check_qr(m2)
+ self.check_qr(m2.T)
+ self.check_qr(matrix(m1))
+
+
def test_byteorder_check():
# Byte order check should pass for native order
if sys.byteorder == 'little':
@@ -493,5 +855,77 @@ def test_byteorder_check():
assert_array_equal(res, routine(sw_arr))
+def test_generalized_raise_multiloop():
+ # It should raise an error even if the error doesn't occur in the
+ # last iteration of the ufunc inner loop
+
+ invertible = np.array([[1, 2], [3, 4]])
+ non_invertible = np.array([[1, 1], [1, 1]])
+
+ x = np.zeros([4, 4, 2, 2])[1::2]
+ x[...] = invertible
+ x[0,0] = non_invertible
+
+ assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)
+
+def _is_xerbla_safe():
+ """
+ Check that running the xerbla test is safe --- if python_xerbla
+ is not successfully linked in, the standard xerbla routine is called,
+ which aborts the process.
+
+ """
+
+ try:
+ pid = os.fork()
+ except (OSError, AttributeError):
+ # fork failed, or not running on POSIX
+ return False
+
+ if pid == 0:
+ # child; close i/o file handles
+ os.close(1)
+ os.close(0)
+ # avoid producing core files
+ import resource
+ resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
+ # these calls may abort
+ try:
+ a = np.array([[1]])
+ np.linalg.lapack_lite.dgetrf(
+ 1, 1, a.astype(np.double),
+ 0, # <- invalid value
+ a.astype(np.intc), 0)
+ except:
+ pass
+ try:
+ np.linalg.lapack_lite.xerbla()
+ except:
+ pass
+ os._exit(111)
+ else:
+ # parent
+ pid, status = os.wait()
+ if os.WEXITSTATUS(status) == 111 and not os.WIFSIGNALED(status):
+ return True
+ return False
+
+@dec.skipif(not _is_xerbla_safe(), "python_xerbla not found")
+def test_xerbla():
+ # Test that xerbla works (with GIL)
+ a = np.array([[1]])
+ try:
+ np.linalg.lapack_lite.dgetrf(
+ 1, 1, a.astype(np.double),
+ 0, # <- invalid value
+ a.astype(np.intc), 0)
+ except ValueError as e:
+ assert_("DGETRF parameter number 4" in str(e))
+ else:
+ assert_(False)
+
+ # Test that xerbla works (without GIL)
+ assert_raises(ValueError, np.linalg.lapack_lite.xerbla)
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py
index b3188f99c..76fe4be10 100644
--- a/numpy/linalg/tests/test_regression.py
+++ b/numpy/linalg/tests/test_regression.py
@@ -1,5 +1,7 @@
""" Test functions for linalg module
"""
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
import numpy as np
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
new file mode 100644
index 000000000..c5303a077
--- /dev/null
+++ b/numpy/linalg/umath_linalg.c.src
@@ -0,0 +1,4346 @@
+/* -*- c -*- */
+
+/*
+ *****************************************************************************
+ ** INCLUDES **
+ *****************************************************************************
+ */
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "Python.h"
+#include "numpy/arrayobject.h"
+#include "numpy/ufuncobject.h"
+
+#include "npy_pycompat.h"
+
+#include "npy_config.h"
+
+#include <stddef.h>
+#include <stdio.h>
+#include <assert.h>
+#include <math.h>
+
+
+static const char* umath_linalg_version_string = "0.1.4";
+
+/*
+ ****************************************************************************
+ * Debugging support *
+ ****************************************************************************
+ */
+#define TRACE_TXT(...) do { fprintf (stderr, __VA_ARGS__); } while (0)
+#define STACK_TRACE do {} while (0)
+#define TRACE\
+ do { \
+ fprintf (stderr, \
+ "%s:%d:%s\n", \
+ __FILE__, \
+ __LINE__, \
+ __FUNCTION__); \
+ STACK_TRACE; \
+ } while (0)
+
+#if 0
+#include <execinfo.h>
+void
+dbg_stack_trace()
+{
+ void *trace[32];
+ size_t size;
+
+ size = backtrace(trace, sizeof(trace)/sizeof(trace[0]));
+ backtrace_symbols_fd(trace, size, 1);
+}
+
+#undef STACK_TRACE
+#define STACK_TRACE do { dbg_stack_trace(); } while (0)
+#endif
+
+/*
+ *****************************************************************************
+ * BLAS/LAPACK calling macros *
+ *****************************************************************************
+ */
+
+#ifdef NO_APPEND_FORTRAN
+# define FNAME(x) x
+#else
+# define FNAME(x) x##_
+#endif
+
+typedef struct { float r, i; } f2c_complex;
+typedef struct { double r, i; } f2c_doublecomplex;
+/* typedef long int (*L_fp)(); */
+
+extern int
+FNAME(sgeev)(char *jobvl, char *jobvr, int *n,
+ float a[], int *lda, float wr[], float wi[],
+ float vl[], int *ldvl, float vr[], int *ldvr,
+ float work[], int lwork[],
+ int *info);
+extern int
+FNAME(dgeev)(char *jobvl, char *jobvr, int *n,
+ double a[], int *lda, double wr[], double wi[],
+ double vl[], int *ldvl, double vr[], int *ldvr,
+ double work[], int lwork[],
+ int *info);
+extern int
+FNAME(cgeev)(char *jobvl, char *jobvr, int *n,
+ f2c_doublecomplex a[], int *lda,
+ f2c_doublecomplex w[],
+ f2c_doublecomplex vl[], int *ldvl,
+ f2c_doublecomplex vr[], int *ldvr,
+ f2c_doublecomplex work[], int *lwork,
+ double rwork[],
+ int *info);
+extern int
+FNAME(zgeev)(char *jobvl, char *jobvr, int *n,
+ f2c_doublecomplex a[], int *lda,
+ f2c_doublecomplex w[],
+ f2c_doublecomplex vl[], int *ldvl,
+ f2c_doublecomplex vr[], int *ldvr,
+ f2c_doublecomplex work[], int *lwork,
+ double rwork[],
+ int *info);
+
+extern int
+FNAME(ssyevd)(char *jobz, char *uplo, int *n,
+ float a[], int *lda, float w[], float work[],
+ int *lwork, int iwork[], int *liwork,
+ int *info);
+extern int
+FNAME(dsyevd)(char *jobz, char *uplo, int *n,
+ double a[], int *lda, double w[], double work[],
+ int *lwork, int iwork[], int *liwork,
+ int *info);
+extern int
+FNAME(cheevd)(char *jobz, char *uplo, int *n,
+ f2c_complex a[], int *lda,
+ float w[], f2c_complex work[],
+ int *lwork, float rwork[], int *lrwork, int iwork[],
+ int *liwork,
+ int *info);
+extern int
+FNAME(zheevd)(char *jobz, char *uplo, int *n,
+ f2c_doublecomplex a[], int *lda,
+ double w[], f2c_doublecomplex work[],
+ int *lwork, double rwork[], int *lrwork, int iwork[],
+ int *liwork,
+ int *info);
+
+extern int
+FNAME(dgelsd)(int *m, int *n, int *nrhs,
+ double a[], int *lda, double b[], int *ldb,
+ double s[], double *rcond, int *rank,
+ double work[], int *lwork, int iwork[],
+ int *info);
+extern int
+FNAME(zgelsd)(int *m, int *n, int *nrhs,
+ f2c_doublecomplex a[], int *lda,
+ f2c_doublecomplex b[], int *ldb,
+ double s[], double *rcond, int *rank,
+ f2c_doublecomplex work[], int *lwork,
+ double rwork[], int iwork[],
+ int *info);
+
+extern int
+FNAME(sgesv)(int *n, int *nrhs,
+ float a[], int *lda,
+ int ipiv[],
+ float b[], int *ldb,
+ int *info);
+extern int
+FNAME(dgesv)(int *n, int *nrhs,
+ double a[], int *lda,
+ int ipiv[],
+ double b[], int *ldb,
+ int *info);
+extern int
+FNAME(cgesv)(int *n, int *nrhs,
+ f2c_complex a[], int *lda,
+ int ipiv[],
+ f2c_complex b[], int *ldb,
+ int *info);
+extern int
+FNAME(zgesv)(int *n, int *nrhs,
+ f2c_doublecomplex a[], int *lda,
+ int ipiv[],
+ f2c_doublecomplex b[], int *ldb,
+ int *info);
+
+extern int
+FNAME(sgetrf)(int *m, int *n,
+ float a[], int *lda,
+ int ipiv[],
+ int *info);
+extern int
+FNAME(dgetrf)(int *m, int *n,
+ double a[], int *lda,
+ int ipiv[],
+ int *info);
+extern int
+FNAME(cgetrf)(int *m, int *n,
+ f2c_complex a[], int *lda,
+ int ipiv[],
+ int *info);
+extern int
+FNAME(zgetrf)(int *m, int *n,
+ f2c_doublecomplex a[], int *lda,
+ int ipiv[],
+ int *info);
+
+extern int
+FNAME(spotrf)(char *uplo, int *n,
+ float a[], int *lda,
+ int *info);
+extern int
+FNAME(dpotrf)(char *uplo, int *n,
+ double a[], int *lda,
+ int *info);
+extern int
+FNAME(cpotrf)(char *uplo, int *n,
+ f2c_complex a[], int *lda,
+ int *info);
+extern int
+FNAME(zpotrf)(char *uplo, int *n,
+ f2c_doublecomplex a[], int *lda,
+ int *info);
+
+extern int
+FNAME(sgesdd)(char *jobz, int *m, int *n,
+ float a[], int *lda, float s[], float u[],
+ int *ldu, float vt[], int *ldvt, float work[],
+ int *lwork, int iwork[], int *info);
+extern int
+FNAME(dgesdd)(char *jobz, int *m, int *n,
+ double a[], int *lda, double s[], double u[],
+ int *ldu, double vt[], int *ldvt, double work[],
+ int *lwork, int iwork[], int *info);
+extern int
+FNAME(cgesdd)(char *jobz, int *m, int *n,
+ f2c_complex a[], int *lda,
+ float s[], f2c_complex u[], int *ldu,
+ f2c_complex vt[], int *ldvt,
+ f2c_complex work[], int *lwork,
+ float rwork[], int iwork[], int *info);
+extern int
+FNAME(zgesdd)(char *jobz, int *m, int *n,
+ f2c_doublecomplex a[], int *lda,
+ double s[], f2c_doublecomplex u[], int *ldu,
+ f2c_doublecomplex vt[], int *ldvt,
+ f2c_doublecomplex work[], int *lwork,
+ double rwork[], int iwork[], int *info);
+
+extern int
+FNAME(spotrs)(char *uplo, int *n, int *nrhs,
+ float a[], int *lda,
+ float b[], int *ldb,
+ int *info);
+extern int
+FNAME(dpotrs)(char *uplo, int *n, int *nrhs,
+ double a[], int *lda,
+ double b[], int *ldb,
+ int *info);
+extern int
+FNAME(cpotrs)(char *uplo, int *n, int *nrhs,
+ f2c_complex a[], int *lda,
+ f2c_complex b[], int *ldb,
+ int *info);
+extern int
+FNAME(zpotrs)(char *uplo, int *n, int *nrhs,
+ f2c_doublecomplex a[], int *lda,
+ f2c_doublecomplex b[], int *ldb,
+ int *info);
+
+extern int
+FNAME(spotri)(char *uplo, int *n,
+ float a[], int *lda,
+ int *info);
+extern int
+FNAME(dpotri)(char *uplo, int *n,
+ double a[], int *lda,
+ int *info);
+extern int
+FNAME(cpotri)(char *uplo, int *n,
+ f2c_complex a[], int *lda,
+ int *info);
+extern int
+FNAME(zpotri)(char *uplo, int *n,
+ f2c_doublecomplex a[], int *lda,
+ int *info);
+
+extern int
+FNAME(scopy)(int *n,
+ float *sx, int *incx,
+ float *sy, int *incy);
+extern int
+FNAME(dcopy)(int *n,
+ double *sx, int *incx,
+ double *sy, int *incy);
+extern int
+FNAME(ccopy)(int *n,
+ f2c_complex *sx, int *incx,
+ f2c_complex *sy, int *incy);
+extern int
+FNAME(zcopy)(int *n,
+ f2c_doublecomplex *sx, int *incx,
+ f2c_doublecomplex *sy, int *incy);
+
+extern float
+FNAME(sdot)(int *n,
+ float *sx, int *incx,
+ float *sy, int *incy);
+extern double
+FNAME(ddot)(int *n,
+ double *sx, int *incx,
+ double *sy, int *incy);
+extern f2c_complex
+FNAME(cdotu)(int *n,
+ f2c_complex *sx, int *incx,
+ f2c_complex *sy, int *incy);
+extern f2c_doublecomplex
+FNAME(zdotu)(int *n,
+ f2c_doublecomplex *sx, int *incx,
+ f2c_doublecomplex *sy, int *incy);
+extern f2c_complex
+FNAME(cdotc)(int *n,
+ f2c_complex *sx, int *incx,
+ f2c_complex *sy, int *incy);
+extern f2c_doublecomplex
+FNAME(zdotc)(int *n,
+ f2c_doublecomplex *sx, int *incx,
+ f2c_doublecomplex *sy, int *incy);
+
+extern int
+FNAME(sgemm)(char *transa, char *transb,
+ int *m, int *n, int *k,
+ float *alpha,
+ float *a, int *lda,
+ float *b, int *ldb,
+ float *beta,
+ float *c, int *ldc);
+extern int
+FNAME(dgemm)(char *transa, char *transb,
+ int *m, int *n, int *k,
+ double *alpha,
+ double *a, int *lda,
+ double *b, int *ldb,
+ double *beta,
+ double *c, int *ldc);
+extern int
+FNAME(cgemm)(char *transa, char *transb,
+ int *m, int *n, int *k,
+ f2c_complex *alpha,
+ f2c_complex *a, int *lda,
+ f2c_complex *b, int *ldb,
+ f2c_complex *beta,
+ f2c_complex *c, int *ldc);
+extern int
+FNAME(zgemm)(char *transa, char *transb,
+ int *m, int *n, int *k,
+ f2c_doublecomplex *alpha,
+ f2c_doublecomplex *a, int *lda,
+ f2c_doublecomplex *b, int *ldb,
+ f2c_doublecomplex *beta,
+ f2c_doublecomplex *c, int *ldc);
+
+
+#define LAPACK_T(FUNC) \
+ TRACE_TXT("Calling LAPACK ( " # FUNC " )\n"); \
+ FNAME(FUNC)
+
+#define BLAS(FUNC) \
+ FNAME(FUNC)
+
+#define LAPACK(FUNC) \
+ FNAME(FUNC)
+
+typedef int fortran_int;
+typedef float fortran_real;
+typedef double fortran_doublereal;
+typedef f2c_complex fortran_complex;
+typedef f2c_doublecomplex fortran_doublecomplex;
+
+
+/*
+ *****************************************************************************
+ ** Some handy functions **
+ *****************************************************************************
+ */
+
+static inline void *
+offset_ptr(void* ptr, ptrdiff_t offset)
+{
+ return (void*)((npy_uint8*)ptr + offset);
+}
+
+static inline int
+get_fp_invalid_and_clear(void)
+{
+ int status;
+ status = PyUFunc_getfperr();
+ return !!(status & UFUNC_FPE_INVALID);
+}
+
+static inline void
+set_fp_invalid_or_clear(int error_occurred)
+{
+ if (error_occurred) {
+ npy_set_floatstatus_invalid();
+ }
+ else {
+ PyUFunc_getfperr();
+ }
+}
+
+/*
+ *****************************************************************************
+ ** Some handy constants **
+ *****************************************************************************
+ */
+
+#define UMATH_LINALG_MODULE_NAME "_umath_linalg"
+
+typedef union {
+ fortran_complex f;
+ npy_cfloat npy;
+ float array[2];
+} COMPLEX_t;
+
+typedef union {
+ fortran_doublecomplex f;
+ npy_cdouble npy;
+ double array[2];
+} DOUBLECOMPLEX_t;
+
+static float s_one;
+static float s_zero;
+static float s_minus_one;
+static float s_ninf;
+static float s_nan;
+static double d_one;
+static double d_zero;
+static double d_minus_one;
+static double d_ninf;
+static double d_nan;
+static COMPLEX_t c_one;
+static COMPLEX_t c_zero;
+static COMPLEX_t c_minus_one;
+static COMPLEX_t c_ninf;
+static COMPLEX_t c_nan;
+static DOUBLECOMPLEX_t z_one;
+static DOUBLECOMPLEX_t z_zero;
+static DOUBLECOMPLEX_t z_minus_one;
+static DOUBLECOMPLEX_t z_ninf;
+static DOUBLECOMPLEX_t z_nan;
+
+static void init_constants(void)
+{
+ /*
+ this is needed as NPY_INFINITY and NPY_NAN macros
+ can't be used as initializers. I prefer to just set
+ all the constants the same way.
+ */
+ s_one = 1.0f;
+ s_zero = 0.0f;
+ s_minus_one = -1.0f;
+ s_ninf = -NPY_INFINITYF;
+ s_nan = NPY_NANF;
+
+ d_one = 1.0;
+ d_zero = 0.0;
+ d_minus_one = -1.0;
+ d_ninf = -NPY_INFINITY;
+ d_nan = NPY_NAN;
+
+ c_one.array[0] = 1.0f;
+ c_one.array[1] = 0.0f;
+ c_zero.array[0] = 0.0f;
+ c_zero.array[1] = 0.0f;
+ c_minus_one.array[0] = -1.0f;
+ c_minus_one.array[1] = 0.0f;
+ c_ninf.array[0] = -NPY_INFINITYF;
+ c_ninf.array[1] = 0.0f;
+ c_nan.array[0] = NPY_NANF;
+ c_nan.array[1] = NPY_NANF;
+
+ z_one.array[0] = 1.0;
+ z_one.array[1] = 0.0;
+ z_zero.array[0] = 0.0;
+ z_zero.array[1] = 0.0;
+ z_minus_one.array[0] = -1.0;
+ z_minus_one.array[1] = 0.0;
+ z_ninf.array[0] = -NPY_INFINITY;
+ z_ninf.array[1] = 0.0;
+ z_nan.array[0] = NPY_NAN;
+ z_nan.array[1] = NPY_NAN;
+}
+
+/*
+ *****************************************************************************
+ ** Structs used for data rearrangement **
+ *****************************************************************************
+ */
+
+
+/* This struct describes how to linearize a matrix into a local buffer so
+   that it can be passed to BLAS/LAPACK functions. The matrix is considered
+   row-major.
+
+   rows: number of rows of the matrix
+   columns: number of columns of the matrix
+   row_strides: bytes between consecutive rows of the (strided) array
+   column_strides: bytes between consecutive columns of the (strided) array
+
+   Note the strides are kept in bytes (as in the ndarray) and are converted
+   to element counts, which is what BLAS expects, at the point of use (see
+   linearize_*_matrix below).
+ */
+typedef struct linearize_data_struct
+{
+ size_t rows;
+ size_t columns;
+ ptrdiff_t row_strides;
+ ptrdiff_t column_strides;
+} LINEARIZE_DATA_t;
+
+static inline void
+init_linearize_data(LINEARIZE_DATA_t *lin_data,
+ int rows,
+ int columns,
+ ptrdiff_t row_strides,
+ ptrdiff_t column_strides)
+{
+ lin_data->rows = rows;
+ lin_data->columns = columns;
+ lin_data->row_strides = row_strides;
+ lin_data->column_strides = column_strides;
+}
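+
+/* Typical use (sketch, variable names assumed): describe an n x n matrix
+ * whose element layout comes from the gufunc inner strides, then copy it
+ * into a contiguous work buffer before calling LAPACK:
+ *
+ *     LINEARIZE_DATA_t lin;
+ *     init_linearize_data(&lin, n, n, row_stride_bytes, col_stride_bytes);
+ *     linearize_DOUBLE_matrix(work_buffer, args[0], &lin);
+ */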
+
+static inline void
+dump_ufunc_object(PyUFuncObject* ufunc)
+{
+ TRACE_TXT("\n\n%s '%s' (%d input(s), %d output(s), %d specialization(s).\n",
+ ufunc->core_enabled? "generalized ufunc" : "scalar ufunc",
+ ufunc->name, ufunc->nin, ufunc->nout, ufunc->ntypes);
+ if (ufunc->core_enabled) {
+ int arg;
+ int dim;
+ TRACE_TXT("\t%s (%d dimension(s) detected).\n",
+ ufunc->core_signature, ufunc->core_num_dim_ix);
+
+ for (arg = 0; arg < ufunc->nargs; arg++){
+ int * arg_dim_ix = ufunc->core_dim_ixs + ufunc->core_offsets[arg];
+ TRACE_TXT("\t\targ %d (%s) has %d dimension(s): (",
+ arg, arg < ufunc->nin? "INPUT" : "OUTPUT",
+ ufunc->core_num_dims[arg]);
+ for (dim = 0; dim < ufunc->core_num_dims[arg]; dim ++) {
+ TRACE_TXT(" %d", arg_dim_ix[dim]);
+ }
+ TRACE_TXT(" )\n");
+ }
+ }
+}
+
+static inline void
+dump_linearize_data(const char* name, const LINEARIZE_DATA_t* params)
+{
+ TRACE_TXT("\n\t%s rows: %zd columns: %zd"\
+ "\n\t\trow_strides: %td column_strides: %td"\
+ "\n", name, params->rows, params->columns,
+ params->row_strides, params->column_strides);
+}
+
+
+static inline float
+FLOAT_add(float op1, float op2)
+{
+ return op1 + op2;
+}
+
+static inline double
+DOUBLE_add(double op1, double op2)
+{
+ return op1 + op2;
+}
+
+static inline COMPLEX_t
+CFLOAT_add(COMPLEX_t op1, COMPLEX_t op2)
+{
+ COMPLEX_t result;
+ result.array[0] = op1.array[0] + op2.array[0];
+ result.array[1] = op1.array[1] + op2.array[1];
+
+ return result;
+}
+
+static inline DOUBLECOMPLEX_t
+CDOUBLE_add(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
+{
+ DOUBLECOMPLEX_t result;
+ result.array[0] = op1.array[0] + op2.array[0];
+ result.array[1] = op1.array[1] + op2.array[1];
+
+ return result;
+}
+
+static inline float
+FLOAT_mul(float op1, float op2)
+{
+ return op1*op2;
+}
+
+static inline double
+DOUBLE_mul(double op1, double op2)
+{
+ return op1*op2;
+}
+
+
+static inline COMPLEX_t
+CFLOAT_mul(COMPLEX_t op1, COMPLEX_t op2)
+{
+ COMPLEX_t result;
+ result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1];
+ result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1];
+
+ return result;
+}
+
+static inline DOUBLECOMPLEX_t
+CDOUBLE_mul(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
+{
+ DOUBLECOMPLEX_t result;
+ result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1];
+ result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1];
+
+ return result;
+}
+
+static inline float
+FLOAT_mulc(float op1, float op2)
+{
+ return op1*op2;
+}
+
+static inline double
+DOUBLE_mulc(double op1, double op2)
+{
+ return op1*op2;
+}
+
+static inline COMPLEX_t
+CFLOAT_mulc(COMPLEX_t op1, COMPLEX_t op2)
+{
+ COMPLEX_t result;
+ result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1];
+ result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0];
+
+ return result;
+}
+
+static inline DOUBLECOMPLEX_t
+CDOUBLE_mulc(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2)
+{
+ DOUBLECOMPLEX_t result;
+ result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1];
+ result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0];
+
+ return result;
+}
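+
+/* Note: the *_mulc variants above compute conj(op1) * op2, matching the
+   cdotc/zdotc fallback paths further down; for the real types they reduce
+   to a plain product. */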
+
+static inline void
+print_FLOAT(npy_float s)
+{
+ TRACE_TXT(" %8.4f", s);
+}
+static inline void
+print_DOUBLE(npy_double d)
+{
+ TRACE_TXT(" %10.6f", d);
+}
+static inline void
+print_CFLOAT(npy_cfloat c)
+{
+ float* c_parts = (float*)&c;
+ TRACE_TXT("(%8.4f, %8.4fj)", c_parts[0], c_parts[1]);
+}
+static inline void
+print_CDOUBLE(npy_cdouble z)
+{
+ double* z_parts = (double*)&z;
+ TRACE_TXT("(%8.4f, %8.4fj)", z_parts[0], z_parts[1]);
+}
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #typ=npy_float,npy_double,npy_cfloat,npy_cdouble#
+ */
+static inline void
+dump_@TYPE@_matrix(const char* name,
+ size_t rows, size_t columns,
+ const @typ@* ptr)
+{
+ size_t i,j;
+
+ TRACE_TXT("\n%s %p (%zd, %zd)\n", name, ptr, rows, columns);
+ for (i=0; i<rows; i++)
+ {
+ TRACE_TXT("| ");
+ for (j=0; j<columns; j++)
+ {
+ print_@TYPE@(ptr[j*rows + i]);
+ TRACE_TXT(", ");
+ }
+ TRACE_TXT(" |\n");
+ }
+}
+/**end repeat**/
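+
+/* Note on templating: the "begin repeat"/"end repeat" blocks in this .c.src
+ * file are expanded by numpy's conv_template preprocessor, emitting one copy
+ * of the block per column of the @..@ substitution lists; e.g. the block
+ * above expands to dump_FLOAT_matrix, dump_DOUBLE_matrix, dump_CFLOAT_matrix
+ * and dump_CDOUBLE_matrix. */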
+
+
+/*
+ *****************************************************************************
+ ** Basics **
+ *****************************************************************************
+ */
+
+#define INIT_OUTER_LOOP_1 \
+ npy_intp dN = *dimensions++;\
+ npy_intp N_;\
+ npy_intp s0 = *steps++;
+
+#define INIT_OUTER_LOOP_2 \
+ INIT_OUTER_LOOP_1\
+ npy_intp s1 = *steps++;
+
+#define INIT_OUTER_LOOP_3 \
+ INIT_OUTER_LOOP_2\
+ npy_intp s2 = *steps++;
+
+#define INIT_OUTER_LOOP_4 \
+ INIT_OUTER_LOOP_3\
+ npy_intp s3 = *steps++;
+
+#define INIT_OUTER_LOOP_5 \
+ INIT_OUTER_LOOP_4\
+ npy_intp s4 = *steps++;
+
+#define INIT_OUTER_LOOP_6 \
+ INIT_OUTER_LOOP_5\
+ npy_intp s5 = *steps++;
+
+#define BEGIN_OUTER_LOOP_2 \
+ for (N_ = 0;\
+ N_ < dN;\
+ N_++, args[0] += s0,\
+ args[1] += s1) {
+
+#define BEGIN_OUTER_LOOP_3 \
+ for (N_ = 0;\
+ N_ < dN;\
+ N_++, args[0] += s0,\
+ args[1] += s1,\
+ args[2] += s2) {
+
+#define BEGIN_OUTER_LOOP_4 \
+ for (N_ = 0;\
+ N_ < dN;\
+ N_++, args[0] += s0,\
+ args[1] += s1,\
+ args[2] += s2,\
+ args[3] += s3) {
+
+#define BEGIN_OUTER_LOOP_5 \
+ for (N_ = 0;\
+ N_ < dN;\
+ N_++, args[0] += s0,\
+ args[1] += s1,\
+ args[2] += s2,\
+ args[3] += s3,\
+ args[4] += s4) {
+
+#define BEGIN_OUTER_LOOP_6 \
+ for (N_ = 0;\
+ N_ < dN;\
+ N_++, args[0] += s0,\
+ args[1] += s1,\
+ args[2] += s2,\
+ args[3] += s3,\
+ args[4] += s4,\
+ args[5] += s5) {
+
+#define END_OUTER_LOOP }
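+
+/* Sketch of how these macros compose a gufunc outer loop (see the inner1d
+ * loops below for concrete uses): INIT_OUTER_LOOP_3 consumes the loop count
+ * and one outer stride per argument from dimensions/steps, and
+ * BEGIN_OUTER_LOOP_3 advances args[0..2] by those strides each iteration:
+ *
+ *     INIT_OUTER_LOOP_3
+ *     BEGIN_OUTER_LOOP_3
+ *         ...process one core sample at args[0], args[1], args[2]...
+ *     END_OUTER_LOOP
+ */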
+
+static inline void
+update_pointers(npy_uint8** bases, ptrdiff_t* offsets, size_t count)
+{
+ size_t i;
+ for (i=0; i < count; ++i) {
+ bases[i] += offsets[i];
+ }
+}
+
+
+/* disable -Wmaybe-uninitialized, as some code below generates false
+   positives with this warning
+*/
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+
+/*
+ *****************************************************************************
+ ** HELPER FUNCS **
+ *****************************************************************************
+ */
+
+ /* rearranging of 2D matrices using blas */
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #typ=float,double,COMPLEX_t,DOUBLECOMPLEX_t#
+ #copy=scopy,dcopy,ccopy,zcopy#
+ #nan=s_nan, d_nan, c_nan, z_nan#
+ */
+static inline void *
+linearize_@TYPE@_matrix(void *dst_in,
+ void *src_in,
+ const LINEARIZE_DATA_t* data)
+{
+ @typ@ *src = (@typ@ *) src_in;
+ @typ@ *dst = (@typ@ *) dst_in;
+
+ if (dst) {
+ int i;
+ @typ@* rv = dst;
+ fortran_int columns = (fortran_int)data->columns;
+ fortran_int column_strides =
+ (fortran_int)(data->column_strides/sizeof(@typ@));
+ fortran_int one = 1;
+ for (i=0; i< data->rows; i++) {
+ FNAME(@copy@)(&columns,
+ (void*)src, &column_strides,
+ (void*)dst, &one);
+ src += data->row_strides/sizeof(@typ@);
+ dst += data->columns;
+ }
+ return rv;
+ } else {
+ return src;
+ }
+}
+
+static inline void *
+delinearize_@TYPE@_matrix(void *dst_in,
+ void *src_in,
+ const LINEARIZE_DATA_t* data)
+{
+ @typ@ *src = (@typ@ *) src_in;
+ @typ@ *dst = (@typ@ *) dst_in;
+
+ if (src) {
+ int i;
+ @typ@ *rv = src;
+ fortran_int columns = (fortran_int)data->columns;
+ fortran_int column_strides =
+ (fortran_int)(data->column_strides/sizeof(@typ@));
+ fortran_int one = 1;
+ for (i=0; i < data->rows; i++) {
+ FNAME(@copy@)(&columns,
+ (void*)src, &one,
+ (void*)dst, &column_strides);
+ src += data->columns;
+ dst += data->row_strides/sizeof(@typ@);
+ }
+
+ return rv;
+ } else {
+ return src;
+ }
+}
+
+static inline void
+nan_@TYPE@_matrix(void *dst_in, const LINEARIZE_DATA_t* data)
+{
+ @typ@ *dst = (@typ@ *) dst_in;
+
+ int i,j;
+ for (i=0; i < data->rows; i++) {
+ @typ@ *cp = dst;
+ ptrdiff_t cs = data->column_strides/sizeof(@typ@);
+ for (j=0; j< data->columns; ++j) {
+ *cp = @nan@;
+ cp += cs;
+ }
+ dst += data->row_strides/sizeof(@typ@);
+ }
+}
+
+static inline void
+make_symmetric_@TYPE@_matrix(char UPLO,
+ fortran_int n,
+ void *matrix)
+{
+ /*
+ note: optimized assuming that sequential write is better than
+ sequential read
+ */
+ fortran_int i;
+ fortran_int one = 1;
+
+ /* note: 'L' and 'U' are interpreted considering *fortran* order */
+ if ('L' == UPLO) {
+ @typ@ *dst = (@typ@ *) matrix;
+ @typ@ *src = (@typ@ *) matrix;
+ src += 1;
+ dst += n;
+ for (i = 1; i < n; ++i) {
+ FNAME(@copy@)(&i,
+ (void *)src, &n,
+ (void *)dst, &one);
+ src += 1;
+ dst += n;
+ }
+ } else { /* assume U */
+ @typ@ *dst = (@typ@ *) matrix;
+ @typ@ *src = (@typ@ *) matrix;
+ src += n;
+ dst += 1;
+ for (i = n - 1; i > 0; --i) {
+ FNAME(@copy@)(&i,
+ (void *)src, &n,
+ (void *)dst, &one);
+ src += n + 1;
+ dst += n + 1;
+ }
+ }
+}
+/**end repeat**/
+
+ /* identity square matrix generation */
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #typ=float,double,COMPLEX_t,DOUBLECOMPLEX_t#
+ #cblas_type=s,d,c,z#
+ */
+static inline void
+identity_@TYPE@_matrix(void *ptr, size_t n)
+{
+ size_t i;
+ @typ@ *matrix = (@typ@*) ptr;
+ /* in IEEE floating point, zeroes are represented as bitwise 0 */
+ memset(matrix, 0, n*n*sizeof(@typ@));
+
+ for (i = 0; i < n; ++i)
+ {
+ *matrix = @cblas_type@_one;
+ matrix += n+1;
+ }
+}
+/**end repeat**/
+
+ /* lower/upper triangular matrix using blas (in place) */
+/**begin repeat
+
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #typ=float,double,COMPLEX_t,DOUBLECOMPLEX_t#
+ #cblas_type=s,d,c,z#
+ */
+static inline void
+tril_@TYPE@_matrix(void *ptr, size_t n)
+{
+ size_t i,j;
+ @typ@ *matrix = (@typ@*)ptr;
+ matrix++;
+ for (i = n-1; i > 0; --i) {
+ for (j = 0; j < i; ++j) {
+ *matrix = @cblas_type@_zero;
+ }
+ matrix += n + 1;
+ }
+}
+
+static inline void
+triu_@TYPE@_matrix(void *ptr, size_t n)
+{
+ size_t i,j;
+ @typ@ *matrix = (@typ@*)ptr;
+ matrix += n;
+ for (i=1; i < n; ++i) {
+ for (j=0; j<i; ++j) {
+ matrix[j] = @cblas_type@_zero;
+ }
+ matrix += n;
+ }
+}
+/**end repeat**/
+
+/*
+ *****************************************************************************
+ ** UFUNC LOOPS **
+ *****************************************************************************
+ */
+
+/**begin repeat
+ #typ=npy_float,npy_double,COMPLEX_t,DOUBLECOMPLEX_t,COMPLEX_t,DOUBLECOMPLEX_t#
+ #ftyp=fortran_real,fortran_doublereal,fortran_complex,fortran_doublecomplex,fortran_complex,fortran_doublecomplex#
+ #add=FLOAT_add,DOUBLE_add,CFLOAT_add,CDOUBLE_add,CFLOAT_add,CDOUBLE_add#
+ #mul=FLOAT_mul,DOUBLE_mul,CFLOAT_mul,CDOUBLE_mul,CFLOAT_mulc,CDOUBLE_mulc#
+ #dot=sdot,ddot,cdotu,zdotu,cdotc,zdotc#
+ #zero=s_zero, d_zero,c_zero,z_zero,c_zero,z_zero#
+ */
+
+static inline void
+@dot@_inner1d(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+ const size_t sot = sizeof(@typ@);
+ int dim = (int)dimensions[0];
+ INIT_OUTER_LOOP_3
+ /*
+ * use blas if the input strides are multiples of the datatype size;
+ * this should be the common case
+ */
+ if ((0 == (steps[3] % sot)) &&
+ (0 == (steps[4] % sot))) {
+ /* use blas */
+
+ int is1 = (int)(steps[0]/sot), is2 = (int)(steps[1]/sot);
+ BEGIN_OUTER_LOOP_3
+ @ftyp@ * ip1 = (@ftyp@*)args[0], *ip2 = (@ftyp@*)args[1];
+ *(@ftyp@*)(args[2]) = BLAS(@dot@)(&dim, ip1, &is1, ip2, &is2);
+ END_OUTER_LOOP
+ } else {
+ /* use standard version */
+ npy_intp is1=steps[0], is2=steps[1];
+ BEGIN_OUTER_LOOP_3
+ int i;
+ char *ip1=args[0], *ip2=args[1], *op=args[2];
+ @typ@ sum = @zero@;
+ for (i = 0; i < dim; i++) {
+ @typ@ prod = @mul@(*(@typ@ *)ip1, *(@typ@*)ip2);
+ sum = @add@(sum, prod);
+ ip1 += is1;
+ ip2 += is2;
+ }
+ *(@typ@ *)op = sum;
+ END_OUTER_LOOP
+ }
+}
+
+/**end repeat**/
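+/*
+ * Reference semantics of the loops above (a sketch, not extra code):
+ * for each outer element n,
+ *
+ * out[n] = sum_i in1[n, i] * in2[n, i]
+ *
+ * where the conjugating variants (cdotc/zdotc) conjugate the first
+ * operand, following the BLAS dotc convention.
+ */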
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #dot=sdot,ddot,cdotu,zdotu#
+ #dotc=sdot,ddot,cdotc,zdotc#
+ */
+static void
+@TYPE@_inner1d(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @dot@_inner1d(args, dimensions, steps);
+}
+
+static void
+@TYPE@_dotc1d(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @dotc@_inner1d(args, dimensions, steps);
+}
+/**end repeat**/
+
+/* -------------------------------------------------------------------------- */
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #typ=npy_float,npy_double,COMPLEX_t,DOUBLECOMPLEX_t#
+ #add=FLOAT_add,DOUBLE_add,CFLOAT_add,CDOUBLE_add#
+ #mul=FLOAT_mul,DOUBLE_mul,CFLOAT_mul,CDOUBLE_mul#
+ #zero=s_zero, d_zero,c_zero,z_zero#
+*/
+
+/*
+ * This implements the function
+ * out[n] = sum_i { in1[n, i] * in2[n, i] * in3[n, i] }.
+ */
+
+static void
+@TYPE@_innerwt(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_4
+ npy_intp di = dimensions[0];
+ npy_intp i;
+ npy_intp is1=steps[0], is2=steps[1], is3=steps[2];
+ BEGIN_OUTER_LOOP_4
+ char *ip1=args[0], *ip2=args[1], *ip3=args[2], *op=args[3];
+ @typ@ sum = @zero@;
+ for (i = 0; i < di; i++) {
+ @typ@ prod = @mul@(@mul@(*(@typ@*)ip1, *(@typ@*)ip2),*(@typ@*)ip3);
+ sum = @add@(sum, prod);
+ ip1 += is1;
+ ip2 += is2;
+ ip3 += is3;
+ }
+ *(@typ@ *)op = sum;
+ END_OUTER_LOOP
+}
+
+/**end repeat**/
+
+
+/* -------------------------------------------------------------------------- */
+ /* Matrix Multiply */
+
+typedef struct gemm_params_struct
+{
+ void *A;
+ void *B;
+ void *C;
+ fortran_int LDA;
+ fortran_int LDB;
+ fortran_int LDC;
+ fortran_int M;
+ fortran_int K;
+ fortran_int N;
+ char TRANSA;
+ char TRANSB;
+
+ void *allocated_data;
+} GEMM_PARAMS_t;
+
+static inline void
+dump_gemm_params(const char* name, const GEMM_PARAMS_t* params)
+{
+ TRACE_TXT("\n%s\n"
+ "%14s: %18p\n" "%14s: %18p\n" "%14s: %18p\n"
+ "%14s: %18d\n" "%14s: %18d\n" "%14s: %18d\n"
+ "%14s: %18d\n" "%14s: %18d\n" "%14s: %18d\n"
+ "%14s: %15c'%c'\n" "%14s: %15c'%c'\n",
+ name,
+ "A", params->A, "B", params->B, "C", params->C,
+ "LDA", params->LDA, "LDB", params->LDB, "LDC", params->LDC,
+ "M", params->M, "K", params->K, "N", params->N,
+ "TRANSA", ' ', params->TRANSA, "TRANSB", ' ', params->TRANSB);
+}
+
+
+typedef struct _matrix_desc {
+ int need_buff;
+ fortran_int lead;
+ size_t size;
+ void *buff;
+} matrix_desc;
+
+static inline void
+matrix_desc_init(matrix_desc *mtxd,
+ npy_intp* steps,
+ size_t sot,
+ fortran_int rows,
+ fortran_int columns)
+/* initialize a matrix_desc based on the information in steps,
+ filling it with the values needed.
+ steps[0] contains the column step
+ steps[1] contains the row step
+*/
+{
+ /* if column step is not element step, a copy will be needed
+ to arrange the array in a way compatible with blas
+ */
+ mtxd->need_buff = steps[0] != sot;
+
+ if (!mtxd->need_buff) {
+ if (steps[1]) {
+ mtxd->lead = (fortran_int) steps[1] / sot;
+ } else {
+ /* step is 0: either there is only 1 column or
+ a stride trick is in play */
+ if (columns > 1) {
+ /* step tricks not supported in gemm... make a copy */
+ mtxd->need_buff = 1;
+ } else {
+ /* lead must be at least 1 */
+ mtxd->lead = rows;
+ }
+ }
+ }
+
+ /* if a copy is performed, the lead is the number of columns */
+ if (mtxd->need_buff) {
+ mtxd->lead = rows;
+ }
+
+ mtxd->size = rows*columns*sot*mtxd->need_buff;
+ mtxd->buff = NULL;
+}
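+/*
+ * A sketch of the cases handled above (for illustration only):
+ * - the column step equals the element size and the row step is
+ * non-zero: the operand is used in place, with the row step (in
+ * elements) as the leading dimension;
+ * - a row step of 0: with more than one column this is a
+ * stride-tricked operand that gemm cannot express, so a copy is
+ * made; with a single column the row count serves as a valid
+ * leading dimension;
+ * - whenever a copy is made, the leading dimension is the row count.
+ */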
+
+static inline npy_uint8 *
+matrix_desc_assign_buff(matrix_desc* mtxd, npy_uint8 *p)
+{
+ if (mtxd->need_buff) {
+ mtxd->buff = p;
+ return p + mtxd->size;
+ } else {
+ return p;
+ }
+}
+
+static inline int
+init_gemm_params(GEMM_PARAMS_t *params,
+ fortran_int m,
+ fortran_int k,
+ fortran_int n,
+ npy_intp* steps,
+ size_t sot)
+{
+ npy_uint8 *mem_buff = NULL;
+ matrix_desc a, b, c;
+ matrix_desc_init(&a, steps + 0, sot, m, k);
+ matrix_desc_init(&b, steps + 2, sot, k, n);
+ matrix_desc_init(&c, steps + 4, sot, m, n);
+
+ if (a.size + b.size + c.size)
+ {
+ npy_uint8 *current;
+ /* need to allocate some buffer */
+ mem_buff = malloc(a.size + b.size + c.size);
+ if (!mem_buff)
+ goto error;
+
+ current = mem_buff;
+ current = matrix_desc_assign_buff(&a, current);
+ current = matrix_desc_assign_buff(&b, current);
+ current = matrix_desc_assign_buff(&c, current);
+ }
+
+ params->A = a.buff;
+ params->B = b.buff;
+ params->C = c.buff;
+ params->LDA = a.lead;
+ params->LDB = b.lead;
+ params->LDC = c.lead;
+ params->M = m;
+ params->N = n;
+ params->K = k;
+ params->TRANSA='N';
+ params->TRANSB='N';
+
+ params->allocated_data = mem_buff;
+
+ return 1;
+ error:
+ free(mem_buff);
+ memset(params, 0, sizeof(*params));
+ return 0;
+}
+
+static inline void
+release_gemm_params(GEMM_PARAMS_t * params)
+{
+ free(params->allocated_data);
+ params->allocated_data = NULL;
+}
+
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #typ=npy_float,npy_double,npy_cfloat,npy_cdouble#
+ #one=s_one,d_one,c_one.array,z_one.array#
+ #zero=s_zero,d_zero,c_zero.array,z_zero.array#
+ #GEMM=sgemm,dgemm,cgemm,zgemm#
+*/
+
+static void
+@TYPE@_matrix_multiply(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ /*
+ * everything is set up so that BLAS gemm can be called directly, with
+ * no rearranging and no special handling, as the matrices are laid out
+ * in memory the way gemm expects.
+ * This is just a loop calling blas.
+ */
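+ /*
+ * For reference (a sketch of the call convention, assuming standard
+ * Fortran GEMM semantics): gemm computes C = alpha*op(A)*op(B) + beta*C.
+ * Both TRANS flags here are 'N', alpha is one and beta is zero, so each
+ * inner iteration computes C = A*B in the BLAS's column-major view of
+ * the linearized buffers.
+ */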
+ GEMM_PARAMS_t params;
+ fortran_int m, k, n;
+
+ INIT_OUTER_LOOP_3
+
+ m = (fortran_int) dimensions[0];
+ k = (fortran_int) dimensions[1];
+ n = (fortran_int) dimensions[2];
+
+ if (init_gemm_params(&params, m, k, n, steps, sizeof(@typ@)))
+ {
+ LINEARIZE_DATA_t a_in, b_in, c_out;
+
+ if (params.A)
+ init_linearize_data(&a_in, k, m, steps[1], steps[0]);
+ if (params.B)
+ init_linearize_data(&b_in, n, k, steps[3], steps[2]);
+ if (params.C)
+ init_linearize_data(&c_out, n, m, steps[5], steps[4]);
+
+ BEGIN_OUTER_LOOP_3
+ /* just call the appropriate multiply and update pointers */
+ @typ@ *A = linearize_@TYPE@_matrix(params.A, args[0], &a_in);
+ @typ@ *B = linearize_@TYPE@_matrix(params.B, args[1], &b_in);
+ @typ@ *C = params.C ? (@typ@*)params.C : (@typ@ *) args[2];
+
+ /* linearize source operands if needed */
+ FNAME(@GEMM@)(&params.TRANSA, &params.TRANSB,
+ &params.M, &params.N, &params.K,
+ (void*)&@one@, /* alpha */
+ (void*)A, &params.LDA,
+ (void*)B, &params.LDB,
+ (void*)&@zero@, /* beta */
+ (void*)C, &params.LDC);
+ delinearize_@TYPE@_matrix(args[2], params.C, &c_out);
+ END_OUTER_LOOP
+
+ release_gemm_params(&params);
+ }
+}
+/**end repeat**/
+
+
+/* -------------------------------------------------------------------------- */
+ /* Determinants */
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE#
+ #typ=npy_float, npy_double#
+ #log_func=npy_logf,npy_log#
+ #exp_func=npy_expf,npy_exp#
+ #zero=0.0f,0.0#
+*/
+
+static inline void
+@TYPE@_slogdet_from_factored_diagonal(@typ@* src,
+ fortran_int m,
+ @typ@ *sign,
+ @typ@ *logdet)
+{
+ @typ@ acc_sign = *sign;
+ @typ@ acc_logdet = @zero@;
+ int i;
+ for (i = 0; i < m; i++) {
+ @typ@ abs_element = *src;
+ if (abs_element < @zero@) {
+ acc_sign = -acc_sign;
+ abs_element = -abs_element;
+ }
+
+ acc_logdet += @log_func@(abs_element);
+ src += m+1;
+ }
+
+ *sign = acc_sign;
+ *logdet = acc_logdet;
+}
+
+static inline @typ@
+@TYPE@_det_from_slogdet(@typ@ sign, @typ@ logdet)
+{
+ @typ@ result = sign * @exp_func@(logdet);
+ return result;
+}
+
+/**end repeat**/
+
+
+/**begin repeat
+ #TYPE=CFLOAT,CDOUBLE#
+ #typ=npy_cfloat, npy_cdouble#
+ #basetyp=npy_float, npy_double#
+ #abs_func=npy_cabsf, npy_cabs#
+ #log_func=npy_logf, npy_log#
+ #exp_func=npy_expf, npy_exp#
+ #zero=0.0f,0.0#
+*/
+#define RE(COMPLEX) (((@basetyp@*)(&COMPLEX))[0])
+#define IM(COMPLEX) (((@basetyp@*)(&COMPLEX))[1])
+
+static inline @typ@
+@TYPE@_mult(@typ@ op1, @typ@ op2)
+{
+ @typ@ rv;
+
+ RE(rv) = RE(op1)*RE(op2) - IM(op1)*IM(op2);
+ IM(rv) = RE(op1)*IM(op2) + IM(op1)*RE(op2);
+
+ return rv;
+}
+
+
+static inline void
+@TYPE@_slogdet_from_factored_diagonal(@typ@* src,
+ fortran_int m,
+ @typ@ *sign,
+ @basetyp@ *logdet)
+{
+ int i;
+ @typ@ sign_acc = *sign;
+ @basetyp@ logdet_acc = @zero@;
+
+ for (i = 0; i < m; i++)
+ {
+ @basetyp@ abs_element = @abs_func@(*src);
+ @typ@ sign_element;
+ RE(sign_element) = RE(*src) / abs_element;
+ IM(sign_element) = IM(*src) / abs_element;
+
+ sign_acc = @TYPE@_mult(sign_acc, sign_element);
+ logdet_acc += @log_func@(abs_element);
+ src += m + 1;
+ }
+
+ *sign = sign_acc;
+ *logdet = logdet_acc;
+}
+
+static inline @typ@
+@TYPE@_det_from_slogdet(@typ@ sign, @basetyp@ logdet)
+{
+ @typ@ tmp;
+ RE(tmp) = @exp_func@(logdet);
+ IM(tmp) = @zero@;
+ return @TYPE@_mult(sign, tmp);
+}
+#undef RE
+#undef IM
+/**end repeat**/
+
+
+/* As in the linalg package, the determinant is computed via LU factorization
+ * using LAPACK.
+ * slogdet computes the sign and log(determinant).
+ * det computes sign * exp(logdet).
+ */
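+/*
+ * A worked sketch of the identity used below: getrf factors P*A = L*U
+ * with L unit-diagonal, so
+ *
+ * det(A) = (-1)^(number of row swaps) * prod_i U[i, i]
+ *
+ * slogdet therefore accumulates log|U[i, i]| over the factored diagonal
+ * and tracks the sign separately, flipping it once per pivot row swap.
+ */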
+/**begin repeat
+
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #typ=npy_float,npy_double,npy_cfloat,npy_cdouble#
+ #basetyp=npy_float,npy_double,npy_float,npy_double#
+ #cblas_type=s,d,c,z#
+*/
+
+static inline void
+@TYPE@_slogdet_single_element(fortran_int m,
+ void* src,
+ fortran_int* pivots,
+ @typ@ *sign,
+ @basetyp@ *logdet)
+{
+ fortran_int info = 0;
+ int i;
+ /* note: done in place */
+ LAPACK(@cblas_type@getrf)(&m, &m, (void *)src, &m, pivots, &info);
+
+ if (info == 0)
+ {
+ int change_sign = 0;
+ /* note: fortran uses 1 based indexing */
+ for (i=0; i < m; i++)
+ {
+ change_sign += (pivots[i] != (i+1));
+ }
+
+ memcpy(sign,
+ (change_sign % 2)?
+ &@cblas_type@_minus_one :
+ &@cblas_type@_one
+ , sizeof(*sign));
+ @TYPE@_slogdet_from_factored_diagonal(src, m, sign, logdet);
+ } else {
+ /*
+ if getrf fails, use 0 as sign and -inf as logdet
+ */
+ memcpy(sign, &@cblas_type@_zero, sizeof(*sign));
+ memcpy(logdet, &@cblas_type@_ninf, sizeof(*logdet));
+ }
+}
+
+static void
+@TYPE@_slogdet(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ fortran_int m;
+ npy_uint8 *tmp_buff = NULL;
+ size_t matrix_size;
+ size_t pivot_size;
+ /* notes:
+ * the matrix always needs to be copied, as LAPACK factorizes
+ * in place
+ * the matrix needs to be in column-major order, as expected by the
+ * lapack code (fortran)
+ * it is always a square matrix
+ * memory needs to be allocated for both the matrix buffer and the
+ * pivot buffer
+ */
+ INIT_OUTER_LOOP_3
+ m = (fortran_int) dimensions[0];
+ matrix_size = m*m*sizeof(@typ@);
+ pivot_size = m*sizeof(fortran_int);
+ tmp_buff = (npy_uint8 *)malloc(matrix_size + pivot_size);
+
+ if (tmp_buff)
+ {
+ LINEARIZE_DATA_t lin_data;
+ /* swapped steps to get matrix in FORTRAN order */
+ init_linearize_data(&lin_data, m, m,
+ (ptrdiff_t)steps[1],
+ (ptrdiff_t)steps[0]);
+ BEGIN_OUTER_LOOP_3
+ linearize_@TYPE@_matrix(tmp_buff, args[0], &lin_data);
+ @TYPE@_slogdet_single_element(m,
+ (void*)tmp_buff,
+ (fortran_int*)(tmp_buff+matrix_size),
+ (@typ@*)args[1],
+ (@basetyp@*)args[2]);
+ END_OUTER_LOOP
+
+ free(tmp_buff);
+ }
+}
+
+static void
+@TYPE@_det(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ fortran_int m;
+ npy_uint8 *tmp_buff;
+ size_t matrix_size;
+ size_t pivot_size;
+ /* notes:
+ * the matrix always needs to be copied, as LAPACK factorizes
+ * in place
+ * the matrix needs to be in column-major order, as expected by the
+ * lapack code (fortran)
+ * it is always a square matrix
+ * memory needs to be allocated for both the matrix buffer and the
+ * pivot buffer
+ */
+ INIT_OUTER_LOOP_2
+ m = (fortran_int) dimensions[0];
+ matrix_size = m*m*sizeof(@typ@);
+ pivot_size = m*sizeof(fortran_int);
+ tmp_buff = (npy_uint8 *)malloc(matrix_size + pivot_size);
+
+ if (tmp_buff)
+ {
+ LINEARIZE_DATA_t lin_data;
+ @typ@ sign;
+ @basetyp@ logdet;
+ /* swapped steps to get matrix in FORTRAN order */
+ init_linearize_data(&lin_data, m, m,
+ (ptrdiff_t)steps[1],
+ (ptrdiff_t)steps[0]);
+
+ BEGIN_OUTER_LOOP_2
+ linearize_@TYPE@_matrix(tmp_buff, args[0], &lin_data);
+ @TYPE@_slogdet_single_element(m,
+ (void*)tmp_buff,
+ (fortran_int*)(tmp_buff+matrix_size),
+ &sign,
+ &logdet);
+ *(@typ@ *)args[1] = @TYPE@_det_from_slogdet(sign, logdet);
+ END_OUTER_LOOP
+
+ free(tmp_buff);
+ }
+}
+/**end repeat**/
+
+
+/* -------------------------------------------------------------------------- */
+ /* Eigh family */
+
+typedef struct eigh_params_struct {
+ void *A; /* matrix */
+ void *W; /* eigenvalue vector */
+ void *WORK; /* main work buffer */
+ void *RWORK; /* secondary work buffer (for complex versions) */
+ void *IWORK;
+ fortran_int N;
+ fortran_int LWORK;
+ fortran_int LRWORK;
+ fortran_int LIWORK;
+ char JOBZ;
+ char UPLO;
+} EIGH_PARAMS_t;
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE#
+ #typ=npy_float,npy_double#
+ #ftyp=fortran_real,fortran_doublereal#
+ #lapack_func=ssyevd,dsyevd#
+*/
+
+/*
+ * Initialize the parameters to use for the lapack function _syevd.
+ * Handles buffer allocation.
+ */
+static inline int
+init_@lapack_func@(EIGH_PARAMS_t* params, char JOBZ, char UPLO,
+ fortran_int N)
+{
+ npy_uint8 *mem_buff = NULL;
+ npy_uint8 *mem_buff2 = NULL;
+ @typ@ query_work_size;
+ fortran_int query_iwork_size;
+ fortran_int lwork = -1;
+ fortran_int liwork = -1;
+ fortran_int info;
+ npy_uint8 *a, *w, *work, *iwork;
+ size_t alloc_size = N*(N+1)*sizeof(@typ@);
+
+ mem_buff = malloc(alloc_size);
+
+ if (!mem_buff)
+ goto error;
+ a = mem_buff;
+ w = mem_buff + N*N*sizeof(@typ@);
+ LAPACK(@lapack_func@)(&JOBZ, &UPLO, &N,
+ (@ftyp@*)a, &N, (@ftyp@*)w,
+ &query_work_size, &lwork,
+ &query_iwork_size, &liwork,
+ &info);
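+ /* note: the call above passes lwork == -1 and liwork == -1, which in
+ * LAPACK convention requests a workspace-size query; the optimal
+ * sizes are returned in query_work_size and query_iwork_size. */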
+
+ if (info != 0)
+ goto error;
+
+ lwork = (fortran_int)query_work_size;
+ liwork = query_iwork_size;
+ mem_buff2 = malloc(lwork*sizeof(@typ@) + liwork*sizeof(fortran_int));
+ if (!mem_buff2)
+ goto error;
+
+ work = mem_buff2;
+ iwork = mem_buff2 + lwork*sizeof(@typ@);
+
+ params->A = a;
+ params->W = w;
+ params->WORK = work;
+ params->RWORK = NULL; /* unused */
+ params->IWORK = iwork;
+ params->N = N;
+ params->LWORK = lwork;
+ params->LRWORK = 0; /* unused */
+ params->LIWORK = liwork;
+ params->JOBZ = JOBZ;
+ params->UPLO = UPLO;
+
+ return 1;
+
+ error:
+ /* something failed */
+ memset(params, 0, sizeof(*params));
+ free(mem_buff2);
+ free(mem_buff);
+
+ return 0;
+}
+
+static inline fortran_int
+call_@lapack_func@(EIGH_PARAMS_t *params)
+{
+ fortran_int rv;
+ LAPACK(@lapack_func@)(&params->JOBZ, &params->UPLO, &params->N,
+ params->A, &params->N, params->W,
+ params->WORK, &params->LWORK,
+ params->IWORK, &params->LIWORK,
+ &rv);
+ return rv;
+}
+/**end repeat**/
+
+
+/**begin repeat
+ #TYPE=CFLOAT,CDOUBLE#
+ #typ=npy_cfloat,npy_cdouble#
+ #basetyp=npy_float,npy_double#
+ #ftyp=fortran_complex,fortran_doublecomplex#
+ #fbasetyp=fortran_real,fortran_doublereal#
+ #lapack_func=cheevd,zheevd#
+*/
+/*
+ * Initialize the parameters to use for the lapack function _heevd.
+ * Handles buffer allocation.
+ */
+static inline int
+init_@lapack_func@(EIGH_PARAMS_t *params,
+ char JOBZ,
+ char UPLO,
+ fortran_int N)
+{
+ npy_uint8 *mem_buff = NULL;
+ npy_uint8 *mem_buff2 = NULL;
+ @ftyp@ query_work_size;
+ @fbasetyp@ query_rwork_size;
+ fortran_int query_iwork_size;
+ fortran_int lwork = -1;
+ fortran_int lrwork = -1;
+ fortran_int liwork = -1;
+ npy_uint8 *a, *w, *work, *rwork, *iwork;
+ fortran_int info;
+
+ mem_buff = malloc(N*N*sizeof(@typ@)+N*sizeof(@basetyp@));
+ if (!mem_buff)
+ goto error;
+ a = mem_buff;
+ w = mem_buff+N*N*sizeof(@typ@);
+
+ LAPACK(@lapack_func@)(&JOBZ, &UPLO, &N,
+ (@ftyp@*)a, &N, (@fbasetyp@*)w,
+ &query_work_size, &lwork,
+ &query_rwork_size, &lrwork,
+ &query_iwork_size, &liwork,
+ &info);
+ if (info != 0)
+ goto error;
+
+ lwork = (fortran_int)*(@fbasetyp@*)&query_work_size;
+ lrwork = (fortran_int)query_rwork_size;
+ liwork = query_iwork_size;
+
+ mem_buff2 = malloc(lwork*sizeof(@typ@) +
+ lrwork*sizeof(@basetyp@) +
+ liwork*sizeof(fortran_int));
+ if (!mem_buff2)
+ goto error;
+ work = mem_buff2;
+ rwork = work + lwork*sizeof(@typ@);
+ iwork = rwork + lrwork*sizeof(@basetyp@);
+
+ params->A = a;
+ params->W = w;
+ params->WORK = work;
+ params->RWORK = rwork;
+ params->IWORK = iwork;
+ params->N = N;
+ params->LWORK = lwork;
+ params->LRWORK = lrwork;
+ params->LIWORK = liwork;
+ params->JOBZ = JOBZ;
+ params->UPLO = UPLO;
+
+ return 1;
+
+ /* something failed */
+error:
+ memset(params, 0, sizeof(*params));
+ free(mem_buff2);
+ free(mem_buff);
+
+ return 0;
+}
+
+static inline fortran_int
+call_@lapack_func@(EIGH_PARAMS_t *params)
+{
+ fortran_int rv;
+ LAPACK(@lapack_func@)(&params->JOBZ, &params->UPLO, &params->N,
+ params->A, &params->N, params->W,
+ params->WORK, &params->LWORK,
+ params->RWORK, &params->LRWORK,
+ params->IWORK, &params->LIWORK,
+ &rv);
+ return rv;
+}
+/**end repeat**/
+
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #BASETYPE=FLOAT,DOUBLE,FLOAT,DOUBLE#
+ #typ=npy_float,npy_double,npy_cfloat,npy_cdouble#
+ #basetyp=npy_float,npy_double,npy_float,npy_double#
+ #lapack_func=ssyevd,dsyevd,cheevd,zheevd#
+**/
+/*
+ * (M,M)->(M,)(M,M)
+ * dimensions[1] -> M
+ * args[0] -> A[in]
+ * args[1] -> W
+ * args[2] -> A[out]
+ */
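+/*
+ * In gufunc terms the signature is (m,m)->(m),(m,m): each inner
+ * operation factors one symmetric/Hermitian matrix, writing the
+ * eigenvalues to W and, when JOBZ == 'V', the eigenvectors over the
+ * working copy of A.
+ */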
+
+static inline void
+release_@lapack_func@(EIGH_PARAMS_t *params)
+{
+ /* allocated memory in A and WORK */
+ free(params->A);
+ free(params->WORK);
+ memset(params, 0, sizeof(*params));
+}
+
+
+static inline void
+@TYPE@_eigh_wrapper(char JOBZ,
+ char UPLO,
+ char**args,
+ npy_intp* dimensions,
+ npy_intp* steps)
+{
+ ptrdiff_t outer_steps[3];
+ size_t iter;
+ size_t outer_dim = *dimensions++;
+ size_t op_count = (JOBZ=='N')?2:3;
+ EIGH_PARAMS_t eigh_params;
+ int error_occurred = get_fp_invalid_and_clear();
+
+ for (iter=0; iter < op_count; ++iter) {
+ outer_steps[iter] = (ptrdiff_t) steps[iter];
+ }
+ steps += op_count;
+
+ if (init_@lapack_func@(&eigh_params,
+ JOBZ,
+ UPLO,
+ (fortran_int)dimensions[0])) {
+ LINEARIZE_DATA_t matrix_in_ld;
+ LINEARIZE_DATA_t eigenvectors_out_ld;
+ LINEARIZE_DATA_t eigenvalues_out_ld;
+
+ init_linearize_data(&matrix_in_ld,
+ eigh_params.N, eigh_params.N,
+ steps[1], steps[0]);
+ init_linearize_data(&eigenvalues_out_ld,
+ 1, eigh_params.N,
+ 0, steps[2]);
+ if ('V' == eigh_params.JOBZ) {
+ init_linearize_data(&eigenvectors_out_ld,
+ eigh_params.N, eigh_params.N,
+ steps[4], steps[3]);
+ }
+
+ for (iter = 0; iter < outer_dim; ++iter) {
+ int not_ok;
+ /* copy the matrix in */
+ linearize_@TYPE@_matrix(eigh_params.A, args[0], &matrix_in_ld);
+ not_ok = call_@lapack_func@(&eigh_params);
+ if (!not_ok) {
+ /* lapack ok, copy result out */
+ delinearize_@BASETYPE@_matrix(args[1],
+ eigh_params.W,
+ &eigenvalues_out_ld);
+
+ if ('V' == eigh_params.JOBZ) {
+ delinearize_@TYPE@_matrix(args[2],
+ eigh_params.A,
+ &eigenvectors_out_ld);
+ }
+ } else {
+ /* lapack fail, set result to nan */
+ error_occurred = 1;
+ nan_@BASETYPE@_matrix(args[1], &eigenvalues_out_ld);
+ if ('V' == eigh_params.JOBZ) {
+ nan_@TYPE@_matrix(args[2], &eigenvectors_out_ld);
+ }
+ }
+ update_pointers((npy_uint8**)args, outer_steps, op_count);
+ }
+
+ release_@lapack_func@(&eigh_params);
+ }
+
+ set_fp_invalid_or_clear(error_occurred);
+}
+/**end repeat**/
+
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ */
+static void
+@TYPE@_eighlo(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_eigh_wrapper('V', 'L', args, dimensions, steps);
+}
+
+static void
+@TYPE@_eighup(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void* NPY_UNUSED(func))
+{
+ @TYPE@_eigh_wrapper('V', 'U', args, dimensions, steps);
+}
+
+static void
+@TYPE@_eigvalshlo(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void* NPY_UNUSED(func))
+{
+ @TYPE@_eigh_wrapper('N', 'L', args, dimensions, steps);
+}
+
+static void
+@TYPE@_eigvalshup(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void* NPY_UNUSED(func))
+{
+ @TYPE@_eigh_wrapper('N', 'U', args, dimensions, steps);
+}
+/**end repeat**/
+
+/* -------------------------------------------------------------------------- */
+ /* Solve family (includes inv) */
+
+typedef struct gesv_params_struct
+{
+ void *A; /* A is (N,N) of base type */
+ void *B; /* B is (N,NRHS) of base type */
+ fortran_int * IPIV; /* IPIV is (N) */
+
+ fortran_int N;
+ fortran_int NRHS;
+ fortran_int LDA;
+ fortran_int LDB;
+} GESV_PARAMS_t;
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #typ=npy_float,npy_double,npy_cfloat,npy_cdouble#
+ #ftyp=fortran_real,fortran_doublereal,fortran_complex,fortran_doublecomplex#
+ #lapack_func=sgesv,dgesv,cgesv,zgesv#
+*/
+
+/*
+ * Initialize the parameters to use for the lapack function _gesv.
+ * Handles buffer allocation.
+ */
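+/*
+ * A note on the convention (standard gesv semantics): gesv solves
+ * A*X = B by LU factorization with partial pivoting, overwriting B
+ * with the solution X. The inv entry point below reuses this by
+ * solving against an identity right-hand side, so that X = inv(A).
+ */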
+static inline int
+init_@lapack_func@(GESV_PARAMS_t *params, fortran_int N, fortran_int NRHS)
+{
+ npy_uint8 *mem_buff = NULL;
+ npy_uint8 *a, *b, *ipiv;
+ mem_buff = malloc(N*N*sizeof(@ftyp@) +
+ N*NRHS*sizeof(@ftyp@) +
+ N*sizeof(fortran_int));
+ if (!mem_buff)
+ goto error;
+ a = mem_buff;
+ b = a + N*N*sizeof(@ftyp@);
+ ipiv = b + N*NRHS*sizeof(@ftyp@);
+
+ params->A = a;
+ params->B = b;
+ params->IPIV = (fortran_int*)ipiv;
+ params->N = N;
+ params->NRHS = NRHS;
+ params->LDA = N;
+ params->LDB = N;
+
+ return 1;
+ error:
+ free(mem_buff);
+ memset(params, 0, sizeof(*params));
+
+ return 0;
+}
+
+static inline void
+release_@lapack_func@(GESV_PARAMS_t *params)
+{
+ /* memory block base is in A */
+ free(params->A);
+ memset(params, 0, sizeof(*params));
+}
+
+static inline fortran_int
+call_@lapack_func@(GESV_PARAMS_t *params)
+{
+ fortran_int rv;
+ LAPACK(@lapack_func@)(&params->N, &params->NRHS,
+ params->A, &params->LDA,
+ params->IPIV,
+ params->B, &params->LDB,
+ &rv);
+ return rv;
+}
+
+static void
+@TYPE@_solve(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ GESV_PARAMS_t params;
+ fortran_int n, nrhs;
+ int error_occurred = get_fp_invalid_and_clear();
+ INIT_OUTER_LOOP_3
+
+ n = (fortran_int)dimensions[0];
+ nrhs = (fortran_int)dimensions[1];
+ if (init_@lapack_func@(&params, n, nrhs)) {
+ LINEARIZE_DATA_t a_in, b_in, r_out;
+
+ init_linearize_data(&a_in, n, n, steps[1], steps[0]);
+ init_linearize_data(&b_in, nrhs, n, steps[3], steps[2]);
+ init_linearize_data(&r_out, nrhs, n, steps[5], steps[4]);
+
+ BEGIN_OUTER_LOOP_3
+ int not_ok;
+ linearize_@TYPE@_matrix(params.A, args[0], &a_in);
+ linearize_@TYPE@_matrix(params.B, args[1], &b_in);
+ not_ok = call_@lapack_func@(&params);
+ if (!not_ok) {
+ delinearize_@TYPE@_matrix(args[2], params.B, &r_out);
+ } else {
+ error_occurred = 1;
+ nan_@TYPE@_matrix(args[2], &r_out);
+ }
+ END_OUTER_LOOP
+
+ release_@lapack_func@(&params);
+ }
+
+ set_fp_invalid_or_clear(error_occurred);
+}
+
+static void
+@TYPE@_solve1(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ GESV_PARAMS_t params;
+ int error_occurred = get_fp_invalid_and_clear();
+ fortran_int n;
+ INIT_OUTER_LOOP_3
+
+ n = (fortran_int)dimensions[0];
+ if (init_@lapack_func@(&params, n, 1)) {
+ LINEARIZE_DATA_t a_in, b_in, r_out;
+ init_linearize_data(&a_in, n, n, steps[1], steps[0]);
+ init_linearize_data(&b_in, 1, n, 1, steps[2]);
+ init_linearize_data(&r_out, 1, n, 1, steps[3]);
+
+ BEGIN_OUTER_LOOP_3
+ int not_ok;
+ linearize_@TYPE@_matrix(params.A, args[0], &a_in);
+ linearize_@TYPE@_matrix(params.B, args[1], &b_in);
+ not_ok = call_@lapack_func@(&params);
+ if (!not_ok) {
+ delinearize_@TYPE@_matrix(args[2], params.B, &r_out);
+ } else {
+ error_occurred = 1;
+ nan_@TYPE@_matrix(args[2], &r_out);
+ }
+ END_OUTER_LOOP
+
+ release_@lapack_func@(&params);
+ }
+
+ set_fp_invalid_or_clear(error_occurred);
+}
+
+static void
+@TYPE@_inv(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ GESV_PARAMS_t params;
+ fortran_int n;
+ int error_occurred = get_fp_invalid_and_clear();
+ INIT_OUTER_LOOP_2
+
+ n = (fortran_int)dimensions[0];
+ if (init_@lapack_func@(&params, n, n)) {
+ LINEARIZE_DATA_t a_in, r_out;
+ init_linearize_data(&a_in, n, n, steps[1], steps[0]);
+ init_linearize_data(&r_out, n, n, steps[3], steps[2]);
+
+ BEGIN_OUTER_LOOP_2
+ int not_ok;
+ linearize_@TYPE@_matrix(params.A, args[0], &a_in);
+ identity_@TYPE@_matrix(params.B, n);
+ not_ok = call_@lapack_func@(&params);
+ if (!not_ok) {
+ delinearize_@TYPE@_matrix(args[1], params.B, &r_out);
+ } else {
+ error_occurred = 1;
+ nan_@TYPE@_matrix(args[1], &r_out);
+ }
+ END_OUTER_LOOP
+
+ release_@lapack_func@(&params);
+ }
+
+ set_fp_invalid_or_clear(error_occurred);
+}
+
+/**end repeat**/
+
+
+/* -------------------------------------------------------------------------- */
+ /* Cholesky decomposition */
+
+typedef struct potr_params_struct
+{
+ void *A;
+ fortran_int N;
+ fortran_int LDA;
+ char UPLO;
+} POTR_PARAMS_t;
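+/*
+ * A sketch of what follows (standard potrf semantics): the Cholesky
+ * factorization A = U^H * U (UPLO == 'U') or A = L * L^H (UPLO == 'L')
+ * of a Hermitian positive-definite matrix, computed in place on the
+ * linearized working copy of A.
+ */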
+
+/**begin repeat
+
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #ftyp=fortran_real, fortran_doublereal,
+ fortran_complex, fortran_doublecomplex#
+ #lapack_func=spotrf,dpotrf,cpotrf,zpotrf#
+ */
+
+static inline int
+init_@lapack_func@(POTR_PARAMS_t *params, char UPLO, fortran_int N)
+{
+ npy_uint8 *mem_buff = NULL;
+ npy_uint8 *a;
+
+ mem_buff = malloc(N*N*sizeof(@ftyp@));
+ if (!mem_buff)
+ goto error;
+
+ a = mem_buff;
+
+ params->A = a;
+ params->N = N;
+ params->LDA = N;
+ params->UPLO = UPLO;
+
+ return 1;
+ error:
+ free(mem_buff);
+ memset(params, 0, sizeof(*params));
+
+ return 0;
+}
+
+static inline void
+release_@lapack_func@(POTR_PARAMS_t *params)
+{
+ /* memory block base in A */
+ free(params->A);
+ memset(params, 0, sizeof(*params));
+}
+
+static inline fortran_int
+call_@lapack_func@(POTR_PARAMS_t *params)
+{
+ fortran_int rv;
+ LAPACK(@lapack_func@)(&params->UPLO,
+ &params->N, params->A, &params->LDA,
+ &rv);
+ return rv;
+}
+
+static void
+@TYPE@_cholesky(char uplo, char **args, npy_intp *dimensions, npy_intp *steps)
+{
+ POTR_PARAMS_t params;
+ int error_occurred = get_fp_invalid_and_clear();
+ fortran_int n;
+ INIT_OUTER_LOOP_2
+
+ n = (fortran_int)dimensions[0];
+ if (init_@lapack_func@(&params, uplo, n))
+ {
+ LINEARIZE_DATA_t a_in, r_out;
+ init_linearize_data(&a_in, n, n, steps[1], steps[0]);
+ init_linearize_data(&r_out, n, n, steps[3], steps[2]);
+ BEGIN_OUTER_LOOP_2
+ int not_ok;
+ linearize_@TYPE@_matrix(params.A, args[0], &a_in);
+ not_ok = call_@lapack_func@(&params);
+ if (!not_ok) {
+ triu_@TYPE@_matrix(params.A, params.N);
+ delinearize_@TYPE@_matrix(args[1], params.A, &r_out);
+ } else {
+ error_occurred = 1;
+ nan_@TYPE@_matrix(args[1], &r_out);
+ }
+ END_OUTER_LOOP
+ release_@lapack_func@(&params);
+ }
+
+ set_fp_invalid_or_clear(error_occurred);
+}
+
+static void
+@TYPE@_cholesky_up(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_cholesky('U', args, dimensions, steps);
+}
+
+static void
+@TYPE@_cholesky_lo(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_cholesky('L', args, dimensions, steps);
+}
+
+
+
+/**end repeat**/
+
+/* -------------------------------------------------------------------------- */
+ /* eig family */
+
+typedef struct geev_params_struct {
+ void *A;
+ void *WR; /* RWORK in complex versions, REAL W buffer for (sd)geev */
+ void *WI;
+ void *VLR; /* REAL VL buffers for _geev where _ is s, d */
+ void *VRR; /* REAL VR buffers for _geev where _ is s, d */
+ void *WORK;
+ void *W; /* final w */
+ void *VL; /* final vl */
+ void *VR; /* final vr */
+
+ fortran_int N;
+ fortran_int LDA;
+ fortran_int LDVL;
+ fortran_int LDVR;
+ fortran_int LWORK;
+
+ char JOBVL;
+ char JOBVR;
+} GEEV_PARAMS_t;
+
+static inline void
+dump_geev_params(const char *name, GEEV_PARAMS_t* params)
+{
+ TRACE_TXT("\n%s\n"
+
+ "\t%10s: %p\n"\
+ "\t%10s: %p\n"\
+ "\t%10s: %p\n"\
+ "\t%10s: %p\n"\
+ "\t%10s: %p\n"\
+ "\t%10s: %p\n"\
+ "\t%10s: %p\n"\
+ "\t%10s: %p\n"\
+ "\t%10s: %p\n"\
+
+ "\t%10s: %d\n"\
+ "\t%10s: %d\n"\
+ "\t%10s: %d\n"\
+ "\t%10s: %d\n"\
+ "\t%10s: %d\n"\
+
+ "\t%10s: %c\n"\
+ "\t%10s: %c\n",
+
+ name,
+
+ "A", params->A,
+ "WR", params->WR,
+ "WI", params->WI,
+ "VLR", params->VLR,
+ "VRR", params->VRR,
+ "WORK", params->WORK,
+ "W", params->W,
+ "VL", params->VL,
+ "VR", params->VR,
+
+ "N", (int)params->N,
+ "LDA", (int)params->LDA,
+ "LDVL", (int)params->LDVL,
+ "LDVR", (int)params->LDVR,
+ "LWORK", (int)params->LWORK,
+
+ "JOBVL", params->JOBVL,
+ "JOBVR", params->JOBVR);
+}
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE#
+ #CTYPE=CFLOAT,CDOUBLE#
+ #typ=float,double#
+ #complextyp=COMPLEX_t,DOUBLECOMPLEX_t#
+ #lapack_func=sgeev,dgeev#
+ #zero=0.0f,0.0#
+*/
+static inline int
+init_@lapack_func@(GEEV_PARAMS_t *params, char jobvl, char jobvr, fortran_int n)
+{
+ npy_uint8 *mem_buff=NULL;
+ npy_uint8 *mem_buff2=NULL;
+ npy_uint8 *a, *wr, *wi, *vlr, *vrr, *work, *w, *vl, *vr;
+ size_t a_size = n*n*sizeof(@typ@);
+ size_t wr_size = n*sizeof(@typ@);
+ size_t wi_size = n*sizeof(@typ@);
+ size_t vlr_size = jobvl=='V' ? n*n*sizeof(@typ@) : 0;
+ size_t vrr_size = jobvr=='V' ? n*n*sizeof(@typ@) : 0;
+ size_t w_size = wr_size*2;
+ size_t vl_size = vlr_size*2;
+ size_t vr_size = vrr_size*2;
+ size_t work_count = 0;
+ @typ@ work_size_query;
+ fortran_int do_size_query = -1;
+ fortran_int rv;
+
+ /* allocate data for known sizes (all but work) */
+ mem_buff = malloc(a_size + wr_size + wi_size +
+ vlr_size + vrr_size +
+ w_size + vl_size + vr_size);
+ if (!mem_buff)
+ goto error;
+
+ a = mem_buff;
+ wr = a + a_size;
+ wi = wr + wr_size;
+ vlr = wi + wi_size;
+ vrr = vlr + vlr_size;
+ w = vrr + vrr_size;
+ vl = w + w_size;
+ vr = vl + vl_size;
+ LAPACK(@lapack_func@)(&jobvl, &jobvr, &n,
+ (void *)a, &n, (void *)wr, (void *)wi,
+ (void *)vl, &n, (void *)vr, &n,
+ &work_size_query, &do_size_query,
+ &rv);
+
+ if (0 != rv)
+ goto error;
+
+ work_count = (size_t)work_size_query;
+ mem_buff2 = malloc(work_count*sizeof(@typ@));
+ if (!mem_buff2)
+ goto error;
+ work = mem_buff2;
+
+ params->A = a;
+ params->WR = wr;
+ params->WI = wi;
+ params->VLR = vlr;
+ params->VRR = vrr;
+ params->WORK = work;
+ params->W = w;
+ params->VL = vl;
+ params->VR = vr;
+ params->N = n;
+ params->LDA = n;
+ params->LDVL = n;
+ params->LDVR = n;
+ params->LWORK = (fortran_int)work_count;
+ params->JOBVL = jobvl;
+ params->JOBVR = jobvr;
+
+ return 1;
+ error:
+ free(mem_buff2);
+ free(mem_buff);
+ memset(params, 0, sizeof(*params));
+
+ return 0;
+}
+
+static inline fortran_int
+call_@lapack_func@(GEEV_PARAMS_t* params)
+{
+ fortran_int rv;
+ LAPACK(@lapack_func@)(&params->JOBVL, &params->JOBVR,
+ &params->N, params->A, &params->LDA,
+ params->WR, params->WI,
+ params->VLR, &params->LDVL,
+ params->VRR, &params->LDVR,
+ params->WORK, &params->LWORK,
+ &rv);
+ return rv;
+}
+
+
+static inline void
+mk_@TYPE@_complex_array_from_real(@complextyp@ *c, const @typ@ *re, size_t n)
+{
+ size_t iter;
+ for (iter = 0; iter < n; ++iter) {
+ c[iter].array[0] = re[iter];
+ c[iter].array[1] = @zero@;
+ }
+}
+
+static inline void
+mk_@TYPE@_complex_array(@complextyp@ *c,
+ const @typ@ *re,
+ const @typ@ *im,
+ size_t n)
+{
+ size_t iter;
+ for (iter = 0; iter < n; ++iter) {
+ c[iter].array[0] = re[iter];
+ c[iter].array[1] = im[iter];
+ }
+}
+
+static inline void
+mk_@TYPE@_complex_array_conjugate_pair(@complextyp@ *c,
+ const @typ@ *r,
+ size_t n)
+{
+ size_t iter;
+ for (iter = 0; iter < n; ++iter) {
+ @typ@ re = r[iter];
+ @typ@ im = r[iter+n];
+ c[iter].array[0] = re;
+ c[iter].array[1] = im;
+ c[iter+n].array[0] = re;
+ c[iter+n].array[1] = -im;
+ }
+}
+
+/*
+ * make the complex eigenvectors from the real arrays produced by sgeev/dgeev.
+ * c is the array where the results will be left.
+ * r is the source array of reals produced by sgeev/dgeev
+ * i holds the eigenvalue imaginary parts produced by sgeev/dgeev
+ * n is the order of the matrix (n by n)
+ */
+static inline void
+mk_@lapack_func@_complex_eigenvectors(@complextyp@ *c,
+ const @typ@ *r,
+ const @typ@ *i,
+ size_t n)
+{
+ size_t iter = 0;
+ while (iter < n)
+ {
+ if (i[iter] == @zero@) {
+ /* eigenvalue was real, eigenvectors as well... */
+ mk_@TYPE@_complex_array_from_real(c, r, n);
+ c += n;
+ r += n;
+ iter ++;
+ } else {
+ /* eigenvalue was complex, generate a pair of eigenvectors */
+ mk_@TYPE@_complex_array_conjugate_pair(c, r, n);
+ c += 2*n;
+ r += 2*n;
+ iter += 2;
+ }
+ }
+}
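+/*
+ * A worked example of the packing handled above (standard LAPACK geev
+ * output for real matrices): when eigenvalues j and j+1 form a complex
+ * conjugate pair, real output columns j and j+1 hold the shared real
+ * and imaginary parts of the eigenvectors, so
+ *
+ * v_j = r[:, j] + i*r[:, j+1] and v_{j+1} = r[:, j] - i*r[:, j+1]
+ *
+ * while a real eigenvalue contributes a single, purely real column.
+ */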
+
+
+static inline void
+process_@lapack_func@_results(GEEV_PARAMS_t *params)
+{
+ /* REAL versions of geev need their results translated into
+ * complex form, since the eigenvalues and eigenvectors may be
+ * complex. Our gufuncs always return complex arrays!
+ */
+ mk_@TYPE@_complex_array(params->W, params->WR, params->WI, params->N);
+
+ /* handle the eigenvectors */
+ if ('V' == params->JOBVL) {
+ mk_@lapack_func@_complex_eigenvectors(params->VL, params->VLR,
+ params->WI, params->N);
+ }
+ if ('V' == params->JOBVR) {
+ mk_@lapack_func@_complex_eigenvectors(params->VR, params->VRR,
+ params->WI, params->N);
+ }
+}
+
+/**end repeat**/
+
+
+/**begin repeat
+ #TYPE=CFLOAT,CDOUBLE#
+ #typ=COMPLEX_t,DOUBLECOMPLEX_t#
+ #ftyp=fortran_complex,fortran_doublecomplex#
+ #realtyp=float,double#
+ #lapack_func=cgeev,zgeev#
+ */
+
+static inline int
+init_@lapack_func@(GEEV_PARAMS_t* params,
+ char jobvl,
+ char jobvr,
+ fortran_int n)
+{
+ npy_uint8 *mem_buff = NULL;
+ npy_uint8 *mem_buff2 = NULL;
+ npy_uint8 *a, *w, *vl, *vr, *work, *rwork;
+ size_t a_size = n*n*sizeof(@ftyp@);
+ size_t w_size = n*sizeof(@ftyp@);
+ size_t vl_size = jobvl=='V'? n*n*sizeof(@ftyp@) : 0;
+ size_t vr_size = jobvr=='V'? n*n*sizeof(@ftyp@) : 0;
+ size_t rwork_size = 2*n*sizeof(@realtyp@);
+ size_t work_count = 0;
+ @typ@ work_size_query;
+ fortran_int do_size_query = -1;
+ fortran_int rv;
+ size_t total_size = a_size + w_size + vl_size + vr_size + rwork_size;
+
+ mem_buff = malloc(total_size);
+ if (!mem_buff)
+ goto error;
+
+ a = mem_buff;
+ w = a + a_size;
+ vl = w + w_size;
+ vr = vl + vl_size;
+ rwork = vr + vr_size;
+ LAPACK(@lapack_func@)(&jobvl, &jobvr, &n,
+ (void *)a, &n, (void *)w,
+ (void *)vl, &n, (void *)vr, &n,
+ (void *)&work_size_query, &do_size_query,
+ (void *)rwork,
+ &rv);
+ if (0 != rv)
+ goto error;
+
+ work_count = (size_t) work_size_query.array[0];
+ mem_buff2 = malloc(work_count*sizeof(@ftyp@));
+ if (!mem_buff2)
+ goto error;
+
+ work = mem_buff2;
+
+ params->A = a;
+ params->WR = rwork;
+ params->WI = NULL;
+ params->VLR = NULL;
+ params->VRR = NULL;
+ params->VL = vl;
+ params->VR = vr;
+ params->WORK = work;
+ params->W = w;
+ params->N = n;
+ params->LDA = n;
+ params->LDVL = n;
+ params->LDVR = n;
+ params->LWORK = (fortran_int)work_count;
+ params->JOBVL = jobvl;
+ params->JOBVR = jobvr;
+
+ return 1;
+ error:
+ free(mem_buff2);
+ free(mem_buff);
+ memset(params, 0, sizeof(*params));
+
+ return 0;
+}
+
+static inline fortran_int
+call_@lapack_func@(GEEV_PARAMS_t* params)
+{
+ fortran_int rv;
+
+ LAPACK(@lapack_func@)(&params->JOBVL, &params->JOBVR,
+ &params->N, params->A, &params->LDA,
+ params->W,
+ params->VL, &params->LDVL,
+ params->VR, &params->LDVR,
+ params->WORK, &params->LWORK,
+ params->WR, /* actually RWORK */
+ &rv);
+ return rv;
+}
+
+
+static inline void
+process_@lapack_func@_results(GEEV_PARAMS_t *NPY_UNUSED(params))
+{
+ /* nothing to do here, complex versions are ready to copy out */
+}
+/**end repeat**/
+
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #COMPLEXTYPE=CFLOAT,CDOUBLE,CFLOAT,CDOUBLE#
+ #ftype=fortran_real,fortran_doublereal,fortran_complex,fortran_doublecomplex#
+ #lapack_func=sgeev,dgeev,cgeev,zgeev#
+ */
+
+static inline void
+release_@lapack_func@(GEEV_PARAMS_t *params)
+{
+ free(params->WORK);
+ free(params->A);
+ memset(params, 0, sizeof(*params));
+}
+
+static inline void
+@TYPE@_eig_wrapper(char JOBVL,
+ char JOBVR,
+ char**args,
+ npy_intp* dimensions,
+ npy_intp* steps)
+{
+ ptrdiff_t outer_steps[4];
+ size_t iter;
+ size_t outer_dim = *dimensions++;
+ size_t op_count = 2;
+ int error_occurred = get_fp_invalid_and_clear();
+ GEEV_PARAMS_t geev_params;
+
+ STACK_TRACE;
+ op_count += 'V'==JOBVL?1:0;
+ op_count += 'V'==JOBVR?1:0;
+
+ for (iter=0; iter < op_count; ++iter) {
+ outer_steps[iter] = (ptrdiff_t) steps[iter];
+ }
+ steps += op_count;
+
+ if (init_@lapack_func@(&geev_params,
+ JOBVL, JOBVR,
+ (fortran_int)dimensions[0])) {
+ LINEARIZE_DATA_t a_in;
+ LINEARIZE_DATA_t w_out;
+ LINEARIZE_DATA_t vl_out;
+ LINEARIZE_DATA_t vr_out;
+
+ init_linearize_data(&a_in,
+ geev_params.N, geev_params.N,
+ steps[1], steps[0]);
+ steps += 2;
+ init_linearize_data(&w_out,
+ 1, geev_params.N,
+ 0, steps[0]);
+ steps += 1;
+ if ('V' == geev_params.JOBVL) {
+ init_linearize_data(&vl_out,
+ geev_params.N, geev_params.N,
+ steps[1], steps[0]);
+ steps += 2;
+ }
+ if ('V' == geev_params.JOBVR) {
+ init_linearize_data(&vr_out,
+ geev_params.N, geev_params.N,
+ steps[1], steps[0]);
+ }
+
+ for (iter = 0; iter < outer_dim; ++iter) {
+ int not_ok;
+ char **arg_iter = args;
+ /* copy the matrix in */
+ linearize_@TYPE@_matrix(geev_params.A, *arg_iter++, &a_in);
+ not_ok = call_@lapack_func@(&geev_params);
+
+ if (!not_ok) {
+ process_@lapack_func@_results(&geev_params);
+ delinearize_@COMPLEXTYPE@_matrix(*arg_iter++,
+ geev_params.W,
+ &w_out);
+
+ if ('V' == geev_params.JOBVL)
+ delinearize_@COMPLEXTYPE@_matrix(*arg_iter++,
+ geev_params.VL,
+ &vl_out);
+ if ('V' == geev_params.JOBVR)
+ delinearize_@COMPLEXTYPE@_matrix(*arg_iter++,
+ geev_params.VR,
+ &vr_out);
+ } else {
+ /* geev failed */
+ error_occurred = 1;
+ nan_@COMPLEXTYPE@_matrix(*arg_iter++, &w_out);
+ if ('V' == geev_params.JOBVL)
+ nan_@COMPLEXTYPE@_matrix(*arg_iter++, &vl_out);
+ if ('V' == geev_params.JOBVR)
+ nan_@COMPLEXTYPE@_matrix(*arg_iter++, &vr_out);
+ }
+ update_pointers((npy_uint8**)args, outer_steps, op_count);
+ }
+
+ release_@lapack_func@(&geev_params);
+ }
+
+ set_fp_invalid_or_clear(error_occurred);
+}
+
+static void
+@TYPE@_eig(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_eig_wrapper('N', 'V', args, dimensions, steps);
+}
+
+static void
+@TYPE@_eigvals(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_eig_wrapper('N', 'N', args, dimensions, steps);
+}
+
+/**end repeat**/
+
+
+/* -------------------------------------------------------------------------- */
+ /* singular value decomposition */
+
+typedef struct gesdd_params_struct
+{
+ void *A;
+ void *S;
+ void *U;
+ void *VT;
+ void *WORK;
+ void *RWORK;
+ void *IWORK;
+
+ fortran_int M;
+ fortran_int N;
+ fortran_int LDA;
+ fortran_int LDU;
+ fortran_int LDVT;
+ fortran_int LWORK;
+ char JOBZ;
+} GESDD_PARAMS_t;
+
+
+static inline void
+dump_gesdd_params(const char *name,
+ GESDD_PARAMS_t *params)
+{
+ TRACE_TXT("\n%s:\n"\
+
+ "%14s: %18p\n"\
+ "%14s: %18p\n"\
+ "%14s: %18p\n"\
+ "%14s: %18p\n"\
+ "%14s: %18p\n"\
+ "%14s: %18p\n"\
+ "%14s: %18p\n"\
+
+ "%14s: %18d\n"\
+ "%14s: %18d\n"\
+ "%14s: %18d\n"\
+ "%14s: %18d\n"\
+ "%14s: %18d\n"\
+ "%14s: %18d\n"\
+
+ "%14s: %15c'%c'\n",
+
+ name,
+
+ "A", params->A,
+ "S", params->S,
+ "U", params->U,
+ "VT", params->VT,
+ "WORK", params->WORK,
+ "RWORK", params->RWORK,
+ "IWORK", params->IWORK,
+
+ "M", (int)params->M,
+ "N", (int)params->N,
+ "LDA", (int)params->LDA,
+ "LDU", (int)params->LDU,
+ "LDVT", (int)params->LDVT,
+ "LWORK", (int)params->LWORK,
+
+ "JOBZ", ' ',params->JOBZ);
+}
+
+static inline int
+compute_urows_vtcolumns(char jobz,
+ fortran_int m, fortran_int n,
+ fortran_int *urows, fortran_int *vtcolumns)
+{
+ fortran_int min_m_n = m<n?m:n;
+ switch(jobz)
+ {
+ case 'N':
+ *urows = 0;
+ *vtcolumns = 0;
+ break;
+ case 'A':
+ *urows = m;
+ *vtcolumns = n;
+ break;
+ case 'S':
+ {
+ *urows = min_m_n;
+ *vtcolumns = min_m_n;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
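+/*
+ * For illustration, with m = 5, n = 3 (min(m, n) = 3):
+ * JOBZ == 'N': singular values only, no U and no VT;
+ * JOBZ == 'S': thin factors, U of shape (5, 3), VT of shape (3, 3);
+ * JOBZ == 'A': full factors, U of shape (5, 5), VT of shape (3, 3).
+ */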
+
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE#
+ #lapack_func=sgesdd,dgesdd#
+ #ftyp=fortran_real,fortran_doublereal#
+ */
+
+static inline int
+init_@lapack_func@(GESDD_PARAMS_t *params,
+ char jobz,
+ fortran_int m,
+ fortran_int n)
+{
+ npy_uint8 *mem_buff = NULL;
+ npy_uint8 *mem_buff2 = NULL;
+ npy_uint8 *a, *s, *u, *vt, *work, *iwork;
+ size_t a_size = (size_t)m*(size_t)n*sizeof(@ftyp@);
+ fortran_int min_m_n = m<n?m:n;
+ size_t s_size = ((size_t)min_m_n)*sizeof(@ftyp@);
+ fortran_int u_row_count, vt_column_count;
+ size_t u_size, vt_size;
+ fortran_int work_count;
+ size_t work_size;
+ size_t iwork_size = 8*((size_t)min_m_n)*sizeof(fortran_int);
+
+ if (!compute_urows_vtcolumns(jobz, m, n, &u_row_count, &vt_column_count))
+ goto error;
+
+ u_size = ((size_t)u_row_count)*m*sizeof(@ftyp@);
+ vt_size = n*((size_t)vt_column_count)*sizeof(@ftyp@);
+
+ mem_buff = malloc(a_size + s_size + u_size + vt_size + iwork_size);
+
+ if (!mem_buff)
+ goto error;
+
+ a = mem_buff;
+ s = a + a_size;
+ u = s + s_size;
+ vt = u + u_size;
+ iwork = vt + vt_size;
+
+ /* fix vt_column_count so that it is a valid lapack parameter (0 is not) */
+ vt_column_count = vt_column_count < 1? 1 : vt_column_count;
+ {
+ /* compute optimal work size */
+ @ftyp@ work_size_query;
+ fortran_int do_query = -1;
+ fortran_int rv;
+ LAPACK(@lapack_func@)(&jobz, &m, &n,
+ (void*)a, &m, (void*)s, (void*)u, &m,
+ (void*)vt, &vt_column_count,
+ &work_size_query, &do_query,
+ (void*)iwork, &rv);
+ if (0!=rv)
+ goto error;
+ work_count = (fortran_int)work_size_query;
+ work_size = (size_t)work_count * sizeof(@ftyp@);
+ }
+
+ mem_buff2 = malloc(work_size);
+ if (!mem_buff2)
+ goto error;
+
+ work = mem_buff2;
+
+ params->M = m;
+ params->N = n;
+ params->A = a;
+ params->S = s;
+ params->U = u;
+ params->VT = vt;
+ params->WORK = work;
+ params->RWORK = NULL;
+ params->IWORK = iwork;
+ params->M = m;
+ params->N = n;
+ params->LDA = m;
+ params->LDU = m;
+ params->LDVT = vt_column_count;
+ params->LWORK = work_count;
+ params->JOBZ = jobz;
+
+ return 1;
+ error:
+ TRACE_TXT("%s failed init\n", __FUNCTION__);
+ free(mem_buff2);
+ free(mem_buff);
+ memset(params, 0, sizeof(*params));
+
+ return 0;
+}
+
+static inline fortran_int
+call_@lapack_func@(GESDD_PARAMS_t *params)
+{
+ fortran_int rv;
+ LAPACK(@lapack_func@)(&params->JOBZ, &params->M, &params->N,
+ params->A, &params->LDA,
+ params->S,
+ params->U, &params->LDU,
+ params->VT, &params->LDVT,
+ params->WORK, &params->LWORK,
+ params->IWORK,
+ &rv);
+ return rv;
+}
+
+/**end repeat**/
+
+/**begin repeat
+ #TYPE=CFLOAT,CDOUBLE#
+ #ftyp=fortran_complex,fortran_doublecomplex#
+ #frealtyp=fortran_real,fortran_doublereal#
+ #typ=COMPLEX_t,DOUBLECOMPLEX_t#
+ #lapack_func=cgesdd,zgesdd#
+ */
+
+static inline int
+init_@lapack_func@(GESDD_PARAMS_t *params,
+ char jobz,
+ fortran_int m,
+ fortran_int n)
+{
+ npy_uint8 *mem_buff = NULL, *mem_buff2 = NULL;
+ npy_uint8 *a,*s, *u, *vt, *work, *rwork, *iwork;
+ size_t a_size, s_size, u_size, vt_size, work_size, rwork_size, iwork_size;
+ fortran_int u_row_count, vt_column_count, work_count;
+ fortran_int min_m_n = m<n?m:n;
+
+ if (!compute_urows_vtcolumns(jobz, m, n, &u_row_count, &vt_column_count))
+ goto error;
+
+ a_size = ((size_t)m)*((size_t)n)*sizeof(@ftyp@);
+ s_size = ((size_t)min_m_n)*sizeof(@frealtyp@);
+ u_size = ((size_t)u_row_count)*m*sizeof(@ftyp@);
+ vt_size = n*((size_t)vt_column_count)*sizeof(@ftyp@);
+ rwork_size = 'N'==jobz?
+ 7*((size_t)min_m_n) :
+ (5*(size_t)min_m_n*(size_t)min_m_n + 5*(size_t)min_m_n);
+ rwork_size *= sizeof(@ftyp@);
+ iwork_size = 8*((size_t)min_m_n)*sizeof(fortran_int);
+
+ mem_buff = malloc(a_size +
+ s_size +
+ u_size +
+ vt_size +
+ rwork_size +
+ iwork_size);
+ if (!mem_buff)
+ goto error;
+
+ a = mem_buff;
+ s = a + a_size;
+ u = s + s_size;
+ vt = u + u_size;
+ rwork = vt + vt_size;
+ iwork = rwork + rwork_size;
+
+ /* fix vt_column_count so that it is a valid lapack parameter (0 is not) */
+ vt_column_count = vt_column_count < 1? 1 : vt_column_count;
+ {
+ /* compute optimal work size */
+ @ftyp@ work_size_query;
+ fortran_int do_query = -1;
+ fortran_int rv;
+ LAPACK(@lapack_func@)(&jobz, &m, &n,
+ (void*)a, &m, (void*)s, (void*)u, &m,
+ (void*)vt, &vt_column_count,
+ &work_size_query, &do_query,
+ (void*)rwork,
+ (void*)iwork, &rv);
+ if (0!=rv)
+ goto error;
+ work_count = (fortran_int)((@typ@*)&work_size_query)->array[0];
+ work_size = (size_t)work_count * sizeof(@ftyp@);
+ }
+
+ mem_buff2 = malloc(work_size);
+ if (!mem_buff2)
+ goto error;
+
+ work = mem_buff2;
+
+ params->A = a;
+ params->S = s;
+ params->U = u;
+ params->VT = vt;
+ params->WORK = work;
+ params->RWORK = rwork;
+ params->IWORK = iwork;
+ params->M = m;
+ params->N = n;
+ params->LDA = m;
+ params->LDU = m;
+ params->LDVT = vt_column_count;
+ params->LWORK = work_count;
+ params->JOBZ = jobz;
+
+ return 1;
+ error:
+ TRACE_TXT("%s failed init\n", __FUNCTION__);
+ free(mem_buff2);
+ free(mem_buff);
+ memset(params, 0, sizeof(*params));
+
+ return 0;
+}
+
+static inline fortran_int
+call_@lapack_func@(GESDD_PARAMS_t *params)
+{
+ fortran_int rv;
+ LAPACK(@lapack_func@)(&params->JOBZ, &params->M, &params->N,
+ params->A, &params->LDA,
+ params->S,
+ params->U, &params->LDU,
+ params->VT, &params->LDVT,
+ params->WORK, &params->LWORK,
+ params->RWORK,
+ params->IWORK,
+ &rv);
+ return rv;
+}
+/**end repeat**/
+
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #REALTYPE=FLOAT,DOUBLE,FLOAT,DOUBLE#
+ #lapack_func=sgesdd,dgesdd,cgesdd,zgesdd#
+ */
+static inline void
+release_@lapack_func@(GESDD_PARAMS_t* params)
+{
+ /* A and WORK contain allocated blocks */
+ free(params->A);
+ free(params->WORK);
+ memset(params, 0, sizeof(*params));
+}
+
+static inline void
+@TYPE@_svd_wrapper(char JOBZ,
+ char **args,
+ npy_intp* dimensions,
+ npy_intp* steps)
+{
+ ptrdiff_t outer_steps[4];
+ int error_occurred = get_fp_invalid_and_clear();
+ size_t iter;
+ size_t outer_dim = *dimensions++;
+ size_t op_count = (JOBZ=='N')?2:4;
+ GESDD_PARAMS_t params;
+
+ for (iter=0; iter < op_count; ++iter) {
+ outer_steps[iter] = (ptrdiff_t) steps[iter];
+ }
+ steps += op_count;
+
+ if (init_@lapack_func@(&params,
+ JOBZ,
+ (fortran_int)dimensions[0],
+ (fortran_int)dimensions[1])) {
+ LINEARIZE_DATA_t a_in, u_out, s_out, v_out;
+
+ init_linearize_data(&a_in, params.N, params.M, steps[1], steps[0]);
+ if ('N' == params.JOBZ) {
+ /* only the singular values are wanted */
+ fortran_int min_m_n = params.M < params.N? params.M : params.N;
+ init_linearize_data(&s_out, 1, min_m_n, 0, steps[2]);
+ } else {
+ fortran_int u_columns, v_rows;
+ fortran_int min_m_n = params.M < params.N? params.M : params.N;
+ if ('S' == params.JOBZ) {
+ u_columns = min_m_n;
+ v_rows = min_m_n;
+ } else {
+ u_columns = params.M;
+ v_rows = params.N;
+ }
+ init_linearize_data(&u_out,
+ u_columns, params.M,
+ steps[3], steps[2]);
+ init_linearize_data(&s_out,
+ 1, min_m_n,
+ 0, steps[4]);
+ init_linearize_data(&v_out,
+ params.N, v_rows,
+ steps[6], steps[5]);
+ }
+
+ for (iter = 0; iter < outer_dim; ++iter) {
+ int not_ok;
+ /* copy the matrix in */
+ linearize_@TYPE@_matrix(params.A, args[0], &a_in);
+ not_ok = call_@lapack_func@(&params);
+ if (!not_ok) {
+ if ('N' == params.JOBZ) {
+ delinearize_@REALTYPE@_matrix(args[1], params.S, &s_out);
+ } else {
+ delinearize_@TYPE@_matrix(args[1], params.U, &u_out);
+ delinearize_@REALTYPE@_matrix(args[2], params.S, &s_out);
+ delinearize_@TYPE@_matrix(args[3], params.VT, &v_out);
+ }
+ } else {
+ error_occurred = 1;
+ if ('N' == params.JOBZ) {
+ nan_@REALTYPE@_matrix(args[1], &s_out);
+ } else {
+ nan_@TYPE@_matrix(args[1], &u_out);
+ nan_@REALTYPE@_matrix(args[2], &s_out);
+ nan_@TYPE@_matrix(args[3], &v_out);
+ }
+ }
+ update_pointers((npy_uint8**)args, outer_steps, op_count);
+ }
+
+ release_@lapack_func@(&params);
+ }
+
+ set_fp_invalid_or_clear(error_occurred);
+}
+/**end repeat**/
+
+
+/* svd gufunc entry points */
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ */
+static void
+@TYPE@_svd_N(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_svd_wrapper('N', args, dimensions, steps);
+}
+
+static void
+@TYPE@_svd_S(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_svd_wrapper('S', args, dimensions, steps);
+}
+
+static void
+@TYPE@_svd_A(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_svd_wrapper('A', args, dimensions, steps);
+}
+
+/**end repeat**/
+
+/* -------------------------------------------------------------------------- */
+ /* some basic ufuncs */
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #typ=float,double,COMPLEX_t,DOUBLECOMPLEX_t#
+ */
+
+static void
+@TYPE@_add3(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_4
+ BEGIN_OUTER_LOOP_4
+ @typ@ *op1p = (@typ@ *) args[0];
+ @typ@ *op2p = (@typ@ *) args[1];
+ @typ@ *op3p = (@typ@ *) args[2];
+ @typ@ *op4p = (@typ@ *) args[3];
+ @typ@ tmp;
+ tmp = @TYPE@_add(*op1p, *op2p);
+ *op4p = @TYPE@_add(tmp, *op3p);
+ END_OUTER_LOOP
+}
+
+static void
+@TYPE@_multiply3(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_4
+ BEGIN_OUTER_LOOP_4
+ @typ@ *op1p = (@typ@ *) args[0];
+ @typ@ *op2p = (@typ@ *) args[1];
+ @typ@ *op3p = (@typ@ *) args[2];
+ @typ@ *op4p = (@typ@ *) args[3];
+ @typ@ tmp;
+ tmp = @TYPE@_mul(*op1p, *op2p);
+ *op4p = @TYPE@_mul(tmp, *op3p);
+ END_OUTER_LOOP
+}
+
+static void
+@TYPE@_multiply3_add(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_5
+ BEGIN_OUTER_LOOP_5
+ @typ@ *op1p = (@typ@ *) args[0];
+ @typ@ *op2p = (@typ@ *) args[1];
+ @typ@ *op3p = (@typ@ *) args[2];
+ @typ@ *op4p = (@typ@ *) args[3];
+ @typ@ *op5p = (@typ@ *) args[4];
+ @typ@ tmp, tmp2;
+
+ tmp = @TYPE@_mul(*op1p, *op2p);
+ tmp2 = @TYPE@_mul(tmp, *op3p);
+ *op5p = @TYPE@_add(tmp2, *op4p);
+ END_OUTER_LOOP
+}
+
+static void
+@TYPE@_multiply_add(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_4
+ BEGIN_OUTER_LOOP_4
+ @typ@ *op1p = (@typ@ *) args[0];
+ @typ@ *op2p = (@typ@ *) args[1];
+ @typ@ *op3p = (@typ@ *) args[2];
+ @typ@ *op4p = (@typ@ *) args[3];
+ @typ@ tmp;
+ tmp = @TYPE@_mul(*op1p, *op2p);
+ *op4p = @TYPE@_add(tmp, *op3p);
+ END_OUTER_LOOP
+}
+
+static void
+@TYPE@_multiply_add2(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_5
+ BEGIN_OUTER_LOOP_5
+ @typ@ *op1p = (@typ@ *) args[0];
+ @typ@ *op2p = (@typ@ *) args[1];
+ @typ@ *op3p = (@typ@ *) args[2];
+ @typ@ *op4p = (@typ@ *) args[3];
+ @typ@ *op5p = (@typ@ *) args[4];
+ @typ@ tmp, tmp2;
+ tmp = @TYPE@_mul(*op1p, *op2p);
+ tmp2 = @TYPE@_add(*op3p, *op4p);
+ *op5p = @TYPE@_add(tmp, tmp2);
+ END_OUTER_LOOP
+}
+
+static void
+@TYPE@_multiply4(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_5
+ BEGIN_OUTER_LOOP_5
+ @typ@ *op1p = (@typ@ *) args[0];
+ @typ@ *op2p = (@typ@ *) args[1];
+ @typ@ *op3p = (@typ@ *) args[2];
+ @typ@ *op4p = (@typ@ *) args[3];
+ @typ@ *op5p = (@typ@ *) args[4];
+ @typ@ tmp, tmp2;
+ tmp = @TYPE@_mul(*op1p, *op2p);
+ tmp2 = @TYPE@_mul(*op3p, *op4p);
+ *op5p = @TYPE@_mul(tmp, tmp2);
+ END_OUTER_LOOP
+}
+
+static void
+@TYPE@_multiply4_add(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_6
+ BEGIN_OUTER_LOOP_6
+ @typ@ *op1p = (@typ@ *) args[0];
+ @typ@ *op2p = (@typ@ *) args[1];
+ @typ@ *op3p = (@typ@ *) args[2];
+ @typ@ *op4p = (@typ@ *) args[3];
+ @typ@ *op5p = (@typ@ *) args[4];
+ @typ@ *op6p = (@typ@ *) args[5];
+ @typ@ tmp, tmp2, tmp3;
+ tmp = @TYPE@_mul(*op1p, *op2p);
+ tmp2 = @TYPE@_mul(*op3p, *op4p);
+ tmp3 = @TYPE@_mul(tmp, tmp2);
+ *op6p = @TYPE@_add(tmp3, *op5p);
+ END_OUTER_LOOP
+}
+
+/**end repeat**/
+
+/* -------------------------------------------------------------------------- */
+ /* quadratic form */
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #typ=float,double,COMPLEX_t,DOUBLECOMPLEX_t#
+ #ftyp=fortran_real,fortran_doublereal,fortran_complex,fortran_doublecomplex#
+ #zero=s_zero,d_zero,c_zero,z_zero#
+ #blas_dot=FNAME(sdot),FNAME(ddot),FNAME(cdotu),FNAME(zdotu)#
+ */
+
+static inline @typ@
+@TYPE@_dot_blas(size_t count,
+ void *X, intptr_t X_byte_step,
+ void *Y, intptr_t Y_byte_step)
+{
+ @ftyp@ result;
+ fortran_int N = (fortran_int) count;
+ fortran_int INCX = X_byte_step/sizeof(@ftyp@);
+ fortran_int INCY = Y_byte_step/sizeof(@ftyp@);
+
+ result = @blas_dot@(&N, X, &INCX, Y, &INCY);
+
+ return *(@typ@ *)&result;
+}
+
+static inline @typ@
+@TYPE@_dot_std(size_t count,
+ void *X, intptr_t X_byte_step,
+ void *Y, intptr_t Y_byte_step)
+{
+ size_t i;
+ @typ@ acc, *x_ptr, *y_ptr;
+ x_ptr = (@typ@ *)X;
+ y_ptr = (@typ@ *)Y;
+
+ acc = @TYPE@_mul(*x_ptr, *y_ptr);
+ x_ptr = offset_ptr(x_ptr, X_byte_step);
+ y_ptr = offset_ptr(y_ptr, Y_byte_step);
+
+ for (i = 1; i < count; i++) {
+ @typ@ tmp;
+
+ tmp = @TYPE@_mul(*x_ptr, *y_ptr);
+ acc = @TYPE@_add(acc, tmp);
+
+ x_ptr = offset_ptr(x_ptr, X_byte_step);
+ y_ptr = offset_ptr(y_ptr, Y_byte_step);
+ }
+
+ return acc;
+}
+
+/* uQv, where u and v are vectors and Q is a matrix */
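+/*
+ * Worked form of the computation below:
+ *
+ * r = u^T Q v = sum_j v[j] * (sum_i u[i] * Q[i, j])
+ *
+ * evaluated column by column, each inner sum delegated to a dot product
+ * (BLAS when the strides allow it, the scalar loop otherwise).
+ */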
+static void
+@TYPE@_quadratic_form(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_4
+ size_t m = (size_t)dimensions[0];
+ size_t n = (size_t)dimensions[1];
+ ptrdiff_t u_step = (ptrdiff_t)steps[0];
+ ptrdiff_t Q_row_step = (ptrdiff_t)steps[1];
+ ptrdiff_t Q_column_step = (ptrdiff_t)steps[2];
+ ptrdiff_t v_step = (ptrdiff_t)steps[3];
+
+ if ((0 == (Q_row_step % sizeof(@ftyp@))) &&
+ (0 == (u_step % sizeof(@ftyp@)))) {
+ /* use blas loops for dot */
+ BEGIN_OUTER_LOOP_4
+ size_t column;
+ npy_uint8 *u, *Q, *v;
+ @typ@ *r;
+ @typ@ accum = @zero@;
+
+ u = (npy_uint8 *)args[0]; /* u (m) [in] */
+ Q = (npy_uint8 *)args[1]; /* Q (m,n) [in] */
+ v = (npy_uint8 *)args[2]; /* v (n) [in] */
+ r = (@typ@ *)args[3]; /* result [out] */
+ /* sum u . Q[i] * v[i] over all i, where
+ Q[i] are the different columns of Q */
+
+ for (column = 0; column < n; ++column) {
+ @typ@ tmp, tmp2;
+ tmp = @TYPE@_dot_blas(m,
+ Q, Q_row_step,
+ u, u_step);
+ tmp2 = @TYPE@_mul(tmp, *(@typ@*)v);
+ accum = @TYPE@_add(accum, tmp2);
+ Q += Q_column_step;
+ v += v_step;
+ }
+
+ *r = accum;
+ END_OUTER_LOOP
+ } else {
+ BEGIN_OUTER_LOOP_4
+ size_t column;
+ npy_uint8 *u, *Q, *v;
+ @typ@ *r;
+ @typ@ accum = @zero@;
+ u = (npy_uint8 *)args[0]; /* u (m) [in] */
+ Q = (npy_uint8 *)args[1]; /* Q (m,n) [in] */
+ v = (npy_uint8 *)args[2]; /* v (n) [in] */
+ r = (@typ@ *)args[3]; /* result [out] */
+ /* sum u . Q[i] * v[i] over all i, where
+ Q[i] are the different columns of Q */
+
+ for (column = 0; column < n; ++column) {
+ @typ@ tmp, tmp2;
+ tmp = @TYPE@_dot_std(m, Q, Q_row_step, u, u_step);
+ tmp2 = @TYPE@_mul(tmp, *(@typ@*)v);
+ accum = @TYPE@_add(accum, tmp2);
+ Q += Q_column_step;
+ v += v_step;
+ }
+
+ *r = accum;
+ END_OUTER_LOOP
+ }
+}
+/**end repeat**/
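
In NumPy terms, quadratic_form has signature "(m),(m,n),(n)->()" and
computes u.Q.v on the core dimensions while broadcasting over the rest.
A reference computation via einsum (slower, but handy for checking a
kernel like this):

    import numpy as np

    u = np.random.rand(5, 3)     # batch of u vectors, m = 3
    Q = np.random.rand(5, 3, 4)  # batch of Q matrices, (m, n) = (3, 4)
    v = np.random.rand(5, 4)     # batch of v vectors, n = 4

    # u.Q.v per batch element; einsum broadcasts the leading dimension
    expected = np.einsum('...i,...ij,...j->...', u, Q, v)
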
+
+/* -------------------------------------------------------------------------- */
+ /* chosolve (using potrs) */
+
+typedef struct potrs_params_struct {
+ void *A;
+ void *B;
+ fortran_int N;
+ fortran_int NRHS;
+ fortran_int LDA;
+ fortran_int LDB;
+ char UPLO;
+} POTRS_PARAMS_t;
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #ftyp=float,double,COMPLEX_t,DOUBLECOMPLEX_t#
+ #lapack_func_2=spotrf,dpotrf,cpotrf,zpotrf#
+ #lapack_func=spotrs,dpotrs,cpotrs,zpotrs#
+ */
+
+
+static inline int
+init_@lapack_func@(POTRS_PARAMS_t *params,
+ char UPLO,
+ fortran_int N,
+ fortran_int NRHS)
+{
+ npy_uint8 *mem_buff = NULL;
+ npy_uint8 *a, *b;
+ size_t a_size, b_size;
+
+ a_size = N*N*sizeof(@ftyp@);
+ b_size = N*NRHS*sizeof(@ftyp@);
+ mem_buff = malloc(a_size + b_size);
+ if (!mem_buff)
+ goto error;
+
+ a = mem_buff;
+ b = a + a_size;
+
+ params->A = (void*)a;
+ params->B = (void*)b;
+ params->N = N;
+ params->NRHS = NRHS;
+ params->LDA = N;
+ params->LDB = N;
+ params->UPLO = UPLO;
+
+ return 1;
+
+ error:
+ free(mem_buff);
+ memset(params, 0, sizeof(*params));
+
+ return 0;
+}
+
+static inline void
+release_@lapack_func@(POTRS_PARAMS_t *params)
+{
+ free(params->A);
+ memset(params, 0, sizeof(*params));
+ return;
+}
+
+static inline int
+call_@lapack_func@(POTRS_PARAMS_t *params)
+{
+ fortran_int rv;
+ LAPACK(@lapack_func_2@)(&params->UPLO,
+ &params->N,
+ params->A, &params->LDA,
+ &rv);
+ if (0 != rv)
+ return rv;
+
+ LAPACK(@lapack_func@)(&params->UPLO,
+ &params->N, &params->NRHS,
+ params->A, &params->LDA,
+ params->B, &params->LDB,
+ &rv);
+ return rv;
+}
+
+
+/* uplo: either 'U' or 'L'
+ ndim: 1 to take NRHS from dimensions[1] (matrix right-hand side),
+ 0 to force NRHS = 1 (vector right-hand side)
+ */
+static void
+@TYPE@_chosolve(char uplo, char ndim,
+ char **args,
+ npy_intp *dimensions,
+ npy_intp* steps
+ )
+{
+ POTRS_PARAMS_t params;
+ int error_occurred = get_fp_invalid_and_clear();
+ fortran_int n, nrhs;
+ INIT_OUTER_LOOP_3
+
+ n = (fortran_int)dimensions[0];
+ nrhs = ndim ? (fortran_int)dimensions[1] : 1;
+ if (init_@lapack_func@(&params, uplo, n, nrhs)) {
+ LINEARIZE_DATA_t a_in, b_in, r_out;
+
+ init_linearize_data(&a_in, n, n, steps[1], steps[0]);
+ if (ndim) {
+ init_linearize_data(&b_in, nrhs, n, steps[3], steps[2]);
+ init_linearize_data(&r_out, nrhs, n, steps[5], steps[4]);
+ } else {
+ init_linearize_data(&b_in, 1, n, 0, steps[2]);
+ init_linearize_data(&r_out, 1, n, 0, steps[3]);
+ }
+
+ BEGIN_OUTER_LOOP_3
+ int not_ok;
+ linearize_@TYPE@_matrix(params.A, args[0], &a_in);
+ linearize_@TYPE@_matrix(params.B, args[1], &b_in);
+ not_ok = call_@lapack_func@(&params);
+ if (!not_ok) {
+ delinearize_@TYPE@_matrix(args[2], params.B, &r_out);
+ } else {
+ error_occurred = 1;
+ nan_@TYPE@_matrix(args[2], &r_out);
+ }
+ END_OUTER_LOOP
+
+ release_@lapack_func@(&params);
+ }
+
+ set_fp_invalid_or_clear(error_occurred);
+}
+
+static void
+@TYPE@_chosolve_up(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_chosolve('U', 1, args, dimensions, steps);
+}
+
+static void
+@TYPE@_chosolve_lo(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_chosolve('L', 1, args, dimensions, steps);
+}
+
+static void
+@TYPE@_chosolve1_up(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_chosolve('U', 0, args, dimensions, steps);
+}
+
+static void
+@TYPE@_chosolve1_lo(char **args,
+ npy_intp *dimensions,
+ npy_intp* steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_chosolve('L', 0, args, dimensions, steps);
+}
+
+/**end repeat**/
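
chosolve drives LAPACK potrf (Cholesky factorization) followed by potrs
(triangular solves), so for a symmetric/hermitian positive-definite A it
should agree with a general-purpose solver. A small reference check,
sketched with numpy.linalg standing in for the gufunc:

    import numpy as np

    rng = np.random.RandomState(0)
    m, n = 4, 2
    c = rng.rand(m, m)
    A = np.dot(c, c.T) + m * np.eye(m)  # positive definite by construction
    B = rng.rand(m, n)

    X = np.linalg.solve(A, B)           # what chosolve_lo(A, B) should return
    assert np.allclose(np.dot(A, X), B)
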
+
+/* -------------------------------------------------------------------------- */
+ /* poinv */
+
+typedef struct potri_params_struct {
+ void *A;
+ fortran_int N;
+ fortran_int LDA;
+ char UPLO;
+} POTRI_PARAMS_t;
+
+
+/**begin repeat
+ #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE#
+ #ftyp=float,double,COMPLEX_t,DOUBLECOMPLEX_t#
+ #potrf=spotrf,dpotrf,cpotrf,zpotrf#
+ #potri=spotri,dpotri,cpotri,zpotri#
+ */
+static inline int
+init_@potri@(POTRI_PARAMS_t *params,
+ char UPLO,
+ fortran_int N)
+{
+ npy_uint8 *mem_buff = NULL;
+ npy_uint8 *a;
+ size_t a_size;
+
+ a_size = N*N*sizeof(@ftyp@);
+ mem_buff = malloc(a_size);
+ if (!mem_buff)
+ goto error;
+
+ a = mem_buff;
+
+ params->A = (void*)a;
+ params->N = N;
+ params->LDA = N;
+ params->UPLO = UPLO;
+
+ return 1;
+
+ error:
+ free(mem_buff);
+ memset(params, 0, sizeof(*params));
+
+ return 0;
+}
+
+static inline void
+release_@potri@(POTRI_PARAMS_t *params)
+{
+ free(params->A);
+ memset(params, 0, sizeof(*params));
+ return;
+}
+
+static inline int
+call_@potri@(POTRI_PARAMS_t *params)
+{
+ fortran_int rv;
+ LAPACK(@potrf@)(&params->UPLO,
+ &params->N,
+ params->A, &params->LDA,
+ &rv);
+ if (0 != rv) {
+ return rv;
+ }
+
+ LAPACK(@potri@)(&params->UPLO,
+ &params->N,
+ params->A, &params->LDA,
+ &rv);
+ return rv;
+}
+
+
+static inline void
+@TYPE@_poinv(char uplo,
+ char **args,
+ npy_intp *dimensions,
+ npy_intp *steps)
+{
+ POTRI_PARAMS_t params;
+ int error_occurred = get_fp_invalid_and_clear();
+ fortran_int n;
+ INIT_OUTER_LOOP_2
+
+ n = (fortran_int)dimensions[0];
+ if (init_@potri@(&params, uplo, n)) {
+ LINEARIZE_DATA_t a_in, r_out;
+
+ init_linearize_data(&a_in, n, n, steps[1], steps[0]);
+ init_linearize_data(&r_out, n, n, steps[3], steps[2]);
+
+ BEGIN_OUTER_LOOP_2
+ int not_ok;
+ linearize_@TYPE@_matrix(params.A, args[0], &a_in);
+ not_ok = call_@potri@(&params);
+ if (!not_ok) {
+ make_symmetric_@TYPE@_matrix(params.UPLO, params.N, params.A);
+ delinearize_@TYPE@_matrix(args[1], params.A, &r_out);
+ } else {
+ error_occurred = 1;
+ nan_@TYPE@_matrix(args[1], &r_out);
+ }
+ END_OUTER_LOOP
+
+ release_@potri@(&params);
+ }
+
+ set_fp_invalid_or_clear(error_occurred);
+}
+
+static void
+@TYPE@_poinv_up(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_poinv('U', args, dimensions, steps);
+}
+
+static void
+@TYPE@_poinv_lo(char **args,
+ npy_intp *dimensions,
+ npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ @TYPE@_poinv('L', args, dimensions, steps);
+}
+
+/**end repeat**/
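
poinv chains potrf and potri: Cholesky-factor the positive-definite input,
invert from the factor, then make_symmetric_@TYPE@_matrix mirrors the
computed triangle so a full inverse is delinearized. For well-conditioned
SPD input it should therefore match the general inverse; a sketch of that
check:

    import numpy as np

    rng = np.random.RandomState(0)
    c = rng.rand(4, 4)
    A = np.dot(c, c.T) + 4 * np.eye(4)  # symmetric positive definite

    A_inv = np.linalg.inv(A)            # what poinv_lo(A) should return
    assert np.allclose(np.dot(A, A_inv), np.eye(4))
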
+
+#pragma GCC diagnostic pop
+
+/* -------------------------------------------------------------------------- */
+ /* gufunc registration */
+
+static void *array_of_nulls[] = {
+ (void *)NULL,
+ (void *)NULL,
+ (void *)NULL,
+ (void *)NULL,
+
+ (void *)NULL,
+ (void *)NULL,
+ (void *)NULL,
+ (void *)NULL,
+
+ (void *)NULL,
+ (void *)NULL,
+ (void *)NULL,
+ (void *)NULL,
+
+ (void *)NULL,
+ (void *)NULL,
+ (void *)NULL,
+ (void *)NULL
+};
+
+#define FUNC_ARRAY_NAME(NAME) NAME ## _funcs
+
+#define GUFUNC_FUNC_ARRAY_REAL(NAME) \
+ static PyUFuncGenericFunction \
+ FUNC_ARRAY_NAME(NAME)[] = { \
+ FLOAT_ ## NAME, \
+ DOUBLE_ ## NAME \
+ }
+
+#define GUFUNC_FUNC_ARRAY_REAL_COMPLEX(NAME) \
+ static PyUFuncGenericFunction \
+ FUNC_ARRAY_NAME(NAME)[] = { \
+ FLOAT_ ## NAME, \
+ DOUBLE_ ## NAME, \
+ CFLOAT_ ## NAME, \
+ CDOUBLE_ ## NAME \
+ }
+
+/* There are problems with eig in complex single precision.
+ * That kernel is disabled
+ */
+#define GUFUNC_FUNC_ARRAY_EIG(NAME) \
+ static PyUFuncGenericFunction \
+ FUNC_ARRAY_NAME(NAME)[] = { \
+ FLOAT_ ## NAME, \
+ DOUBLE_ ## NAME, \
+ CDOUBLE_ ## NAME \
+ }
+
+
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(inner1d);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(dotc1d);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(innerwt);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(matrix_multiply);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(slogdet);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(det);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(eighlo);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(eighup);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(eigvalshlo);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(eigvalshup);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(solve);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(solve1);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(inv);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(cholesky_up);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(cholesky_lo);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(svd_N);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(svd_S);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(svd_A);
+GUFUNC_FUNC_ARRAY_EIG(eig);
+GUFUNC_FUNC_ARRAY_EIG(eigvals);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(quadratic_form);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(add3);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(multiply3);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(multiply3_add);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(multiply_add);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(multiply_add2);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(multiply4);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(multiply4_add);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(chosolve_up);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(chosolve_lo);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(chosolve1_up);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(chosolve1_lo);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(poinv_up);
+GUFUNC_FUNC_ARRAY_REAL_COMPLEX(poinv_lo);
+
+static char equal_2_types[] = {
+ NPY_FLOAT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE,
+ NPY_CFLOAT, NPY_CFLOAT,
+ NPY_CDOUBLE, NPY_CDOUBLE
+};
+
+static char equal_3_types[] = {
+ NPY_FLOAT, NPY_FLOAT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE,
+ NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT,
+ NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE
+};
+
+static char equal_4_types[] = {
+ NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE,
+ NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT,
+ NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE
+};
+
+static char equal_5_types[] = {
+ NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE,
+ NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT,
+ NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE
+};
+
+static char equal_6_types[] = {
+ NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE,
+ NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT,
+ NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE
+};
+
+/* the second result is the logdet, which is always REAL */
+static char slogdet_types[] = {
+ NPY_FLOAT, NPY_FLOAT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE,
+ NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT,
+ NPY_CDOUBLE, NPY_CDOUBLE, NPY_DOUBLE
+};
+
+static char eigh_types[] = {
+ NPY_FLOAT, NPY_FLOAT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE,
+ NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT,
+ NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE
+};
+
+static char eighvals_types[] = {
+ NPY_FLOAT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE,
+ NPY_CFLOAT, NPY_FLOAT,
+ NPY_CDOUBLE, NPY_DOUBLE
+};
+
+static char eig_types[] = {
+ NPY_FLOAT, NPY_CFLOAT, NPY_CFLOAT,
+ NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE,
+ NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE
+};
+
+static char eigvals_types[] = {
+ NPY_FLOAT, NPY_CFLOAT,
+ NPY_DOUBLE, NPY_CDOUBLE,
+ NPY_CDOUBLE, NPY_CDOUBLE
+};
+
+static char svd_1_1_types[] = {
+ NPY_FLOAT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE,
+ NPY_CFLOAT, NPY_FLOAT,
+ NPY_CDOUBLE, NPY_DOUBLE
+};
+
+static char svd_1_3_types[] = {
+ NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE,
+ NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT,
+ NPY_CDOUBLE, NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE
+};
+
+typedef struct gufunc_descriptor_struct {
+ char *name;
+ char *signature;
+ char *doc;
+ int ntypes;
+ int nin;
+ int nout;
+ PyUFuncGenericFunction *funcs;
+ char *types;
+} GUFUNC_DESCRIPTOR_t;
+
+GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = {
+ {
+ "inner1d",
+ "(i),(i)->()",
+ "inner on the last dimension and broadcast on the rest \n"\
+ " \"(i),(i)->()\" \n",
+ 4, 2, 1,
+ FUNC_ARRAY_NAME(inner1d),
+ equal_3_types
+ },
+ {
+ "dotc1d",
+ "(i),(i)->()",
+ "inner on the last dimension and broadcast on the rest \n"\
+ " \"(i),(i)->()\" \n",
+ 4, 2, 1,
+ FUNC_ARRAY_NAME(dotc1d),
+ equal_3_types
+ },
+ {
+ "innerwt",
+ "(i),(i),(i)->()",
+ "inner on the last dimension using 3 operands and broadcast on the"\
+ " rest \n"\
+ " \"(i),(i),(i)->()\" \n",
+ 2, 3, 1,
+ FUNC_ARRAY_NAME(innerwt),
+ equal_4_types
+ },
+ {
+ "matrix_multiply",
+ "(m,k),(k,n)->(m,n)",
+ "dot on the last two dimensions and broadcast on the rest \n"\
+ " \"(m,k),(k,n)->(m,n)\" \n",
+ 4, 2, 1,
+ FUNC_ARRAY_NAME(matrix_multiply),
+ equal_3_types
+ },
+ {
+ "slogdet",
+ "(m,m)->(),()",
+ "slogdet on the last two dimensions and broadcast on the rest. \n"\
+ "Results in two arrays, one with sign and the other with log of the"\
+ " determinants. \n"\
+ " \"(m,m)->(),()\" \n",
+ 4, 1, 2,
+ FUNC_ARRAY_NAME(slogdet),
+ slogdet_types
+ },
+ {
+ "det",
+ "(m,m)->()",
+ "det of the last two dimensions and broadcast on the rest. \n"\
+ " \"(m,m)->()\" \n",
+ 4, 1, 1,
+ FUNC_ARRAY_NAME(det),
+ equal_2_types
+ },
+ {
+ "eigh_lo",
+ "(m,m)->(m),(m,m)",
+ "eigh on the last two dimension and broadcast to the rest, using"\
+ " lower triangle \n"\
+ "Results in a vector of eigenvalues and a matrix with the"\
+ "eigenvectors. \n"\
+ " \"(m,m)->(m),(m,m)\" \n",
+ 4, 1, 2,
+ FUNC_ARRAY_NAME(eighlo),
+ eigh_types
+ },
+ {
+ "eigh_up",
+ "(m,m)->(m),(m,m)",
+ "eigh on the last two dimension and broadcast to the rest, using"\
+ " upper triangle. \n"\
+ "Results in a vector of eigenvalues and a matrix with the"\
+ " eigenvectors. \n"\
+ " \"(m,m)->(m),(m,m)\" \n",
+ 4, 1, 2,
+ FUNC_ARRAY_NAME(eighup),
+ eigh_types
+ },
+ {
+ "eigvalsh_lo",
+ "(m,m)->(m)",
+ "eigh on the last two dimension and broadcast to the rest, using"\
+ " lower triangle. \n"\
+ "Results in a vector of eigenvalues and a matrix with the"\
+ "eigenvectors. \n"\
+ " \"(m,m)->(m)\" \n",
+ 4, 1, 1,
+ FUNC_ARRAY_NAME(eigvalshlo),
+ eighvals_types
+ },
+ {
+ "eigvalsh_up",
+ "(m,m)->(m)",
+ "eigvalsh on the last two dimension and broadcast to the rest,"\
+ " using upper triangle. \n"\
+ "Results in a vector of eigenvalues and a matrix with the"\
+ "eigenvectors.\n"\
+ " \"(m,m)->(m)\" \n",
+ 4, 1, 1,
+ FUNC_ARRAY_NAME(eigvalshup),
+ eighvals_types
+ },
+ {
+ "solve",
+ "(m,m),(m,n)->(m,n)",
+ "solve the system a x = b, on the last two dimensions, broadcast"\
+ " to the rest. \n"\
+ "Results in a matrices with the solutions. \n"\
+ " \"(m,m),(m,n)->(m,n)\" \n",
+ 4, 2, 1,
+ FUNC_ARRAY_NAME(solve),
+ equal_3_types
+ },
+ {
+ "solve1",
+ "(m,m),(m)->(m)",
+ "solve the system a x = b, for b being a vector, broadcast in"\
+ " the outer dimensions. \n"\
+ "Results in vectors with the solutions. \n"\
+ " \"(m,m),(m)->(m)\" \n",
+ 4,2,1,
+ FUNC_ARRAY_NAME(solve1),
+ equal_3_types
+ },
+ {
+ "inv",
+ "(m,m)->(m,m)",
+ "compute the inverse of the last two dimensions and broadcast"\
+ " to the rest. \n"\
+ "Results in the inverse matrices. \n"\
+ " \"(m,m)->(m,m)\" \n",
+ 4,1,1,
+ FUNC_ARRAY_NAME(inv),
+ equal_2_types
+ },
+ {
+ "cholesky_up",
+ "(m,m)->(m,m)",
+ "cholesky decomposition of hermitian positive-definite matrices. \n"\
+ "Broadcast to all outer dimensions. \n"\
+ " \"(m,m)->(m,m)\" \n",
+ 4, 1, 1,
+ FUNC_ARRAY_NAME(cholesky_up),
+ equal_2_types
+ },
+ {
+ "cholesky_lo",
+ "(m,m)->(m,m)",
+ "cholesky decomposition of hermitian positive-definite matrices. \n"\
+ "Broadcast to all outer dimensions. \n"\
+ " \"(m,m)->(m,m)\" \n",
+ 4, 1, 1,
+ FUNC_ARRAY_NAME(cholesky_lo),
+ equal_2_types
+ },
+ {
+ "svd_m",
+ "(m,n)->(m)",
+ "svd when n>=m. ",
+ 4, 1, 1,
+ FUNC_ARRAY_NAME(svd_N),
+ svd_1_1_types
+ },
+ {
+ "svd_n",
+ "(m,n)->(n)",
+ "svd when n<=m",
+ 4, 1, 1,
+ FUNC_ARRAY_NAME(svd_N),
+ svd_1_1_types
+ },
+ {
+ "svd_m_s",
+ "(m,n)->(m,m),(m),(m,n)",
+ "svd when m>=n",
+ 4, 1, 3,
+ FUNC_ARRAY_NAME(svd_S),
+ svd_1_3_types
+ },
+ {
+ "svd_n_s",
+ "(m,n)->(m,n),(n),(n,n)",
+ "svd when m>=n",
+ 4, 1, 3,
+ FUNC_ARRAY_NAME(svd_S),
+ svd_1_3_types
+ },
+ {
+ "svd_m_f",
+ "(m,n)->(m,m),(m),(n,n)",
+ "svd when m>=n",
+ 4, 1, 3,
+ FUNC_ARRAY_NAME(svd_A),
+ svd_1_3_types
+ },
+ {
+ "svd_n_f",
+ "(m,n)->(m,m),(n),(n,n)",
+ "svd when m>=n",
+ 4, 1, 3,
+ FUNC_ARRAY_NAME(svd_A),
+ svd_1_3_types
+ },
+ {
+ "eig",
+ "(m,m)->(m),(m,m)",
+ "eig on the last two dimension and broadcast to the rest. \n"\
+ "Results in a vector with the eigenvalues and a matrix with the"\
+ " eigenvectors. \n"\
+ " \"(m,m)->(m),(m,m)\" \n",
+ 3, 1, 2,
+ FUNC_ARRAY_NAME(eig),
+ eig_types
+ },
+ {
+ "eigvals",
+ "(m,m)->(m)",
+ "eigvals on the last two dimension and broadcast to the rest. \n"\
+ "Results in a vector of eigenvalues. \n"\
+ " \"(m,m)->(m),(m,m)\" \n",
+ 3, 1, 1,
+ FUNC_ARRAY_NAME(eigvals),
+ eigvals_types
+ },
+ {
+ "quadratic_form",
+ "(m),(m,n),(n)->()",
+ "computes the quadratic form uQv in the inner dimensions, broadcast"\
+ "to the rest \n"\
+ "Results in the result of uQv for each element of the broadcasted"\
+ "dimensions. \n"\
+ " \"(m),(m,n),(n)->()",
+ 4,3,1,
+ FUNC_ARRAY_NAME(quadratic_form),
+ equal_4_types
+ },
+ {
+ "add3",
+ "(),(),()->()",
+ "3-way element-wise addition. a,b,c -> a+b+c for all elements. \n",
+ 4,3,1,
+ FUNC_ARRAY_NAME(add3),
+ equal_4_types
+ },
+ {
+ "multiply3",
+ "(),(),()->()",
+ "3-way element-wise product. a,b,c -> a*b*c for all elements. \n",
+ 4,3,1,
+ FUNC_ARRAY_NAME(multiply3),
+ equal_4_types
+ },
+ {
+ "multiply3_add",
+ "(),(),(),()->()",
+ "3-way element-wise product plus addition. a,b,c,d -> a*b*c+d"\
+ " for all elements. \n",
+ 4,4,1,
+ FUNC_ARRAY_NAME(multiply3_add),
+ equal_5_types
+ },
+ {
+ "multiply_add",
+ "(),(),()->()",
+ "element-wise multiply-add. a,b,c -> a*b+c for all elements. \n",
+ 4,3,1,
+ FUNC_ARRAY_NAME(multiply_add),
+ equal_4_types
+ },
+ {
+ "multiply_add2",
+ "(),(),(),()->()",
+ "element-wise product with 2 additions. a,b,c,d -> a*b+c+d for"\
+ " all elements. \n",
+ 4,4,1,
+ FUNC_ARRAY_NAME(multiply_add2),
+ equal_5_types
+ },
+ {
+ "multiply4",
+ "(),(),(),()->()",
+ "4-way element-wise product. a,b,c,d -> a*b*c*d for all elements. \n",
+ 4,4,1,
+ FUNC_ARRAY_NAME(multiply4),
+ equal_5_types
+ },
+ {
+ "multiply4_add",
+ "(),(),(),(),()->()",
+ "4-way element-wise product and addition. a,b,c,d,e -> a*b*c*d+e. \n",
+ 4,5,1,
+ FUNC_ARRAY_NAME(multiply4_add),
+ equal_6_types
+ },
+ { /* solve using cholesky decomposition (lapack potrs) */
+ "chosolve_up",
+ "(m,m),(m,n)->(m,n)",
+ "solve for symmetric/hermitian matrices using cholesky"\
+ " factorization. \n",
+ 4,2,1,
+ FUNC_ARRAY_NAME(chosolve_up),
+ equal_3_types
+ },
+ { /* solve using cholesky decomposition (lapack potrs) */
+ "chosolve_lo",
+ "(m,m),(m,n)->(m,n)",
+ "solve for symmetric/hermitian matrices using cholesky"\
+ " factorization. \n",
+ 4,2,1,
+ FUNC_ARRAY_NAME(chosolve_lo),
+ equal_3_types
+ },
+ { /* solve using cholesky decomposition (lapack potrs) */
+ "chosolve1_up",
+ "(m,m),(m)->(m)",
+ "solve a system using cholesky decomposition. \n",
+ 4,2,1,
+ FUNC_ARRAY_NAME(chosolve1_up),
+ equal_3_types
+ },
+ { /* solve using cholesky decomposition (lapack potrs) */
+ "chosolve1_lo",
+ "(m,m),(m)->(m)",
+ "solve a system using cholesky decomposition. \n",
+ 4,2,1,
+ FUNC_ARRAY_NAME(chosolve1_lo),
+ equal_3_types
+ },
+ { /* inverse using cholesky decomposition (lapack potri) */
+ "poinv_up",
+ "(m,m)->(m,m)",
+ "inverse using cholesky decomposition for symmetric/hermitian"\
+ " matrices. \n",
+ 4,1,1,
+ FUNC_ARRAY_NAME(poinv_up),
+ equal_2_types
+ },
+ { /* inverse using cholesky decomposition (lapack potri) */
+ "poinv_lo",
+ "(m,m)->(m,m)",
+ "inverse using cholesky decomposition for symmetric/hermitian"\
+ " matrices. \n",
+ 4,1,1,
+ FUNC_ARRAY_NAME(poinv_lo),
+ equal_2_types
+ },
+};
+
+static void
+addUfuncs(PyObject *dictionary) {
+ PyObject *f;
+ int i;
+ const int gufunc_count = sizeof(gufunc_descriptors)/
+ sizeof(gufunc_descriptors[0]);
+ for (i=0; i < gufunc_count; i++) {
+ GUFUNC_DESCRIPTOR_t* d = &gufunc_descriptors[i];
+ f = PyUFunc_FromFuncAndDataAndSignature(d->funcs,
+ array_of_nulls,
+ d->types,
+ d->ntypes,
+ d->nin,
+ d->nout,
+ PyUFunc_None,
+ d->name,
+ d->doc,
+ 0,
+ d->signature);
+ PyDict_SetItemString(dictionary, d->name, f);
+#if 0
+ dump_ufunc_object((PyUFuncObject*) f);
+#endif
+ Py_DECREF(f);
+ }
+}
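
Each entry of gufunc_descriptors thus becomes an attribute of the extension
module, a generalized ufunc with the declared signature. A hypothetical
smoke test, assuming the module builds and is importable as
numpy.core._umath_linalg (the exact location is build-dependent):

    import numpy as np

    try:
        from numpy.core import _umath_linalg  # assumed install location
    except ImportError:
        _umath_linalg = None

    if _umath_linalg is not None:
        # det has gufunc signature "(m,m)->()": one scalar per stacked matrix
        stacked = np.array([np.eye(3), 2 * np.eye(3)])
        assert np.allclose(_umath_linalg.det(stacked), [1.0, 8.0])
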
+
+
+
+/* -------------------------------------------------------------------------- */
+ /* Module initialization stuff */
+
+static PyMethodDef UMath_LinAlgMethods[] = {
+ {NULL, NULL, 0, NULL} /* Sentinel */
+};
+
+#if defined(NPY_PY3K)
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ UMATH_LINALG_MODULE_NAME,
+ NULL,
+ -1,
+ UMath_LinAlgMethods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+#endif
+
+#if defined(NPY_PY3K)
+#define RETVAL m
+PyObject *PyInit__umath_linalg(void)
+#else
+#define RETVAL
+PyMODINIT_FUNC
+init_umath_linalg(void)
+#endif
+{
+ PyObject *m;
+ PyObject *d;
+ PyObject *version;
+
+ init_constants();
+#if defined(NPY_PY3K)
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule(UMATH_LINALG_MODULE_NAME, UMath_LinAlgMethods);
+#endif
+ if (m == NULL)
+ return RETVAL;
+
+ import_array();
+ import_ufunc();
+
+ d = PyModule_GetDict(m);
+
+ version = PyString_FromString(umath_linalg_version_string);
+ PyDict_SetItemString(d, "__version__", version);
+ Py_DECREF(version);
+
+ /* Load the ufunc operators into the module's namespace */
+ addUfuncs(d);
+
+ if (PyErr_Occurred()) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "cannot load _umath_linalg module.");
+ }
+
+ return RETVAL;
+}
diff --git a/numpy/ma/__init__.py b/numpy/ma/__init__.py
index 17caa9e02..0cb92f667 100644
--- a/numpy/ma/__init__.py
+++ b/numpy/ma/__init__.py
@@ -36,16 +36,18 @@ may now proceed to calculate the mean of the other values:
invalid operation.
"""
+from __future__ import division, absolute_import, print_function
+
__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
__version__ = '1.0'
__revision__ = "$Revision: 3473 $"
__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
-import core
-from core import *
+from . import core
+from .core import *
-import extras
-from extras import *
+from . import extras
+from .extras import *
__all__ = ['core', 'extras']
__all__ += core.__all__
diff --git a/numpy/ma/bench.py b/numpy/ma/bench.py
index 2cc8f6a80..b8d085130 100644
--- a/numpy/ma/bench.py
+++ b/numpy/ma/bench.py
@@ -1,5 +1,6 @@
#! python
# encoding: utf-8
+from __future__ import division, absolute_import, print_function
import timeit
#import IPython.ipapi
@@ -41,7 +42,7 @@ nmzl = numpy.ma.array(zl, mask=maskx)
def timer(s, v='', nloop=500, nrep=3):
units = ["s", "ms", "µs", "ns"]
scaling = [1, 1e3, 1e6, 1e9]
- print "%s : %-50s : " % (v,s),
+ print("%s : %-50s : " % (v,s), end=' ')
varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz']
setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames)
Timer = timeit.Timer(stmt=s, setup=setup)
@@ -50,10 +51,10 @@ def timer(s, v='', nloop=500, nrep=3):
order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3)
else:
order = 3
- print "%d loops, best of %d: %.*g %s per loop" % (nloop, nrep,
+ print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep,
3,
best * scaling[order],
- units[order])
+ units[order]))
# ip.magic('timeit -n%i %s' % (nloop,s))
@@ -61,24 +62,24 @@ def timer(s, v='', nloop=500, nrep=3):
def compare_functions_1v(func, nloop=500,
xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
funcname = func.__name__
- print "-"*50
- print "%s on small arrays" % funcname
+ print("-"*50)
+ print("%s on small arrays" % funcname)
module, data = "numpy.ma","nmxs"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
#
- print "%s on large arrays" % funcname
+ print("%s on large arrays" % funcname)
module, data = "numpy.ma","nmxl"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
return
def compare_methods(methodname, args, vars='x', nloop=500, test=True,
xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
- print "-"*50
- print "%s on small arrays" % methodname
+ print("-"*50)
+ print("%s on small arrays" % methodname)
data, ver = "nm%ss" % vars, 'numpy.ma'
timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
#
- print "%s on large arrays" % methodname
+ print("%s on large arrays" % methodname)
data, ver = "nm%sl" % vars, 'numpy.ma'
timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
return
@@ -89,12 +90,12 @@ def compare_functions_2v(func, nloop=500, test=True,
xl=xl, nmxl=nmxl,
yl=yl, nmyl=nmyl):
funcname = func.__name__
- print "-"*50
- print "%s on small arrays" % funcname
+ print("-"*50)
+ print("%s on small arrays" % funcname)
module, data = "numpy.ma","nmxs,nmys"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
#
- print "%s on large arrays" % funcname
+ print("%s on large arrays" % funcname)
module, data = "numpy.ma","nmxl,nmyl"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
return
@@ -148,18 +149,18 @@ if __name__ == '__main__':
compare_methods('__setitem__','0, 17', nloop=1000, test=False)
compare_methods('__setitem__','(0,0), 17', nloop=1000, test=False)
#....................................................................
- print "-"*50
- print "__setitem__ on small arrays"
+ print("-"*50)
+ print("__setitem__ on small arrays")
timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000)
- print "-"*50
- print "__setitem__ on large arrays"
+ print("-"*50)
+ print("__setitem__ on large arrays")
timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000)
#....................................................................
- print "-"*50
- print "where on small arrays"
+ print("-"*50)
+ print("where on small arrays")
timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ',nloop=1000)
- print "-"*50
- print "where on large arrays"
+ print("-"*50)
+ print("where on large arrays")
timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ',nloop=100)
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index a7e04cd13..ccf62bdcf 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -20,6 +20,25 @@ Released for unlimited redistribution.
"""
# pylint: disable-msg=E1002
+from __future__ import division, absolute_import, print_function
+
+import sys
+import warnings
+from functools import reduce
+
+import numpy as np
+import numpy.core.umath as umath
+import numpy.core.numerictypes as ntypes
+from numpy import ndarray, amax, amin, iscomplexobj, bool_
+from numpy import array as narray
+from numpy.lib.function_base import angle
+from numpy.compat import getargspec, formatargspec, long, basestring
+from numpy import expand_dims as n_expand_dims
+
+if sys.version_info[0] >= 3:
+ import pickle
+else:
+ import cPickle as pickle
__author__ = "Pierre GF Gerard-Marchant"
__docformat__ = "restructuredtext en"
@@ -67,23 +86,6 @@ __all__ = ['MAError', 'MaskError', 'MaskType', 'MaskedArray',
'var', 'where',
'zeros']
-import cPickle
-
-import numpy as np
-from numpy import ndarray, amax, amin, iscomplexobj, bool_
-from numpy import array as narray
-
-import numpy.core.umath as umath
-from numpy.lib.function_base import angle
-import numpy.core.numerictypes as ntypes
-from numpy.compat import getargspec, formatargspec
-from numpy import expand_dims as n_expand_dims
-import warnings
-
-import sys
-if sys.version_info[0] >= 3:
- from functools import reduce
-
MaskType = np.bool_
nomask = MaskType(0)
@@ -110,7 +112,7 @@ def get_object_signature(obj):
"""
try:
sig = formatargspec(*getargspec(obj))
- except TypeError, errmsg:
+ except TypeError as errmsg:
sig = ''
# msg = "Unable to retrieve the signature of %s '%s'\n"\
# "(Initial error message: %s)"
@@ -414,7 +416,7 @@ def _check_fill_value(fill_value, ndtype):
fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
dtype=ndtype)
else:
- if isinstance(fill_value, basestring) and (ndtype.char not in 'SV'):
+ if isinstance(fill_value, basestring) and (ndtype.char not in 'SVU'):
fill_value = default_fill_value(ndtype)
else:
# In case we want to convert 1e+20 to int...
@@ -628,7 +630,6 @@ def getdata(a, subok=True):
Examples
--------
-
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
@@ -837,13 +838,9 @@ class _MaskedUnaryOperation:
d = getdata(a)
# Case 1.1. : Domained function
if self.domain is not None:
- # Save the error status
- err_status_ini = np.geterr()
- try:
+ with np.errstate():
np.seterr(divide='ignore', invalid='ignore')
result = self.f(d, *args, **kwargs)
- finally:
- np.seterr(**err_status_ini)
# Make a mask
m = ~umath.isfinite(result)
m |= self.domain(d)
@@ -930,12 +927,9 @@ class _MaskedBinaryOperation:
else:
m = umath.logical_or(ma, mb)
# Get the result
- err_status_ini = np.geterr()
- try:
+ with np.errstate():
np.seterr(divide='ignore', invalid='ignore')
result = self.f(da, db, *args, **kwargs)
- finally:
- np.seterr(**err_status_ini)
# Case 1. : scalar
if not result.ndim:
if m:
@@ -1068,12 +1062,9 @@ class _DomainedBinaryOperation:
(da, db) = (getdata(a, subok=False), getdata(b, subok=False))
(ma, mb) = (getmask(a), getmask(b))
# Get the result
- err_status_ini = np.geterr()
- try:
+ with np.errstate():
np.seterr(divide='ignore', invalid='ignore')
result = self.f(da, db, *args, **kwargs)
- finally:
- np.seterr(**err_status_ini)
# Get the mask as a combination of ma, mb and invalid
m = ~umath.isfinite(result)
m |= ma
@@ -1261,7 +1252,6 @@ def getmask(a):
Examples
--------
-
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
@@ -1323,7 +1313,6 @@ def getmaskarray(arr):
Examples
--------
-
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
@@ -1531,7 +1520,7 @@ def make_mask_none(newshape, dtype=None):
----------
newshape : tuple
A tuple indicating the shape of the mask.
- dtype: {None, dtype}, optional
+ dtype : {None, dtype}, optional
If None, use a MaskType instance. Otherwise, use a new datatype with
the same fields as `dtype`, converted to boolean types.
@@ -2541,7 +2530,7 @@ class MaskedIterator(object):
if self.maskiter is not None:
self.maskiter[index] = getmaskarray(value)
- def next(self):
+ def __next__(self):
"""
Return the next value, or raise StopIteration.
@@ -2563,12 +2552,12 @@ class MaskedIterator(object):
StopIteration
"""
- d = self.dataiter.next()
- if self.maskiter is not None and self.maskiter.next():
+ d = next(self.dataiter)
+ if self.maskiter is not None and next(self.maskiter):
d = masked
return d
-
+ next = __next__
class MaskedArray(ndarray):
@@ -2852,7 +2841,54 @@ class MaskedArray(ndarray):
return result
- def view(self, dtype=None, type=None):
+ def view(self, dtype=None, type=None, fill_value=None):
+ """
+ Return a view of the MaskedArray data
+
+ Parameters
+ ----------
+ dtype : data-type or ndarray sub-class, optional
+ Data-type descriptor of the returned view, e.g., float32 or int16.
+ The default, None, results in the view having the same data-type
+ as `a`. As with ``ndarray.view``, dtype can also be specified as
+ an ndarray sub-class, which then specifies the type of the
+ returned object (this is equivalent to setting the ``type``
+ parameter).
+ type : Python type, optional
+ Type of the returned view, e.g., ndarray or matrix. Again, the
+ default None results in type preservation.
+ fill_value : scalar, optional
+ The value to use for invalid entries (None by default). If None,
+ the fill value is inherited or reset as described in the Notes.
+
+ Notes
+ -----
+
+ ``a.view()`` is used in two different ways:
+
+ ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
+ of the array's memory with a different data-type. This can cause a
+ reinterpretation of the bytes of memory.
+
+ ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
+ returns an instance of `ndarray_subclass` that looks at the same array
+ (same shape, dtype, etc.) This does not cause a reinterpretation of the
+ memory.
+
+ If `fill_value` is not specified, but `dtype` is specified (and is not
+ an ndarray sub-class), the `fill_value` of the MaskedArray will be
+ reset. If neither `fill_value` nor `dtype` are specified (or if
+ `dtype` is an ndarray sub-class), then the fill value is preserved.
+ Finally, if `fill_value` is specified, but `dtype` is not, the fill
+ value is set to the specified value.
+
+ For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+ bytes per entry than the previous dtype (for example, converting a
+ regular array to a structured array), then the behavior of the view
+ cannot be predicted just from the superficial appearance of ``a`` (shown
+ by ``print(a)``). It also depends on exactly how ``a`` is stored in
+ memory. Depending on whether ``a`` is C-ordered or Fortran-ordered,
+ or is defined as a slice or transpose, etc., the view may give
+ different results.
+ """
+
if dtype is None:
if type is None:
output = ndarray.view(self)
@@ -2882,7 +2918,13 @@ class MaskedArray(ndarray):
pass
# Make sure to reset the _fill_value if needed
if getattr(output, '_fill_value', None) is not None:
- output._fill_value = None
+ if fill_value is None:
+ if dtype is None:
+ pass # leave _fill_value as is
+ else:
+ output._fill_value = None
+ else:
+ output.fill_value = fill_value
return output
view.__doc__ = ndarray.view.__doc__
@@ -2951,7 +2993,7 @@ class MaskedArray(ndarray):
# We should always re-cast to mvoid, otherwise users can
# change masks on rows that already have masked values, but not
# on rows that have no masked values, which is inconsistent.
- dout = mvoid(dout, mask=mask)
+ dout = mvoid(dout, mask=mask, hardmask=self._hardmask)
# Just a scalar............
elif _mask is not nomask and _mask[indx]:
return masked
@@ -3386,12 +3428,12 @@ class MaskedArray(ndarray):
return np.asanyarray(fill_value)
#
if m.dtype.names:
- result = self._data.copy()
+ result = self._data.copy('K')
_recursive_filled(result, self._mask, fill_value)
elif not m.any():
return self._data
else:
- result = self._data.copy()
+ result = self._data.copy('K')
try:
np.copyto(result, fill_value, where=m)
except (TypeError, AttributeError):
@@ -3763,12 +3805,9 @@ class MaskedArray(ndarray):
"Raise self to the power other, in place."
other_data = getdata(other)
other_mask = getmask(other)
- err_status = np.geterr()
- try:
+ with np.errstate():
np.seterr(divide='ignore', invalid='ignore')
ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data))
- finally:
- np.seterr(**err_status)
invalid = np.logical_not(np.isfinite(self._data))
if invalid.any():
if self._mask is not nomask:
@@ -4749,6 +4788,7 @@ class MaskedArray(ndarray):
dvar = masked
if out is not None:
if isinstance(out, MaskedArray):
+ out.flat = 0
out.__setmask__(True)
elif out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or "\
@@ -4773,10 +4813,10 @@ class MaskedArray(ndarray):
""
dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
if dvar is not masked:
- dvar = sqrt(dvar)
if out is not None:
np.power(out, 0.5, out=out, casting='unsafe')
return out
+ dvar = sqrt(dvar)
return dvar
std.__doc__ = np.std.__doc__
@@ -5483,7 +5523,7 @@ class MaskedArray(ndarray):
if memo is None:
memo = {}
memo[id(self)] = copied
- for (k, v) in self.__dict__.iteritems():
+ for (k, v) in self.__dict__.items():
copied.__dict__[k] = deepcopy(v, memo)
return copied
@@ -5507,11 +5547,12 @@ class mvoid(MaskedArray):
Fake a 'void' object to use for masked array with structured dtypes.
"""
#
- def __new__(self, data, mask=nomask, dtype=None, fill_value=None):
+ def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
+ hardmask=False):
dtype = dtype or data.dtype
- _data = ndarray((), dtype=dtype)
- _data[()] = data
+ _data = np.array(data, dtype=dtype)
_data = _data.view(self)
+ _data._hardmask = hardmask
if mask is not nomask:
if isinstance(mask, np.void):
_data._mask = mask
@@ -5541,7 +5582,10 @@ class mvoid(MaskedArray):
def __setitem__(self, indx, value):
self._data[indx] = value
- self._mask[indx] |= getattr(value, "_mask", False)
+ if self._hardmask:
+ self._mask[indx] |= getattr(value, "_mask", False)
+ else:
+ self._mask[indx] = getattr(value, "_mask", False)
def __str__(self):
m = self._mask
@@ -5586,6 +5630,9 @@ class mvoid(MaskedArray):
else:
yield d
+ def __len__(self):
+ return self._data.__len__()
+
def filled(self, fill_value=None):
"""
Return a copy with masked fields filled with a given value.
@@ -5598,7 +5645,7 @@ class mvoid(MaskedArray):
Returns
-------
- filled_void:
+ filled_void
A `np.void` object
See Also
@@ -5885,7 +5932,7 @@ def min(obj, axis=None, out=None, fill_value=None):
try:
return obj.min(axis=axis, fill_value=fill_value, out=out)
except (AttributeError, TypeError):
- # If obj doesn't have a max method,
+ # If obj doesn't have a min method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out)
min.__doc__ = MaskedArray.min.__doc__
@@ -5904,7 +5951,7 @@ def ptp(obj, axis=None, out=None, fill_value=None):
try:
return obj.ptp(axis, out=out, fill_value=fill_value)
except (AttributeError, TypeError):
- # If obj doesn't have a max method,
+ # If obj doesn't have a ptp method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out)
ptp.__doc__ = MaskedArray.ptp.__doc__
@@ -5923,9 +5970,10 @@ class _frommethod:
Name of the method to transform.
"""
- def __init__(self, methodname):
+ def __init__(self, methodname, reversed=False):
self.__name__ = methodname
self.__doc__ = self.getdoc()
+ self.reversed = reversed
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
@@ -5937,6 +5985,11 @@ class _frommethod:
return doc
#
def __call__(self, a, *args, **params):
+ if self.reversed:
+ args = list(args)
+ arr = args[0]
+ args[0] = a
+ a = arr
# Get the method from the array (if possible)
method_name = self.__name__
method = getattr(a, method_name, None)
@@ -5953,7 +6006,7 @@ class _frommethod:
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
-compress = _frommethod('compress')
+compress = _frommethod('compress', reversed=True)
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
@@ -6017,12 +6070,9 @@ def power(a, b, third=None):
else:
basetype = MaskedArray
# Get the result and view it as a (subclass of) MaskedArray
- err_status = np.geterr()
- try:
+ with np.errstate():
np.seterr(divide='ignore', invalid='ignore')
result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
- finally:
- np.seterr(**err_status)
result._update_from(a)
# Find where we're in trouble w/ NaNs and Infs
invalid = np.logical_not(np.isfinite(result.view(ndarray)))
@@ -6985,7 +7035,7 @@ def dump(a, F):
"""
if not hasattr(F, 'readline'):
F = open(F, 'w')
- return cPickle.dump(a, F)
+ return pickle.dump(a, F)
def dumps(a):
"""
@@ -7000,7 +7050,7 @@ def dumps(a):
returned.
"""
- return cPickle.dumps(a)
+ return pickle.dumps(a)
def load(F):
"""
@@ -7024,7 +7074,7 @@ def load(F):
"""
if not hasattr(F, 'readline'):
F = open(F, 'r')
- return cPickle.load(F)
+ return pickle.load(F)
def loads(strg):
"""
@@ -7042,7 +7092,7 @@ def loads(strg):
dumps : Return a string corresponding to the pickling of a masked array.
"""
- return cPickle.loads(strg)
+ return pickle.loads(strg)
################################################################################
def fromfile(file, dtype=float, count= -1, sep=''):
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index c5b3a3957..d14812093 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -8,6 +8,8 @@ A collection of utilities for `numpy.ma`.
:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
+from __future__ import division, absolute_import, print_function
+
__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
__version__ = '1.0'
__revision__ = "$Revision: 3473 $"
@@ -36,8 +38,8 @@ __all__ = ['apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
import itertools
import warnings
-import core as ma
-from core import MaskedArray, MAError, add, array, asarray, concatenate, count, \
+from . import core as ma
+from .core import MaskedArray, MAError, add, array, asarray, concatenate, count, \
filled, getmask, getmaskarray, make_mask_descr, masked, masked_array, \
mask_or, nomask, ones, sort, zeros
#from core import *
@@ -335,7 +337,7 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
% (axis, nd))
ind = [0] * (nd - 1)
i = np.zeros(nd, 'O')
- indlist = range(nd)
+ indlist = list(range(nd))
indlist.remove(axis)
i[axis] = slice(None, None)
outshape = np.asarray(arr.shape).take(indlist)
@@ -453,7 +455,8 @@ def average(a, axis=None, weights=None, returned=False):
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If ``weights=None``, then all data in `a` are assumed to have a
- weight equal to one.
+ weight equal to one. If `weights` is complex, the imaginary parts
+ are ignored.
returned : bool, optional
Flag indicating whether a tuple ``(result, sum of weights)``
should be returned as output (True), or just the result (False).
@@ -513,7 +516,7 @@ def average(a, axis=None, weights=None, returned=False):
if mask is nomask:
if weights is None:
d = ash[axis] * 1.0
- n = add.reduce(a._data, axis, dtype=float)
+ n = add.reduce(a._data, axis)
else:
w = filled(weights, 0.0)
wsh = w.shape
@@ -529,14 +532,14 @@ def average(a, axis=None, weights=None, returned=False):
r = [None] * len(ash)
r[axis] = slice(None, None, 1)
w = eval ("w[" + repr(tuple(r)) + "] * ones(ash, float)")
- n = add.reduce(a * w, axis, dtype=float)
+ n = add.reduce(a * w, axis)
d = add.reduce(w, axis, dtype=float)
del w, r
else:
raise ValueError('average: weights wrong shape.')
else:
if weights is None:
- n = add.reduce(a, axis, dtype=float)
+ n = add.reduce(a, axis)
d = umath.add.reduce((-mask), axis=axis, dtype=float)
else:
w = filled(weights, 0.0)
@@ -545,7 +548,7 @@ def average(a, axis=None, weights=None, returned=False):
wsh = (1,)
if wsh == ash:
w = array(w, dtype=float, mask=mask, copy=0)
- n = add.reduce(a * w, axis, dtype=float)
+ n = add.reduce(a * w, axis)
d = add.reduce(w, axis, dtype=float)
elif wsh == (ash[axis],):
ni = ash[axis]
@@ -553,7 +556,7 @@ def average(a, axis=None, weights=None, returned=False):
r[axis] = slice(None, None, 1)
w = eval ("w[" + repr(tuple(r)) + \
"] * masked_array(ones(ash, float), mask)")
- n = add.reduce(a * w, axis, dtype=float)
+ n = add.reduce(a * w, axis)
d = add.reduce(w, axis, dtype=float)
else:
raise ValueError('average: weights wrong shape.')
@@ -578,7 +581,6 @@ def average(a, axis=None, weights=None, returned=False):
return result
-
def median(a, axis=None, out=None, overwrite_input=False):
"""
Compute the median along the specified axis.
@@ -727,7 +729,7 @@ def compress_rowcols(x, axis=None):
if m.all():
return nxarray([])
# Builds a list of rows/columns indices
- (idxr, idxc) = (range(len(x)), range(x.shape[1]))
+ (idxr, idxc) = (list(range(len(x))), list(range(x.shape[1])))
masked = m.nonzero()
if not axis:
for i in np.unique(masked[0]):
@@ -1119,7 +1121,7 @@ def setxor1d(ar1, ar2, assume_unique=False):
flag2 = (flag[1:] == flag[:-1])
return aux[flag2]
-def in1d(ar1, ar2, assume_unique=False):
+def in1d(ar1, ar2, assume_unique=False, invert=False):
"""
Test whether each element of an array is also present in a second
array.
@@ -1145,8 +1147,11 @@ def in1d(ar1, ar2, assume_unique=False):
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
- equal_adj = (sar[1:] == sar[:-1])
- flag = ma.concatenate((equal_adj, [False]))
+ if invert:
+ bool_ar = (sar[1:] != sar[:-1])
+ else:
+ bool_ar = (sar[1:] == sar[:-1])
+ flag = ma.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
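
The new invert flag folds the negation into the membership test itself
(note the [invert] sentinel appended to the flag array) rather than
building the result and flipping it afterwards. A quick sketch of the
intended equivalence:

    import numpy as np
    import numpy.ma as ma

    a = [1, 2, 3, 4, 2]
    b = [2, 4]
    assert np.all(ma.in1d(a, b, invert=True) == ~ma.in1d(a, b))
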
@@ -1294,7 +1299,7 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
Raises
------
- ValueError:
+ ValueError
Raised if some values are missing and `allow_masked` is False.
See Also
@@ -1397,14 +1402,13 @@ def corrcoef(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
if rowvar:
for i in range(n - 1):
for j in range(i + 1, n):
- _x = mask_cols(vstack((x[i], x[j]))).var(axis=1,
- ddof=1 - bias)
+ _x = mask_cols(vstack((x[i], x[j]))).var(axis=1, ddof=ddof)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
else:
for i in range(n - 1):
for j in range(i + 1, n):
- _x = mask_cols(vstack((x[:, i], x[:, j]))).var(axis=1,
- ddof=1 - bias)
+ _x = mask_cols(
+ vstack((x[:, i], x[:, j]))).var(axis=1, ddof=ddof)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
return c / _denom
@@ -1430,14 +1434,14 @@ class MAxisConcatenator(AxisConcatenator):
def __getitem__(self, key):
if isinstance(key, str):
raise MAError("Unavailable for masked array.")
- if type(key) is not tuple:
+ if not isinstance(key, tuple):
key = (key,)
objs = []
scalars = []
final_dtypedescr = None
for k in range(len(key)):
scalar = False
- if type(key[k]) is slice:
+ if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
@@ -1445,12 +1449,12 @@ class MAxisConcatenator(AxisConcatenator):
start = 0
if step is None:
step = 1
- if type(step) is type(1j):
+ if isinstance(step, complex):
size = int(abs(step))
newobj = np.linspace(start, stop, num=size)
else:
newobj = np.arange(start, stop, step)
- elif type(key[k]) is str:
+ elif isinstance(key[k], str):
if (key[k] in 'rc'):
self.matrix = True
self.col = (key[k] == 'c')
@@ -1865,9 +1869,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
if w is not None:
w = asarray(w)
if w.ndim != 1:
- raise TypeError, "expected a 1-d array for weights"
+ raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0] :
- raise TypeError, "expected w and y to have the same length"
+ raise TypeError("expected w and y to have the same length")
m = mask_or(m, getmask(w))
if m is not nomask:
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index 44d273d34..a2380d813 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -6,7 +6,10 @@ Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
and the masking of individual fields.
:author: Pierre Gerard-Marchant
+
"""
+from __future__ import division, absolute_import, print_function
+
#!!!: * We should make sure that no field is called '_mask','mask','_fieldmask',
#!!!: or whatever restricted keywords.
#!!!: An idea would be to no bother in the first place, and then rename the
@@ -17,13 +20,17 @@ and the masking of individual fields.
__author__ = "Pierre GF Gerard-Marchant"
import sys
+import warnings
import numpy as np
-from numpy import bool_, dtype, \
- ndarray, recarray, array as narray
import numpy.core.numerictypes as ntypes
-from numpy.core.records import fromarrays as recfromarrays, \
- fromrecords as recfromrecords
+from numpy.compat import basestring
+from numpy import (
+ bool_, dtype, ndarray, recarray, array as narray
+ )
+from numpy.core.records import (
+ fromarrays as recfromarrays, fromrecords as recfromrecords
+ )
_byteorderconv = np.core.records._byteorderconv
_typestr = ntypes._typestr
@@ -34,7 +41,6 @@ from numpy.ma import MAError, MaskedArray, masked, nomask, masked_array, \
_check_fill_value = ma.core._check_fill_value
-import warnings
__all__ = ['MaskedRecords', 'mrecarray',
'fromarrays', 'fromrecords', 'fromtextfile', 'addfield',
@@ -52,7 +58,7 @@ def _getformats(data):
obj = np.asarray(obj)
formats += _typestr[obj.dtype.type]
if issubclass(obj.dtype.type, ntypes.flexible):
- formats += `obj.itemsize`
+ formats += repr(obj.itemsize)
formats += ','
return formats[:-1]
@@ -71,7 +77,7 @@ If the argument `names` is not None, updates the field names to valid names.
elif isinstance(names, str):
new_names = names.split(',')
else:
- raise NameError("illegal input names %s" % `names`)
+ raise NameError("illegal input names %s" % repr(names))
nnames = len(new_names)
if nnames < ndescr:
new_names += default_names[nnames:]
@@ -252,7 +258,7 @@ class MaskedRecords(MaskedArray, object):
optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
if not (attr in fielddict or attr in optinfo):
exctype, value = sys.exc_info()[:2]
- raise exctype, value
+ raise exctype(value)
else:
# Get the list of names ......
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
@@ -505,7 +511,7 @@ def fromarrays(arraylist, dtype=None, shape=None, formats=None,
dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles, aligned=aligned,
byteorder=byteorder).view(mrecarray)
- _array._mask.flat = zip(*masklist)
+ _array._mask.flat = list(zip(*masklist))
if fill_value is not None:
_array.fill_value = fill_value
return _array
diff --git a/numpy/ma/setup.py b/numpy/ma/setup.py
index 024746655..2175628d6 100644
--- a/numpy/ma/setup.py
+++ b/numpy/ma/setup.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, print_function
+
__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
__version__ = '1.0'
__revision__ = "$Revision: 3473 $"
diff --git a/numpy/ma/setupscons.py b/numpy/ma/setupscons.py
deleted file mode 100644
index 024746655..000000000
--- a/numpy/ma/setupscons.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python
-__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
-__version__ = '1.0'
-__revision__ = "$Revision: 3473 $"
-__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
-
-import os
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('ma',parent_package,top_path)
- config.add_data_dir('tests')
- return config
-
-if __name__ == "__main__":
- from numpy.distutils.core import setup
- config = configuration(top_path='').todict()
- setup(**config)
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 48ea6e4f9..088e203b2 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -4,27 +4,25 @@
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
+from __future__ import division, absolute_import, print_function
+
__author__ = "Pierre GF Gerard-Marchant"
-import types
import warnings
+import sys
+import pickle
+from functools import reduce
import numpy as np
+import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
from numpy import ndarray
from numpy.ma.testutils import *
-
-import numpy.ma.core
from numpy.ma.core import *
-
from numpy.compat import asbytes, asbytes_nested
-from numpy.testing.utils import WarningManager
pi = np.pi
-import sys
-if sys.version_info[0] >= 3:
- from functools import reduce
#..............................................................................
class TestMaskedArray(TestCase):
@@ -140,8 +138,8 @@ class TestMaskedArray(TestCase):
def test_concatenate_flexible(self):
"Tests the concatenation on flexible arrays."
- data = masked_array(zip(np.random.rand(10),
- np.arange(10)),
+ data = masked_array(list(zip(np.random.rand(10),
+ np.arange(10))),
dtype=[('a', float), ('b', int)])
#
test = concatenate([data[:5], data[5:]])
@@ -197,15 +195,12 @@ class TestMaskedArray(TestCase):
def test_fix_invalid(self):
"Checks fix_invalid."
- err_status_ini = np.geterr()
- try:
+ with np.errstate():
np.seterr(invalid='ignore')
data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
data_fixed = fix_invalid(data)
assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
assert_equal(data_fixed._mask, [1., 0., 1.])
- finally:
- np.seterr(**err_status_ini)
def test_maskedelement(self):
"Test of masked element"
@@ -367,20 +362,18 @@ class TestMaskedArray(TestCase):
def test_pickling(self):
"Tests pickling"
- import cPickle
a = arange(10)
a[::3] = masked
a.fill_value = 999
- a_pickled = cPickle.loads(a.dumps())
+ a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled._data, a._data)
assert_equal(a_pickled.fill_value, 999)
def test_pickling_subbaseclass(self):
"Test pickling w/ a subclass of ndarray"
- import cPickle
- a = array(np.matrix(range(10)), mask=[1, 0, 1, 0, 0] * 2)
- a_pickled = cPickle.loads(a.dumps())
+ a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
+ a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
self.assertTrue(isinstance(a_pickled._data, np.matrix))
@@ -388,37 +381,28 @@ class TestMaskedArray(TestCase):
def test_pickling_maskedconstant(self):
"Test pickling MaskedConstant"
- import cPickle
mc = np.ma.masked
- mc_pickled = cPickle.loads(mc.dumps())
+ mc_pickled = pickle.loads(mc.dumps())
assert_equal(mc_pickled._baseclass, mc._baseclass)
assert_equal(mc_pickled._mask, mc._mask)
assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
"Tests pickling w/ structured array"
- import cPickle
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
- a_pickled = cPickle.loads(a.dumps())
+ a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
"Tests pickling w/ F_CONTIGUOUS arrays"
- import cPickle
a = arange(10)
a.shape = (-1, 2)
b = a.T
- test = cPickle.loads(cPickle.dumps(b))
+ test = pickle.loads(pickle.dumps(b))
assert_equal(test, b)
-# def test_pickling_oddity(self):
-# "Test some pickling oddity"
-# import cPickle
-# a = array([{'a':1}, {'b':2}, 3], dtype=object)
-# test = cPickle.loads(cPickle.dumps(a))
-# assert_equal(test, a)
def test_single_element_subscript(self):
"Tests single element subscripts of Maskedarrays."
@@ -437,13 +421,9 @@ class TestMaskedArray(TestCase):
assert_equal(1.0, float(array([[1]])))
self.assertRaises(TypeError, float, array([1, 1]))
#
- warn_ctx = WarningManager()
- warn_ctx.__enter__()
- try:
+ with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_(np.isnan(float(array([1], mask=[1]))))
- finally:
- warn_ctx.__exit__()
#
a = array([1, 2, 3], mask=[1, 0, 0])
self.assertRaises(TypeError, lambda:float(a))
@@ -549,6 +529,15 @@ class TestMaskedArray(TestCase):
assert_equal(test, control)
+ def test_filled_w_f_order(self):
+ "Test filled w/ F-contiguous array"
+ a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
+ mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
+ order='F') # this is currently ignored
+ self.assertTrue(a.flags['F_CONTIGUOUS'])
+ self.assertTrue(a.filled(0).flags['F_CONTIGUOUS'])
+
+
def test_optinfo_propagation(self):
"Checks that _optinfo dictionary isn't back-propagated"
x = array([1, 2, 3, ], dtype=float)
@@ -847,7 +836,7 @@ class TestMaskedArrayArithmetic(TestCase):
if sys.version_info[0] >= 3:
assert_(isinstance(count(ott), np.integer))
else:
- assert_(isinstance(count(ott), types.IntType))
+ assert_(isinstance(count(ott), int))
assert_equal(3, count(ott))
assert_(getmask(count(ott, 0)) is nomask)
assert_equal([1, 2], count(ott, 0))
@@ -1511,7 +1500,7 @@ class TestFillingValues(TestCase):
"Test setting fill_value on individual fields"
ndtype = [('a', int), ('b', int)]
# Explicit fill_value
- a = array(zip([1, 2, 3], [4, 5, 6]),
+ a = array(list(zip([1, 2, 3], [4, 5, 6])),
fill_value=(-999, -999), dtype=ndtype)
f = a._fill_value
aa = a['a']
@@ -1521,7 +1510,7 @@ class TestFillingValues(TestCase):
a.fill_value['b'] = -10
assert_equal(tuple(a.fill_value), (10, -10))
# Implicit fill_value
- t = array(zip([1, 2, 3], [4, 5, 6]), dtype=[('a', int), ('b', int)])
+ t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=[('a', int), ('b', int)])
tt = t['a']
tt.set_fill_value(10)
assert_equal(tt._fill_value, np.array(10))
@@ -1554,6 +1543,48 @@ class TestFillingValues(TestCase):
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
+ def test_fillvalue_in_view(self):
+ "Test the behavior of fill_value in view"
+
+ # Create initial masked array
+ x = array([1,2,3], fill_value=1, dtype=np.int64)
+
+ # Check that fill_value is preserved by default
+ y = x.view()
+ assert_(y.fill_value==1)
+
+ # Check that fill_value is preserved if dtype is specified and the
+ # dtype is an ndarray sub-class and has a _fill_value attribute
+ y = x.view(MaskedArray)
+ assert_(y.fill_value==1)
+
+ # Check that fill_value is preserved if type is specified and the
+ # dtype is an ndarray sub-class and has a _fill_value attribute (by
+ # default, the first argument is dtype, not type)
+ y = x.view(type=MaskedArray)
+ assert_(y.fill_value==1)
+
+ # Check that code does not crash if passed an ndarray sub-class that
+ # does not have a _fill_value attribute
+ y = x.view(np.ndarray)
+ y = x.view(type=np.ndarray)
+
+ # Check that fill_value can be overridden with view
+ y = x.view(MaskedArray, fill_value=2)
+ assert_(y.fill_value==2)
+
+ # Check that fill_value can be overridden with view (using type=)
+ y = x.view(type=MaskedArray, fill_value=2)
+ assert_(y.fill_value==2)
+
+ # Check that fill_value gets reset if passed a dtype but not a
+ # fill_value. This is because even though in some cases one can safely
+ # cast the fill_value, e.g. if taking an int64 view of an int32 array,
+ # in other cases, this cannot be done (e.g. int32 view of an int64
+ # array with a large fill_value).
+ y = x.view(dtype=np.int32)
+ assert_(y.fill_value == 999999)
+
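A condensed sketch of the semantics the new test pins down, relying on the view() signature this patch adds to MaskedArray (999999 is numpy's default fill value for integer dtypes, as the last assertion above confirms):

    import numpy as np
    from numpy.ma import MaskedArray, array

    x = array([1, 2, 3], fill_value=1, dtype=np.int64)
    y = x.view(MaskedArray, fill_value=2)  # explicit override is honoured
    assert y.fill_value == 2
    z = x.view(dtype=np.int32)             # dtype change: fill_value resets
    assert z.fill_value == 999999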
#------------------------------------------------------------------------------
class TestUfuncs(TestCase):
@@ -2481,9 +2512,9 @@ class TestMaskedArrayMethods(TestCase):
assert_equal(xlist[2], [8, 9, None, 11])
assert_equal(xlist, ctrl)
# ... on structured array w/ masked records
- x = array(zip([1, 2, 3],
+ x = array(list(zip([1, 2, 3],
[1.1, 2.2, 3.3],
- ['one', 'two', 'thr']),
+ ['one', 'two', 'thr'])),
dtype=[('a', int), ('b', float), ('c', '|S8')])
x[-1] = masked
assert_equal(x.tolist(),
@@ -2718,8 +2749,8 @@ class TestMaskedArrayMathMethods(TestCase):
def test_varstd_specialcases(self):
"Test a special case for var"
- nout = np.empty(1, dtype=float)
- mout = empty(1, dtype=float)
+ nout = np.array(-1, dtype=float)
+ mout = array(-1, dtype=float)
#
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
@@ -2728,11 +2759,15 @@ class TestMaskedArrayMathMethods(TestCase):
self.assertTrue(method(0) is masked)
self.assertTrue(method(-1) is masked)
# Using a masked array as explicit output
- _ = method(out=mout)
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore')
+ _ = method(out=mout)
self.assertTrue(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
- _ = method(out=nout)
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore')
+ _ = method(out=nout)
self.assertTrue(np.isnan(nout))
#
x = array(arange(10), mask=True)
@@ -2927,11 +2962,11 @@ class TestMaskedArrayFunctions(TestCase):
def test_masked_otherfunctions(self):
- assert_equal(masked_inside(range(5), 1, 3), [0, 199, 199, 199, 4])
- assert_equal(masked_outside(range(5), 1, 3), [199, 1, 2, 3, 199])
- assert_equal(masked_inside(array(range(5), mask=[1, 0, 0, 0, 0]), 1, 3).mask, [1, 1, 1, 1, 0])
- assert_equal(masked_outside(array(range(5), mask=[0, 1, 0, 0, 0]), 1, 3).mask, [1, 1, 0, 0, 1])
- assert_equal(masked_equal(array(range(5), mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 0])
+ assert_equal(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])
+ assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
+ assert_equal(masked_inside(array(list(range(5)), mask=[1, 0, 0, 0, 0]), 1, 3).mask, [1, 1, 1, 1, 0])
+ assert_equal(masked_outside(array(list(range(5)), mask=[0, 1, 0, 0, 0]), 1, 3).mask, [1, 1, 0, 0, 1])
+ assert_equal(masked_equal(array(list(range(5)), mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 0])
assert_equal(masked_not_equal(array([2, 2, 1, 2, 1], mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 1])
@@ -3330,6 +3365,19 @@ class TestMaskedArrayFunctions(TestCase):
test = reshape(a, (2, 2))
assert_equal(test, m.reshape(2, 2))
+ def test_compress(self):
+ # Test compress function on ndarray and masked array
+ # Address Github #2495.
+ arr = np.arange(8)
+ arr.shape = 4,2
+ cond = np.array([True, False, True, True])
+ control = arr[[0, 2, 3]]
+ test = np.ma.compress(cond, arr, axis=0)
+ assert_equal(test, control)
+ marr = np.ma.array(arr)
+ test = np.ma.compress(cond, marr, axis=0)
+ assert_equal(test, control)
+
#------------------------------------------------------------------------------
class TestMaskedFields(TestCase):
@@ -3341,7 +3389,7 @@ class TestMaskedFields(TestCase):
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
- base = array(zip(ilist, flist, slist), mask=mask, dtype=ddtype)
+ base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
def test_set_records_masks(self):
@@ -3418,7 +3466,7 @@ class TestMaskedFields(TestCase):
#
def test_view(self):
"Test view w/ flexible dtype"
- iterator = zip(np.arange(10), np.random.rand(10))
+ iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
@@ -3438,9 +3486,9 @@ class TestMaskedFields(TestCase):
#
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
- a = array(zip(np.random.rand(10), np.arange(10)), dtype=ndtype)
- a.mask = np.array(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
- [1, 0, 0, 0, 0, 0, 0, 0, 1, 0]),
+ a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
+ a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
+ [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
dtype=[('a', bool), ('b', bool)])
# No mask
self.assertTrue(isinstance(a[1], MaskedArray))
@@ -3453,12 +3501,40 @@ class TestMaskedFields(TestCase):
assert_equal_records(a[-2]._data, a._data[-2])
assert_equal_records(a[-2]._mask, a._mask[-2])
+ def test_setitem(self):
+ # Issue 2403
+ ndtype = np.dtype([('a', float), ('b', int)])
+ mdtype = np.dtype([('a', bool), ('b', bool)])
+ # soft mask
+ control = np.array([(False, True), (True, True)], dtype=mdtype)
+ a = np.ma.masked_all((2,), dtype=ndtype)
+ a['a'][0] = 2
+ assert_equal(a.mask, control)
+ a = np.ma.masked_all((2,), dtype=ndtype)
+ a[0]['a'] = 2
+ assert_equal(a.mask, control)
+ # hard mask
+ control = np.array([(True, True), (True, True)], dtype=mdtype)
+ a = np.ma.masked_all((2,), dtype=ndtype)
+ a.harden_mask()
+ a['a'][0] = 2
+ assert_equal(a.mask, control)
+ a = np.ma.masked_all((2,), dtype=ndtype)
+ a.harden_mask()
+ a[0]['a'] = 2
+ assert_equal(a.mask, control)
+
+ def test_element_len(self):
+ # check that len() works for mvoid (Github issue #576)
+ for rec in self.data['base']:
+ assert_equal(len(rec), len(self.data['ddtype']))
+
#------------------------------------------------------------------------------
class TestMaskedView(TestCase):
#
def setUp(self):
- iterator = zip(np.arange(10), np.random.rand(10))
+ iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index f1b36a15d..4b30813b2 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -5,7 +5,10 @@ Adapted from the original test_ma by Pierre Gerard-Marchant
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
+
"""
+from __future__ import division, absolute_import, print_function
+
__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
__version__ = '1.0'
__revision__ = "$Revision: 3473 $"
@@ -13,9 +16,18 @@ __date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
import numpy as np
from numpy.testing import TestCase, run_module_suite
-from numpy.ma.testutils import *
-from numpy.ma.core import *
-from numpy.ma.extras import *
+from numpy.ma.testutils import (rand, assert_, assert_array_equal,
+ assert_equal, assert_almost_equal)
+from numpy.ma.core import (array, arange, masked, MaskedArray, masked_array,
+ getmaskarray, shape, nomask, ones, zeros, count)
+from numpy.ma.extras import (atleast_2d, mr_, dot, polyfit,
+ cov, corrcoef, median, average,
+ unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, ediff1d,
+ apply_over_axes, apply_along_axis,
+ compress_rowcols, mask_rowcols,
+ clump_masked, clump_unmasked,
+ flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges,
+ masked_all, masked_all_like)
class TestGeneric(TestCase):
@@ -50,7 +62,6 @@ class TestGeneric(TestCase):
control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt)
assert_equal(test, control)
-
def test_masked_all_like(self):
"Tests masked_all"
# Standard dtype
@@ -129,10 +140,10 @@ class TestAverage(TestCase):
"More tests of average."
w1 = [0, 1, 1, 1, 1, 0]
w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
- x = arange(6, dtype=float_)
+ x = arange(6, dtype=np.float_)
assert_equal(average(x, axis=0), 2.5)
assert_equal(average(x, axis=0, weights=w1), 2.5)
- y = array([arange(6, dtype=float_), 2.0 * arange(6)])
+ y = array([arange(6, dtype=np.float_), 2.0 * arange(6)])
assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.)
assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.)
assert_equal(average(y, axis=1),
@@ -164,12 +175,12 @@ class TestAverage(TestCase):
a = arange(6)
b = arange(6) * 3
r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
- assert_equal(shape(r1) , shape(w1))
- assert_equal(r1.shape , w1.shape)
+ assert_equal(shape(r1), shape(w1))
+ assert_equal(r1.shape, w1.shape)
r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
- assert_equal(shape(w2) , shape(r2))
+ assert_equal(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), returned=1)
- assert_equal(shape(w2) , shape(r2))
+ assert_equal(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)
assert_equal(shape(w2), shape(r2))
a2d = array([[1, 2], [0, 4]], float)
@@ -190,6 +201,50 @@ class TestAverage(TestCase):
a = average(array([1, 2, 3, 4], mask=[False, False, True, True]))
assert_equal(a, 1.5)
+ def test_complex(self):
+ # Test with complex data.
+ # (Regression test for https://github.com/numpy/numpy/issues/2684)
+ mask = np.array([[0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0]], dtype=bool)
+ a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j],
+ [9j, 0+1j, 2+3j, 4+5j, 7+7j]],
+ mask=mask)
+
+ av = average(a)
+ expected = np.average(a.compressed())
+ assert_almost_equal(av.real, expected.real)
+ assert_almost_equal(av.imag, expected.imag)
+
+ av0 = average(a, axis=0)
+ expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j
+ assert_almost_equal(av0.real, expected0.real)
+ assert_almost_equal(av0.imag, expected0.imag)
+
+ av1 = average(a, axis=1)
+ expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j
+ assert_almost_equal(av1.real, expected1.real)
+ assert_almost_equal(av1.imag, expected1.imag)
+
+ # Test with the 'weights' argument.
+ wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5],
+ [1.0, 1.0, 1.0, 1.0, 1.0]])
+ wav = average(a, weights=wts)
+ expected = np.average(a.compressed(), weights=wts[~mask])
+ assert_almost_equal(wav.real, expected.real)
+ assert_almost_equal(wav.imag, expected.imag)
+
+ wav0 = average(a, weights=wts, axis=0)
+ expected0 = (average(a.real, weights=wts, axis=0) +
+ average(a.imag, weights=wts, axis=0)*1j)
+ assert_almost_equal(wav0.real, expected0.real)
+ assert_almost_equal(wav0.imag, expected0.imag)
+
+ wav1 = average(a, weights=wts, axis=1)
+ expected1 = (average(a.real, weights=wts, axis=1) +
+ average(a.imag, weights=wts, axis=1)*1j)
+ assert_almost_equal(wav1.real, expected1.real)
+ assert_almost_equal(wav1.imag, expected1.imag)
+
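These assertions lean on linearity: the (weighted) average of complex data equals the average of the real parts plus 1j times the average of the imaginary parts, and the scalar reference can be computed from the compressed (unmasked) entries. A self-contained check of the identity, without the masked machinery:

    import numpy as np

    a = np.array([1+2j, 3+4j, 5+6j])
    w = np.array([0.5, 1.0, 2.0])
    lhs = np.average(a, weights=w)
    rhs = np.average(a.real, weights=w) + 1j * np.average(a.imag, weights=w)
    assert np.allclose(lhs, rhs)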
class TestConcatenator(TestCase):
"""
@@ -203,7 +258,7 @@ class TestConcatenator(TestCase):
m = [1, 0, 0, 0, 0]
d = masked_array(b, mask=m)
c = mr_[d, 0, 0, d]
- self.assertTrue(isinstance(c, MaskedArray) or isinstance(c, core.MaskedArray))
+ self.assertTrue(isinstance(c, MaskedArray))
assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
assert_array_equal(c.mask, mr_[m, 0, 0, m])
@@ -227,7 +282,6 @@ class TestConcatenator(TestCase):
assert_array_equal(d.mask, np.r_[m_1, m_2])
-
class TestNotMasked(TestCase):
"""
Tests notmasked_edges and notmasked_contiguous.
@@ -267,7 +321,6 @@ class TestNotMasked(TestCase):
assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)])
assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)])
-
def test_contiguous(self):
"Tests notmasked_contiguous"
a = masked_array(np.arange(24).reshape(3, 8),
@@ -292,12 +345,12 @@ class TestNotMasked(TestCase):
assert_equal(tmp[2][-2], slice(0, 6, None))
-
class Test2DFunctions(TestCase):
"Tests 2D functions"
def test_compress2d(self):
"Tests compress2d"
- x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+ x = array(np.arange(9).reshape(3, 3),
+ mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
assert_equal(compress_rowcols(x), [[4, 5], [7, 8]])
assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]])
assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]])
@@ -313,21 +366,31 @@ class Test2DFunctions(TestCase):
assert_equal(compress_rowcols(x).size, 0)
assert_equal(compress_rowcols(x, 0).size, 0)
assert_equal(compress_rowcols(x, 1).size, 0)
- #
+
def test_mask_rowcols(self):
"Tests mask_rowcols."
- x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
- assert_equal(mask_rowcols(x).mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]])
- assert_equal(mask_rowcols(x, 0).mask, [[1, 1, 1], [0, 0, 0], [0, 0, 0]])
- assert_equal(mask_rowcols(x, 1).mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]])
+ x = array(np.arange(9).reshape(3, 3),
+ mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+ assert_equal(mask_rowcols(x).mask,
+ [[1, 1, 1], [1, 0, 0], [1, 0, 0]])
+ assert_equal(mask_rowcols(x, 0).mask,
+ [[1, 1, 1], [0, 0, 0], [0, 0, 0]])
+ assert_equal(mask_rowcols(x, 1).mask,
+ [[1, 0, 0], [1, 0, 0], [1, 0, 0]])
x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
- assert_equal(mask_rowcols(x).mask, [[0, 1, 0], [1, 1, 1], [0, 1, 0]])
- assert_equal(mask_rowcols(x, 0).mask, [[0, 0, 0], [1, 1, 1], [0, 0, 0]])
- assert_equal(mask_rowcols(x, 1).mask, [[0, 1, 0], [0, 1, 0], [0, 1, 0]])
+ assert_equal(mask_rowcols(x).mask,
+ [[0, 1, 0], [1, 1, 1], [0, 1, 0]])
+ assert_equal(mask_rowcols(x, 0).mask,
+ [[0, 0, 0], [1, 1, 1], [0, 0, 0]])
+ assert_equal(mask_rowcols(x, 1).mask,
+ [[0, 1, 0], [0, 1, 0], [0, 1, 0]])
x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]])
- assert_equal(mask_rowcols(x).mask, [[1, 1, 1], [1, 1, 1], [1, 1, 0]])
- assert_equal(mask_rowcols(x, 0).mask, [[1, 1, 1], [1, 1, 1], [0, 0, 0]])
- assert_equal(mask_rowcols(x, 1,).mask, [[1, 1, 0], [1, 1, 0], [1, 1, 0]])
+ assert_equal(mask_rowcols(x).mask,
+ [[1, 1, 1], [1, 1, 1], [1, 1, 0]])
+ assert_equal(mask_rowcols(x, 0).mask,
+ [[1, 1, 1], [1, 1, 1], [0, 0, 0]])
+ assert_equal(mask_rowcols(x, 1,).mask,
+ [[1, 1, 0], [1, 1, 0], [1, 1, 0]])
x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertTrue(mask_rowcols(x).all() is masked)
self.assertTrue(mask_rowcols(x, 0).all() is masked)
@@ -335,7 +398,7 @@ class Test2DFunctions(TestCase):
self.assertTrue(mask_rowcols(x).mask.all())
self.assertTrue(mask_rowcols(x, 0).mask.all())
self.assertTrue(mask_rowcols(x, 1).mask.all())
- #
+
def test_dot(self):
"Tests dot product"
n = np.arange(1, 7)
@@ -407,9 +470,7 @@ class Test2DFunctions(TestCase):
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
-
class TestApplyAlongAxis(TestCase):
- #
"Tests 2D functions"
def test_3d(self):
a = arange(12.).reshape(2, 2, 3)
@@ -419,21 +480,20 @@ class TestApplyAlongAxis(TestCase):
assert_equal(xa, [[1, 4], [7, 10]])
-
class TestApplyOverAxes(TestCase):
"Tests apply_over_axes"
def test_basic(self):
a = arange(24).reshape(2, 3, 4)
test = apply_over_axes(np.sum, a, [0, 2])
- ctrl = np.array([[[ 60], [ 92], [124]]])
+ ctrl = np.array([[[60], [92], [124]]])
assert_equal(test, ctrl)
a[(a % 2).astype(np.bool)] = masked
test = apply_over_axes(np.sum, a, [0, 2])
- ctrl = np.array([[[ 30], [ 44], [60]]])
+ ctrl = np.array([[[30], [44], [60]]])
class TestMedian(TestCase):
- #
+
def test_2d(self):
"Tests median w/ 2D"
(n, p) = (101, 30)
@@ -449,7 +509,7 @@ class TestMedian(TestCase):
assert_equal(median(z), 0)
assert_equal(median(z, axis=0), np.zeros(p))
assert_equal(median(z.T, axis=1), np.zeros(p))
- #
+
def test_2d_waxis(self):
"Tests median w/ 2D arrays and different axis."
x = masked_array(np.arange(30).reshape(10, 3))
@@ -458,7 +518,7 @@ class TestMedian(TestCase):
assert_equal(median(x, axis=0), [13.5, 14.5, 15.5])
assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0])
assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1])
- #
+
def test_3d(self):
"Tests median w/ 3D"
x = np.ma.arange(24).reshape(3, 4, 2)
@@ -471,7 +531,6 @@ class TestMedian(TestCase):
assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]])
-
class TestCov(TestCase):
def setUp(self):
@@ -525,16 +584,18 @@ class TestCov(TestCase):
valid = np.logical_not(getmaskarray(x)).astype(int)
frac = np.dot(valid, valid.T)
xf = (x - x.mean(1)[:, None]).filled(0)
- assert_almost_equal(cov(x), np.cov(xf) * (x.shape[1] - 1) / (frac - 1.))
+ assert_almost_equal(cov(x),
+ np.cov(xf) * (x.shape[1] - 1) / (frac - 1.))
assert_almost_equal(cov(x, bias=True),
np.cov(xf, bias=True) * x.shape[1] / frac)
frac = np.dot(valid.T, valid)
xf = (x - x.mean(0)).filled(0)
assert_almost_equal(cov(x, rowvar=False),
- np.cov(xf, rowvar=False) * (x.shape[0] - 1) / (frac - 1.))
+ (np.cov(xf, rowvar=False) *
+ (x.shape[0] - 1) / (frac - 1.)))
assert_almost_equal(cov(x, rowvar=False, bias=True),
- np.cov(xf, rowvar=False, bias=True) * x.shape[0] / frac)
-
+ (np.cov(xf, rowvar=False, bias=True) *
+ x.shape[0] / frac))
class TestCorrcoef(TestCase):
@@ -547,7 +608,6 @@ class TestCorrcoef(TestCase):
x = self.data
assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0))
-
def test_1d_wo_missing(self):
"Test cov on 1D variable w/o missing values"
x = self.data
@@ -573,7 +633,8 @@ class TestCorrcoef(TestCase):
x -= x.mean()
nx = x.compressed()
assert_almost_equal(np.corrcoef(nx), corrcoef(x))
- assert_almost_equal(np.corrcoef(nx, rowvar=False), corrcoef(x, rowvar=False))
+ assert_almost_equal(np.corrcoef(nx, rowvar=False),
+ corrcoef(x, rowvar=False))
assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True),
corrcoef(x, rowvar=False, bias=True))
#
@@ -601,7 +662,6 @@ class TestCorrcoef(TestCase):
assert_almost_equal(test[:-1, :-1], control[:-1, :-1])
-
class TestPolynomial(TestCase):
#
def test_polyfit(self):
@@ -617,7 +677,8 @@ class TestPolynomial(TestCase):
y[0, 0] = y[-1, -1] = masked
#
(C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True)
- (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, full=True)
+ (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3,
+ full=True)
for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
assert_almost_equal(a, a_)
#
@@ -631,7 +692,7 @@ class TestPolynomial(TestCase):
for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
assert_almost_equal(a, a_)
#
- w = np.random.rand(10) + 1
+ w = np.random.rand(10) + 1
wo = w.copy()
xs = x[1:-1]
ys = y[1:-1]
@@ -644,7 +705,7 @@ class TestPolynomial(TestCase):
class TestArraySetOps(TestCase):
- #
+
def test_unique_onlist(self):
"Test unique on list"
data = [1, 1, 1, 2, 2, 3]
@@ -693,7 +754,7 @@ class TestArraySetOps(TestCase):
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
- #
+
def test_ediff1d_tobegin(self):
"Test ediff1d w/ to_begin"
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
@@ -708,7 +769,7 @@ class TestArraySetOps(TestCase):
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
- #
+
def test_ediff1d_toend(self):
"Test ediff1d w/ to_end"
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
@@ -723,7 +784,7 @@ class TestArraySetOps(TestCase):
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
- #
+
def test_ediff1d_tobegin_toend(self):
"Test ediff1d w/ to_begin and to_end"
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
@@ -734,11 +795,12 @@ class TestArraySetOps(TestCase):
assert_equal(test.mask, control.mask)
#
test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked)
- control = array([0, 1, 1, 1, 4, 1, 2, 3], mask=[1, 1, 0, 0, 1, 0, 0, 0])
+ control = array([0, 1, 1, 1, 4, 1, 2, 3],
+ mask=[1, 1, 0, 0, 1, 0, 0, 0])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
- #
+
def test_ediff1d_ndarray(self):
"Test ediff1d w/ a ndarray"
x = np.arange(5)
@@ -755,7 +817,6 @@ class TestArraySetOps(TestCase):
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
-
def test_intersect1d(self):
"Test intersect1d"
x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
@@ -764,7 +825,6 @@ class TestArraySetOps(TestCase):
control = array([1, 3, -1], mask=[0, 0, 1])
assert_equal(test, control)
-
def test_setxor1d(self):
"Test setxor1d"
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
@@ -791,7 +851,6 @@ class TestArraySetOps(TestCase):
#
assert_array_equal([], setxor1d([], []))
-
def test_in1d(self):
"Test in1d"
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
@@ -806,6 +865,17 @@ class TestArraySetOps(TestCase):
#
assert_array_equal([], in1d([], []))
+ def test_in1d_invert(self):
+ "Test in1d's invert parameter"
+ a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+ b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+ assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
+
+ a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
+ b = array([1, 5, -1], mask=[0, 0, 1])
+ assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
+
+ assert_array_equal([], in1d([], [], invert=True))
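The invert keyword exercised here (new to np.in1d in this development cycle) computes the negated membership test in a single pass instead of allocating the boolean result and flipping it afterwards; the two spellings are equivalent:

    import numpy as np

    a = np.array([1, 2, 5, 7])
    b = np.array([1, 2, 3, 4, 5])
    assert np.array_equal(np.in1d(a, b, invert=True), ~np.in1d(a, b))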
def test_union1d(self):
"Test union1d"
@@ -817,7 +887,6 @@ class TestArraySetOps(TestCase):
#
assert_array_equal([], union1d([], []))
-
def test_setdiff1d(self):
"Test setdiff1d"
a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1])
@@ -829,7 +898,6 @@ class TestArraySetOps(TestCase):
b = arange(8)
assert_equal(setdiff1d(a, b), array([8, 9]))
-
def test_setdiff1d_char_array(self):
"Test setdiff1d_charray"
a = np.array(['a', 'b', 'c'])
@@ -837,9 +905,6 @@ class TestArraySetOps(TestCase):
assert_array_equal(setdiff1d(a, b), np.array(['c']))
-
-
-
class TestShapeBase(TestCase):
#
def test_atleast2d(self):
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index 1e14042b0..9938e55de 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -3,31 +3,33 @@
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
+
"""
-__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
-__revision__ = "$Revision: 3473 $"
-__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
+from __future__ import division, absolute_import, print_function
import sys
+import warnings
+import pickle
+
import numpy as np
+import numpy.ma.testutils
+import numpy.ma as ma
from numpy import recarray
from numpy.core.records import fromrecords as recfromrecords, \
fromarrays as recfromarrays
from numpy.compat import asbytes, asbytes_nested
-
-import numpy.ma.testutils
from numpy.ma.testutils import *
-
-import numpy.ma as ma
from numpy.ma import masked, nomask
-
-import warnings
-from numpy.testing.utils import WarningManager
-
from numpy.ma.mrecords import MaskedRecords, mrecarray, fromarrays, \
fromtextfile, fromrecords, addfield
+
+__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
+__revision__ = "$Revision: 3473 $"
+__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
+
+
#..............................................................................
class TestMRecords(TestCase):
"Base test class for MaskedArrays."
@@ -139,15 +141,11 @@ class TestMRecords(TestCase):
rdata = data.view(MaskedRecords)
val = ma.array([10,20,30], mask=[1,0,0])
#
- warn_ctx = WarningManager()
- warn_ctx.__enter__()
- try:
+ with warnings.catch_warnings():
warnings.simplefilter("ignore")
rdata['num'] = val
assert_equal(rdata.num, val)
assert_equal(rdata.num.mask, [1,0,0])
- finally:
- warn_ctx.__exit__()
def test_set_fields_mask(self):
"Tests setting the mask of a field."
@@ -290,11 +288,10 @@ class TestMRecords(TestCase):
#
def test_pickling(self):
"Test pickling"
- import cPickle
base = self.base.copy()
mrec = base.view(mrecarray)
- _ = cPickle.dumps(mrec)
- mrec_ = cPickle.loads(_)
+ _ = pickle.dumps(mrec)
+ mrec_ = pickle.loads(_)
assert_equal(mrec_.dtype, mrec.dtype)
assert_equal_records(mrec_._data, mrec._data)
assert_equal(mrec_._mask, mrec._mask)
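cPickle no longer exists on Python 3; plain pickle is the portable spelling (Python 3 transparently substitutes the C accelerator). The round-trip the test performs, reduced to an ordinary masked array:

    import pickle
    import numpy as np

    m = np.ma.array([1, 2, 3], mask=[0, 1, 0])
    m2 = pickle.loads(pickle.dumps(m))
    assert np.array_equal(m2.mask, m.mask)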
@@ -358,10 +355,10 @@ class TestView(TestCase):
def setUp(self):
(a, b) = (np.arange(10), np.random.rand(10))
ndtype = [('a',np.float), ('b',np.float)]
- arr = np.array(zip(a,b), dtype=ndtype)
+ arr = np.array(list(zip(a,b)), dtype=ndtype)
rec = arr.view(np.recarray)
#
- marr = ma.array(zip(a,b), dtype=ndtype, fill_value=(-9., -99.))
+ marr = ma.array(list(zip(a,b)), dtype=ndtype, fill_value=(-9., -99.))
mrec = fromarrays([a,b], dtype=ndtype, fill_value=(-9., -99.))
mrec.mask[3] = (False, True)
self.data = (mrec, a, b, arr)
@@ -378,7 +375,7 @@ class TestView(TestCase):
ntype = (np.float, 2)
test = mrec.view(ntype)
self.assertTrue(isinstance(test, ma.MaskedArray))
- assert_equal(test, np.array(zip(a,b), dtype=np.float))
+ assert_equal(test, np.array(list(zip(a,b)), dtype=np.float))
self.assertTrue(test[3,1] is ma.masked)
#
def test_view_flexible_type(self):
@@ -502,6 +499,18 @@ class TestMRecordsImport(TestCase):
assert_equal(mrec.f3, d)
assert_equal(mrec.f3._mask, m)
+
+def test_record_array_with_object_field():
+ """
+ Trac #1839
+ """
+ y = ma.masked_array(
+ [(1,'2'), (3, '4')],
+ mask=[(0, 0), (0, 1)],
+ dtype=[('a', int), ('b', np.object)])
+ x = y[1]
+
+
###############################################################################
#------------------------------------------------------------------------------
if __name__ == "__main__":
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index 656f0b318..50247df37 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -1,36 +1,37 @@
-import numpy
-import types
+from __future__ import division, absolute_import, print_function
+
+import sys
+from functools import reduce
+
+import numpy as np
from numpy.ma import *
from numpy.core.numerictypes import float32
from numpy.ma.core import umath
from numpy.testing import *
-import sys
-if sys.version_info[0] >= 3:
- from functools import reduce
-pi = numpy.pi
+pi = np.pi
def eq(v, w, msg=''):
result = allclose(v, w)
if not result:
- print """Not eq:%s
+ print("""Not eq:%s
%s
----
-%s""" % (msg, str(v), str(w))
+%s""" % (msg, str(v), str(w)))
return result
class TestMa(TestCase):
def setUp (self):
- x = numpy.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])
- y = numpy.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
+ x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])
+ y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1]
xm = array(x, mask=m1)
ym = array(y, mask=m2)
- z = numpy.array([-.5, 0., .5, .8])
+ z = np.array([-.5, 0., .5, .8])
zm = array(z, mask=[0, 1, 0, 0])
- xf = numpy.where(m1, 1e+20, x)
+ xf = np.where(m1, 1e+20, x)
s = x.shape
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
@@ -88,19 +89,13 @@ class TestMa(TestCase):
self.assertTrue(eq(x + y, xm + ym))
self.assertTrue(eq(x - y, xm - ym))
self.assertTrue(eq(x * y, xm * ym))
- olderr = numpy.seterr(divide='ignore', invalid='ignore')
- try:
+ with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(x / y, xm / ym))
- finally:
- numpy.seterr(**olderr)
self.assertTrue(eq(a10 + y, a10 + ym))
self.assertTrue(eq(a10 - y, a10 - ym))
self.assertTrue(eq(a10 * y, a10 * ym))
- olderr = numpy.seterr(divide='ignore', invalid='ignore')
- try:
+ with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(a10 / y, a10 / ym))
- finally:
- numpy.seterr(**olderr)
self.assertTrue(eq(x + a10, xm + a10))
self.assertTrue(eq(x - a10, xm - a10))
self.assertTrue(eq(x * a10, xm * a10))
@@ -108,18 +103,15 @@ class TestMa(TestCase):
self.assertTrue(eq(x ** 2, xm ** 2))
self.assertTrue(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
self.assertTrue(eq(x ** y, xm ** ym))
- self.assertTrue(eq(numpy.add(x, y), add(xm, ym)))
- self.assertTrue(eq(numpy.subtract(x, y), subtract(xm, ym)))
- self.assertTrue(eq(numpy.multiply(x, y), multiply(xm, ym)))
- olderr = numpy.seterr(divide='ignore', invalid='ignore')
- try:
- self.assertTrue(eq(numpy.divide(x, y), divide(xm, ym)))
- finally:
- numpy.seterr(**olderr)
+ self.assertTrue(eq(np.add(x, y), add(xm, ym)))
+ self.assertTrue(eq(np.subtract(x, y), subtract(xm, ym)))
+ self.assertTrue(eq(np.multiply(x, y), multiply(xm, ym)))
+ with np.errstate(divide='ignore', invalid='ignore'):
+ self.assertTrue(eq(np.divide(x, y), divide(xm, ym)))
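np.errstate is the context-manager replacement for the geterr/seterr save-and-restore dance being deleted throughout this file; it reinstates the previous floating-point error policy on exit even if the body raises:

    import numpy as np

    with np.errstate(divide='ignore', invalid='ignore'):
        r = np.array([1.0, 0.0]) / 0.0   # no RuntimeWarning inside the block
    # the prior error settings are back in force here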
def test_testMixedArithmetic(self):
- na = numpy.array([1])
+ na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
@@ -127,53 +119,50 @@ class TestMa(TestCase):
def test_testUfuncs1 (self):
"Test various functions such as sin, cos."
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
- self.assertTrue (eq(numpy.cos(x), cos(xm)))
- self.assertTrue (eq(numpy.cosh(x), cosh(xm)))
- self.assertTrue (eq(numpy.sin(x), sin(xm)))
- self.assertTrue (eq(numpy.sinh(x), sinh(xm)))
- self.assertTrue (eq(numpy.tan(x), tan(xm)))
- self.assertTrue (eq(numpy.tanh(x), tanh(xm)))
- olderr = numpy.seterr(divide='ignore', invalid='ignore')
- try:
- self.assertTrue (eq(numpy.sqrt(abs(x)), sqrt(xm)))
- self.assertTrue (eq(numpy.log(abs(x)), log(xm)))
- self.assertTrue (eq(numpy.log10(abs(x)), log10(xm)))
- finally:
- numpy.seterr(**olderr)
- self.assertTrue (eq(numpy.exp(x), exp(xm)))
- self.assertTrue (eq(numpy.arcsin(z), arcsin(zm)))
- self.assertTrue (eq(numpy.arccos(z), arccos(zm)))
- self.assertTrue (eq(numpy.arctan(z), arctan(zm)))
- self.assertTrue (eq(numpy.arctan2(x, y), arctan2(xm, ym)))
- self.assertTrue (eq(numpy.absolute(x), absolute(xm)))
- self.assertTrue (eq(numpy.equal(x, y), equal(xm, ym)))
- self.assertTrue (eq(numpy.not_equal(x, y), not_equal(xm, ym)))
- self.assertTrue (eq(numpy.less(x, y), less(xm, ym)))
- self.assertTrue (eq(numpy.greater(x, y), greater(xm, ym)))
- self.assertTrue (eq(numpy.less_equal(x, y), less_equal(xm, ym)))
- self.assertTrue (eq(numpy.greater_equal(x, y), greater_equal(xm, ym)))
- self.assertTrue (eq(numpy.conjugate(x), conjugate(xm)))
- self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((xm, ym))))
- self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((x, y))))
- self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((xm, y))))
- self.assertTrue (eq(numpy.concatenate((x, y, x)), concatenate((x, ym, x))))
+ self.assertTrue (eq(np.cos(x), cos(xm)))
+ self.assertTrue (eq(np.cosh(x), cosh(xm)))
+ self.assertTrue (eq(np.sin(x), sin(xm)))
+ self.assertTrue (eq(np.sinh(x), sinh(xm)))
+ self.assertTrue (eq(np.tan(x), tan(xm)))
+ self.assertTrue (eq(np.tanh(x), tanh(xm)))
+ with np.errstate(divide='ignore', invalid='ignore'):
+ self.assertTrue (eq(np.sqrt(abs(x)), sqrt(xm)))
+ self.assertTrue (eq(np.log(abs(x)), log(xm)))
+ self.assertTrue (eq(np.log10(abs(x)), log10(xm)))
+ self.assertTrue (eq(np.exp(x), exp(xm)))
+ self.assertTrue (eq(np.arcsin(z), arcsin(zm)))
+ self.assertTrue (eq(np.arccos(z), arccos(zm)))
+ self.assertTrue (eq(np.arctan(z), arctan(zm)))
+ self.assertTrue (eq(np.arctan2(x, y), arctan2(xm, ym)))
+ self.assertTrue (eq(np.absolute(x), absolute(xm)))
+ self.assertTrue (eq(np.equal(x, y), equal(xm, ym)))
+ self.assertTrue (eq(np.not_equal(x, y), not_equal(xm, ym)))
+ self.assertTrue (eq(np.less(x, y), less(xm, ym)))
+ self.assertTrue (eq(np.greater(x, y), greater(xm, ym)))
+ self.assertTrue (eq(np.less_equal(x, y), less_equal(xm, ym)))
+ self.assertTrue (eq(np.greater_equal(x, y), greater_equal(xm, ym)))
+ self.assertTrue (eq(np.conjugate(x), conjugate(xm)))
+ self.assertTrue (eq(np.concatenate((x, y)), concatenate((xm, ym))))
+ self.assertTrue (eq(np.concatenate((x, y)), concatenate((x, y))))
+ self.assertTrue (eq(np.concatenate((x, y)), concatenate((xm, y))))
+ self.assertTrue (eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
def test_xtestCount (self):
"Test count"
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
if sys.version_info[0] >= 3:
- self.assertTrue(isinstance(count(ott), numpy.integer))
+ self.assertTrue(isinstance(count(ott), np.integer))
else:
- self.assertTrue(isinstance(count(ott), types.IntType))
+ self.assertTrue(isinstance(count(ott), int))
self.assertEqual(3, count(ott))
self.assertEqual(1, count(1))
self.assertTrue (eq(0, array(1, mask=[1])))
ott = ott.reshape((2, 2))
- assert_(isinstance(count(ott, 0), numpy.ndarray))
+ assert_(isinstance(count(ott, 0), np.ndarray))
if sys.version_info[0] >= 3:
- assert_(isinstance(count(ott), numpy.integer))
+ assert_(isinstance(count(ott), np.integer))
else:
- assert_(isinstance(count(ott), types.IntType))
+ assert_(isinstance(count(ott), int))
self.assertTrue (eq(3, count(ott)))
assert_(getmask(count(ott, 0)) is nomask)
self.assertTrue (eq([1, 2], count(ott, 0)))
@@ -181,7 +170,7 @@ class TestMa(TestCase):
def test_testMinMax (self):
"Test minimum and maximum."
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
- xr = numpy.ravel(x) #max doesn't work if shaped
+ xr = np.ravel(x) #max doesn't work if shaped
xmr = ravel(xm)
#true because of careful selection of data
@@ -193,34 +182,34 @@ class TestMa(TestCase):
def test_testAddSumProd (self):
"Test add, sum, product."
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
- self.assertTrue (eq(numpy.add.reduce(x), add.reduce(x)))
- self.assertTrue (eq(numpy.add.accumulate(x), add.accumulate(x)))
+ self.assertTrue (eq(np.add.reduce(x), add.reduce(x)))
+ self.assertTrue (eq(np.add.accumulate(x), add.accumulate(x)))
self.assertTrue (eq(4, sum(array(4), axis=0)))
self.assertTrue (eq(4, sum(array(4), axis=0)))
- self.assertTrue (eq(numpy.sum(x, axis=0), sum(x, axis=0)))
- self.assertTrue (eq(numpy.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
- self.assertTrue (eq(numpy.sum(x, 0), sum(x, 0)))
- self.assertTrue (eq(numpy.product(x, axis=0), product(x, axis=0)))
- self.assertTrue (eq(numpy.product(x, 0), product(x, 0)))
- self.assertTrue (eq(numpy.product(filled(xm, 1), axis=0),
+ self.assertTrue (eq(np.sum(x, axis=0), sum(x, axis=0)))
+ self.assertTrue (eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
+ self.assertTrue (eq(np.sum(x, 0), sum(x, 0)))
+ self.assertTrue (eq(np.product(x, axis=0), product(x, axis=0)))
+ self.assertTrue (eq(np.product(x, 0), product(x, 0)))
+ self.assertTrue (eq(np.product(filled(xm, 1), axis=0),
product(xm, axis=0)))
if len(s) > 1:
- self.assertTrue (eq(numpy.concatenate((x, y), 1),
+ self.assertTrue (eq(np.concatenate((x, y), 1),
concatenate((xm, ym), 1)))
- self.assertTrue (eq(numpy.add.reduce(x, 1), add.reduce(x, 1)))
- self.assertTrue (eq(numpy.sum(x, 1), sum(x, 1)))
- self.assertTrue (eq(numpy.product(x, 1), product(x, 1)))
+ self.assertTrue (eq(np.add.reduce(x, 1), add.reduce(x, 1)))
+ self.assertTrue (eq(np.sum(x, 1), sum(x, 1)))
+ self.assertTrue (eq(np.product(x, 1), product(x, 1)))
def test_testCI(self):
"Test of conversions and indexing"
- x1 = numpy.array([1, 2, 4, 3])
+ x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
junk, garbage = str(x2), repr(x2)
- assert_(eq(numpy.sort(x1), sort(x2, fill_value=0)))
+ assert_(eq(np.sort(x1), sort(x2, fill_value=0)))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
@@ -247,13 +236,13 @@ class TestMa(TestCase):
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
- x1 = numpy.arange(5) * 1.0
+ x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_(eq(x1, x2))
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_(eq(3.0, x2.fill_value))
x1 = array([1, 'hello', 2, 3], object)
- x2 = numpy.array([1, 'hello', 2, 3], object)
+ x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
self.assertEqual(type(s2), str)
@@ -270,7 +259,7 @@ class TestMa(TestCase):
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
- x1 = numpy.arange(5)
+ x1 = np.arange(5)
y1 = array(x1, mask=m)
self.assertTrue(y1._data is not x1)
self.assertTrue(allequal(x1, y1._data))
@@ -323,7 +312,7 @@ class TestMa(TestCase):
def test_testMaPut(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]
- i = numpy.nonzero(m)[0]
+ i = np.nonzero(m)[0]
put(ym, i, zm)
assert_(all(take(ym, i, axis=0) == zm))
@@ -386,13 +375,13 @@ class TestMa(TestCase):
assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
- assert_(eq(masked_inside(range(5), 1, 3), [0, 199, 199, 199, 4]))
- assert_(eq(masked_outside(range(5), 1, 3), [199, 1, 2, 3, 199]))
- assert_(eq(masked_inside(array(range(5), mask=[1, 0, 0, 0, 0]), 1, 3).mask,
+ assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
+ assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
+ assert_(eq(masked_inside(array(list(range(5)), mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0]))
- assert_(eq(masked_outside(array(range(5), mask=[0, 1, 0, 0, 0]), 1, 3).mask,
+ assert_(eq(masked_outside(array(list(range(5)), mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1]))
- assert_(eq(masked_equal(array(range(5), mask=[1, 0, 0, 0, 0]), 2).mask,
+ assert_(eq(masked_equal(array(list(range(5)), mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0]))
assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1], mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1]))
@@ -422,7 +411,7 @@ class TestMa(TestCase):
z = where(c, 1, masked)
assert_(eq(z, [99, 1, 1, 99, 99, 99]))
- def test_testMinMax(self):
+ def test_testMinMax2(self):
"Test of minumum, maximum."
assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))
assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))
@@ -438,15 +427,15 @@ class TestMa(TestCase):
def test_testTakeTransposeInnerOuter(self):
"Test of take, transpose, inner, outer products"
x = arange(24)
- y = numpy.arange(24)
+ y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
- assert_(eq(numpy.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
- assert_(eq(numpy.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
- assert_(eq(numpy.inner(filled(x, 0), filled(y, 0)),
+ assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
+ assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
+ assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y)))
- assert_(eq(numpy.outer(filled(x, 0), filled(y, 0)),
+ assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y)))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
@@ -556,8 +545,8 @@ class TestMa(TestCase):
self.assertTrue(allclose(average(x, axis=0, weights=w1), 2.5))
y = array([arange(6), 2.0 * arange(6)])
self.assertTrue(allclose(average(y, None),
- numpy.add.reduce(numpy.arange(6)) * 3. / 12.))
- self.assertTrue(allclose(average(y, axis=0), numpy.arange(6) * 3. / 2.))
+ np.add.reduce(np.arange(6)) * 3. / 12.))
+ self.assertTrue(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
self.assertTrue(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0) * 2.0]))
self.assertTrue(allclose(average(y, None, weights=w2), 20. / 6.))
@@ -616,12 +605,9 @@ class TestMa(TestCase):
def test_testScalarArithmetic(self):
xm = array(0, mask=1)
#TODO FIXME: Find out what the following raises a warning in r8247
- err_status = numpy.geterr()
- try:
- numpy.seterr(divide='ignore')
+ with np.errstate():
+ np.seterr(divide='ignore')
self.assertTrue((1 / array(0)).mask)
- finally:
- numpy.seterr(**err_status)
self.assertTrue((1 + xm).mask)
self.assertTrue((-xm).mask)
self.assertTrue((-xm).mask)
@@ -655,7 +641,7 @@ class TestMa(TestCase):
self.assertEqual(a.ndim, 1)
def test_testAPI(self):
- self.assertFalse([m for m in dir(numpy.ndarray)
+ self.assertFalse([m for m in dir(np.ndarray)
if m not in dir(MaskedArray) and not m.startswith('_')])
def test_testSingleElementSubscript(self):
@@ -698,18 +684,15 @@ class TestUfuncs(TestCase):
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
- mf = getattr(numpy.ma, f)
+ mf = getattr(np.ma, f)
args = self.d[:uf.nin]
- olderr = numpy.geterr()
- try:
+ with np.errstate():
if f in f_invalid_ignore:
- numpy.seterr(invalid='ignore')
+ np.seterr(invalid='ignore')
if f in ['arctanh', 'log', 'log10']:
- numpy.seterr(divide='ignore')
+ np.seterr(divide='ignore')
ur = uf(*args)
mr = mf(*args)
- finally:
- numpy.seterr(**olderr)
self.assertTrue(eq(ur.filled(0), mr.filled(0), f))
self.assertTrue(eqmask(ur.mask, mr.mask))
@@ -739,7 +722,7 @@ class TestUfuncs(TestCase):
class TestArrayMethods(TestCase):
def setUp(self):
- x = numpy.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928,
+ x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928,
8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479,
@@ -748,7 +731,7 @@ class TestArrayMethods(TestCase):
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
- m = numpy.array([0, 1, 0, 1, 0, 0,
+ m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
@@ -758,7 +741,7 @@ class TestArrayMethods(TestCase):
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
- m2 = numpy.array([1, 1, 0, 1, 0, 0,
+ m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
@@ -788,8 +771,8 @@ class TestArrayMethods(TestCase):
(x, X, XX, m, mx, mX, mXX,) = self.d
(n, m) = X.shape
self.assertEqual(mx.ptp(), mx.compressed().ptp())
- rows = numpy.zeros(n, numpy.float_)
- cols = numpy.zeros(m, numpy.float_)
+ rows = np.zeros(n, np.float_)
+ cols = np.zeros(m, np.float_)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
@@ -829,7 +812,7 @@ class TestArrayMethods(TestCase):
for k in range(6):
self.assertTrue(eq(mXvar1[k], mX[k].compressed().var()))
self.assertTrue(eq(mXvar0[k], mX[:, k].compressed().var()))
- self.assertTrue(eq(numpy.sqrt(mXvar0[k]),
+ self.assertTrue(eq(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std()))
@@ -855,7 +838,7 @@ def eqmask(m1, m2):
#""" % (n, t*1000.0, t1/t, t2/t)
#def testta(n, f):
-# x=numpy.arange(n) + 1.0
+# x=np.arange(n) + 1.0
# tn0 = time.time()
# z = f(x)
# return time.time() - tn0
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index 62e4ee0ae..eb301aa05 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -1,5 +1,9 @@
-from numpy.testing import *
+from __future__ import division, absolute_import, print_function
+
import numpy as np
+import numpy.ma as ma
+from numpy.testing import *
+from numpy.compat import sixu
rlevel = 1
@@ -35,7 +39,7 @@ class TestRegression(TestCase):
def test_masked_array_repr_unicode(self):
"""Ticket #1256"""
- repr(np.ma.array(u"Unicode"))
+ repr(np.ma.array(sixu("Unicode")))
def test_atleast_2d(self):
"""Ticket #1559"""
@@ -44,6 +48,27 @@ class TestRegression(TestCase):
assert_(a.mask.ndim == 1)
assert_(b.mask.ndim == 2)
+ def test_set_fill_value_unicode_py3(self):
+ """Ticket #2733"""
+ a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
+ a.fill_value = 'X'
+ assert_(a.fill_value == 'X')
+
+ def test_var_sets_maskedarray_scalar(self):
+ """Issue gh-2757"""
+ a = np.ma.array(np.arange(5), mask=True)
+ mout = np.ma.array(-1, dtype=float)
+ a.var(out=mout)
+ assert_(mout._data == 0)
+
+ def test_ddof_corrcoef(self):
+ # See gh-3336
+ x = np.ma.masked_equal([1,2,3,4,5], 4)
+ y = np.array([2,2.5,3.1,3,5])
+ r0 = np.ma.corrcoef(x, y, ddof=0)
+ r1 = np.ma.corrcoef(x, y, ddof=1)
+ # ddof should not have an effect (it gets cancelled out)
+ assert_allclose(r0.data, r1.data)
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py
index fb72ca773..7014246d4 100644
--- a/numpy/ma/tests/test_subclassing.py
+++ b/numpy/ma/tests/test_subclassing.py
@@ -4,7 +4,10 @@
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $
+
"""
+from __future__ import division, absolute_import, print_function
+
__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
__version__ = '1.0'
__revision__ = "$Revision: 3473 $"
@@ -93,12 +96,9 @@ class TestSubclassing(TestCase):
def test_masked_unary_operations(self):
"Tests masked_unary_operation"
(x, mx) = self.data
- olderr = np.seterr(divide='ignore')
- try:
+ with np.errstate(divide='ignore'):
self.assertTrue(isinstance(log(mx), mmatrix))
assert_equal(log(x), np.log(x))
- finally:
- np.seterr(**olderr)
def test_masked_binary_operations(self):
"Tests masked_binary_operation"
@@ -113,7 +113,7 @@ class TestSubclassing(TestCase):
self.assertTrue(isinstance(hypot(mx,mx), mmatrix))
self.assertTrue(isinstance(hypot(mx,x), mmatrix))
- def test_masked_binary_operations(self):
+ def test_masked_binary_operations2(self):
"Tests domained_masked_binary_operation"
(x, mx) = self.data
xmx = masked_array(mx.data.__array__(), mask=mx.mask)
diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py
index 4fb76b330..feff3e879 100644
--- a/numpy/ma/testutils.py
+++ b/numpy/ma/testutils.py
@@ -3,7 +3,10 @@
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $
+
"""
+from __future__ import division, absolute_import, print_function
+
__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
__version__ = "1.0"
__revision__ = "$Revision: 3529 $"
@@ -18,7 +21,7 @@ import numpy.core.umath as umath
from numpy.testing import *
import numpy.testing.utils as utils
-from core import mask_or, getmask, masked_array, nomask, masked, filled, \
+from .core import mask_or, getmask, masked_array, nomask, masked, filled, \
equal, less
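With absolute_import in force (the __future__ line added above), a bare `from core import ...` would search for a top-level module named core instead of numpy/ma/core.py, so the explicit relative form is required. Sketched as it appears inside the numpy.ma package (relative imports only work from within a package, not a standalone script):

    from __future__ import absolute_import

    # Python 2 implicit relative import, now an error:
    #     from core import mask_or, getmask
    from .core import mask_or, getmask   # valid on Python 2.6+ and 3.x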
#------------------------------------------------------------------------------
diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py
index 2588f53b2..350412b85 100644
--- a/numpy/ma/timer_comparison.py
+++ b/numpy/ma/timer_comparison.py
@@ -1,18 +1,19 @@
+from __future__ import division, absolute_import, print_function
+
import timeit
+from functools import reduce
-import sys
import numpy as np
from numpy import float_
import numpy.core.fromnumeric as fromnumeric
from numpy.testing.utils import build_err_msg
+# Fixme: this does not look right.
np.seterr(all='ignore')
pi = np.pi
-if sys.version_info[0] >= 3:
- from functools import reduce
class moduletester(object):
def __init__(self, module):
@@ -450,9 +451,9 @@ if __name__ == '__main__':
cur = np.sort(cur)
# alt = np.sort(alt)
# tmp = np.sort(tmp)
- print "#%i" % i +50*'.'
- print eval("moduletester.test_%i.__doc__" % i)
+ print("#%i" % i +50*'.')
+ print(eval("moduletester.test_%i.__doc__" % i))
# print "core_ini : %.3f - %.3f" % (new[0], new[1])
- print "core_current : %.3f - %.3f" % (cur[0], cur[1])
+ print("core_current : %.3f - %.3f" % (cur[0], cur[1]))
# print "core_alt : %.3f - %.3f" % (alt[0], alt[1])
# print "core_tmp : %.3f - %.3f" % (tmp[0], tmp[1])
diff --git a/numpy/ma/version.py b/numpy/ma/version.py
index 7a925f1a8..a2c5c42a8 100644
--- a/numpy/ma/version.py
+++ b/numpy/ma/version.py
@@ -1,11 +1,14 @@
-"""Version number"""
+"""Version number
+
+"""
+from __future__ import division, absolute_import, print_function
version = '1.00'
release = False
if not release:
- import core
- import extras
+ from . import core
+ from . import extras
revision = [core.__revision__.split(':')[-1][:-1].strip(),
extras.__revision__.split(':')[-1][:-1].strip(),]
version += '.dev%04i' % max([int(rev) for rev in revision])
diff --git a/numpy/matlib.py b/numpy/matlib.py
index f55f763c3..5a1ebe14a 100644
--- a/numpy/matlib.py
+++ b/numpy/matlib.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
# need * as we're copying the numpy namespace
diff --git a/numpy/matrixlib/__init__.py b/numpy/matrixlib/__init__.py
index 468a8829d..d20696154 100644
--- a/numpy/matrixlib/__init__.py
+++ b/numpy/matrixlib/__init__.py
@@ -1,5 +1,9 @@
-"""Sub-package containing the matrix class and related functions."""
-from defmatrix import *
+"""Sub-package containing the matrix class and related functions.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+from .defmatrix import *
__all__ = defmatrix.__all__
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index dbf2909fd..1ca835af2 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
@@ -17,7 +19,12 @@ if sys.version_info[0] >= 3:
return None
_table = _NumCharTable()
def _eval(astr):
- return eval(astr.translate(_table))
+ str_ = astr.translate(_table)
+ if not str_:
+ raise TypeError("Invalid data string supplied: " + astr)
+ else:
+ return eval(str_)
+
else:
_table = [None]*256
for k in range(256):
@@ -32,7 +39,11 @@ else:
del k
def _eval(astr):
- return eval(astr.translate(_table,_todelete))
+ str_ = astr.translate(_table,_todelete)
+ if not str_:
+ raise TypeError("Invalid data string supplied: " + astr)
+ else:
+ return eval(str_)
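With this guard, a string that contains no numeric characters after translation raises a descriptive TypeError rather than the bare SyntaxError that eval('') used to produce:

    import numpy as np

    np.matrix('1 2; 3 4')        # parses normally
    try:
        np.matrix('invalid')     # every character is stripped by the table
    except TypeError as e:
        print(e)                 # Invalid data string supplied: invalid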
def _convert_from_string(data):
rows = data.split(';')
@@ -43,7 +54,7 @@ def _convert_from_string(data):
newrow = []
for col in trow:
temp = col.split()
- newrow.extend(map(_eval,temp))
+ newrow.extend(map(_eval, temp))
if count == 0:
Ncols = len(newrow)
elif len(newrow) != Ncols:
@@ -586,9 +597,9 @@ class matrix(N.ndarray):
Parameters
----------
- axis: int, optional
+ axis : int, optional
Axis along which logical OR is performed
- out: ndarray, optional
+ out : ndarray, optional
Output to existing array instead of creating new one, must have
same shape as expected output
diff --git a/numpy/matrixlib/setup.py b/numpy/matrixlib/setup.py
index 85b090094..8c383cece 100644
--- a/numpy/matrixlib/setup.py
+++ b/numpy/matrixlib/setup.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, print_function
+
import os
def configuration(parent_package='', top_path=None):
diff --git a/numpy/matrixlib/setupscons.py b/numpy/matrixlib/setupscons.py
deleted file mode 100644
index 85b090094..000000000
--- a/numpy/matrixlib/setupscons.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import os
-
-def configuration(parent_package='', top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('matrixlib', parent_package, top_path)
- config.add_data_dir('tests')
- return config
-
-if __name__ == "__main__":
- from numpy.distutils.core import setup
- config = configuration(top_path='').todict()
- setup(**config)
diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py
index 0a181fca3..7cfcdbe27 100644
--- a/numpy/matrixlib/tests/test_defmatrix.py
+++ b/numpy/matrixlib/tests/test_defmatrix.py
@@ -1,9 +1,12 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
from numpy.core import *
from numpy import matrix, asmatrix, bmat
from numpy.matrixlib.defmatrix import matrix_power
from numpy.matrixlib import mat
import numpy as np
+import collections
class TestCtor(TestCase):
def test_basic(self):
@@ -28,6 +31,10 @@ class TestCtor(TestCase):
mvec = matrix(vec)
assert_(mvec.shape == (1,5))
+ def test_exceptions(self):
+ # Check for TypeError when called with invalid string data.
+ assert_raises(TypeError, matrix, "invalid")
+
def test_bmat_nondefault_str(self):
A = array([[1,2],[3,4]])
B = array([[5,6],[7,8]])
@@ -208,13 +215,13 @@ class TestAlgebra(TestCase):
mA = matrix(A)
B = identity(2)
- for i in xrange(6):
+ for i in range(6):
assert_(allclose((mA ** i).A, B))
B = dot(B, A)
Ainv = linalg.inv(A)
B = identity(2)
- for i in xrange(6):
+ for i in range(6):
assert_(allclose((mA ** -i).A, B))
B = dot(B, Ainv)
@@ -276,8 +283,9 @@ class TestMatrixReturn(TestCase):
excluded_methods = [
'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield',
'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize',
- 'searchsorted', 'setflags', 'setfield', 'sort', 'take',
- 'tofile', 'tolist', 'tostring', 'all', 'any', 'sum',
+ 'searchsorted', 'setflags', 'setfield', 'sort',
+ 'partition', 'argpartition',
+ 'take', 'tofile', 'tolist', 'tostring', 'all', 'any', 'sum',
'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp',
'prod', 'std', 'ctypes', 'itemset', 'setasflat'
]
@@ -285,7 +293,7 @@ class TestMatrixReturn(TestCase):
if attrib.startswith('_') or attrib in excluded_methods:
continue
f = getattr(a, attrib)
- if callable(f):
+ if isinstance(f, collections.Callable):
# reset contents of a
a.astype('f8')
a.fill(1.0)
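The isinstance check is there because callable() was removed in Python 3.0 and only reinstated in 3.2, so code targeting 3.0/3.1 tested against the abstract base class instead. The same check in today's spelling (Python 3.3 moved the ABC to collections.abc):

    from collections.abc import Callable   # collections.Callable in the era of this patch

    def f():
        return 1

    assert isinstance(f, Callable)
    assert not isinstance(42, Callable)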
diff --git a/numpy/matrixlib/tests/test_multiarray.py b/numpy/matrixlib/tests/test_multiarray.py
index 9ef105aa3..bed055615 100644
--- a/numpy/matrixlib/tests/test_multiarray.py
+++ b/numpy/matrixlib/tests/test_multiarray.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.testing import *
diff --git a/numpy/matrixlib/tests/test_numeric.py b/numpy/matrixlib/tests/test_numeric.py
index 0b96bb05a..940bfc5d9 100644
--- a/numpy/matrixlib/tests/test_numeric.py
+++ b/numpy/matrixlib/tests/test_numeric.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import assert_equal, TestCase
from numpy.core import ones
from numpy import matrix
diff --git a/numpy/matrixlib/tests/test_regression.py b/numpy/matrixlib/tests/test_regression.py
index ba0133dfd..27cff2c21 100644
--- a/numpy/matrixlib/tests/test_regression.py
+++ b/numpy/matrixlib/tests/test_regression.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
import numpy as np
diff --git a/numpy/numarray/SConscript b/numpy/numarray/SConscript
deleted file mode 100644
index f2d18a400..000000000
--- a/numpy/numarray/SConscript
+++ /dev/null
@@ -1,7 +0,0 @@
-# Last Change: Thu Jun 12 06:00 PM 2008 J
-# vim:syntax=python
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Prepend(CPPPATH=['numpy', "#$build_prefix/numpy/core/src/private"])
-env.NumpyPythonExtension('_capi', source = ['_capi.c'])
diff --git a/numpy/numarray/SConstruct b/numpy/numarray/SConstruct
deleted file mode 100644
index a377d8391..000000000
--- a/numpy/numarray/SConstruct
+++ /dev/null
@@ -1,2 +0,0 @@
-from numscons import GetInitEnvironment
-GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript')
diff --git a/numpy/numarray/__init__.py b/numpy/numarray/__init__.py
index 441ec4450..468324ced 100644
--- a/numpy/numarray/__init__.py
+++ b/numpy/numarray/__init__.py
@@ -1,16 +1,24 @@
-from util import *
-from numerictypes import *
-from functions import *
-from ufuncs import *
-from compat import *
-from session import *
+from __future__ import division, absolute_import, print_function
-import util
-import numerictypes
-import functions
-import ufuncs
-import compat
-import session
+import warnings
+from numpy import ModuleDeprecationWarning
+
+from .util import *
+from .numerictypes import *
+from .functions import *
+from .ufuncs import *
+from .compat import *
+from .session import *
+
+from . import util
+from . import numerictypes
+from . import functions
+from . import ufuncs
+from . import compat
+from . import session
+
+_msg = "The numarray module will be dropped in Numpy 1.9"
+warnings.warn(_msg, ModuleDeprecationWarning)
__all__ = ['session', 'numerictypes']
__all__ += util.__all__
diff --git a/numpy/numarray/_capi.c b/numpy/numarray/_capi.c
index 032379515..7320537a3 100644
--- a/numpy/numarray/_capi.c
+++ b/numpy/numarray/_capi.c
@@ -996,9 +996,7 @@ static PyTypeObject CfuncType = {
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
-#if PY_VERSION_HEX >= 0x02060000
0, /* tp_version_tag */
-#endif
};
/* CfuncObjects are created at the c-level only. They ensure that each
@@ -2250,11 +2248,9 @@ _NA_maxType(PyObject *seq, int limit)
}
return maxtype;
} else {
-#if PY_VERSION_HEX >= 0x02030000
if (PyBool_Check(seq))
return BOOL_SCALAR;
else
-#endif
#if defined(NPY_PY3K)
if (PyInt_Check(seq))
return INT_SCALAR;
diff --git a/numpy/numarray/alter_code1.py b/numpy/numarray/alter_code1.py
index ae950e7e0..a80a5ae3c 100644
--- a/numpy/numarray/alter_code1.py
+++ b/numpy/numarray/alter_code1.py
@@ -52,6 +52,8 @@ Makes the following changes:
- .setimaginary() --> .imag
"""
+from __future__ import division, absolute_import, print_function
+
__all__ = ['convertfile', 'convertall', 'converttree', 'convertsrc']
import sys
@@ -79,7 +81,7 @@ def changeimports(fstr, name, newname):
ind = 0
Nlen = len(fromstr)
Nlen2 = len("from %s import " % newname)
- while 1:
+ while True:
found = fstr.find(fromstr,ind)
if (found < 0):
break
diff --git a/numpy/numarray/alter_code2.py b/numpy/numarray/alter_code2.py
index 4bb773850..18c604afb 100644
--- a/numpy/numarray/alter_code2.py
+++ b/numpy/numarray/alter_code2.py
@@ -5,7 +5,8 @@ with numpy
FIXME: finish this.
"""
-#__all__ = ['convertfile', 'convertall', 'converttree']
+from __future__ import division, absolute_import, print_function
+
__all__ = []
import warnings
diff --git a/numpy/numarray/compat.py b/numpy/numarray/compat.py
index e0d13a7c2..2289af04c 100644
--- a/numpy/numarray/compat.py
+++ b/numpy/numarray/compat.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
__all__ = ['NewAxis', 'ArrayType']
diff --git a/numpy/numarray/convolve.py b/numpy/numarray/convolve.py
index 68a4730a1..b2ff3bd13 100644
--- a/numpy/numarray/convolve.py
+++ b/numpy/numarray/convolve.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
try:
from stsci.convolve import *
except ImportError:
diff --git a/numpy/numarray/fft.py b/numpy/numarray/fft.py
index c7ac6a27e..407ad7c5b 100644
--- a/numpy/numarray/fft.py
+++ b/numpy/numarray/fft.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
from numpy.oldnumeric.fft import *
import numpy.oldnumeric.fft as nof
diff --git a/numpy/numarray/functions.py b/numpy/numarray/functions.py
index 1c2141c98..78d05e5f5 100644
--- a/numpy/numarray/functions.py
+++ b/numpy/numarray/functions.py
@@ -1,18 +1,6 @@
+from __future__ import division, absolute_import, print_function
+
# missing Numarray defined names (in from numarray import *)
-##__all__ = ['ClassicUnpickler', 'Complex32_fromtype',
-## 'Complex64_fromtype', 'ComplexArray', 'Error',
-## 'MAX_ALIGN', 'MAX_INT_SIZE', 'MAX_LINE_WIDTH',
-## 'NDArray', 'NewArray', 'NumArray',
-## 'NumError', 'PRECISION', 'Py2NumType',
-## 'PyINT_TYPES', 'PyLevel2Type', 'PyNUMERIC_TYPES', 'PyREAL_TYPES',
-## 'SUPPRESS_SMALL',
-## 'SuitableBuffer', 'USING_BLAS',
-## 'UsesOpPriority',
-## 'codegenerator', 'generic', 'libnumarray', 'libnumeric',
-## 'make_ufuncs', 'memory',
-## 'numarrayall', 'numarraycore', 'numinclude', 'safethread',
-## 'typecode', 'typecodes', 'typeconv', 'ufunc', 'ufuncFactory',
-## 'ieeemask']
__all__ = ['asarray', 'ones', 'zeros', 'array', 'where']
__all__ += ['vdot', 'dot', 'matrixmultiply', 'ravel', 'indices',
@@ -37,25 +25,26 @@ __all__ += ['vdot', 'dot', 'matrixmultiply', 'ravel', 'indices',
]
import copy
-import copy_reg
import types
import os
import sys
import math
import operator
+import numpy as np
from numpy import dot as matrixmultiply, dot, vdot, ravel, concatenate, all,\
allclose, any, argsort, array_equal, array_equiv,\
array_str, array_repr, CLIP, RAISE, WRAP, clip, concatenate, \
diagonal, e, pi, inner as innerproduct, nonzero, \
outer as outerproduct, kron as kroneckerproduct, lexsort, putmask, rank, \
resize, searchsorted, shape, size, sort, swapaxes, trace, transpose
-import numpy as np
-
-from numerictypes import typefrom
+from numpy.compat import long
+from .numerictypes import typefrom
if sys.version_info[0] >= 3:
import copyreg as copy_reg
+else:
+ import copy_reg
isBigEndian = sys.byteorder != 'little'
value = tcode = 'f'
@@ -167,7 +156,7 @@ class FileSeekWarning(Warning):
pass
-STRICT, SLOPPY, WARN = range(3)
+STRICT, SLOPPY, WARN = list(range(3))
_BLOCKSIZE=1024
@@ -216,8 +205,8 @@ def fromfile(infile, type=None, shape=None, sizing=STRICT,
##file whose size may be determined before allocation, should be
##quick -- only one allocation will be needed.
- recsize = dtype.itemsize * np.product([i for i in shape if i != -1])
- blocksize = max(_BLOCKSIZE/recsize, 1)*recsize
+ recsize = int(dtype.itemsize * np.product([i for i in shape if i != -1]))
+ blocksize = max(_BLOCKSIZE//recsize, 1)*recsize
##try to estimate file size
try:
@@ -228,12 +217,12 @@ def fromfile(infile, type=None, shape=None, sizing=STRICT,
except (AttributeError, IOError):
initsize=blocksize
else:
- initsize=max(1,(endpos-curpos)/recsize)*recsize
+ initsize=max(1,(endpos-curpos)//recsize)*recsize
buf = np.newbuffer(initsize)
bytesread=0
- while 1:
+ while True:
data=infile.read(blocksize)
if len(data) != blocksize: ##eof
break
@@ -259,20 +248,33 @@ def fromfile(infile, type=None, shape=None, sizing=STRICT,
except IOError:
_warnings.warn("Could not rewind (IOError in seek)",
FileSeekWarning)
- datasize = (len(data)/recsize) * recsize
+ datasize = (len(data)//recsize) * recsize
if len(buf) != bytesread+datasize:
buf=_resizebuf(buf,bytesread+datasize)
buf[bytesread:bytesread+datasize]=data[:datasize]
##deduce shape from len(buf)
shape = list(shape)
uidx = shape.index(-1)
- shape[uidx]=len(buf) / recsize
+ shape[uidx]=len(buf) // recsize
a = np.ndarray(shape=shape, dtype=type, buffer=buf)
if a.dtype.char == '?':
np.not_equal(a, 0, a)
return a
+
+# This function is referenced in the code above but was never defined;
+# adding it back. - phensley
+# This function is referenced in the code above but was never defined;
+def _resizebuf(buf,newsize):
+ "Return a copy of BUF of size NEWSIZE."
+ newbuf = np.newbuffer(newsize)
+ if newsize > len(buf):
+ newbuf[:len(buf)]=buf
+ else:
+ newbuf[:]=buf[:len(newbuf)]
+ return newbuf
+
+
def fromstring(datastring, type=None, shape=None, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, True)
if shape is None:
@@ -360,47 +362,47 @@ def info(obj, output=sys.stdout, numpy=0):
nm = getattr(cls, '__name__', cls)
else:
nm = cls
- print >> output, "class: ", nm
- print >> output, "shape: ", obj.shape
+ print("class: ", nm, file=output)
+ print("shape: ", obj.shape, file=output)
strides = obj.strides
- print >> output, "strides: ", strides
+ print("strides: ", strides, file=output)
if not numpy:
- print >> output, "byteoffset: 0"
+ print("byteoffset: 0", file=output)
if len(strides) > 0:
bs = obj.strides[0]
else:
bs = obj.itemsize
- print >> output, "bytestride: ", bs
- print >> output, "itemsize: ", obj.itemsize
- print >> output, "aligned: ", bp(obj.flags.aligned)
- print >> output, "contiguous: ", bp(obj.flags.contiguous)
+ print("bytestride: ", bs, file=output)
+ print("itemsize: ", obj.itemsize, file=output)
+ print("aligned: ", bp(obj.flags.aligned), file=output)
+ print("contiguous: ", bp(obj.flags.contiguous), file=output)
if numpy:
- print >> output, "fortran: ", obj.flags.fortran
+ print("fortran: ", obj.flags.fortran, file=output)
if not numpy:
- print >> output, "buffer: ", repr(obj.data)
+ print("buffer: ", repr(obj.data), file=output)
if not numpy:
extra = " (DEBUG ONLY)"
tic = "'"
else:
extra = ""
tic = ""
- print >> output, "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra)
- print >> output, "byteorder: ",
+ print("data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), file=output)
+ print("byteorder: ", end=' ', file=output)
endian = obj.dtype.byteorder
if endian in ['|','=']:
- print >> output, "%s%s%s" % (tic, sys.byteorder, tic)
+ print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
byteswap = False
elif endian == '>':
- print >> output, "%sbig%s" % (tic, tic)
+ print("%sbig%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "big"
else:
- print >> output, "%slittle%s" % (tic, tic)
+ print("%slittle%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "little"
- print >> output, "byteswap: ", bp(byteswap)
+ print("byteswap: ", bp(byteswap), file=output)
if not numpy:
- print >> output, "type: ", typefrom(obj).name
+ print("type: ", typefrom(obj).name, file=output)
else:
- print >> output, "type: %s" % obj.dtype
+ print("type: %s" % obj.dtype, file=output)
#clipmode is ignored if axis is not 0 and array is not 1d
def put(array, indices, values, axis=0, clipmode=RAISE):
@@ -417,7 +419,7 @@ def put(array, indices, values, axis=0, clipmode=RAISE):
work[indices] = values
work = work.swapaxes(0, axis)
else:
- def_axes = range(work.ndim)
+ def_axes = list(range(work.ndim))
for x in axis:
def_axes.remove(x)
axis = list(axis)+def_axes
@@ -453,7 +455,7 @@ def take(array, indices, axis=0, outarr=None, clipmode=RAISE):
return res
return
else:
- def_axes = range(array.ndim)
+ def_axes = list(range(array.ndim))
for x in axis:
def_axes.remove(x)
axis = list(axis) + def_axes
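
The / to // swaps in fromfile above are forced by the new `from __future__ import division` header: once true division is in effect, dividing two ints yields a float, which corrupts the byte arithmetic. A minimal sketch of the difference:

    from __future__ import division

    _BLOCKSIZE = 1024
    recsize = 24                      # e.g. int(dtype.itemsize * nrec)
    # Floor division keeps the old integer semantics for buffer sizing;
    # true division would hand back a float and break indexing later on.
    blocksize = max(_BLOCKSIZE // recsize, 1) * recsize
    assert blocksize == 1008 and isinstance(blocksize, int)
    assert _BLOCKSIZE / recsize == 1024 / 24   # a float under true division
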
diff --git a/numpy/numarray/image.py b/numpy/numarray/image.py
index 323528905..063e51376 100644
--- a/numpy/numarray/image.py
+++ b/numpy/numarray/image.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
try:
from stsci.image import *
except ImportError:
diff --git a/numpy/numarray/linear_algebra.py b/numpy/numarray/linear_algebra.py
index 238dff952..6cf22329e 100644
--- a/numpy/numarray/linear_algebra.py
+++ b/numpy/numarray/linear_algebra.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
from numpy.oldnumeric.linear_algebra import *
diff --git a/numpy/numarray/ma.py b/numpy/numarray/ma.py
index 5c7a19cf2..dbfd3d2f5 100644
--- a/numpy/numarray/ma.py
+++ b/numpy/numarray/ma.py
@@ -1,2 +1,3 @@
+from __future__ import division, absolute_import, print_function
from numpy.oldnumeric.ma import *
diff --git a/numpy/numarray/matrix.py b/numpy/numarray/matrix.py
index 86d79bbe2..e88fc086f 100644
--- a/numpy/numarray/matrix.py
+++ b/numpy/numarray/matrix.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
__all__ = ['Matrix']
diff --git a/numpy/numarray/mlab.py b/numpy/numarray/mlab.py
index 05f234d37..c5b9194ee 100644
--- a/numpy/numarray/mlab.py
+++ b/numpy/numarray/mlab.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
from numpy.oldnumeric.mlab import *
import numpy.oldnumeric.mlab as nom
diff --git a/numpy/numarray/nd_image.py b/numpy/numarray/nd_image.py
index dff7fa066..224d2ba23 100644
--- a/numpy/numarray/nd_image.py
+++ b/numpy/numarray/nd_image.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
try:
from ndimage import *
except ImportError:
diff --git a/numpy/numarray/numerictypes.py b/numpy/numarray/numerictypes.py
index 70a134855..21685c34d 100644
--- a/numpy/numarray/numerictypes.py
+++ b/numpy/numarray/numerictypes.py
@@ -25,7 +25,12 @@ Exported symbols include:
ComplexType
$Id: numerictypes.py,v 1.55 2005/12/01 16:22:03 jaytmiller Exp $
+
"""
+from __future__ import division, absolute_import, print_function
+
+import numpy
+from numpy.compat import long
__all__ = ['NumericType','HasUInt64','typeDict','IsType',
'BooleanType', 'SignedType', 'UnsignedType', 'IntegralType',
@@ -42,7 +47,6 @@ __all__ = ['NumericType','HasUInt64','typeDict','IsType',
MAX_ALIGN = 8
MAX_INT_SIZE = 8
-import numpy
LP64 = numpy.intp(0).itemsize == 8
HasUInt64 = 1
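
numerictypes.py now pulls `long` from numpy.compat, which aliases it to plain `int` on Python 3 so the integer-type tables keep working; the LP64 probe reads the native pointer-sized integer width. Roughly (assuming the numpy.compat shim added elsewhere in this change set):

    import numpy
    from numpy.compat import long   # 'long' on Python 2, 'int' on Python 3

    # LP64 platforms have an 8-byte intp; this mirrors the check above.
    LP64 = numpy.intp(0).itemsize == 8
    print(LP64, isinstance(long(1) << 40, long))
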
diff --git a/numpy/numarray/random_array.py b/numpy/numarray/random_array.py
index d70e2694a..2edb171a2 100644
--- a/numpy/numarray/random_array.py
+++ b/numpy/numarray/random_array.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
__all__ = ['ArgumentError', 'F', 'beta', 'binomial', 'chi_square',
'exponential', 'gamma', 'get_seed', 'multinomial',
diff --git a/numpy/numarray/session.py b/numpy/numarray/session.py
index 0982742ab..e40cd4033 100644
--- a/numpy/numarray/session.py
+++ b/numpy/numarray/session.py
@@ -72,6 +72,7 @@ Saved modules are re-imported at load time but any "state" in the module
which is not restored by a simple import is lost.
"""
+from __future__ import division, absolute_import, print_function
__all__ = ['load', 'save']
@@ -119,14 +120,14 @@ def _callers_modules():
g = _callers_globals()
mods = []
for k,v in g.items():
- if type(v) == type(sys):
+ if isinstance(v, type(sys)):
mods.append(getattr(v,"__name__"))
return mods
def _errout(*args):
for a in args:
- print >>sys.stderr, a,
- print >>sys.stderr
+ print(a, end=' ', file=sys.stderr)
+ print(file=sys.stderr)
def _verbose(*args):
if VERBOSE:
@@ -168,7 +169,7 @@ def _loadmodule(module):
s = ""
for i in range(len(modules)):
s = ".".join(modules[:i+1])
- exec "import " + s
+ exec("import " + s)
return sys.modules[module]
class _ObjectProxy(object):
@@ -267,11 +268,11 @@ def save(variables=None, file=SAVEFILE, dictionary=None, verbose=False):
dictionary = _callers_globals()
if variables is None:
- keys = dictionary.keys()
+ keys = list(dictionary.keys())
else:
keys = variables.split(",")
- source_modules = _callers_modules() + sys.modules.keys()
+ source_modules = _callers_modules() + list(sys.modules.keys())
p = pickle.Pickler(file, protocol=2)
@@ -325,13 +326,13 @@ def load(variables=None, file=SAVEFILE, dictionary=None, verbose=False):
dictionary = _callers_globals()
values = []
p = pickle.Unpickler(file)
- while 1:
+ while True:
o = p.load()
if isinstance(o, _SaveSession):
session = dict(zip(o.keys, values))
_verbose("updating dictionary with session variables.")
if variables is None:
- keys = session.keys()
+ keys = list(session.keys())
else:
keys = variables.split(",")
for k in keys:
diff --git a/numpy/numarray/setup.py b/numpy/numarray/setup.py
index 641990217..5c9574917 100644
--- a/numpy/numarray/setup.py
+++ b/numpy/numarray/setup.py
@@ -1,3 +1,5 @@
+from __future__ import division, print_function
+
from os.path import join
def configuration(parent_package='',top_path=None):
diff --git a/numpy/numarray/setupscons.py b/numpy/numarray/setupscons.py
deleted file mode 100644
index 173612ae8..000000000
--- a/numpy/numarray/setupscons.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from os.path import join
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('numarray',parent_package,top_path)
-
- config.add_data_files('include/numpy/')
- config.add_sconscript('SConstruct', source_files = ['_capi.c'])
-
- return config
-
-if __name__ == '__main__':
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/numarray/ufuncs.py b/numpy/numarray/ufuncs.py
index 3fb5671ce..57fc58c54 100644
--- a/numpy/numarray/ufuncs.py
+++ b/numpy/numarray/ufuncs.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
__all__ = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh',
'arctan', 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_not',
diff --git a/numpy/numarray/util.py b/numpy/numarray/util.py
index 9555474a8..b8e37a28d 100644
--- a/numpy/numarray/util.py
+++ b/numpy/numarray/util.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import numpy as np
@@ -21,22 +23,22 @@ def handleError(errorStatus, sourcemsg):
modes = np.geterr()
if errorStatus & np.FPE_INVALID:
if modes['invalid'] == "warn":
- print "Warning: Encountered invalid numeric result(s)", sourcemsg
+ print("Warning: Encountered invalid numeric result(s)", sourcemsg)
if modes['invalid'] == "raise":
raise MathDomainError(sourcemsg)
if errorStatus & np.FPE_DIVIDEBYZERO:
if modes['dividebyzero'] == "warn":
- print "Warning: Encountered divide by zero(s)", sourcemsg
+ print("Warning: Encountered divide by zero(s)", sourcemsg)
if modes['dividebyzero'] == "raise":
raise ZeroDivisionError(sourcemsg)
if errorStatus & np.FPE_OVERFLOW:
if modes['overflow'] == "warn":
- print "Warning: Encountered overflow(s)", sourcemsg
+ print("Warning: Encountered overflow(s)", sourcemsg)
if modes['overflow'] == "raise":
raise NumOverflowError(sourcemsg)
if errorStatus & np.FPE_UNDERFLOW:
if modes['underflow'] == "warn":
- print "Warning: Encountered underflow(s)", sourcemsg
+ print("Warning: Encountered underflow(s)", sourcemsg)
if modes['underflow'] == "raise":
raise UnderflowError(sourcemsg)
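
The handleError and info() rewrites are mechanical applications of the print function: `print >> stream, x` becomes `print(x, file=stream)` and a trailing comma becomes `end=' '`. For instance:

    from __future__ import print_function
    import sys

    print("Warning: Encountered overflow(s)", "sourcemsg", file=sys.stderr)
    print("byteorder: ", end=' ')      # replaces the trailing-comma form
    print(sys.byteorder)
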
diff --git a/numpy/oldnumeric/__init__.py b/numpy/oldnumeric/__init__.py
index 05712c02c..86cdf55f7 100644
--- a/numpy/oldnumeric/__init__.py
+++ b/numpy/oldnumeric/__init__.py
@@ -1,27 +1,37 @@
-# Don't add these to the __all__ variable though
+"""Don't add these to the __all__ variable though
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import warnings
+
from numpy import *
+_msg = "The oldnumeric module will be dropped in Numpy 1.9"
+warnings.warn(_msg, ModuleDeprecationWarning)
+
+
def _move_axis_to_0(a, axis):
if axis == 0:
return a
n = len(a.shape)
if axis < 0:
axis += n
- axes = range(1, axis+1) + [0,] + range(axis+1, n)
+ axes = list(range(1, axis+1)) + [0,] + list(range(axis+1, n))
return transpose(a, axes)
# Add these
-from compat import *
-from functions import *
-from precision import *
-from ufuncs import *
-from misc import *
-
-import compat
-import precision
-import functions
-import misc
-import ufuncs
+from .compat import *
+from .functions import *
+from .precision import *
+from .ufuncs import *
+from .misc import *
+
+from . import compat
+from . import precision
+from . import functions
+from . import misc
+from . import ufuncs
import numpy
__version__ = numpy.__version__
diff --git a/numpy/oldnumeric/alter_code1.py b/numpy/oldnumeric/alter_code1.py
index 87538a855..34a59a7ca 100644
--- a/numpy/oldnumeric/alter_code1.py
+++ b/numpy/oldnumeric/alter_code1.py
@@ -27,7 +27,10 @@ Makes the following changes:
* Converts uses of type(...) is <type>
isinstance(..., <type>)
+
"""
+from __future__ import division, absolute_import, print_function
+
__all__ = ['convertfile', 'convertall', 'converttree', 'convertsrc']
import sys
@@ -85,7 +88,7 @@ def changeimports(fstr, name, newname):
ind = 0
Nlen = len(fromstr)
Nlen2 = len("from %s import " % newname)
- while 1:
+ while True:
found = fstr.find(fromstr,ind)
if (found < 0):
break
diff --git a/numpy/oldnumeric/alter_code2.py b/numpy/oldnumeric/alter_code2.py
index baa6b9d26..c163c9565 100644
--- a/numpy/oldnumeric/alter_code2.py
+++ b/numpy/oldnumeric/alter_code2.py
@@ -19,6 +19,8 @@ Makes the following changes:
oldnumeric.random_array, and oldnumeric.fft
"""
+from __future__ import division, absolute_import, print_function
+
#__all__ = ['convertfile', 'convertall', 'converttree']
__all__ = []
@@ -52,7 +54,7 @@ def changeimports(fstr, name, newname):
ind = 0
Nlen = len(fromstr)
Nlen2 = len("from %s import " % newname)
- while 1:
+ while True:
found = fstr.find(fromstr,ind)
if (found < 0):
break
diff --git a/numpy/oldnumeric/array_printer.py b/numpy/oldnumeric/array_printer.py
index 95f3f42c7..9cccc5023 100644
--- a/numpy/oldnumeric/array_printer.py
+++ b/numpy/oldnumeric/array_printer.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
__all__ = ['array2string']
diff --git a/numpy/oldnumeric/arrayfns.py b/numpy/oldnumeric/arrayfns.py
index 683ed309d..534ccacf7 100644
--- a/numpy/oldnumeric/arrayfns.py
+++ b/numpy/oldnumeric/arrayfns.py
@@ -1,5 +1,7 @@
-"""Backward compatible with arrayfns from Numeric
+"""Backward compatible with arrayfns from Numeric.
+
"""
+from __future__ import division, absolute_import, print_function
__all__ = ['array_set', 'construct3', 'digitize', 'error', 'find_mask',
'histogram', 'index_sort', 'interp', 'nz', 'reverse', 'span',
@@ -20,7 +22,7 @@ def array_set(vals1, indices, vals2):
vals1 = asarray(vals1)
vals2 = asarray(vals2)
if vals1.ndim != vals2.ndim or vals1.ndim < 1:
- raise error, "vals1 and vals2 must have same number of dimensions (>=1)"
+ raise error("vals1 and vals2 must have same number of dimensions (>=1)")
vals1[indices] = vals2
from numpy import digitize
@@ -38,7 +40,7 @@ def interp(y, x, z, typ=None):
if typ == 'f':
return res.astype('f')
- raise error, "incompatible typecode"
+ raise error("incompatible typecode")
def nz(x):
x = asarray(x,dtype=np.ubyte)
diff --git a/numpy/oldnumeric/compat.py b/numpy/oldnumeric/compat.py
index 083a1fa15..841052656 100644
--- a/numpy/oldnumeric/compat.py
+++ b/numpy/oldnumeric/compat.py
@@ -1,4 +1,18 @@
-# Compatibility module containing deprecated names
+"""Compatibility module containing deprecated names.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import copy
+import pickle
+from pickle import dump, dumps
+
+import numpy.core.multiarray as multiarray
+import numpy.core.umath as um
+from numpy.core.numeric import array
+from . import functions
+
__all__ = ['NewAxis',
'UFuncType', 'UfuncType', 'ArrayType', 'arraytype',
@@ -10,13 +24,6 @@ __all__ = ['NewAxis',
'Unpickler', 'Pickler'
]
-import numpy.core.multiarray as multiarray
-import numpy.core.umath as um
-from numpy.core.numeric import array
-import functions
-import sys
-
-from cPickle import dump, dumps
mu = multiarray
@@ -44,8 +51,7 @@ def DumpArray(m, fp):
m.dump(fp)
def LoadArray(fp):
- import cPickle
- return cPickle.load(fp)
+ return pickle.load(fp)
def array_constructor(shape, typecode, thestr, Endian=LittleEndian):
if typecode == "O":
@@ -67,15 +73,14 @@ def pickle_array(a):
(a.shape, a.dtype.char, a.tostring(), LittleEndian))
def loads(astr):
- import cPickle
- arr = cPickle.loads(astr.replace('Numeric', 'numpy.oldnumeric'))
+ arr = pickle.loads(astr.replace('Numeric', 'numpy.oldnumeric'))
return arr
def load(fp):
return loads(fp.read())
def _LoadArray(fp):
- import typeconv
+ from . import typeconv
ln = fp.readline().split()
if ln[0][0] == 'A': ln[0] = ln[0][1:]
typecode = ln[0][0]
@@ -94,7 +99,6 @@ def _LoadArray(fp):
else:
return m
-import pickle, copy
if sys.version_info[0] >= 3:
class Unpickler(pickle.Unpickler):
# XXX: should we implement this? It's not completely straightforward
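
cPickle is gone in Python 3, where the pickle module transparently wraps the C implementation, so compat.py can import pickle unconditionally. A quick check that the round trip behaves as before:

    import pickle
    from pickle import dump, dumps     # replaces 'from cPickle import ...'

    buf = dumps([1, 2, 3], protocol=2)
    assert pickle.loads(buf) == [1, 2, 3]
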
diff --git a/numpy/oldnumeric/fft.py b/numpy/oldnumeric/fft.py
index 67f30c750..0fd3ae48e 100644
--- a/numpy/oldnumeric/fft.py
+++ b/numpy/oldnumeric/fft.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'fft2d', 'fftnd', 'hermite_fft', 'inverse_fft',
'inverse_fft2d', 'inverse_fftnd',
diff --git a/numpy/oldnumeric/fix_default_axis.py b/numpy/oldnumeric/fix_default_axis.py
index 8483de85e..5f6128724 100644
--- a/numpy/oldnumeric/fix_default_axis.py
+++ b/numpy/oldnumeric/fix_default_axis.py
@@ -32,7 +32,10 @@ cumprod
prod
std
mean
+
"""
+from __future__ import division, absolute_import, print_function
+
__all__ = ['convertfile', 'convertall', 'converttree']
import sys
@@ -182,7 +185,7 @@ def _import_change(fstr, names):
ind = 0
importstr = "from numpy import"
N = len(importstr)
- while 1:
+ while True:
ind = fstr.find(importstr, ind)
if (ind < 0):
break
@@ -258,7 +261,7 @@ def convertfile(filename, import_change=False):
filestr = getfile(filename)
newstr, total = add_axis(filestr, import_change)
if total > 0:
- print "Changing ", filename
+ print("Changing ", filename)
copyfile(filename, filestr)
makenewfile(filename, newstr)
sys.stdout.flush()
diff --git a/numpy/oldnumeric/functions.py b/numpy/oldnumeric/functions.py
index db62f7cb5..c5941fb67 100644
--- a/numpy/oldnumeric/functions.py
+++ b/numpy/oldnumeric/functions.py
@@ -1,9 +1,12 @@
-# Functions that should behave the same as Numeric and need changing
+"""Functions that should behave the same as Numeric and need changing
+
+"""
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.core.multiarray as mu
import numpy.core.numeric as nn
-from typeconv import convtypecode, convtypecode2
+from .typeconv import convtypecode, convtypecode2
__all__ = ['take', 'repeat', 'sum', 'product', 'sometrue', 'alltrue',
'cumsum', 'cumproduct', 'compress', 'fromfunction',
diff --git a/numpy/oldnumeric/linear_algebra.py b/numpy/oldnumeric/linear_algebra.py
index 2e7a264fe..f9938b503 100644
--- a/numpy/oldnumeric/linear_algebra.py
+++ b/numpy/oldnumeric/linear_algebra.py
@@ -1,10 +1,12 @@
"""Backward compatible with LinearAlgebra from Numeric
-"""
-# This module is a lite version of the linalg.py module in SciPy which contains
-# high-level Python interface to the LAPACK library. The lite version
-# only accesses the following LAPACK functions: dgesv, zgesv, dgeev,
-# zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, dpotrf.
+This module is a lite version of the linalg.py module in SciPy which contains
+high-level Python interface to the LAPACK library. The lite version
+only accesses the following LAPACK functions: dgesv, zgesv, dgeev,
+zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, dpotrf.
+
+"""
+from __future__ import division, absolute_import, print_function
__all__ = ['LinAlgError', 'solve_linear_equations',
'inverse', 'cholesky_decomposition', 'eigenvalues',
diff --git a/numpy/oldnumeric/ma.py b/numpy/oldnumeric/ma.py
index 732e8447a..fbc0aca27 100644
--- a/numpy/oldnumeric/ma.py
+++ b/numpy/oldnumeric/ma.py
@@ -1,4 +1,5 @@
"""MA: a facility for dealing with missing observations
+
MA is generally used as a numpy.array look-alike.
by Paul F. Dubois.
@@ -8,18 +9,29 @@ Adapted for numpy_core 2005 by Travis Oliphant and
(mainly) Paul Dubois.
"""
-import types, sys
+from __future__ import division, absolute_import, print_function
+
+import sys
+import types
+import warnings
+from functools import reduce
import numpy.core.umath as umath
import numpy.core.fromnumeric as fromnumeric
+import numpy.core.numeric as numeric
from numpy.core.numeric import newaxis, ndarray, inf
from numpy.core.fromnumeric import amax, amin
from numpy.core.numerictypes import bool_, typecodes
import numpy.core.numeric as numeric
-import warnings
+from numpy.compat import bytes, long
if sys.version_info[0] >= 3:
- from functools import reduce
+ _MAXINT = sys.maxsize
+ _MININT = -sys.maxsize - 1
+else:
+ _MAXINT = sys.maxint
+ _MININT = -sys.maxint - 1
+
# Ufunc domain lookup for __array_wrap__
ufunc_domain = {}
@@ -83,13 +95,13 @@ default_object_fill_value = '?'
def default_fill_value (obj):
"Function to calculate default fill value for an object."
- if isinstance(obj, types.FloatType):
+ if isinstance(obj, float):
return default_real_fill_value
- elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType):
+ elif isinstance(obj, int) or isinstance(obj, long):
return default_integer_fill_value
- elif isinstance(obj, types.StringType):
+ elif isinstance(obj, bytes):
return default_character_fill_value
- elif isinstance(obj, types.ComplexType):
+ elif isinstance(obj, complex):
return default_complex_fill_value
elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray):
x = obj.dtype.char
@@ -109,33 +121,33 @@ def default_fill_value (obj):
def minimum_fill_value (obj):
"Function to calculate default fill value suitable for taking minima."
- if isinstance(obj, types.FloatType):
+ if isinstance(obj, float):
return numeric.inf
- elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType):
- return sys.maxint
+ elif isinstance(obj, int) or isinstance(obj, long):
+ return _MAXINT
elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray):
x = obj.dtype.char
if x in typecodes['Float']:
return numeric.inf
if x in typecodes['Integer']:
- return sys.maxint
+ return _MAXINT
if x in typecodes['UnsignedInteger']:
- return sys.maxint
+ return _MAXINT
else:
raise TypeError('Unsuitable type for calculating minimum.')
def maximum_fill_value (obj):
"Function to calculate default fill value suitable for taking maxima."
- if isinstance(obj, types.FloatType):
+ if isinstance(obj, float):
return -inf
- elif isinstance(obj, types.IntType) or isinstance(obj, types.LongType):
- return -sys.maxint
+ elif isinstance(obj, int) or isinstance(obj, long):
+ return -_MAXINT
elif isinstance(obj, MaskedArray) or isinstance(obj, ndarray):
x = obj.dtype.char
if x in typecodes['Float']:
return -inf
if x in typecodes['Integer']:
- return -sys.maxint
+ return -_MAXINT
if x in typecodes['UnsignedInteger']:
return 0
else:
@@ -229,7 +241,7 @@ def filled (a, value = None):
return a.filled(value)
elif isinstance(a, ndarray) and a.flags['CONTIGUOUS']:
return a
- elif isinstance(a, types.DictType):
+ elif isinstance(a, dict):
return numeric.array(a, 'O')
else:
return numeric.array(a)
@@ -863,6 +875,18 @@ array(data = %(data)s,
return bool(m is not nomask and m.any()
or d is not nomask and d.any())
+ def __bool__(self):
+ """returns true if any element is non-zero or masked
+
+ """
+ # XXX: This changes bool conversion logic from MA.
+ # XXX: In MA bool(a) == len(a) != 0, but in numpy
+ # XXX: scalars do not have len
+ m = self._mask
+ d = self._data
+ return bool(m is not nomask and m.any()
+ or d is not nomask and d.any())
+
def __len__ (self):
"""Return length of first dimension. This is weird but Python's
slicing behavior depends on it."""
@@ -1521,7 +1545,7 @@ def new_repeat(a, repeats, axis=None):
telling how many times to repeat each element.
"""
af = filled(a)
- if isinstance(repeats, types.IntType):
+ if isinstance(repeats, int):
if axis is None:
num = af.size
else:
@@ -2131,11 +2155,13 @@ def asarray(data, dtype=None):
# Add methods to support ndarray interface
# XXX: It is better to change the masked_*_operation adaptors
# XXX: to wrap ndarray methods directly to create ma.array methods.
-from types import MethodType
+
def _m(f):
- return MethodType(f, None, array)
+ return types.MethodType(f, None, array)
+
def not_implemented(*args, **kwds):
raise NotImplementedError("not yet implemented for numpy.ma arrays")
+
array.all = _m(alltrue)
array.any = _m(sometrue)
array.argmax = _m(argmax)
@@ -2255,7 +2281,7 @@ array.std = _m(_std)
array.view = _m(not_implemented)
array.round = _m(around)
-del _m, MethodType, not_implemented
+del _m, not_implemented
masked = MaskedArray(0, int, mask=1)
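
Two Python 3 details drive the ma.py hunks: sys.maxint no longer exists (ints are unbounded; sys.maxsize is the nearest bound), and truth testing calls __bool__ where Python 2 called __nonzero__, so the class must define both. A minimal sketch, with a hypothetical class standing in for MaskedArray:

    import sys

    if sys.version_info[0] >= 3:
        _MAXINT = sys.maxsize
        _MININT = -sys.maxsize - 1
    else:
        _MAXINT = sys.maxint
        _MININT = -sys.maxint - 1

    class Truthy(object):
        # Python 2 consults __nonzero__, Python 3 consults __bool__,
        # so both names point at the same logic.
        def __init__(self, flag):
            self.flag = flag
        def __nonzero__(self):
            return bool(self.flag)
        __bool__ = __nonzero__

    print(_MAXINT, bool(Truthy(0)), bool(Truthy(1)))
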
diff --git a/numpy/oldnumeric/matrix.py b/numpy/oldnumeric/matrix.py
index ddd612266..25dfe6302 100644
--- a/numpy/oldnumeric/matrix.py
+++ b/numpy/oldnumeric/matrix.py
@@ -1,9 +1,12 @@
-# This module is for compatibility only.
+"""This module is for compatibility only.
+
+"""
+from __future__ import division, absolute_import, print_function
__all__ = ['UserArray', 'squeeze', 'Matrix', 'asarray', 'dot', 'k', 'Numeric', 'LinearAlgebra', 'identity', 'multiply', 'types', 'string']
import types
-from user_array import UserArray, asarray
+from .user_array import UserArray, asarray
import numpy.oldnumeric as Numeric
from numpy.oldnumeric import dot, identity, multiply
import numpy.oldnumeric.linear_algebra as LinearAlgebra
@@ -37,7 +40,7 @@ def _convert_from_string(data):
newrow = []
for col in trow:
temp = col.split()
- newrow.extend(map(_eval,temp))
+ newrow.extend(map(_eval, temp))
if count == 0:
Ncols = len(newrow)
elif len(newrow) != Ncols:
diff --git a/numpy/oldnumeric/misc.py b/numpy/oldnumeric/misc.py
index ccd47efbb..beaafd503 100644
--- a/numpy/oldnumeric/misc.py
+++ b/numpy/oldnumeric/misc.py
@@ -1,5 +1,7 @@
-# Functions that already have the correct syntax or miscellaneous functions
+"""Functions that already have the correct syntax or miscellaneous functions
+"""
+from __future__ import division, absolute_import, print_function
__all__ = ['sort', 'copy_reg', 'clip', 'rank',
'sign', 'shape', 'types', 'allclose', 'size',
@@ -12,18 +14,17 @@ __all__ = ['sort', 'copy_reg', 'clip', 'rank',
'dot', 'outerproduct', 'innerproduct', 'insert']
import types
-import StringIO
import pickle
import math
import copy
-import copy_reg
-
import sys
+
if sys.version_info[0] >= 3:
- import copyreg
- import io
- StringIO = io.BytesIO
- copy_reg = copyreg
+ import copyreg as copy_reg
+ from io import BytesIO as StringIO
+else:
+ import copy_reg
+ from StringIO import StringIO
from numpy import sort, clip, rank, sign, shape, putmask, allclose, size,\
choose, swapaxes, array_str, array_repr, e, pi, put, \
@@ -33,4 +34,4 @@ from numpy import sort, clip, rank, sign, shape, putmask, allclose, size,\
correlate as cross_correlate, \
place as insert
-from array_printer import array2string
+from .array_printer import array2string
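
misc.py keeps exporting copy_reg and StringIO under their Python 2 names by aliasing the renamed Python 3 modules, the same guard pattern used across this port:

    import sys

    if sys.version_info[0] >= 3:
        import copyreg as copy_reg
        from io import BytesIO as StringIO
    else:
        import copy_reg
        from StringIO import StringIO

    buf = StringIO()                   # byte-oriented on Python 3
    print(copy_reg.__name__)
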
diff --git a/numpy/oldnumeric/mlab.py b/numpy/oldnumeric/mlab.py
index 2c84b6960..bc2844a87 100644
--- a/numpy/oldnumeric/mlab.py
+++ b/numpy/oldnumeric/mlab.py
@@ -1,4 +1,7 @@
-# This module is for compatibility only. All functions are defined elsewhere.
+"""This module is for compatibility only. All functions are defined elsewhere.
+
+"""
+from __future__ import division, absolute_import, print_function
__all__ = ['rand', 'tril', 'trapz', 'hanning', 'rot90', 'triu', 'diff', 'angle',
'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort',
@@ -18,7 +21,7 @@ from numpy.linalg import eig, svd
from numpy.random import rand, randn
import numpy as np
-from typeconv import convtypecode
+from .typeconv import convtypecode
def eye(N, M=None, k=0, typecode=None, dtype=None):
""" eye returns a N-by-M 2-d array where the k-th diagonal is all ones,
@@ -95,17 +98,17 @@ def corrcoef(x, y=None):
d = diag(c)
return c/sqrt(multiply.outer(d,d))
-from compat import *
-from functions import *
-from precision import *
-from ufuncs import *
-from misc import *
-
-import compat
-import precision
-import functions
-import misc
-import ufuncs
+from .compat import *
+from .functions import *
+from .precision import *
+from .ufuncs import *
+from .misc import *
+
+from . import compat
+from . import precision
+from . import functions
+from . import misc
+from . import ufuncs
import numpy
__version__ = numpy.__version__
diff --git a/numpy/oldnumeric/precision.py b/numpy/oldnumeric/precision.py
index c773e9478..d7b0344ee 100644
--- a/numpy/oldnumeric/precision.py
+++ b/numpy/oldnumeric/precision.py
@@ -1,13 +1,18 @@
-# Lifted from Precision.py. This is for compatibility only.
-#
-# The character strings are still for "new" NumPy
-# which is the only Incompatibility with Numeric
+"""
+
+Lifted from Precision.py. This is for compatibility only.
+
+The character strings are still for "new" NumPy
+which is the only incompatibility with Numeric
+
+"""
+from __future__ import division, absolute_import, print_function
__all__ = ['Character', 'Complex', 'Float',
'PrecisionError', 'PyObject', 'Int', 'UInt',
'UnsignedInt', 'UnsignedInteger', 'string', 'typecodes', 'zeros']
-from functions import zeros
+from .functions import zeros
import string # for backwards compatibility
typecodes = {'Character':'c', 'Integer':'bhil', 'UnsignedInteger':'BHIL', 'Float':'fd', 'Complex':'FD'}
diff --git a/numpy/oldnumeric/random_array.py b/numpy/oldnumeric/random_array.py
index 777e2a645..ecb3d0b23 100644
--- a/numpy/oldnumeric/random_array.py
+++ b/numpy/oldnumeric/random_array.py
@@ -1,4 +1,7 @@
-# Backward compatible module for RandomArray
+"""Backward compatible module for RandomArray
+
+"""
+from __future__ import division, absolute_import, print_function
__all__ = ['ArgumentError','F','beta','binomial','chi_square', 'exponential',
'gamma', 'get_seed', 'mean_var_test', 'multinomial',
@@ -189,60 +192,60 @@ def mean_var_test(x, type, mean, var, skew=[]):
x_mean = np.sum(x,axis=0)/n
x_minus_mean = x - x_mean
x_var = np.sum(x_minus_mean*x_minus_mean,axis=0)/(n-1.0)
- print "\nAverage of ", len(x), type
- print "(should be about ", mean, "):", x_mean
- print "Variance of those random numbers (should be about ", var, "):", x_var
+ print("\nAverage of ", len(x), type)
+ print("(should be about ", mean, "):", x_mean)
+ print("Variance of those random numbers (should be about ", var, "):", x_var)
if skew != []:
x_skew = (np.sum(x_minus_mean*x_minus_mean*x_minus_mean,axis=0)/9998.)/x_var**(3./2.)
- print "Skewness of those random numbers (should be about ", skew, "):", x_skew
+ print("Skewness of those random numbers (should be about ", skew, "):", x_skew)
def test():
obj = mt.get_state()
mt.set_state(obj)
obj2 = mt.get_state()
if (obj2[1] - obj[1]).any():
- raise SystemExit, "Failed seed test."
- print "First random number is", random()
- print "Average of 10000 random numbers is", np.sum(random(10000),axis=0)/10000.
+ raise SystemExit("Failed seed test.")
+ print("First random number is", random())
+ print("Average of 10000 random numbers is", np.sum(random(10000),axis=0)/10000.)
x = random([10,1000])
if len(x.shape) != 2 or x.shape[0] != 10 or x.shape[1] != 1000:
- raise SystemExit, "random returned wrong shape"
+ raise SystemExit("random returned wrong shape")
x.shape = (10000,)
- print "Average of 100 by 100 random numbers is", np.sum(x,axis=0)/10000.
+ print("Average of 100 by 100 random numbers is", np.sum(x,axis=0)/10000.)
y = uniform(0.5,0.6, (1000,10))
if len(y.shape) !=2 or y.shape[0] != 1000 or y.shape[1] != 10:
- raise SystemExit, "uniform returned wrong shape"
+ raise SystemExit("uniform returned wrong shape")
y.shape = (10000,)
if np.minimum.reduce(y) <= 0.5 or np.maximum.reduce(y) >= 0.6:
- raise SystemExit, "uniform returned out of desired range"
- print "randint(1, 10, shape=[50])"
- print randint(1, 10, shape=[50])
- print "permutation(10)", permutation(10)
- print "randint(3,9)", randint(3,9)
- print "random_integers(10, shape=[20])"
- print random_integers(10, shape=[20])
+ raise SystemExit("uniform returned out of desired range")
+ print("randint(1, 10, shape=[50])")
+ print(randint(1, 10, shape=[50]))
+ print("permutation(10)", permutation(10))
+ print("randint(3,9)", randint(3,9))
+ print("random_integers(10, shape=[20])")
+ print(random_integers(10, shape=[20]))
s = 3.0
x = normal(2.0, s, [10, 1000])
if len(x.shape) != 2 or x.shape[0] != 10 or x.shape[1] != 1000:
- raise SystemExit, "standard_normal returned wrong shape"
+ raise SystemExit("standard_normal returned wrong shape")
x.shape = (10000,)
mean_var_test(x, "normally distributed numbers with mean 2 and variance %f"%(s**2,), 2, s**2, 0)
x = exponential(3, 10000)
mean_var_test(x, "random numbers exponentially distributed with mean %f"%(s,), s, s**2, 2)
x = multivariate_normal(np.array([10,20]), np.array(([1,2],[2,4])))
- print "\nA multivariate normal", x
- if x.shape != (2,): raise SystemExit, "multivariate_normal returned wrong shape"
+ print("\nA multivariate normal", x)
+ if x.shape != (2,): raise SystemExit("multivariate_normal returned wrong shape")
x = multivariate_normal(np.array([10,20]), np.array([[1,2],[2,4]]), [4,3])
- print "A 4x3x2 array containing multivariate normals"
- print x
- if x.shape != (4,3,2): raise SystemExit, "multivariate_normal returned wrong shape"
+ print("A 4x3x2 array containing multivariate normals")
+ print(x)
+ if x.shape != (4,3,2): raise SystemExit("multivariate_normal returned wrong shape")
x = multivariate_normal(np.array([-100,0,100]), np.array([[3,2,1],[2,2,1],[1,1,1]]), 10000)
x_mean = np.sum(x,axis=0)/10000.
- print "Average of 10000 multivariate normals with mean [-100,0,100]"
- print x_mean
+ print("Average of 10000 multivariate normals with mean [-100,0,100]")
+ print(x_mean)
x_minus_mean = x - x_mean
- print "Estimated covariance of 10000 multivariate normals with covariance [[3,2,1],[2,2,1],[1,1,1]]"
- print np.dot(np.transpose(x_minus_mean),x_minus_mean)/9999.
+ print("Estimated covariance of 10000 multivariate normals with covariance [[3,2,1],[2,2,1],[1,1,1]]")
+ print(np.dot(np.transpose(x_minus_mean),x_minus_mean)/9999.)
x = beta(5.0, 10.0, 10000)
mean_var_test(x, "beta(5.,10.) random numbers", 0.333, 0.014)
x = gamma(.01, 2., 10000)
@@ -253,14 +256,14 @@ def test():
mean_var_test(x, "F random numbers with 5 and 10 degrees of freedom", 1.25, 1.35)
x = poisson(50., 10000)
mean_var_test(x, "poisson random numbers with mean 50", 50, 50, 0.14)
- print "\nEach element is the result of 16 binomial trials with probability 0.5:"
- print binomial(16, 0.5, 16)
- print "\nEach element is the result of 16 negative binomial trials with probability 0.5:"
- print negative_binomial(16, 0.5, [16,])
- print "\nEach row is the result of 16 multinomial trials with probabilities [0.1, 0.5, 0.1 0.3]:"
+ print("\nEach element is the result of 16 binomial trials with probability 0.5:")
+ print(binomial(16, 0.5, 16))
+ print("\nEach element is the result of 16 negative binomial trials with probability 0.5:")
+ print(negative_binomial(16, 0.5, [16,]))
+ print("\nEach row is the result of 16 multinomial trials with probabilities [0.1, 0.5, 0.1 0.3]:")
x = multinomial(16, [0.1, 0.5, 0.1], 8)
- print x
- print "Mean = ", np.sum(x,axis=0)/8.
+ print(x)
+ print("Mean = ", np.sum(x,axis=0)/8.)
if __name__ == '__main__':
test()
diff --git a/numpy/oldnumeric/rng.py b/numpy/oldnumeric/rng.py
index 28d3f16df..1ffd47b27 100644
--- a/numpy/oldnumeric/rng.py
+++ b/numpy/oldnumeric/rng.py
@@ -1,8 +1,10 @@
-# This module re-creates the RNG interface from Numeric
-# Replace import RNG with import numpy.oldnumeric.rng as RNG
-#
-# It is for backwards compatibility only.
+"""Re-create the RNG interface from Numeric.
+Replace import RNG with import numpy.oldnumeric.rng as RNG.
+It is for backwards compatibility only.
+
+"""
+from __future__ import division, absolute_import, print_function
__all__ = ['CreateGenerator','ExponentialDistribution','LogNormalDistribution',
'NormalDistribution', 'UniformDistribution', 'error', 'ranf',
@@ -36,7 +38,7 @@ class Distribution(object):
class ExponentialDistribution(Distribution):
def __init__(self, lambda_):
if (lambda_ <= 0):
- raise error, "parameter must be positive"
+ raise error("parameter must be positive")
Distribution.__init__(self, 'exponential', lambda_)
def density(x):
@@ -51,7 +53,7 @@ class LogNormalDistribution(Distribution):
m = float(m)
s = float(s)
if (s <= 0):
- raise error, "standard deviation must be positive"
+ raise error("standard deviation must be positive")
Distribution.__init__(self, 'lognormal', m, s)
sn = math.log(1.0+s*s/(m*m));
self._mn = math.log(m)-0.5*sn
@@ -69,7 +71,7 @@ class NormalDistribution(Distribution):
m = float(m)
s = float(s)
if (s <= 0):
- raise error, "standard deviation must be positive"
+ raise error("standard deviation must be positive")
Distribution.__init__(self, 'normal', m, s)
self._fac = 1.0/math.sqrt(2*math.pi)/s
@@ -84,7 +86,7 @@ class UniformDistribution(Distribution):
b = float(b)
width = b-a
if (width <=0):
- raise error, "width of uniform distribution must be > 0"
+ raise error("width of uniform distribution must be > 0")
Distribution.__init__(self, 'uniform', a, b)
self._fac = 1.0/width
@@ -106,7 +108,7 @@ class CreateGenerator(object):
if dist is None:
dist = default_distribution
if not isinstance(dist, Distribution):
- raise error, "Not a distribution object"
+ raise error("Not a distribution object")
self._dist = dist
def ranf(self):
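
The `raise error, "msg"` form is Python-2-only syntax; instantiating the exception, as every hunk in rng.py now does, is valid on both lines of the language. A sketch reusing the module's own `error` name:

    class error(Exception):            # rng.py defines this exception itself
        pass

    def check_width(width):
        if width <= 0:
            raise error("width of uniform distribution must be > 0")

    try:
        check_width(0)
    except error as exc:               # 'except error, exc:' was also Py2-only
        print(exc)
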
diff --git a/numpy/oldnumeric/rng_stats.py b/numpy/oldnumeric/rng_stats.py
index 8c7fec433..ed4781e6c 100644
--- a/numpy/oldnumeric/rng_stats.py
+++ b/numpy/oldnumeric/rng_stats.py
@@ -1,3 +1,4 @@
+from __future__ import division, absolute_import, print_function
__all__ = ['average', 'histogram', 'standardDeviation', 'variance']
diff --git a/numpy/oldnumeric/setup.py b/numpy/oldnumeric/setup.py
index 31b5ff3cc..5dd8d3c5c 100644
--- a/numpy/oldnumeric/setup.py
+++ b/numpy/oldnumeric/setup.py
@@ -1,3 +1,4 @@
+from __future__ import division, print_function
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/oldnumeric/setupscons.py b/numpy/oldnumeric/setupscons.py
deleted file mode 100644
index 82e8a6201..000000000
--- a/numpy/oldnumeric/setupscons.py
+++ /dev/null
@@ -1,8 +0,0 @@
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- return Configuration('oldnumeric',parent_package,top_path)
-
-if __name__ == '__main__':
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/oldnumeric/tests/test_oldnumeric.py b/numpy/oldnumeric/tests/test_oldnumeric.py
index 24d709d2c..5ff8e6bd3 100644
--- a/numpy/oldnumeric/tests/test_oldnumeric.py
+++ b/numpy/oldnumeric/tests/test_oldnumeric.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import unittest
from numpy.testing import *
diff --git a/numpy/oldnumeric/tests/test_regression.py b/numpy/oldnumeric/tests/test_regression.py
index 235ae4fe5..b62a384e4 100644
--- a/numpy/oldnumeric/tests/test_regression.py
+++ b/numpy/oldnumeric/tests/test_regression.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import *
rlevel = 1
diff --git a/numpy/oldnumeric/typeconv.py b/numpy/oldnumeric/typeconv.py
index 4e203d4ae..c4a8c4385 100644
--- a/numpy/oldnumeric/typeconv.py
+++ b/numpy/oldnumeric/typeconv.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
__all__ = ['oldtype2dtype', 'convtypecode', 'convtypecode2', 'oldtypecodes']
import numpy as np
diff --git a/numpy/oldnumeric/ufuncs.py b/numpy/oldnumeric/ufuncs.py
index c26050f55..4ab43cb1f 100644
--- a/numpy/oldnumeric/ufuncs.py
+++ b/numpy/oldnumeric/ufuncs.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
__all__ = ['less', 'cosh', 'arcsinh', 'add', 'ceil', 'arctan2', 'floor_divide',
'fmod', 'hypot', 'logical_and', 'power', 'sinh', 'remainder', 'cos',
'equal', 'arccos', 'less_equal', 'divide', 'bitwise_or',
diff --git a/numpy/oldnumeric/user_array.py b/numpy/oldnumeric/user_array.py
index 375c4013b..e5f3ecd01 100644
--- a/numpy/oldnumeric/user_array.py
+++ b/numpy/oldnumeric/user_array.py
@@ -1,4 +1,4 @@
-
+from __future__ import division, absolute_import, print_function
from numpy.oldnumeric import *
from numpy.lib.user_array import container as UserArray
diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py
index 8e06fa171..e9ca387c3 100644
--- a/numpy/polynomial/__init__.py
+++ b/numpy/polynomial/__init__.py
@@ -13,14 +13,16 @@ implemented as operations on the coefficients. Additional (module-specific)
information can be found in the docstring for the module of interest.
"""
+from __future__ import division, absolute_import, print_function
+
import warnings
-from polynomial import Polynomial
-from chebyshev import Chebyshev
-from legendre import Legendre
-from hermite import Hermite
-from hermite_e import HermiteE
-from laguerre import Laguerre
+from .polynomial import Polynomial
+from .chebyshev import Chebyshev
+from .legendre import Legendre
+from .hermite import Hermite
+from .hermite_e import HermiteE
+from .laguerre import Laguerre
from numpy.testing import Tester
test = Tester().test
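
Every `from polynomial import ...` becomes `from .polynomial import ...` because absolute_import disables Python 2's implicit relative lookup; on Python 3 the old spelling already fails. A self-contained sketch that builds a throwaway package to show the effect (all names hypothetical):

    import os, sys, tempfile, textwrap

    root = tempfile.mkdtemp()
    os.mkdir(os.path.join(root, "demo"))
    with open(os.path.join(root, "demo", "util.py"), "w") as f:
        f.write("x = 1\n")
    with open(os.path.join(root, "demo", "__init__.py"), "w") as f:
        f.write(textwrap.dedent("""\
            from __future__ import absolute_import
            from . import util      # explicit relative import still works
            # import util           # ImportError once absolute_import is on
        """))
    sys.path.insert(0, root)
    import demo
    print(demo.util.x)
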
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index 00cac2527..db1b637fd 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -85,13 +85,13 @@ References
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.linalg as la
-import polyutils as pu
+from . import polyutils as pu
import warnings
-from polytemplate import polytemplate
+from .polytemplate import polytemplate
__all__ = ['chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline',
'chebadd', 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow',
@@ -416,7 +416,7 @@ def cheb2poly(c) :
array([ -2., -8., 4., 12.])
"""
- from polynomial import polyadd, polysub, polymulx
+ from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
@@ -878,7 +878,7 @@ def chebder(c, m=1, scl=1, axis=0) :
Parameters
----------
- c: array_like
+ c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
@@ -1437,7 +1437,7 @@ def chebvander(x, deg) :
Returns
-------
- vander: ndarray
+ vander : ndarray
The pseudo Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where The last index is the degree of the
corresponding Chebyshev polynomial. The dtype will be the same as
@@ -1742,8 +1742,14 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None):
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
- # scale the design matrix and solve the least squares equation
- scl = np.sqrt((lhs*lhs).sum(1))
+ # Determine the norms of the design matrix columns.
+ if issubclass(lhs.dtype.type, np.complexfloating):
+ scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+ else:
+ scl = np.sqrt(np.square(lhs).sum(1))
+ scl[scl == 0] = 1
+
+ # Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
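
The new scaling block, repeated verbatim in hermfit, hermefit, lagfit, legfit, and polyfit below, fixes two problems: `(lhs*lhs).sum(1)` squares complex entries instead of summing |z|**2, and an all-zero row used to inject a divide-by-zero. A sketch of the logic in isolation (the helper name is hypothetical):

    import numpy as np

    def design_norms(lhs):
        # Rows of lhs are basis functions sampled at the data points, so
        # these norms scale the columns of the transposed system.
        if issubclass(lhs.dtype.type, np.complexfloating):
            scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
        else:
            scl = np.sqrt(np.square(lhs).sum(1))
        scl[scl == 0] = 1              # leave zero rows unscaled
        return scl

    lhs = np.array([[1.0, 2.0, 2.0],
                    [1j, 1j, 0.0]])
    print(design_norms(lhs))           # -> [ 3.          1.41421356]
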
@@ -1789,7 +1795,7 @@ def chebcompanion(c):
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
- return np.array(-c[0]/c[1])
+ return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
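
The degree-1 branch previously returned a 0-d array, which np.linalg.eigvals rejects; wrapping the value as a (1, 1) matrix keeps the root-finding path uniform. The same fix recurs in the other companion functions below. For example:

    import numpy as np

    c = np.array([3.0, 2.0])           # series 3 + 2*x, root at -1.5
    comp = np.array([[-c[0] / c[1]]])  # shape (1, 1) rather than ()
    print(np.linalg.eigvals(comp))     # -> [-1.5]
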
@@ -2006,4 +2012,4 @@ def chebpts2(npts):
# Chebyshev series class
#
-exec polytemplate.substitute(name='Chebyshev', nick='cheb', domain='[-1,1]')
+exec(polytemplate.substitute(name='Chebyshev', nick='cheb', domain='[-1,1]'))
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index 0b637f40a..13b9e6845 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -57,13 +57,13 @@ See also
`numpy.polynomial`
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.linalg as la
-import polyutils as pu
+from . import polyutils as pu
import warnings
-from polytemplate import polytemplate
+from .polytemplate import polytemplate
__all__ = ['hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline',
'hermadd', 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow',
@@ -160,7 +160,7 @@ def herm2poly(c) :
array([ 0., 1., 2., 3.])
"""
- from polynomial import polyadd, polysub, polymulx
+ from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
@@ -1201,7 +1201,7 @@ def hermvander(x, deg) :
Returns
-------
- vander: ndarray
+ vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where The last index is the degree of the
corresponding Hermite polynomial. The dtype will be the same as
@@ -1519,8 +1519,14 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None):
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
- # scale the design matrix and solve the least squares equation
- scl = np.sqrt((lhs*lhs).sum(1))
+ # Determine the norms of the design matrix columns.
+ if issubclass(lhs.dtype.type, np.complexfloating):
+ scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+ else:
+ scl = np.sqrt(np.square(lhs).sum(1))
+ scl[scl == 0] = 1
+
+ # Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
@@ -1567,7 +1573,7 @@ def hermcompanion(c):
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
- return np.array(-.5*c[0]/c[1])
+ return np.array([[-.5*c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
@@ -1741,4 +1747,4 @@ def hermweight(x):
# Hermite series class
#
-exec polytemplate.substitute(name='Hermite', nick='herm', domain='[-1,1]')
+exec(polytemplate.substitute(name='Hermite', nick='herm', domain='[-1,1]'))
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index c5abe03ca..9f10403dd 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -57,13 +57,13 @@ See also
`numpy.polynomial`
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.linalg as la
-import polyutils as pu
+from . import polyutils as pu
import warnings
-from polytemplate import polytemplate
+from .polytemplate import polytemplate
__all__ = ['hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline',
'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', 'hermpow',
@@ -160,7 +160,7 @@ def herme2poly(c) :
array([ 0., 1., 2., 3.])
"""
- from polynomial import polyadd, polysub, polymulx
+ from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
@@ -646,7 +646,7 @@ def hermeder(c, m=1, scl=1, axis=0) :
Parameters
----------
- c: array_like
+ c : array_like
Array of Hermite_e series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
@@ -1198,7 +1198,7 @@ def hermevander(x, deg) :
Returns
-------
- vander: ndarray
+ vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where The last index is the degree of the
corresponding HermiteE polynomial. The dtype will be the same as
@@ -1515,8 +1515,14 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None):
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
- # scale the design matrix and solve the least squares equation
- scl = np.sqrt((lhs*lhs).sum(1))
+ # Determine the norms of the design matrix columns.
+ if issubclass(lhs.dtype.type, np.complexfloating):
+ scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+ else:
+ scl = np.sqrt(np.square(lhs).sum(1))
+ scl[scl == 0] = 1
+
+ # Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
@@ -1564,7 +1570,7 @@ def hermecompanion(c):
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
- return np.array(-c[0]/c[1])
+ return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
@@ -1737,4 +1743,4 @@ def hermeweight(x):
# HermiteE series class
#
-exec polytemplate.substitute(name='HermiteE', nick='herme', domain='[-1,1]')
+exec(polytemplate.substitute(name='HermiteE', nick='herme', domain='[-1,1]'))
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index 3533343b0..ea805e146 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -57,13 +57,13 @@ See also
`numpy.polynomial`
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.linalg as la
-import polyutils as pu
+from . import polyutils as pu
import warnings
-from polytemplate import polytemplate
+from .polytemplate import polytemplate
__all__ = ['lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline',
'lagadd', 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow',
@@ -159,7 +159,7 @@ def lag2poly(c) :
array([ 0., 1., 2., 3.])
"""
- from polynomial import polyadd, polysub, polymulx
+ from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
@@ -644,7 +644,7 @@ def lagder(c, m=1, scl=1, axis=0) :
Parameters
----------
- c: array_like
+ c : array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
@@ -1201,7 +1201,7 @@ def lagvander(x, deg) :
Returns
-------
- vander: ndarray
+ vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where The last index is the degree of the
corresponding Laguerre polynomial. The dtype will be the same as
@@ -1518,8 +1518,14 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None):
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
- # scale the design matrix and solve the least squares equation
- scl = np.sqrt((lhs*lhs).sum(1))
+ # Determine the norms of the design matrix columns.
+ if issubclass(lhs.dtype.type, np.complexfloating):
+ scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+ else:
+ scl = np.sqrt(np.square(lhs).sum(1))
+ scl[scl == 0] = 1
+
+ # Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
@@ -1565,7 +1571,7 @@ def lagcompanion(c):
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
- return np.array(1 + c[0]/c[1])
+ return np.array([[1 + c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
@@ -1733,4 +1739,4 @@ def lagweight(x):
# Laguerre series class
#
-exec polytemplate.substitute(name='Laguerre', nick='lag', domain='[-1,1]')
+exec(polytemplate.substitute(name='Laguerre', nick='lag', domain='[-1,1]'))
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index 1216e29f4..c7a1f2dd2 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -81,13 +81,13 @@ numpy.polynomial.hermite
numpy.polynomial.hermite_e
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.linalg as la
-import polyutils as pu
+from . import polyutils as pu
import warnings
-from polytemplate import polytemplate
+from .polytemplate import polytemplate
__all__ = ['legzero', 'legone', 'legx', 'legdomain', 'legline',
'legadd', 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval',
@@ -191,7 +191,7 @@ def leg2poly(c) :
"""
- from polynomial import polyadd, polysub, polymulx
+ from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
@@ -1238,7 +1238,7 @@ def legvander(x, deg) :
Returns
-------
- vander: ndarray
+ vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where The last index is the degree of the
corresponding Legendre polynomial. The dtype will be the same as
@@ -1543,8 +1543,14 @@ def legfit(x, y, deg, rcond=None, full=False, w=None):
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
- # scale the design matrix and solve the least squares equation
- scl = np.sqrt((lhs*lhs).sum(1))
+ # Determine the norms of the design matrix columns.
+ if issubclass(lhs.dtype.type, np.complexfloating):
+ scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+ else:
+ scl = np.sqrt(np.square(lhs).sum(1))
+ scl[scl == 0] = 1
+
+ # Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
@@ -1590,7 +1596,7 @@ def legcompanion(c):
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
- return np.array(-c[0]/c[1])
+ return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
@@ -1759,4 +1765,4 @@ def legweight(x):
# Legendre series class
#
-exec polytemplate.substitute(name='Legendre', nick='leg', domain='[-1,1]')
+exec(polytemplate.substitute(name='Legendre', nick='leg', domain='[-1,1]'))
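
The final hunk in each of these modules rewrites the Python 2 exec statement
as a call, the one spelling both major versions accept: the statement form is
a SyntaxError under Python 3, while the call form is already valid in
Python 2. A toy illustration with a stand-in template (Demo and dm are
made-up names, not part of the numpy source):

    from __future__ import print_function
    import string

    template = string.Template('class $name(object):\n    nick = "$nick"\n')

    # exec as a function call runs under Python 2 and 3 alike.
    exec(template.substitute(name='Demo', nick='dm'))
    print(Demo.nick)   # dm
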
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index 324bec9c0..0b044e8e8 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -39,6 +39,7 @@ Misc Functions
- `polyvander` -- Vandermonde-like matrix for powers.
- `polyvander2d` -- Vandermonde-like matrix for 2D power series.
- `polyvander3d` -- Vandermonde-like matrix for 3D power series.
+- `polycompanion` -- companion matrix in power series form.
- `polyfit` -- least-squares fit returning a polynomial.
- `polytrim` -- trim leading coefficients from a polynomial.
- `polyline` -- polynomial representing given straight line.
@@ -52,7 +53,7 @@ See also
`numpy.polynomial`
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
__all__ = ['polyzero', 'polyone', 'polyx', 'polydomain', 'polyline',
'polyadd', 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow',
@@ -62,9 +63,9 @@ __all__ = ['polyzero', 'polyone', 'polyx', 'polydomain', 'polyline',
import numpy as np
import numpy.linalg as la
-import polyutils as pu
+from . import polyutils as pu
import warnings
-from polytemplate import polytemplate
+from .polytemplate import polytemplate
polytrim = pu.trimcoef
@@ -485,7 +486,7 @@ def polyder(c, m=1, scl=1, axis=0):
Parameters
----------
- c: array_like
+ c : array_like
Array of polynomial coefficients. If c is multidimensional the
different axes correspond to different variables with the degree
in each axis given by the corresponding index.
@@ -1365,8 +1366,14 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None):
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
- # scale the design matrix and solve the least squares equation
- scl = np.sqrt((lhs*lhs).sum(1))
+ # Determine the norms of the design matrix columns.
+ if issubclass(lhs.dtype.type, np.complexfloating):
+ scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+ else:
+ scl = np.sqrt(np.square(lhs).sum(1))
+ scl[scl == 0] = 1
+
+ # Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
@@ -1411,7 +1418,7 @@ def polycompanion(c):
if len(c) < 2 :
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
- return np.array(-c[0]/c[1])
+ return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
@@ -1483,4 +1490,4 @@ def polyroots(c):
# polynomial class
#
-exec polytemplate.substitute(name='Polynomial', nick='poly', domain='[-1,1]')
+exec(polytemplate.substitute(name='Polynomial', nick='poly', domain='[-1,1]'))
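
The degree-1 branch of polycompanion (and of the other companion functions
patched above) used to return a 0-d array, which is not a matrix and trips up
consumers such as the eigenvalue-based root finders. Returning a 1x1 array
keeps the shape contract (n, n) for every degree, which is exactly what the
new TestCompanion.test_dimensions cases added below assert. A quick sketch:

    import numpy as np

    c = np.array([1.0, 2.0])            # the series 1 + 2*x, root at -0.5

    old = np.array(-c[0]/c[1])          # shape (): a 0-d scalar array
    new = np.array([[-c[0]/c[1]]])      # shape (1, 1): a proper companion matrix

    print(old.shape, new.shape)         # () (1, 1)
    print(np.linalg.eigvals(new))       # [-0.5]; eigvals rejects 0-d input
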
diff --git a/numpy/polynomial/polytemplate.py b/numpy/polynomial/polytemplate.py
index bb5effc0e..0505ba5f0 100644
--- a/numpy/polynomial/polytemplate.py
+++ b/numpy/polynomial/polytemplate.py
@@ -9,19 +9,16 @@ creating additional specific polynomial classes (e.g., Legendre, Jacobi,
etc.) in the future, such that all these classes will have a common API.
"""
+from __future__ import division, absolute_import, print_function
+
import string
import sys
-if sys.version_info[0] >= 3:
- rel_import = "from . import"
-else:
- rel_import = "import"
-
polytemplate = string.Template('''
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import warnings
-REL_IMPORT polyutils as pu
+from . import polyutils as pu
class $name(pu.PolyBase) :
"""A $name series class.
@@ -809,6 +806,13 @@ class $name(pu.PolyBase) :
----------
roots : array_like
List of roots.
+ domain : {array_like, None}, optional
+ Domain for the resulting instance of $name. If None the domain
+ is the interval from the smallest root to the largest. The
+ default is $domain.
+ window : array_like, optional
+ Window for the resulting instance of $name. The default value
+ is $domain.
Returns
-------
@@ -820,10 +824,13 @@ class $name(pu.PolyBase) :
${nick}fromroots : equivalent function
"""
+ [roots] = pu.as_series([roots], trim=False)
if domain is None :
domain = pu.getdomain(roots)
- rnew = pu.mapdomain(roots, domain, window)
- coef = ${nick}fromroots(rnew)
+ deg = len(roots)
+ off, scl = pu.mapparms(domain, window)
+ rnew = off + scl*roots
+ coef = ${nick}fromroots(rnew) / scl**deg
return $name(coef, domain=domain, window=window)
@staticmethod
@@ -916,4 +923,4 @@ class $name(pu.PolyBase) :
"""
return series.convert(domain, $name, window)
-'''.replace('REL_IMPORT', rel_import))
+''')
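
The fromroots rewrite in polytemplate maps the roots through the affine
parameters off, scl of the domain-to-window map and divides the coefficients
by scl**deg, instead of mapping the roots and leaving the scale alone. With
rnew = off + scl*r, evaluating the new series at the mapped variable gives
prod(off + scl*x - rnew_i)/scl**deg = prod(x - r_i), so the resulting
instance is monic in the original variable, which is what the updated
test_classes check verifies. A sketch of the identity using the power-basis
functions (the sample roots, domain, and window are arbitrary illustrations):

    import numpy as np
    from numpy.polynomial import polynomial as P
    from numpy.polynomial import polyutils as pu

    roots = np.array([0.5, 2.0, -1.0])
    domain, window = [-3, 3], [-1, 1]

    off, scl = pu.mapparms(domain, window)     # the map x -> off + scl*x
    coef = P.polyfromroots(off + scl*roots) / scl**len(roots)

    x = np.linspace(-3, 3, 7)
    lhs = P.polyval(off + scl*x, coef)         # series evaluated in the window
    rhs = np.prod(x[:, None] - roots, axis=1)  # the monic prod(x - r_i)
    print(np.allclose(lhs, rhs))               # True
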
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py
index 8861328c4..63743bb40 100644
--- a/numpy/polynomial/polyutils.py
+++ b/numpy/polynomial/polyutils.py
@@ -31,7 +31,7 @@ Functions
- `mapparms` -- parameters of the linear map between domains.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
__all__ = ['RankWarning', 'PolyError', 'PolyDomainError', 'PolyBase',
'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain',
@@ -70,16 +70,6 @@ class PolyBase(object) :
pass
#
-# We need the any function for python < 2.5
-#
-if sys.version_info[:2] < (2,5) :
- def any(iterable) :
- for element in iterable:
- if element :
- return True
- return False
-
-#
# Helper functions to convert inputs to 1-D arrays
#
def trimseq(seq) :
@@ -138,7 +128,7 @@ def as_series(alist, trim=True) :
Raises
------
- ValueError :
+ ValueError
Raised when `as_series` cannot convert its input to 1-d arrays, or at
least one of the resulting arrays is empty.
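
The polyutils hunk above also drops the Python < 2.5 backport of any(); the
builtin has shipped since 2.5, so the shim is dead code once 2.4 support is
gone:

    print(any([0, '', None, 3]))   # True: at least one truthy element
    print(any([]))                 # False: vacuously, for the empty iterable
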
diff --git a/numpy/polynomial/setup.py b/numpy/polynomial/setup.py
index 173fd126c..ab2ff2be8 100644
--- a/numpy/polynomial/setup.py
+++ b/numpy/polynomial/setup.py
@@ -1,4 +1,4 @@
-
+from __future__ import division, print_function
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py
index 331cc17bc..367d81f58 100644
--- a/numpy/polynomial/tests/test_chebyshev.py
+++ b/numpy/polynomial/tests/test_chebyshev.py
@@ -1,7 +1,7 @@
"""Tests for chebyshev module.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.chebyshev as cheb
@@ -265,7 +265,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = cheb.chebint(tgt, m=1, k=[k])
- res = cheb.chebint(pol, m=j, k=range(j))
+ res = cheb.chebint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
@@ -275,7 +275,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1)
- res = cheb.chebint(pol, m=j, k=range(j), lbnd=-1)
+ res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
@@ -285,7 +285,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = cheb.chebint(tgt, m=1, k=[k], scl=2)
- res = cheb.chebint(pol, m=j, k=range(j), scl=2)
+ res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_chebint_axis(self):
@@ -435,6 +435,26 @@ class TestFitting(TestCase):
#
wcoef2d = cheb.chebfit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
+ # test the column scaling with complex x values whose
+ # squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1])
+
+
+class TestCompanion(TestCase):
+
+ def test_raises(self):
+ assert_raises(ValueError, cheb.chebcompanion, [])
+ assert_raises(ValueError, cheb.chebcompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(cheb.chebcompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5)
+
class TestGauss(TestCase):
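
The new TestCompanion classes added here and in the sibling test modules
below pin down both companion-matrix fixes at once: the (i, i) shape for
every degree and the root stored in the 1x1 degree-1 matrix. The expected
linear roots follow directly from the degree-1 basis functions T_1(x) = x,
H_1(x) = 2x and L_1(x) = 1 - x:

    import numpy.polynomial.chebyshev as cheb
    import numpy.polynomial.hermite as herm
    import numpy.polynomial.laguerre as lag

    # 1 + 2*T_1(x) = 1 + 2x  -> root -0.5
    # 1 + 2*H_1(x) = 1 + 4x  -> root -0.25
    # 1 + 2*L_1(x) = 3 - 2x  -> root  1.5
    print(cheb.chebcompanion([1, 2])[0, 0])   # -0.5
    print(herm.hermcompanion([1, 2])[0, 0])   # -0.25
    print(lag.lagcompanion([1, 2])[0, 0])     # 1.5

The k=list(range(j)) changes in the integration tests are a straight
Python 3 port: range returns a lazy object there, and the tests want a
concrete list of integration constants.
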
diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py
index fb0d359e0..f8910a473 100644
--- a/numpy/polynomial/tests/test_classes.py
+++ b/numpy/polynomial/tests/test_classes.py
@@ -3,7 +3,7 @@
This tests the convert and cast methods of all the polynomial classes.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.polynomial import (
@@ -145,7 +145,9 @@ def check_fromroots(Poly):
assert_almost_equal(p1(r), 0)
# check that polynomial is monic
- p2 = Polynomial.cast(p1, domain=d, window=w)
+ pdom = Polynomial.domain
+ pwin = Polynomial.window
+ p2 = Polynomial.cast(p1, domain=pdom, window=pwin)
assert_almost_equal(p2.coef[-1], 1)
diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py
index b5649a693..327283d0e 100644
--- a/numpy/polynomial/tests/test_hermite.py
+++ b/numpy/polynomial/tests/test_hermite.py
@@ -1,7 +1,7 @@
"""Tests for hermite module.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.hermite as herm
@@ -255,7 +255,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = herm.hermint(tgt, m=1, k=[k])
- res = herm.hermint(pol, m=j, k=range(j))
+ res = herm.hermint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
@@ -265,7 +265,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1)
- res = herm.hermint(pol, m=j, k=range(j), lbnd=-1)
+ res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
@@ -275,7 +275,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = herm.hermint(tgt, m=1, k=[k], scl=2)
- res = herm.hermint(pol, m=j, k=range(j), scl=2)
+ res = herm.hermint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_hermint_axis(self):
@@ -425,6 +425,26 @@ class TestFitting(TestCase):
#
wcoef2d = herm.hermfit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
+ # test the column scaling with complex x values whose
+ # squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])
+
+
+class TestCompanion(TestCase):
+
+ def test_raises(self):
+ assert_raises(ValueError, herm.hermcompanion, [])
+ assert_raises(ValueError, herm.hermcompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(herm.hermcompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
+
class TestGauss(TestCase):
diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py
index 018fe8595..404a46fc7 100644
--- a/numpy/polynomial/tests/test_hermite_e.py
+++ b/numpy/polynomial/tests/test_hermite_e.py
@@ -1,7 +1,7 @@
"""Tests for hermite_e module.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.hermite_e as herme
@@ -252,7 +252,7 @@ class TestIntegral(TestCase):
tgt = pol[:]
for k in range(j) :
tgt = herme.hermeint(tgt, m=1, k=[k])
- res = herme.hermeint(pol, m=j, k=range(j))
+ res = herme.hermeint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
@@ -262,7 +262,7 @@ class TestIntegral(TestCase):
tgt = pol[:]
for k in range(j) :
tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1)
- res = herme.hermeint(pol, m=j, k=range(j), lbnd=-1)
+ res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
@@ -272,7 +272,7 @@ class TestIntegral(TestCase):
tgt = pol[:]
for k in range(j) :
tgt = herme.hermeint(tgt, m=1, k=[k], scl=2)
- res = herme.hermeint(pol, m=j, k=range(j), scl=2)
+ res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeint_axis(self):
@@ -422,6 +422,26 @@ class TestFitting(TestCase):
#
wcoef2d = herme.hermefit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
+ # test the column scaling with complex x values whose
+ # squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(herme.hermefit(x, x, 1), [0, 1])
+
+
+class TestCompanion(TestCase):
+
+ def test_raises(self):
+ assert_raises(ValueError, herme.hermecompanion, [])
+ assert_raises(ValueError, herme.hermecompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(herme.hermecompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
+
class TestGauss(TestCase):
diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py
index 14fafe37d..38fcce299 100644
--- a/numpy/polynomial/tests/test_laguerre.py
+++ b/numpy/polynomial/tests/test_laguerre.py
@@ -1,7 +1,7 @@
"""Tests for laguerre module.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.laguerre as lag
@@ -250,7 +250,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = lag.lagint(tgt, m=1, k=[k])
- res = lag.lagint(pol, m=j, k=range(j))
+ res = lag.lagint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
@@ -260,7 +260,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1)
- res = lag.lagint(pol, m=j, k=range(j), lbnd=-1)
+ res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
@@ -270,7 +270,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = lag.lagint(tgt, m=1, k=[k], scl=2)
- res = lag.lagint(pol, m=j, k=range(j), scl=2)
+ res = lag.lagint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_lagint_axis(self):
@@ -420,6 +420,26 @@ class TestFitting(TestCase):
#
wcoef2d = lag.lagfit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
+ # test the column scaling with complex x values whose
+ # squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(lag.lagfit(x, x, 1), [1, -1])
+
+
+class TestCompanion(TestCase):
+
+ def test_raises(self):
+ assert_raises(ValueError, lag.lagcompanion, [])
+ assert_raises(ValueError, lag.lagcompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(lag.lagcompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5)
+
class TestGauss(TestCase):
diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py
index cdfaa96f1..379bdee31 100644
--- a/numpy/polynomial/tests/test_legendre.py
+++ b/numpy/polynomial/tests/test_legendre.py
@@ -1,7 +1,7 @@
"""Tests for legendre module.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.legendre as leg
@@ -254,7 +254,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = leg.legint(tgt, m=1, k=[k])
- res = leg.legint(pol, m=j, k=range(j))
+ res = leg.legint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
@@ -264,7 +264,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1)
- res = leg.legint(pol, m=j, k=range(j), lbnd=-1)
+ res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
@@ -274,7 +274,7 @@ class TestIntegral(TestCase) :
tgt = pol[:]
for k in range(j) :
tgt = leg.legint(tgt, m=1, k=[k], scl=2)
- res = leg.legint(pol, m=j, k=range(j), scl=2)
+ res = leg.legint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_legint_axis(self):
@@ -423,6 +423,26 @@ class TestFitting(TestCase):
#
wcoef2d = leg.legfit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
+ # test the column scaling with complex x values whose
+ # squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(leg.legfit(x, x, 1), [0, 1])
+
+
+class TestCompanion(TestCase):
+
+ def test_raises(self):
+ assert_raises(ValueError, leg.legcompanion, [])
+ assert_raises(ValueError, leg.legcompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(leg.legcompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
+
class TestGauss(TestCase):
diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py
index bae711cbf..583872978 100644
--- a/numpy/polynomial/tests/test_polynomial.py
+++ b/numpy/polynomial/tests/test_polynomial.py
@@ -1,7 +1,7 @@
"""Tests for polynomial module.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.polynomial as poly
@@ -253,7 +253,7 @@ class TestIntegral(TestCase):
tgt = pol[:]
for k in range(j) :
tgt = poly.polyint(tgt, m=1, k=[k])
- res = poly.polyint(pol, m=j, k=range(j))
+ res = poly.polyint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
@@ -263,7 +263,7 @@ class TestIntegral(TestCase):
tgt = pol[:]
for k in range(j) :
tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1)
- res = poly.polyint(pol, m=j, k=range(j), lbnd=-1)
+ res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
@@ -273,7 +273,7 @@ class TestIntegral(TestCase):
tgt = pol[:]
for k in range(j) :
tgt = poly.polyint(tgt, m=1, k=[k], scl=2)
- res = poly.polyint(pol, m=j, k=range(j), scl=2)
+ res = poly.polyint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_polyint_axis(self):
@@ -383,6 +383,21 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
+class TestCompanion(TestCase):
+
+ def test_raises(self):
+ assert_raises(ValueError, poly.polycompanion, [])
+ assert_raises(ValueError, poly.polycompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(poly.polycompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(poly.polycompanion([1, 2])[0, 0] == -.5)
+
+
class TestMisc(TestCase) :
def test_polyfromroots(self) :
@@ -440,6 +455,11 @@ class TestMisc(TestCase) :
#
wcoef2d = poly.polyfit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
+ # test the column scaling with complex x values whose
+ # squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
+
def test_polytrim(self) :
coef = [2, -1, 1, 0]
diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py
index 14bf8bb78..93c742abd 100644
--- a/numpy/polynomial/tests/test_polyutils.py
+++ b/numpy/polynomial/tests/test_polyutils.py
@@ -1,7 +1,7 @@
"""Tests for polyutils module.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.polyutils as pu
diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py
index 9803d931c..96a2ea7d4 100644
--- a/numpy/polynomial/tests/test_printing.py
+++ b/numpy/polynomial/tests/test_printing.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import numpy.polynomial as poly
from numpy.testing import TestCase, run_module_suite, assert_
diff --git a/numpy/random/SConscript b/numpy/random/SConscript
deleted file mode 100644
index a2acb0a66..000000000
--- a/numpy/random/SConscript
+++ /dev/null
@@ -1,50 +0,0 @@
-# Last Change: Wed Nov 19 09:00 PM 2008 J
-# vim:syntax=python
-import os
-
-from numscons import GetNumpyEnvironment, scons_get_mathlib
-
-from setup import needs_mingw_ftime_workaround
-
-def CheckWincrypt(context):
- from copy import deepcopy
- src = """\
-/* check to see if _WIN32 is defined */
-int main(int argc, char *argv[])
-{
-#ifdef _WIN32
- return 0;
-#else
- return 1;
-#endif
-}
-"""
-
- context.Message("Checking if using wincrypt ... ")
- st = context.env.TryRun(src, '.C')
- if st[0] == 0:
- context.Result('No')
- else:
- context.Result('Yes')
- return st[0]
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-mlib = scons_get_mathlib(env)
-env.AppendUnique(LIBS = mlib)
-
-# On windows, see if we should use Advapi32
-if os.name == 'nt':
- config = env.NumpyConfigure(custom_tests = {'CheckWincrypt' : CheckWincrypt})
- if config.CheckWincrypt:
- config.env.AppendUnique(LIBS = 'Advapi32')
- config.Finish()
-
-if needs_mingw_ftime_workaround():
- env.Append(CPPDEFINES=['NPY_NEEDS_MINGW_TIME_WORKAROUND'])
-
-sources = [os.path.join('mtrand', x) for x in
- ['mtrand.c', 'randomkit.c', 'initarray.c', 'distributions.c']]
-
-# XXX: Pyrex dependency
-env.NumpyPythonExtension('mtrand', source = sources)
diff --git a/numpy/random/SConstruct b/numpy/random/SConstruct
deleted file mode 100644
index a377d8391..000000000
--- a/numpy/random/SConstruct
+++ /dev/null
@@ -1,2 +0,0 @@
-from numscons import GetInitEnvironment
-GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript')
diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py
index 34e57ba2c..80723cec6 100644
--- a/numpy/random/__init__.py
+++ b/numpy/random/__init__.py
@@ -86,19 +86,17 @@ set_state Set state of generator.
==================== =========================================================
"""
-# To get sub-modules
-from info import __doc__, __all__
+from __future__ import division, absolute_import, print_function
import warnings
-from numpy.testing.utils import WarningManager
-warn_ctx = WarningManager()
-warn_ctx.__enter__()
-try:
+# To get sub-modules
+from .info import __doc__, __all__
+
+
+with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
- from mtrand import *
-finally:
- warn_ctx.__exit__()
+ from .mtrand import *
# Some aliases:
ranf = random = sample = random_sample
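
The random package init swaps the hand-rolled WarningManager try/finally for
the standard library's warnings.catch_warnings context manager (available
since Python 2.6), which saves and restores the global filter state
automatically on exit. A minimal sketch of the pattern, with the guarded
import replaced by a stand-in warning:

    from __future__ import print_function
    import warnings

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
        # inside the block the filter applies ...
        warnings.warn("numpy.ndarray size changed", stacklevel=2)   # silenced

    # ... and outside, the previous filter state is back in force.
    print("filters restored")
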
diff --git a/numpy/random/info.py b/numpy/random/info.py
index 6139e5784..970522a95 100644
--- a/numpy/random/info.py
+++ b/numpy/random/info.py
@@ -82,6 +82,7 @@ set_state Set state of generator.
==================== =========================================================
"""
+from __future__ import division, absolute_import, print_function
depends = ['core']
diff --git a/numpy/random/mtrand/generate_mtrand_c.py b/numpy/random/mtrand/generate_mtrand_c.py
index a37fc7266..ec935e6dd 100644
--- a/numpy/random/mtrand/generate_mtrand_c.py
+++ b/numpy/random/mtrand/generate_mtrand_c.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, absolute_import, print_function
+
import sys
import re
import os
@@ -31,9 +33,8 @@ if __name__ == '__main__':
line = ''
m = re.search(unused_funcs_str, line)
if m:
- print >>sys.stderr, \
- "%s was declared unused, but is used at line %d" % (m.group(),
- linenum+1)
+ print("%s was declared unused, but is used at line %d" % (m.group(),
+ linenum+1), file=sys.stderr)
line = linepat.sub(r'/* "mtrand.pyx":', line)
processed.write(line)
mtrand_c.close()
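
generate_mtrand_c.py gets the same treatment as the rest of the tree: the
Python 2 print-to-stream statement becomes a print() call, which the
__future__ import makes legal under Python 2.6+ as well. A minimal sketch
(the name and line number below are stand-in values for illustration):

    from __future__ import print_function
    import sys

    # Py2 only:  print >>sys.stderr, "%s ... line %d" % (name, num)
    # Py2 & Py3: the function form
    name, num = "rk_example", 42
    print("%s was declared unused, but is used at line %d" % (name, num),
          file=sys.stderr)
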
diff --git a/numpy/random/mtrand/mtrand.c b/numpy/random/mtrand/mtrand.c
index 33c26ba13..b1c92e570 100644
--- a/numpy/random/mtrand/mtrand.c
+++ b/numpy/random/mtrand/mtrand.c
@@ -1,6 +1,18 @@
-/* Generated by Cython 0.17 on Sun Sep 2 09:46:21 2012 */
+/* Generated by Cython 0.19 on Fri Jun 28 16:46:58 2013 */
#define PY_SSIZE_T_CLEAN
+#ifndef CYTHON_USE_PYLONG_INTERNALS
+#ifdef PYLONG_BITS_IN_DIGIT
+#define CYTHON_USE_PYLONG_INTERNALS 0
+#else
+#include "pyconfig.h"
+#ifdef PYLONG_BITS_IN_DIGIT
+#define CYTHON_USE_PYLONG_INTERNALS 1
+#else
+#define CYTHON_USE_PYLONG_INTERNALS 0
+#endif
+#endif
+#endif
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
@@ -53,12 +65,15 @@
(PyErr_Format(PyExc_TypeError, \
"expected index value, got %.200s", Py_TYPE(o)->tp_name), \
(PyObject*)0))
- #define PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && !PyComplex_Check(o))
+ #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \
+ !PyComplex_Check(o))
+ #define PyIndex_Check __Pyx_PyIndex_Check
#define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
#define __PYX_BUILD_PY_SSIZE_T "i"
#else
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
+ #define __Pyx_PyIndex_Check PyIndex_Check
#endif
#if PY_VERSION_HEX < 0x02060000
#define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
@@ -113,6 +128,9 @@
#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
+#if PY_VERSION_HEX < 0x02060000
+ #define Py_TPFLAGS_HAVE_VERSION_TAG 0
+#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \
@@ -152,6 +170,14 @@
#define PyBytes_Concat PyString_Concat
#define PyBytes_ConcatAndDel PyString_ConcatAndDel
#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
+ #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
+#else
+ #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \
+ PyString_Check(obj) || PyUnicode_Check(obj))
+ #define __Pyx_PyBaseString_CheckExact(obj) (Py_TYPE(obj) == &PyBaseString_Type)
+#endif
#if PY_VERSION_HEX < 0x02060000
#define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
#define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
@@ -224,6 +250,29 @@
#define __Pyx_NAMESTR(n) (n)
#define __Pyx_DOCSTR(n) (n)
#endif
+#ifndef CYTHON_INLINE
+ #if defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+ /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
+ a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
+ a quiet NaN. */
+ float value;
+ memset(&value, 0xFF, sizeof(value));
+ return value;
+}
+#endif
#if PY_MAJOR_VERSION >= 3
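
The relocated __PYX_NAN fallback above builds a quiet NaN by setting every
byte of a float to 0xFF: the exponent field becomes all ones and the mantissa
is nonzero, which is the IEEE-754 definition of NaN (the sign bit and the set
quiet bit do not change that). The same bit pattern, checked from Python:

    import struct

    value = struct.unpack('<f', b'\xff\xff\xff\xff')[0]   # float32 from 0xFF bytes
    print(value != value)   # True: NaN is the only value unequal to itself
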
@@ -264,21 +313,6 @@
#define CYTHON_WITHOUT_ASSERTIONS
#endif
-
-/* inline attribute */
-#ifndef CYTHON_INLINE
- #if defined(__GNUC__)
- #define CYTHON_INLINE __inline__
- #elif defined(_MSC_VER)
- #define CYTHON_INLINE __inline
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define CYTHON_INLINE inline
- #else
- #define CYTHON_INLINE
- #endif
-#endif
-
-/* unused attribute */
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
@@ -292,30 +326,130 @@
# define CYTHON_UNUSED
# endif
#endif
-
-typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
-
-
-/* Type Conversion Predeclarations */
-
-#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s)
-#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s))
-
+typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
+ const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+#define __Pyx_PyBytes_FromString PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char*);
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+ #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((char*)s)
+#define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((char*)s)
+#define __Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((char*)s)
+#define __Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((char*)s)
+#if PY_MAJOR_VERSION < 3
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
+{
+ const Py_UNICODE *u_end = u;
+ while (*u_end++) ;
+ return u_end - u - 1;
+}
+#else
+#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
+#endif
+#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
+#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
+#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
-
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*);
-
#if CYTHON_COMPILING_IN_CPYTHON
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+static int __Pyx_sys_getdefaultencoding_not_ascii;
+static int __Pyx_init_sys_getdefaultencoding_params() {
+ PyObject* sys = NULL;
+ PyObject* default_encoding = NULL;
+ PyObject* ascii_chars_u = NULL;
+ PyObject* ascii_chars_b = NULL;
+ sys = PyImport_ImportModule("sys");
+ if (sys == NULL) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+ if (default_encoding == NULL) goto bad;
+ if (strcmp(PyBytes_AsString(default_encoding), "ascii") == 0) {
+ __Pyx_sys_getdefaultencoding_not_ascii = 0;
+ } else {
+ const char* default_encoding_c = PyBytes_AS_STRING(default_encoding);
+ char ascii_chars[128];
+ int c;
+ for (c = 0; c < 128; c++) {
+ ascii_chars[c] = c;
+ }
+ __Pyx_sys_getdefaultencoding_not_ascii = 1;
+ ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+ if (ascii_chars_u == NULL) goto bad;
+ ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
+ if (ascii_chars_b == NULL || strncmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "This module compiled with c_string_encoding=ascii, but default encoding '%s' is not a superset of ascii.",
+ default_encoding_c);
+ goto bad;
+ }
+ }
+ Py_XDECREF(sys);
+ Py_XDECREF(default_encoding);
+ Py_XDECREF(ascii_chars_u);
+ Py_XDECREF(ascii_chars_b);
+ return 0;
+bad:
+ Py_XDECREF(sys);
+ Py_XDECREF(default_encoding);
+ Py_XDECREF(ascii_chars_u);
+ Py_XDECREF(ascii_chars_b);
+ return -1;
+}
+#endif
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+static char* __PYX_DEFAULT_STRING_ENCODING;
+static int __Pyx_init_sys_getdefaultencoding_params() {
+ PyObject* sys = NULL;
+ PyObject* default_encoding = NULL;
+ char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (sys == NULL) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+ if (default_encoding == NULL) goto bad;
+ default_encoding_c = PyBytes_AS_STRING(default_encoding);
+ __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c));
+ strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+ Py_DECREF(sys);
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(sys);
+ Py_XDECREF(default_encoding);
+ return -1;
+}
+#endif
+#endif
+
#ifdef __GNUC__
/* Test for GCC > 2.95 */
@@ -330,8 +464,9 @@ static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*);
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
-
+
static PyObject *__pyx_m;
+static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
@@ -430,7 +565,7 @@ typedef long (*__pyx_t_6mtrand_rk_discnmN)(rk_state *, long, long, long);
*/
typedef long (*__pyx_t_6mtrand_rk_discd)(rk_state *, double);
-/* "mtrand.pyx":524
+/* "mtrand.pyx":525
* return sum
*
* cdef class RandomState: # <<<<<<<<<<<<<<
@@ -496,7 +631,24 @@ struct __pyx_obj_6mtrand_RandomState {
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
-static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro))
+ return tp->tp_getattro(obj, attr_name);
+#if PY_MAJOR_VERSION < 3
+ if (likely(tp->tp_getattr))
+ return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
+#endif
+ return PyObject_GetAttr(obj, attr_name);
+}
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/
+
+static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
@@ -512,91 +664,30 @@ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
- PyObject *r;
- if (!j) return NULL;
- r = PyObject_GetItem(o, j);
- Py_DECREF(j);
- return r;
-}
-#define __Pyx_GetItemInt_List(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
- __Pyx_GetItemInt_List_Fast(o, i) : \
- __Pyx_GetItemInt_Generic(o, to_py_func(i)))
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i) {
-#if CYTHON_COMPILING_IN_CPYTHON
- if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
- PyObject *r = PyList_GET_ITEM(o, i);
- Py_INCREF(r);
- return r;
- }
- else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) {
- PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i);
- Py_INCREF(r);
- return r;
- }
- return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
-#else
- return PySequence_GetItem(o, i);
-#endif
-}
-#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
- __Pyx_GetItemInt_Tuple_Fast(o, i) : \
- __Pyx_GetItemInt_Generic(o, to_py_func(i)))
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i) {
-#if CYTHON_COMPILING_IN_CPYTHON
- if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
- PyObject *r = PyTuple_GET_ITEM(o, i);
- Py_INCREF(r);
- return r;
- }
- else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) {
- PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i);
- Py_INCREF(r);
- return r;
- }
- return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
-#else
- return PySequence_GetItem(o, i);
-#endif
-}
-#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
- __Pyx_GetItemInt_Fast(o, i) : \
- __Pyx_GetItemInt_Generic(o, to_py_func(i)))
-static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) {
-#if CYTHON_COMPILING_IN_CPYTHON
- if (PyList_CheckExact(o)) {
- Py_ssize_t n = (likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
- if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) {
- PyObject *r = PyList_GET_ITEM(o, n);
- Py_INCREF(r);
- return r;
- }
- }
- else if (PyTuple_CheckExact(o)) {
- Py_ssize_t n = (likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
- if (likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
- PyObject *r = PyTuple_GET_ITEM(o, n);
- Py_INCREF(r);
- return r;
- }
- } else { /* inlined PySequence_GetItem() */
- PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
- if (likely(m && m->sq_item)) {
- if (unlikely(i < 0) && likely(m->sq_length)) {
- Py_ssize_t l = m->sq_length(o);
- if (unlikely(l < 0)) return NULL;
- i += l;
- }
- return m->sq_item(o, i);
- }
- }
-#else
- if (PySequence_Check(o)) {
- return PySequence_GetItem(o, i);
- }
-#endif
- return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
-}
+#define __Pyx_GetItemInt(o, i, size, to_py_func, is_list, wraparound, boundscheck) \
+ (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_Fast(o, i, is_list, wraparound, boundscheck) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+#define __Pyx_GetItemInt_List(o, i, size, to_py_func, is_list, wraparound, boundscheck) \
+ (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_List_Fast(o, i, wraparound, boundscheck) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func, is_list, wraparound, boundscheck) \
+ (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_GetItemInt_Tuple_Fast(o, i, wraparound, boundscheck) : \
+ __Pyx_GetItemInt_Generic(o, to_py_func(i)))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
+ int is_list, int wraparound, int boundscheck);
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
+ PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
+ PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
+ int has_cstart, int has_cstop, int wraparound);
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
@@ -608,63 +699,79 @@ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
+#define __Pyx_PyObject_DelSlice(obj, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound) \
+ __Pyx_PyObject_SetSlice(obj, (PyObject*)NULL, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound)
+static CYTHON_INLINE int __Pyx_PyObject_SetSlice(
+ PyObject* obj, PyObject* value, Py_ssize_t cstart, Py_ssize_t cstop,
+ PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
+ int has_cstart, int has_cstop, int wraparound);
+
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o,n,NULL)
+static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) {
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_setattro))
+ return tp->tp_setattro(obj, attr_name, value);
+#if PY_MAJOR_VERSION < 3
+ if (likely(tp->tp_setattr))
+ return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value);
+#endif
+ return PyObject_SetAttr(obj, attr_name, value);
+}
+#else
+#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n)
+#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v)
+#endif
+
static CYTHON_INLINE int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed); /*proto*/
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
-#define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \
- __Pyx_SetItemInt_Fast(o, i, v) : \
- __Pyx_SetItemInt_Generic(o, to_py_func(i), v))
-static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
- int r;
- if (!j) return -1;
- r = PyObject_SetItem(o, j, v);
- Py_DECREF(j);
- return r;
-}
-static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) {
#if CYTHON_COMPILING_IN_CPYTHON
- if (PyList_CheckExact(o)) {
- Py_ssize_t n = (likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
- if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) {
- PyObject* old = PyList_GET_ITEM(o, n);
- Py_INCREF(v);
- PyList_SET_ITEM(o, n, v);
- Py_DECREF(old);
- return 1;
- }
- } else { /* inlined PySequence_SetItem() */
- PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
- if (likely(m && m->sq_ass_item)) {
- if (unlikely(i < 0) && likely(m->sq_length)) {
- Py_ssize_t l = m->sq_length(o);
- if (unlikely(l < 0)) return -1;
- i += l;
- }
- return m->sq_ass_item(o, i, v);
- }
+static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
+ PyListObject* L = (PyListObject*) list;
+ Py_ssize_t len = Py_SIZE(list);
+ if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
+ Py_INCREF(x);
+ PyList_SET_ITEM(list, len, x);
+ Py_SIZE(list) = len+1;
+ return 0;
}
+ return PyList_Append(list, x);
+}
#else
-#if CYTHON_COMPILING_IN_PYPY
- if (PySequence_Check(o) && !PyDict_Check(o)) {
+#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
+#endif
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyList_GetSlice(PyObject* src, Py_ssize_t start, Py_ssize_t stop);
+static CYTHON_INLINE PyObject* __Pyx_PyTuple_GetSlice(PyObject* src, Py_ssize_t start, Py_ssize_t stop);
#else
- if (PySequence_Check(o)) {
+#define __Pyx_PyList_GetSlice(seq, start, stop) PySequence_GetSlice(seq, start, stop)
+#define __Pyx_PyTuple_GetSlice(seq, start, stop) PySequence_GetSlice(seq, start, stop)
#endif
- return PySequence_SetItem(o, i, v);
- }
+
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /*proto*/
+
+#ifndef __PYX_FORCE_INIT_THREADS
+ #define __PYX_FORCE_INIT_THREADS 0
#endif
- return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v);
-}
+
+#define __Pyx_SetItemInt(o, i, v, size, to_py_func, is_list, wraparound, boundscheck) \
+ (((size) <= sizeof(Py_ssize_t)) ? \
+ __Pyx_SetItemInt_Fast(o, i, v, is_list, wraparound, boundscheck) : \
+ __Pyx_SetItemInt_Generic(o, to_py_func(i), v))
+static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v);
+static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
+ int is_list, int wraparound, int boundscheck);
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/
static CYTHON_INLINE npy_intp __Pyx_PyInt_from_py_npy_intp(PyObject *);
-static CYTHON_INLINE void __Pyx_RaiseImportError(PyObject *name);
-
static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_intp(npy_intp);
static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *);
@@ -817,155 +924,156 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6mtrand_RandomState *__pyx_v_self, PyObject *__pyx_v_alpha, PyObject *__pyx_v_size); /* proto */
static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtrand_RandomState *__pyx_v_self, PyObject *__pyx_v_x); /* proto */
static PyObject *__pyx_pf_6mtrand_11RandomState_104permutation(struct __pyx_obj_6mtrand_RandomState *__pyx_v_self, PyObject *__pyx_v_x); /* proto */
+static PyObject *__pyx_tp_new_6mtrand_RandomState(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static char __pyx_k_1[] = "size is not compatible with inputs";
static char __pyx_k_9[] = "algorithm must be 'MT19937'";
-static char __pyx_k_11[] = "state must be 624 longs";
-static char __pyx_k_13[] = "low >= high";
-static char __pyx_k_16[] = "a must be greater than 0";
-static char __pyx_k_18[] = "a must be 1-dimensional";
-static char __pyx_k_20[] = "a must be non-empty";
-static char __pyx_k_22[] = "p must be 1-dimensional";
-static char __pyx_k_24[] = "a and p must have same size";
-static char __pyx_k_26[] = "probabilities are not non-negative";
-static char __pyx_k_28[] = "probabilities do not sum to 1";
-static char __pyx_k_30[] = "";
-static char __pyx_k_31[] = "Cannot take a larger sample than ";
-static char __pyx_k_32[] = "population when 'replace=False'";
-static char __pyx_k_33[] = "Fewer non-zero entries in p than size";
-static char __pyx_k_39[] = "scale <= 0";
-static char __pyx_k_42[] = "a <= 0";
-static char __pyx_k_44[] = "b <= 0";
-static char __pyx_k_51[] = "shape <= 0";
-static char __pyx_k_61[] = "dfnum <= 0";
-static char __pyx_k_63[] = "dfden <= 0";
-static char __pyx_k_65[] = "dfnum <= 1";
-static char __pyx_k_68[] = "nonc < 0";
-static char __pyx_k_73[] = "df <= 0";
-static char __pyx_k_77[] = "nonc <= 0";
-static char __pyx_k_79[] = "df <= 1";
-static char __pyx_k_84[] = "kappa < 0";
+static char __pyx_k_13[] = "state must be 624 longs";
+static char __pyx_k_15[] = "low >= high";
+static char __pyx_k_18[] = "a must be 1-dimensional or an integer";
+static char __pyx_k_20[] = "a must be greater than 0";
+static char __pyx_k_22[] = "a must be 1-dimensional";
+static char __pyx_k_24[] = "a must be non-empty";
+static char __pyx_k_26[] = "p must be 1-dimensional";
+static char __pyx_k_28[] = "a and p must have same size";
+static char __pyx_k_30[] = "probabilities are not non-negative";
+static char __pyx_k_32[] = "probabilities do not sum to 1";
+static char __pyx_k_34[] = "Cannot take a larger sample than population when 'replace=False'";
+static char __pyx_k_36[] = "Fewer non-zero entries in p than size";
+static char __pyx_k_44[] = "scale <= 0";
+static char __pyx_k_47[] = "a <= 0";
+static char __pyx_k_49[] = "b <= 0";
+static char __pyx_k_56[] = "shape <= 0";
+static char __pyx_k_66[] = "dfnum <= 0";
+static char __pyx_k_68[] = "dfden <= 0";
+static char __pyx_k_70[] = "dfnum <= 1";
+static char __pyx_k_73[] = "nonc < 0";
+static char __pyx_k_78[] = "df <= 0";
+static char __pyx_k_82[] = "nonc <= 0";
+static char __pyx_k_84[] = "df <= 1";
+static char __pyx_k_89[] = "kappa < 0";
static char __pyx_k__a[] = "a";
static char __pyx_k__b[] = "b";
static char __pyx_k__f[] = "f";
static char __pyx_k__l[] = "l";
static char __pyx_k__n[] = "n";
static char __pyx_k__p[] = "p";
-static char __pyx_k_107[] = "sigma <= 0";
-static char __pyx_k_109[] = "sigma <= 0.0";
-static char __pyx_k_113[] = "scale <= 0.0";
-static char __pyx_k_115[] = "mean <= 0";
-static char __pyx_k_118[] = "mean <= 0.0";
-static char __pyx_k_121[] = "left > mode";
-static char __pyx_k_123[] = "mode > right";
-static char __pyx_k_125[] = "left == right";
-static char __pyx_k_130[] = "n <= 0";
-static char __pyx_k_132[] = "p < 0";
-static char __pyx_k_134[] = "p > 1";
-static char __pyx_k_146[] = "lam < 0";
-static char __pyx_k_148[] = "lam value too large";
-static char __pyx_k_151[] = "lam value too large.";
-static char __pyx_k_153[] = "a <= 1.0";
-static char __pyx_k_156[] = "p < 0.0";
-static char __pyx_k_158[] = "p > 1.0";
-static char __pyx_k_162[] = "ngood < 1";
-static char __pyx_k_164[] = "nbad < 1";
-static char __pyx_k_166[] = "nsample < 1";
-static char __pyx_k_168[] = "ngood + nbad < nsample";
-static char __pyx_k_174[] = "p <= 0.0";
-static char __pyx_k_176[] = "p >= 1.0";
-static char __pyx_k_180[] = "mean must be 1 dimensional";
-static char __pyx_k_182[] = "cov must be 2 dimensional and square";
-static char __pyx_k_184[] = "mean and cov must have same length";
-static char __pyx_k_186[] = "numpy.dual";
-static char __pyx_k_187[] = "sum(pvals[:-1]) > 1.0";
-static char __pyx_k_191[] = "standard_exponential";
-static char __pyx_k_192[] = "noncentral_chisquare";
-static char __pyx_k_193[] = "RandomState.random_sample (line 721)";
-static char __pyx_k_194[] = "\n random_sample(size=None)\n\n Return random floats in the half-open interval [0.0, 1.0).\n\n Results are from the \"continuous uniform\" distribution over the\n stated interval. To sample :math:`Unif[a, b), b > a` multiply\n the output of `random_sample` by `(b-a)` and add `a`::\n\n (b - a) * random_sample() + a\n\n Parameters\n ----------\n size : int or tuple of ints, optional\n Defines the shape of the returned array of random floats. If None\n (the default), returns a single float.\n\n Returns\n -------\n out : float or ndarray of floats\n Array of random floats of shape `size` (unless ``size=None``, in which\n case a single float is returned).\n\n Examples\n --------\n >>> np.random.random_sample()\n 0.47108547995356098\n >>> type(np.random.random_sample())\n <type 'float'>\n >>> np.random.random_sample((5,))\n array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428])\n\n Three-by-two array of random numbers from [-5, 0):\n\n >>> 5 * np.random.random_sample((3, 2)) - 5\n array([[-3.99149989, -0.52338984],\n [-2.99091858, -0.79479508],\n [-1.23204345, -1.75224494]])\n\n ";
-static char __pyx_k_195[] = "RandomState.tomaxint (line 764)";
-static char __pyx_k_196[] = "\n tomaxint(size=None)\n\n Random integers between 0 and ``sys.maxint``, inclusive.\n\n Return a sample of uniformly distributed random integers in the interval\n [0, ``sys.maxint``].\n\n Parameters\n ----------\n size : tuple of ints, int, optional\n Shape of output. If this is, for example, (m,n,k), m*n*k samples\n are generated. If no shape is specified, a single sample is\n returned.\n\n Returns\n -------\n out : ndarray\n Drawn samples, with shape `size`.\n\n See Also\n --------\n randint : Uniform sampling over a given half-open interval of integers.\n random_integers : Uniform sampling over a given closed interval of\n integers.\n\n Examples\n --------\n >>> RS = np.random.mtrand.RandomState() # need a RandomState object\n >>> RS.tomaxint((2,2,2))\n array([[[1170048599, 1600360186],\n [ 739731006, 1947757578]],\n [[1871712945, 752307660],\n [1601631370, 1479324245]]])\n >>> import sys\n >>> sys.maxint\n 2147483647\n >>> RS.tomaxint((2,2,2)) < sys.maxint\n array([[[ True, True],\n [ True, True]],\n [[ True, True],\n [ True, True]]], dtype=bool)\n\n ";
-static char __pyx_k_197[] = "RandomState.randint (line 811)";
-static char __pyx_k_198[] = "\n randint(low, high=None, size=None)\n\n Return random integers from `low` (inclusive) to `high` (exclusive).\n\n Return random integers from the \"discrete uniform\" distribution in the\n \"half-open\" interval [`low`, `high`). If `high` is None (the default),\n then results are from [0, `low`).\n\n Parameters\n ----------\n low : int\n Lowest (signed) integer to be drawn from the distribution (unless\n ``high=None``, in which case this parameter is the *highest* such\n integer).\n high : int, optional\n If provided, one above the largest (signed) integer to be drawn\n from the distribution (see above for behavior if ``high=None``).\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single int is\n returned.\n\n Returns\n -------\n out : int or ndarray of ints\n `size`-shaped array of random integers from the appropriate\n distribution, or a single such random int if `size` not provided.\n\n See Also\n --------\n random.random_integers : similar to `randint`, only for the closed\n interval [`low`, `high`], and 1 is the lowest value if `high` is\n omitted. In particular, this other one is the one to use to generate\n uniformly distributed discrete non-integers.\n\n Examples\n --------\n >>> np.random.randint(2, size=10)\n array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0])\n >>> np.random.randint(1, size=10)\n array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n\n Generate a 2 x 4 array of ints between 0 and 4, inclusive:\n\n >>> np.random.randint(5, size=(2, 4))\n array([[4, 0, 2, 1],\n [3, 2, 2, 0]])\n\n ";
-static char __pyx_k_199[] = "RandomState.bytes (line 891)";
-static char __pyx_k_200[] = "\n bytes(length)\n\n Return random bytes.\n\n Parameters\n ----------\n length : int\n Number of random bytes.\n\n Returns\n -------\n out : str\n String of length `length`.\n\n Examples\n --------\n >>> np.random.bytes(10)\n ' eh\\x85\\x022SZ\\xbf\\xa4' #random\n\n ";
-static char __pyx_k_201[] = "RandomState.choice (line 919)";
-static char __pyx_k_202[] = "\n choice(a, size=1, replace=True, p=None)\n\n Generates a random sample from a given 1-D array\n\n .. versionadded:: 1.7.0\n\n Parameters\n -----------\n a : 1-D array-like or int\n If an ndarray, a random sample is generated from its elements.\n If an int, the random sample is generated as if a was np.arange(n)\n size : int\n Positive integer, the size of the sample.\n replace : boolean, optional\n Whether the sample is with or without replacement\n p : 1-D array-like, optional\n The probabilities associated with each entry in a.\n If not given the sample assumes a uniform distribtion over all\n entries in a.\n\n Returns\n --------\n samples : 1-D ndarray, shape (size,)\n The generated random samples\n\n Raises\n -------\n ValueError\n If a is an int and less than zero, if a or p are not 1-dimensional,\n if a is an array-like of size 0, if p is not a vector of\n probabilities, if a and p have different lengths, or if\n replace=False and the sample size is greater than the population\n size\n\n See Also\n ---------\n randint, shuffle, permutation\n\n Examples\n ---------\n Generate a uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3)\n array([0, 3, 4])\n >>> #This is equivalent to np.random.randint(0,5,3)\n\n Generate a non-uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])\n array([3, 3, 0])\n\n Generate a uniform random sample from np.arange(5) of size 3 without\n replacement:\n\n >>> np.random.choice(5, 3, replace=False)\n array([3,1,0])\n >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]\n\n "" Generate a non-uniform random sample from np.arange(5) of size\n 3 without replacement:\n\n >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])\n array([2, 3, 0])\n\n Any of the above can be repeated with an arbitrary array-like\n instead of just integers. For instance:\n\n >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']\n >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])\n array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],\n dtype='|S11')\n\n ";
-static char __pyx_k_203[] = "RandomState.uniform (line 1061)";
-static char __pyx_k_204[] = "\n uniform(low=0.0, high=1.0, size=1)\n\n Draw samples from a uniform distribution.\n\n Samples are uniformly distributed over the half-open interval\n ``[low, high)`` (includes low, but excludes high). In other words,\n any value within the given interval is equally likely to be drawn\n by `uniform`.\n\n Parameters\n ----------\n low : float, optional\n Lower boundary of the output interval. All values generated will be\n greater than or equal to low. The default value is 0.\n high : float\n Upper boundary of the output interval. All values generated will be\n less than high. The default value is 1.0.\n size : int or tuple of ints, optional\n Shape of output. If the given size is, for example, (m,n,k),\n m*n*k samples are generated. If no shape is specified, a single sample\n is returned.\n\n Returns\n -------\n out : ndarray\n Drawn samples, with shape `size`.\n\n See Also\n --------\n randint : Discrete uniform distribution, yielding integers.\n random_integers : Discrete uniform distribution over the closed\n interval ``[low, high]``.\n random_sample : Floats uniformly distributed over ``[0, 1)``.\n random : Alias for `random_sample`.\n rand : Convenience function that accepts dimensions as input, e.g.,\n ``rand(2,2)`` would generate a 2-by-2 array of floats,\n uniformly distributed over ``[0, 1)``.\n\n Notes\n -----\n The probability density function of the uniform distribution is\n\n .. math:: p(x) = \\frac{1}{b - a}\n\n anywhere within the interval ``[a, b)``, and zero elsewhere.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> s = np.random.uniform(-1,0,1000)\n\n All values are w""ithin the given interval:\n\n >>> np.all(s >= -1)\n True\n >>> np.all(s < 0)\n True\n\n Display the histogram of the samples, along with the\n probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 15, normed=True)\n >>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')\n >>> plt.show()\n\n ";
-static char __pyx_k_205[] = "RandomState.rand (line 1148)";
-static char __pyx_k_206[] = "\n rand(d0, d1, ..., dn)\n\n Random values in a given shape.\n\n Create an array of the given shape and populate it with\n random samples from a uniform distribution\n over ``[0, 1)``.\n\n Parameters\n ----------\n d0, d1, ..., dn : int, optional\n The dimensions of the returned array, should all be positive.\n If no argument is given a single Python float is returned.\n\n Returns\n -------\n out : ndarray, shape ``(d0, d1, ..., dn)``\n Random values.\n\n See Also\n --------\n random\n\n Notes\n -----\n This is a convenience function. If you want an interface that\n takes a shape-tuple as the first argument, refer to\n np.random.random_sample.\n\n Examples\n --------\n >>> np.random.rand(3,2)\n array([[ 0.14022471, 0.96360618], #random\n [ 0.37601032, 0.25528411], #random\n [ 0.49313049, 0.94909878]]) #random\n\n ";
-static char __pyx_k_207[] = "RandomState.randn (line 1192)";
-static char __pyx_k_208[] = "\n randn(d0, d1, ..., dn)\n\n Return a sample (or samples) from the \"standard normal\" distribution.\n\n If positive, int_like or int-convertible arguments are provided,\n `randn` generates an array of shape ``(d0, d1, ..., dn)``, filled\n with random floats sampled from a univariate \"normal\" (Gaussian)\n distribution of mean 0 and variance 1 (if any of the :math:`d_i` are\n floats, they are first converted to integers by truncation). A single\n float randomly sampled from the distribution is returned if no\n argument is provided.\n\n This is a convenience function. If you want an interface that takes a\n tuple as the first argument, use `numpy.random.standard_normal` instead.\n\n Parameters\n ----------\n d0, d1, ..., dn : int, optional\n The dimensions of the returned array, should be all positive.\n If no argument is given a single Python float is returned.\n\n Returns\n -------\n Z : ndarray or float\n A ``(d0, d1, ..., dn)``-shaped array of floating-point samples from\n the standard normal distribution, or a single such float if\n no parameters were supplied.\n\n See Also\n --------\n random.standard_normal : Similar, but takes a tuple as its argument.\n\n Notes\n -----\n For random samples from :math:`N(\\mu, \\sigma^2)`, use:\n\n ``sigma * np.random.randn(...) + mu``\n\n Examples\n --------\n >>> np.random.randn()\n 2.1923875335537315 #random\n\n Two-by-four array of samples from N(3, 6.25):\n\n >>> 2.5 * np.random.randn(2, 4) + 3\n array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], #random\n [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) #random\n\n ";
-static char __pyx_k_209[] = "RandomState.random_integers (line 1249)";
-static char __pyx_k_210[] = "\n random_integers(low, high=None, size=None)\n\n Return random integers between `low` and `high`, inclusive.\n\n Return random integers from the \"discrete uniform\" distribution in the\n closed interval [`low`, `high`]. If `high` is None (the default),\n then results are from [1, `low`].\n\n Parameters\n ----------\n low : int\n Lowest (signed) integer to be drawn from the distribution (unless\n ``high=None``, in which case this parameter is the *highest* such\n integer).\n high : int, optional\n If provided, the largest (signed) integer to be drawn from the\n distribution (see above for behavior if ``high=None``).\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single int is returned.\n\n Returns\n -------\n out : int or ndarray of ints\n `size`-shaped array of random integers from the appropriate\n distribution, or a single such random int if `size` not provided.\n\n See Also\n --------\n random.randint : Similar to `random_integers`, only for the half-open\n interval [`low`, `high`), and 0 is the lowest value if `high` is\n omitted.\n\n Notes\n -----\n To sample from N evenly spaced floating-point numbers between a and b,\n use::\n\n a + (b - a) * (np.random.random_integers(N) - 1) / (N - 1.)\n\n Examples\n --------\n >>> np.random.random_integers(5)\n 4\n >>> type(np.random.random_integers(5))\n <type 'int'>\n >>> np.random.random_integers(5, size=(3.,2.))\n array([[5, 4],\n [3, 3],\n [4, 5]])\n\n Choose five random numbers from the set of five evenly-spaced\n numbers between 0 and 2.5, inclusive (*i.e.*, from the set\n :math:`{0, 5/8, 10/8, 15/8, 20/8}`):\n""\n >>> 2.5 * (np.random.random_integers(5, size=(5,)) - 1) / 4.\n array([ 0.625, 1.25 , 0.625, 0.625, 2.5 ])\n\n Roll two six sided dice 1000 times and sum the results:\n\n >>> d1 = np.random.random_integers(1, 6, 1000)\n >>> d2 = np.random.random_integers(1, 6, 1000)\n >>> dsums = d1 + d2\n\n Display results as a histogram:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(dsums, 11, normed=True)\n >>> plt.show()\n\n ";
-static char __pyx_k_211[] = "RandomState.standard_normal (line 1327)";
-static char __pyx_k_212[] = "\n standard_normal(size=None)\n\n Returns samples from a Standard Normal distribution (mean=0, stdev=1).\n\n Parameters\n ----------\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n\n Returns\n -------\n out : float or ndarray\n Drawn samples.\n\n Examples\n --------\n >>> s = np.random.standard_normal(8000)\n >>> s\n array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, #random\n -0.38672696, -0.4685006 ]) #random\n >>> s.shape\n (8000,)\n >>> s = np.random.standard_normal(size=(3, 4, 2))\n >>> s.shape\n (3, 4, 2)\n\n ";
-static char __pyx_k_213[] = "RandomState.normal (line 1359)";
-static char __pyx_k_214[] = "\n normal(loc=0.0, scale=1.0, size=None)\n\n Draw random samples from a normal (Gaussian) distribution.\n\n The probability density function of the normal distribution, first\n derived by De Moivre and 200 years later by both Gauss and Laplace\n independently [2]_, is often called the bell curve because of\n its characteristic shape (see the example below).\n\n The normal distribution occurs often in nature. For example, it\n describes the commonly occurring distribution of samples influenced\n by a large number of tiny, random disturbances, each with its own\n unique distribution [2]_.\n\n Parameters\n ----------\n loc : float\n Mean (\"centre\") of the distribution.\n scale : float\n Standard deviation (spread or \"width\") of the distribution.\n size : tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n See Also\n --------\n scipy.stats.distributions.norm : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Gaussian distribution is\n\n .. math:: p(x) = \\frac{1}{\\sqrt{ 2 \\pi \\sigma^2 }}\n e^{ - \\frac{ (x - \\mu)^2 } {2 \\sigma^2} },\n\n where :math:`\\mu` is the mean and :math:`\\sigma` the standard deviation.\n The square of the standard deviation, :math:`\\sigma^2`, is called the\n variance.\n\n The function has its peak at the mean, and its \"spread\" increases with\n the standard deviation (the function reaches 0.607 times its maximum at\n :math:`\\mu + \\sigma` and :math:`\\mu - \\sigma` [2]_). This implies that\n `numpy.random.normal` is more likely to return samples lying close to the\n mean, rather than those far away.\n""\n References\n ----------\n .. [1] Wikipedia, \"Normal distribution\",\n http://en.wikipedia.org/wiki/Normal_distribution\n .. [2] P. R. Peebles Jr., \"Central Limit Theorem\" in \"Probability, Random\n Variables and Random Signal Principles\", 4th ed., 2001,\n pp. 51, 51, 125.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> mu, sigma = 0, 0.1 # mean and standard deviation\n >>> s = np.random.normal(mu, sigma, 1000)\n\n Verify the mean and the variance:\n\n >>> abs(mu - np.mean(s)) < 0.01\n True\n\n >>> abs(sigma - np.std(s, ddof=1)) < 0.01\n True\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 30, normed=True)\n >>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *\n ... np.exp( - (bins - mu)**2 / (2 * sigma**2) ),\n ... linewidth=2, color='r')\n >>> plt.show()\n\n ";
-static char __pyx_k_215[] = "RandomState.standard_exponential (line 1572)";
-static char __pyx_k_216[] = "\n standard_exponential(size=None)\n\n Draw samples from the standard exponential distribution.\n\n `standard_exponential` is identical to the exponential distribution\n with a scale parameter of 1.\n\n Parameters\n ----------\n size : int or tuple of ints\n Shape of the output.\n\n Returns\n -------\n out : float or ndarray\n Drawn samples.\n\n Examples\n --------\n Output a 3x8000 array:\n\n >>> n = np.random.standard_exponential((3, 8000))\n\n ";
-static char __pyx_k_217[] = "RandomState.standard_gamma (line 1600)";
-static char __pyx_k_218[] = "\n standard_gamma(shape, size=None)\n\n Draw samples from a Standard Gamma distribution.\n\n Samples are drawn from a Gamma distribution with specified parameters,\n shape (sometimes designated \"k\") and scale=1.\n\n Parameters\n ----------\n shape : float\n Parameter, should be > 0.\n size : int or tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : ndarray or scalar\n The drawn samples.\n\n See Also\n --------\n scipy.stats.distributions.gamma : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Gamma distribution is\n\n .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},\n\n where :math:`k` is the shape and :math:`\\theta` the scale,\n and :math:`\\Gamma` is the Gamma function.\n\n The Gamma distribution is often used to model the times to failure of\n electronic components, and arises naturally in processes for which the\n waiting times between Poisson distributed events are relevant.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Gamma Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/GammaDistribution.html\n .. [2] Wikipedia, \"Gamma-distribution\",\n http://en.wikipedia.org/wiki/Gamma-distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> shape, scale = 2., 1. # mean and width\n >>> s = np.random.standard_gamma(shape, 1000000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt""\n >>> import scipy.special as sps\n >>> count, bins, ignored = plt.hist(s, 50, normed=True)\n >>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ \\\n ... (sps.gamma(shape) * scale**shape))\n >>> plt.plot(bins, y, linewidth=2, color='r')\n >>> plt.show()\n\n ";
-static char __pyx_k_219[] = "RandomState.gamma (line 1682)";
-static char __pyx_k_220[] = "\n gamma(shape, scale=1.0, size=None)\n\n Draw samples from a Gamma distribution.\n\n Samples are drawn from a Gamma distribution with specified parameters,\n `shape` (sometimes designated \"k\") and `scale` (sometimes designated\n \"theta\"), where both parameters are > 0.\n\n Parameters\n ----------\n shape : scalar > 0\n The shape of the gamma distribution.\n scale : scalar > 0, optional\n The scale of the gamma distribution. Default is equal to 1.\n size : shape_tuple, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n out : ndarray, float\n Returns one sample unless `size` parameter is specified.\n\n See Also\n --------\n scipy.stats.distributions.gamma : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Gamma distribution is\n\n .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},\n\n where :math:`k` is the shape and :math:`\\theta` the scale,\n and :math:`\\Gamma` is the Gamma function.\n\n The Gamma distribution is often used to model the times to failure of\n electronic components, and arises naturally in processes for which the\n waiting times between Poisson distributed events are relevant.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Gamma Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/GammaDistribution.html\n .. [2] Wikipedia, \"Gamma-distribution\",\n http://en.wikipedia.org/wiki/Gamma-distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> shape, scale = 2.,"" 2. # mean and dispersion\n >>> s = np.random.gamma(shape, scale, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> import scipy.special as sps\n >>> count, bins, ignored = plt.hist(s, 50, normed=True)\n >>> y = bins**(shape-1)*(np.exp(-bins/scale) /\n ... (sps.gamma(shape)*scale**shape))\n >>> plt.plot(bins, y, linewidth=2, color='r')\n >>> plt.show()\n\n ";
-static char __pyx_k_221[] = "RandomState.f (line 1773)";
-static char __pyx_k_222[] = "\n f(dfnum, dfden, size=None)\n\n Draw samples from an F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of freedom\n in denominator), where both parameters should be greater than zero.\n\n The F distribution (also known as the\n Fisher distribution) is a continuous probability distribution\n that arises in ANOVA tests, and its random variate is the ratio of two\n chi-square variates.\n\n Parameters\n ----------\n dfnum : float\n Degrees of freedom in numerator. Should be greater than zero.\n dfden : float\n Degrees of freedom in denominator. Should be greater than zero.\n size : {tuple, int}, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``,\n then ``m * n * k`` samples are drawn. By default only one sample\n is returned.\n\n Returns\n -------\n samples : {ndarray, scalar}\n Samples from the Fisher distribution.\n\n See Also\n --------\n scipy.stats.distributions.f : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n\n The F statistic is used to compare in-group variances to between-group\n variances. Calculating the distribution depends on the sampling, and\n so it is a function of the respective degrees of freedom in the\n problem. The variable `dfnum` is the number of samples minus one, the\n between-groups degrees of freedom, while `dfden` is the within-groups\n degrees of freedom, the sum of the number of samples in each group\n minus the number of groups.\n\n References\n ----------\n .. [1] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n Fifth Edition, 2002.""\n .. [2] Wikipedia, \"F-distribution\",\n http://en.wikipedia.org/wiki/F-distribution\n\n Examples\n --------\n An example from Glantz [1], pp 47-40.\n Two groups, children of diabetics (25 people) and children from people\n without diabetes (25 controls). Fasting blood glucose was measured,\n case group had a mean value of 86.1, controls had a mean value of\n 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these\n data consistent with the null hypothesis that the parents' diabetic\n status does not affect their children's blood glucose levels?\n Calculating the F statistic from the data gives a value of 36.01.\n\n Draw samples from the distribution:\n\n >>> dfnum = 1. # between group degrees of freedom\n >>> dfden = 48. # within groups degrees of freedom\n >>> s = np.random.f(dfnum, dfden, 1000)\n\n The lower bound for the top 1% of the samples is:\n\n >>> np.sort(s)[-10]\n 7.61988120985\n\n So there is about a 1% chance that the F statistic will exceed 7.62;\n the measured value is 36.01, so the null hypothesis is rejected at the 1%\n level.\n\n ";
-static char __pyx_k_223[] = "RandomState.noncentral_f (line 1876)";
-static char __pyx_k_224[] = "\n noncentral_f(dfnum, dfden, nonc, size=None)\n\n Draw samples from the noncentral F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of\n freedom in denominator), where both parameters are > 1.\n `nonc` is the non-centrality parameter.\n\n Parameters\n ----------\n dfnum : int\n Parameter, should be > 1.\n dfden : int\n Parameter, should be > 1.\n nonc : float\n Parameter, should be >= 0.\n size : int or tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : scalar or ndarray\n Drawn samples.\n\n Notes\n -----\n When calculating the power of an experiment (power = probability of\n rejecting the null hypothesis when a specific alternative is true) the\n non-central F statistic becomes important. When the null hypothesis is\n true, the F statistic follows a central F distribution. When the null\n hypothesis is not true, it follows a non-central F distribution.\n\n References\n ----------\n Weisstein, Eric W. \"Noncentral F-Distribution.\" From MathWorld--A Wolfram\n Web Resource. http://mathworld.wolfram.com/NoncentralF-Distribution.html\n\n Wikipedia, \"Noncentral F distribution\",\n http://en.wikipedia.org/wiki/Noncentral_F-distribution\n\n Examples\n --------\n In a study, testing for a specific alternative to the null hypothesis\n requires use of the Noncentral F distribution. We need to calculate the\n area in the tail of the distribution that exceeds the value of the F\n distribution for the null hypothesis. We'll plot the two probability\n distributions for comp""arison.\n\n >>> dfnum = 3 # between group deg of freedom\n >>> dfden = 20 # within groups degrees of freedom\n >>> nonc = 3.0\n >>> nc_vals = np.random.noncentral_f(dfnum, dfden, nonc, 1000000)\n >>> NF = np.histogram(nc_vals, bins=50, normed=True)\n >>> c_vals = np.random.f(dfnum, dfden, 1000000)\n >>> F = np.histogram(c_vals, bins=50, normed=True)\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(F[1][1:], F[0])\n >>> plt.plot(NF[1][1:], NF[0])\n >>> plt.show()\n\n ";
-static char __pyx_k_225[] = "RandomState.chisquare (line 1971)";
-static char __pyx_k_226[] = "\n chisquare(df, size=None)\n\n Draw samples from a chi-square distribution.\n\n When `df` independent random variables, each with standard normal\n distributions (mean 0, variance 1), are squared and summed, the\n resulting distribution is chi-square (see Notes). This distribution\n is often used in hypothesis testing.\n\n Parameters\n ----------\n df : int\n Number of degrees of freedom.\n size : tuple of ints, int, optional\n Size of the returned array. By default, a scalar is\n returned.\n\n Returns\n -------\n output : ndarray\n Samples drawn from the distribution, packed in a `size`-shaped\n array.\n\n Raises\n ------\n ValueError\n When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)\n is given.\n\n Notes\n -----\n The variable obtained by summing the squares of `df` independent,\n standard normally distributed random variables:\n\n .. math:: Q = \\sum_{i=1}^{\\mathtt{df}} X^2_i\n\n is chi-square distributed, denoted\n\n .. math:: Q \\sim \\chi^2_k.\n\n The probability density function of the chi-squared distribution is\n\n .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}\n x^{k/2 - 1} e^{-x/2},\n\n where :math:`\\Gamma` is the gamma function,\n\n .. math:: \\Gamma(x) = \\int_0^{\\infty} t^{x - 1} e^{-t} dt.\n\n References\n ----------\n `NIST/SEMATECH e-Handbook of Statistical Methods\n <http://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm>`_\n\n Examples\n --------\n >>> np.random.chisquare(2,4)\n array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272])\n\n ";
-static char __pyx_k_227[] = "RandomState.noncentral_chisquare (line 2049)";
-static char __pyx_k_228[] = "\n noncentral_chisquare(df, nonc, size=None)\n\n Draw samples from a noncentral chi-square distribution.\n\n The noncentral :math:`\\chi^2` distribution is a generalisation of\n the :math:`\\chi^2` distribution.\n\n Parameters\n ----------\n df : int\n Degrees of freedom, should be >= 1.\n nonc : float\n Non-centrality, should be > 0.\n size : int or tuple of ints\n Shape of the output.\n\n Notes\n -----\n The probability density function for the noncentral Chi-square distribution\n is\n\n .. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}\n \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}P_{Y_{df+2i}}(x),\n\n where :math:`Y_{q}` is the Chi-square with q degrees of freedom.\n\n In Holla (1970) [1]_, it is noted that the noncentral chi-square is useful\n in bombing and coverage problems, where the probability of killing the\n point target is given by the noncentral chi-squared distribution.\n\n References\n ----------\n .. [1] Holla, M.S., \"On a noncentral chi-square distribution in the\n analysis of weapon systems effectiveness\", Metrika, Volume 15,\n Number 1 / December, 1970.\n .. [2] Wikipedia, \"Noncentral chi-square distribution\"\n http://en.wikipedia.org/wiki/Noncentral_chi-square_distribution\n\n Examples\n --------\n Draw values from the distribution and plot the histogram\n\n >>> import matplotlib.pyplot as plt\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... bins=200, normed=True)\n >>> plt.show()\n\n Draw values from a noncentral chisquare with very small noncentrality,\n and compare to a chisquare.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, .0000001, 100000),\n "" ... bins=np.arange(0., 25, .1), normed=True)\n >>> values2 = plt.hist(np.random.chisquare(3, 100000),\n ... bins=np.arange(0., 25, .1), normed=True)\n >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')\n >>> plt.show()\n\n Demonstrate how large values of non-centrality lead to a more symmetric\n distribution.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... bins=200, normed=True)\n >>> plt.show()\n\n ";
-static char __pyx_k_229[] = "RandomState.standard_cauchy (line 2141)";
-static char __pyx_k_230[] = "\n standard_cauchy(size=None)\n\n Standard Cauchy distribution with mode = 0.\n\n Also known as the Lorentz distribution.\n\n Parameters\n ----------\n size : int or tuple of ints\n Shape of the output.\n\n Returns\n -------\n samples : ndarray or scalar\n The drawn samples.\n\n Notes\n -----\n The probability density function for the full Cauchy distribution is\n\n .. math:: P(x; x_0, \\gamma) = \\frac{1}{\\pi \\gamma \\bigl[ 1+\n (\\frac{x-x_0}{\\gamma})^2 \\bigr] }\n\n and the Standard Cauchy distribution just sets :math:`x_0=0` and\n :math:`\\gamma=1`.\n\n The Cauchy distribution arises in the solution to the driven harmonic\n oscillator problem, and also describes spectral line broadening. It\n also describes the distribution of values at which a line tilted at\n a random angle will cut the x axis.\n\n When studying hypothesis tests that assume normality, seeing how the\n tests perform on data from a Cauchy distribution is a good indicator of\n their sensitivity to a heavy-tailed distribution, since the Cauchy looks\n very much like a Gaussian distribution, but with heavier tails.\n\n References\n ----------\n .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, \"Cauchy\n Distribution\",\n http://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm\n .. [2] Weisstein, Eric W. \"Cauchy Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/CauchyDistribution.html\n .. [3] Wikipedia, \"Cauchy distribution\"\n http://en.wikipedia.org/wiki/Cauchy_distribution\n\n Examples\n --------\n Draw samples and plot the distribution:\n\n >>> s = np.random.standard_cauchy(1000000)\n >>> s = s[(s>-25) & (s<""25)] # truncate distribution so it plots well\n >>> import matplotlib.pyplot as plt\n >>> plt.hist(s, bins=100)\n >>> plt.show()\n\n ";
-static char __pyx_k_231[] = "RandomState.standard_t (line 2202)";
-static char __pyx_k_232[] = "\n standard_t(df, size=None)\n\n Standard Student's t distribution with df degrees of freedom.\n\n A special case of the hyperbolic distribution.\n As `df` gets large, the result resembles that of the standard normal\n distribution (`standard_normal`).\n\n Parameters\n ----------\n df : int\n Degrees of freedom, should be > 0.\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n\n Returns\n -------\n samples : ndarray or scalar\n Drawn samples.\n\n Notes\n -----\n The probability density function for the t distribution is\n\n .. math:: P(x, df) = \\frac{\\Gamma(\\frac{df+1}{2})}{\\sqrt{\\pi df}\n \\Gamma(\\frac{df}{2})}\\Bigl( 1+\\frac{x^2}{df} \\Bigr)^{-(df+1)/2}\n\n The t test is based on an assumption that the data come from a Normal\n distribution. The t test provides a way to test whether the sample mean\n (that is the mean calculated from the data) is a good estimate of the true\n mean.\n\n The derivation of the t-distribution was first published in 1908 by William\n Gosset while working for the Guinness Brewery in Dublin. Due to proprietary\n issues, he had to publish under a pseudonym, and so he used the name\n Student.\n\n References\n ----------\n .. [1] Dalgaard, Peter, \"Introductory Statistics With R\",\n Springer, 2002.\n .. [2] Wikipedia, \"Student's t-distribution\"\n http://en.wikipedia.org/wiki/Student's_t-distribution\n\n Examples\n --------\n From Dalgaard page 83 [1]_, suppose the daily energy intake for 11\n women in kJ is:\n\n >>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\\n ... 7515, 8230, 8770])\n\n Doe""s their energy intake deviate systematically from the recommended\n value of 7725 kJ?\n\n We have 10 degrees of freedom, so is the sample mean within 95% of the\n recommended value?\n\n >>> s = np.random.standard_t(10, size=100000)\n >>> np.mean(intake)\n 6753.636363636364\n >>> intake.std(ddof=1)\n 1142.1232221373727\n\n Calculate the t statistic, setting the ddof parameter to the unbiased\n value so the divisor in the standard deviation will be degrees of\n freedom, N-1.\n\n >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))\n >>> import matplotlib.pyplot as plt\n >>> h = plt.hist(s, bins=100, normed=True)\n\n For a one-sided t-test, how far out in the distribution does the t\n statistic appear?\n\n >>> np.sum(s<t) / float(len(s))\n 0.0090699999999999999 #random\n\n So the p-value is about 0.009, which is small enough to reject the null\n hypothesis at the 1% significance level.\n\n ";
-static char __pyx_k_233[] = "RandomState.vonmises (line 2303)";
-static char __pyx_k_234[] = "\n vonmises(mu, kappa, size=None)\n\n Draw samples from a von Mises distribution.\n\n Samples are drawn from a von Mises distribution with specified mode\n (mu) and dispersion (kappa), on the interval [-pi, pi].\n\n The von Mises distribution (also known as the circular normal\n distribution) is a continuous probability distribution on the unit\n circle. It may be thought of as the circular analogue of the normal\n distribution.\n\n Parameters\n ----------\n mu : float\n Mode (\"center\") of the distribution.\n kappa : float\n Dispersion of the distribution, has to be >=0.\n size : int or tuple of int\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : scalar or ndarray\n The returned samples, which are in the interval [-pi, pi].\n\n See Also\n --------\n scipy.stats.distributions.vonmises : probability density function,\n distribution, or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the von Mises distribution is\n\n .. math:: p(x) = \\frac{e^{\\kappa \\cos(x-\\mu)}}{2\\pi I_0(\\kappa)},\n\n where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion,\n and :math:`I_0(\\kappa)` is the modified Bessel function of order 0.\n\n The von Mises is named for Richard Edler von Mises, who was born in\n Austria-Hungary, in what is now Ukraine. He fled to the United\n States in 1939 and became a professor at Harvard. He worked in\n probability theory, aerodynamics, fluid mechanics, and philosophy of\n science.\n\n References\n ----------\n Abramowitz, M. and Stegun, I. A. (ed.), *Handbook of Mathematical\n Functions*, New York: Dover, 1965.\n\n "" von Mises, R., *Mathematical Theory of Probability and Statistics*,\n New York: Academic Press, 1964.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> mu, kappa = 0.0, 4.0 # mean and dispersion\n >>> s = np.random.vonmises(mu, kappa, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> import scipy.special as sps\n >>> count, bins, ignored = plt.hist(s, 50, normed=True)\n >>> x = np.arange(-np.pi, np.pi, 2*np.pi/50.)\n >>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*sps.jn(0,kappa))\n >>> plt.plot(x, y/max(y), linewidth=2, color='r')\n >>> plt.show()\n\n ";
-static char __pyx_k_235[] = "RandomState.pareto (line 2397)";
-static char __pyx_k_236[] = "\n pareto(a, size=None)\n\n Draw samples from a Pareto II or Lomax distribution with specified shape.\n\n The Lomax or Pareto II distribution is a shifted Pareto distribution. The\n classical Pareto distribution can be obtained from the Lomax distribution\n by adding the location parameter m, see below. The smallest value of the\n Lomax distribution is zero while for the classical Pareto distribution it\n is m, where the standard Pareto distribution has location m=1.\n Lomax can also be considered as a simplified version of the Generalized\n Pareto distribution (available in SciPy), with the scale set to one and\n the location set to zero.\n\n Values drawn from a Pareto distribution are greater than zero, and\n unbounded above. It is also known as the \"80-20 rule\". In this\n distribution, 80 percent of the weights are in the lowest 20 percent of\n the range, while the other 20 percent fill the remaining 80 percent of\n the range.\n\n Parameters\n ----------\n a : float, > 0.\n Shape of the distribution.\n size : tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n See Also\n --------\n scipy.stats.distributions.lomax.pdf : probability density function,\n distribution or cumulative density function, etc.\n scipy.stats.distributions.genpareto.pdf : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Pareto distribution is\n\n .. math:: p(x) = \\frac{am^a}{x^{a+1}}\n\n where :math:`a` is the shape and :math:`m` the location.\n\n The Pareto distribution, named after the Italian economist Vilfredo Pareto,\n is a power law probability distribution useful in many real world probl""ems.\n Outside the field of economics it is generally referred to as the Bradford\n distribution. Pareto developed the distribution to describe the\n distribution of wealth in an economy. It has also found use in insurance,\n web page access statistics, oil field sizes, and many other problems,\n including the download frequency for projects in Sourceforge [1]. It is\n one of the so-called \"fat-tailed\" distributions.\n\n\n References\n ----------\n .. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of\n Sourceforge projects.\n .. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.\n .. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme\n Values, Birkhauser Verlag, Basel, pp 23-30.\n .. [4] Wikipedia, \"Pareto distribution\",\n http://en.wikipedia.org/wiki/Pareto_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> a, m = 3., 1. # shape and mode\n >>> s = np.random.pareto(a, 1000) + m\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 100, normed=True, align='center')\n >>> fit = a*m**a/bins**(a+1)\n >>> plt.plot(bins, max(count)*fit/max(fit),linewidth=2, color='r')\n >>> plt.show()\n\n ";
-static char __pyx_k_237[] = "RandomState.weibull (line 2493)";
-static char __pyx_k_238[] = "\n weibull(a, size=None)\n\n Weibull distribution.\n\n Draw samples from a 1-parameter Weibull distribution with the given\n shape parameter `a`.\n\n .. math:: X = (-\\ln(U))^{1/a}\n\n Here, U is drawn from the uniform distribution over (0,1].\n\n The more common 2-parameter Weibull, including a scale parameter\n :math:`\\lambda` is just :math:`X = \\lambda(-\\ln(U))^{1/a}`.\n\n Parameters\n ----------\n a : float\n Shape of the distribution.\n size : tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n See Also\n --------\n scipy.stats.distributions.weibull_max\n scipy.stats.distributions.weibull_min\n scipy.stats.distributions.genextreme\n gumbel\n\n Notes\n -----\n The Weibull (or Type III asymptotic extreme value distribution for smallest\n values, SEV Type III, or Rosin-Rammler distribution) is one of a class of\n Generalized Extreme Value (GEV) distributions used in modeling extreme\n value problems. This class includes the Gumbel and Frechet distributions.\n\n The probability density for the Weibull distribution is\n\n .. math:: p(x) = \\frac{a}\n {\\lambda}(\\frac{x}{\\lambda})^{a-1}e^{-(x/\\lambda)^a},\n\n where :math:`a` is the shape and :math:`\\lambda` the scale.\n\n The function has its peak (the mode) at\n :math:`\\lambda(\\frac{a-1}{a})^{1/a}`.\n\n When ``a = 1``, the Weibull distribution reduces to the exponential\n distribution.\n\n References\n ----------\n .. [1] Waloddi Weibull, Professor, Royal Technical University, Stockholm,\n 1939 \"A Statistical Theory Of The Strength Of Materials\",\n Ingeniorsvetenskapsakademiens Handlingar Nr 151, 1939,\n General""stabens Litografiska Anstalts Forlag, Stockholm.\n .. [2] Waloddi Weibull, 1951 \"A Statistical Distribution Function of Wide\n Applicability\", Journal Of Applied Mechanics ASME Paper.\n .. [3] Wikipedia, \"Weibull distribution\",\n http://en.wikipedia.org/wiki/Weibull_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> a = 5. # shape\n >>> s = np.random.weibull(a, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> def weib(x,n,a):\n ... return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a)\n\n >>> count, bins, ignored = plt.hist(np.random.weibull(5.,1000))\n >>> x = np.arange(1,100.)/50.\n >>> scale = count.max()/weib(x, 1., 5.).max()\n >>> plt.plot(x, weib(x, 1., 5.)*scale)\n >>> plt.show()\n\n ";
-static char __pyx_k_239[] = "RandomState.power (line 2593)";
-static char __pyx_k_240[] = "\n power(a, size=None)\n\n Draws samples in [0, 1] from a power distribution with positive\n exponent a - 1.\n\n Also known as the power function distribution.\n\n Parameters\n ----------\n a : float\n parameter, > 0\n size : tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : {ndarray, scalar}\n The returned samples lie in [0, 1].\n\n Raises\n ------\n ValueError\n If a<1.\n\n Notes\n -----\n The probability density function is\n\n .. math:: P(x; a) = ax^{a-1}, 0 \\le x \\le 1, a>0.\n\n The power function distribution is just the inverse of the Pareto\n distribution. It may also be seen as a special case of the Beta\n distribution.\n\n It is used, for example, in modeling the over-reporting of insurance\n claims.\n\n References\n ----------\n .. [1] Christian Kleiber, Samuel Kotz, \"Statistical size distributions\n in economics and actuarial sciences\", Wiley, 2003.\n .. [2] Heckert, N. A. and Filliben, James J. (2003). NIST Handbook 148:\n Dataplot Reference Manual, Volume 2: Let Subcommands and Library\n Functions\", National Institute of Standards and Technology Handbook\n Series, June 2003.\n http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> a = 5. # shape\n >>> samples = 1000\n >>> s = np.random.power(a, samples)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, bins=""30)\n >>> x = np.linspace(0, 1, 100)\n >>> y = a*x**(a-1.)\n >>> normed_y = samples*np.diff(bins)[0]*y\n >>> plt.plot(x, normed_y)\n >>> plt.show()\n\n Compare the power function distribution to the inverse of the Pareto.\n\n >>> from scipy import stats\n >>> rvs = np.random.power(5, 1000000)\n >>> rvsp = np.random.pareto(5, 1000000)\n >>> xx = np.linspace(0,1,100)\n >>> powpdf = stats.powerlaw.pdf(xx,5)\n\n >>> plt.figure()\n >>> plt.hist(rvs, bins=50, normed=True)\n >>> plt.plot(xx,powpdf,'r-')\n >>> plt.title('np.random.power(5)')\n\n >>> plt.figure()\n >>> plt.hist(1./(1.+rvsp), bins=50, normed=True)\n >>> plt.plot(xx,powpdf,'r-')\n >>> plt.title('inverse of 1 + np.random.pareto(5)')\n\n >>> plt.figure()\n >>> plt.hist(1./(1.+rvsp), bins=50, normed=True)\n >>> plt.plot(xx,powpdf,'r-')\n >>> plt.title('inverse of stats.pareto(5)')\n\n ";
-static char __pyx_k_241[] = "RandomState.laplace (line 2702)";
-static char __pyx_k_242[] = "\n laplace(loc=0.0, scale=1.0, size=None)\n\n Draw samples from the Laplace or double exponential distribution with\n specified location (or mean) and scale (decay).\n\n The Laplace distribution is similar to the Gaussian/normal distribution,\n but is sharper at the peak and has fatter tails. It represents the\n difference between two independent, identically distributed exponential\n random variables.\n\n Parameters\n ----------\n loc : float\n The position, :math:`\\mu`, of the distribution peak.\n scale : float\n :math:`\\lambda`, the exponential decay.\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n\n Notes\n -----\n It has the probability density function\n\n .. math:: f(x; \\mu, \\lambda) = \\frac{1}{2\\lambda}\n \\exp\\left(-\\frac{|x - \\mu|}{\\lambda}\\right).\n\n The first law of Laplace, from 1774, states that the frequency of an error\n can be expressed as an exponential function of the absolute magnitude of\n the error, which leads to the Laplace distribution. For many problems in\n Economics and Health sciences, this distribution seems to model the data\n better than the standard Gaussian distribution.\n\n\n References\n ----------\n .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). Handbook of Mathematical\n Functions with Formulas, Graphs, and Mathematical Tables, 9th\n printing. New York: Dover, 1972.\n\n .. [2] The Laplace distribution and generalizations\n By Samuel Kotz, Tomasz J. Kozubowski, Krzysztof Podgorski,\n Birkhauser, 2001.\n\n .. [3] Weisstein, Eric W. \"Laplace Distribution.\"\n From MathWorld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/LaplaceDistribution.html\n\n .. [4] Wikipedia, \"Laplace distribution\",\n http://en.wikipedia.org/wik""i/Laplace_distribution\n\n Examples\n --------\n Draw samples from the distribution\n\n >>> loc, scale = 0., 1.\n >>> s = np.random.laplace(loc, scale, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 30, normed=True)\n >>> x = np.arange(-8., 8., .01)\n >>> pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)\n >>> plt.plot(x, pdf)\n\n Plot Gaussian for comparison:\n\n >>> g = (1/(scale * np.sqrt(2 * np.pi)) * \n ... np.exp( - (x - loc)**2 / (2 * scale**2) ))\n >>> plt.plot(x,g)\n\n ";
-static char __pyx_k_243[] = "RandomState.gumbel (line 2792)";
-static char __pyx_k_244[] = "\n gumbel(loc=0.0, scale=1.0, size=None)\n\n Gumbel distribution.\n\n Draw samples from a Gumbel distribution with specified location and scale.\n For more information on the Gumbel distribution, see Notes and References\n below.\n\n Parameters\n ----------\n loc : float\n The location of the mode of the distribution.\n scale : float\n The scale parameter of the distribution.\n size : tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n out : ndarray\n The samples\n\n See Also\n --------\n scipy.stats.gumbel_l\n scipy.stats.gumbel_r\n scipy.stats.genextreme\n probability density function, distribution, or cumulative density\n function, etc. for each of the above\n weibull\n\n Notes\n -----\n The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme Value\n Type I) distribution is one of a class of Generalized Extreme Value (GEV)\n distributions used in modeling extreme value problems. The Gumbel is a\n special case of the Extreme Value Type I distribution for maximums from\n distributions with \"exponential-like\" tails.\n\n The probability density for the Gumbel distribution is\n\n .. math:: p(x) = \\frac{e^{-(x - \\mu)/ \\beta}}{\\beta} e^{ -e^{-(x - \\mu)/\n \\beta}},\n\n where :math:`\\mu` is the mode, a location parameter, and :math:`\\beta` is\n the scale parameter.\n\n The Gumbel (named for German mathematician Emil Julius Gumbel) was used\n very early in the hydrology literature, for modeling the occurrence of\n flood events. It is also used for modeling maximum wind speed and rainfall\n rates. It is a \"fat-tailed\" distribution - the ""probability of an event in\n the tail of the distribution is larger than if one used a Gaussian, hence\n the surprisingly frequent occurrence of 100-year floods. Floods were\n initially modeled as a Gaussian process, which underestimated the frequency\n of extreme events.\n\n\n It is one of a class of extreme value distributions, the Generalized\n Extreme Value (GEV) distributions, which also includes the Weibull and\n Frechet.\n\n The function has a mean of :math:`\\mu + 0.57721\\beta` and a variance of\n :math:`\\frac{\\pi^2}{6}\\beta^2`.\n\n References\n ----------\n Gumbel, E. J., *Statistics of Extremes*, New York: Columbia University\n Press, 1958.\n\n Reiss, R.-D. and Thomas, M., *Statistical Analysis of Extreme Values from\n Insurance, Finance, Hydrology and Other Fields*, Basel: Birkhauser Verlag,\n 2001.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> mu, beta = 0, 0.1 # location and scale\n >>> s = np.random.gumbel(mu, beta, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 30, normed=True)\n >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)\n ... * np.exp( -np.exp( -(bins - mu) /beta) ),\n ... linewidth=2, color='r')\n >>> plt.show()\n\n Show how an extreme value distribution can arise from a Gaussian process\n and compare to a Gaussian:\n\n >>> means = []\n >>> maxima = []\n >>> for i in range(0,1000) :\n ... a = np.random.normal(mu, beta, 1000)\n ... means.append(a.mean())\n ... maxima.append(a.max())\n >>> count, bins, ignored = plt.hist(maxima, 30, normed=True)\n >>> beta = np.std(maxima)*np.pi/np.sqrt(6)""\n >>> mu = np.mean(maxima) - 0.57721*beta\n >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)\n ... * np.exp(-np.exp(-(bins - mu)/beta)),\n ... linewidth=2, color='r')\n >>> plt.plot(bins, 1/(beta * np.sqrt(2 * np.pi))\n ... * np.exp(-(bins - mu)**2 / (2 * beta**2)),\n ... linewidth=2, color='g')\n >>> plt.show()\n\n ";
-static char __pyx_k_245[] = "RandomState.logistic (line 2923)";
-static char __pyx_k_246[] = "\n logistic(loc=0.0, scale=1.0, size=None)\n\n Draw samples from a Logistic distribution.\n\n Samples are drawn from a Logistic distribution with specified\n parameters, loc (location or mean, also median), and scale (>0).\n\n Parameters\n ----------\n loc : float\n The location (mean, also median) of the distribution.\n scale : float > 0.\n The scale (spread) of the distribution.\n size : {tuple, int}\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : {ndarray, scalar}\n Drawn samples.\n\n See Also\n --------\n scipy.stats.distributions.logistic : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Logistic distribution is\n\n .. math:: P(x) = \\frac{e^{-(x-\\mu)/s}}{s(1+e^{-(x-\\mu)/s})^2},\n\n where :math:`\\mu` = location and :math:`s` = scale.\n\n The Logistic distribution is used in Extreme Value problems where it\n can act as a mixture of Gumbel distributions, in Epidemiology, and by\n the World Chess Federation (FIDE) where it is used in the Elo ranking\n system, assuming the performance of each player is a logistically\n distributed random variable.\n\n References\n ----------\n .. [1] Reiss, R.-D. and Thomas M. (2001), Statistical Analysis of Extreme\n Values, from Insurance, Finance, Hydrology and Other Fields,\n Birkhauser Verlag, Basel, pp 132-133.\n .. [2] Weisstein, Eric W. \"Logistic Distribution.\" From\n MathWorld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/LogisticDistribution.html\n .. [3] Wikipedia, \"Logistic-distribution\",\n http://en.wikipedia.org/wiki/Logistic-distribution\n\n Examples\n "" --------\n Draw samples from the distribution:\n\n >>> loc, scale = 10, 1\n >>> s = np.random.logistic(loc, scale, 10000)\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, bins=50)\n\n Plot against the distribution:\n\n >>> def logist(x, loc, scale):\n ... return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2)\n >>> plt.plot(bins, logist(bins, loc, scale)*count.max()/\\\n ... logist(bins, loc, scale).max())\n >>> plt.show()\n\n ";
-static char __pyx_k_247[] = "RandomState.lognormal (line 3011)";
-static char __pyx_k_248[] = "\n lognormal(mean=0.0, sigma=1.0, size=None)\n\n Return samples drawn from a log-normal distribution.\n\n Draw samples from a log-normal distribution with specified mean,\n standard deviation, and array shape. Note that the mean and standard\n deviation are not the values for the distribution itself, but of the\n underlying normal distribution it is derived from.\n\n Parameters\n ----------\n mean : float\n Mean value of the underlying normal distribution\n sigma : float, > 0.\n Standard deviation of the underlying normal distribution\n size : tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : ndarray or float\n The desired samples. An array of the same shape as `size` if given,\n if `size` is None a float is returned.\n\n See Also\n --------\n scipy.stats.lognorm : probability density function, distribution,\n cumulative density function, etc.\n\n Notes\n -----\n A variable `x` has a log-normal distribution if `log(x)` is normally\n distributed. The probability density function for the log-normal\n distribution is:\n\n .. math:: p(x) = \\frac{1}{\\sigma x \\sqrt{2\\pi}}\n e^{(-\\frac{(ln(x)-\\mu)^2}{2\\sigma^2})}\n\n where :math:`\\mu` is the mean and :math:`\\sigma` is the standard\n deviation of the normally distributed logarithm of the variable.\n A log-normal distribution results if a random variable is the *product*\n of a large number of independent, identically-distributed variables in\n the same way that a normal distribution results if the variable is the\n *sum* of a large number of independent, identically-distributed\n variables.\n\n Reference""s\n ----------\n Limpert, E., Stahel, W. A., and Abbt, M., \"Log-normal Distributions\n across the Sciences: Keys and Clues,\" *BioScience*, Vol. 51, No. 5,\n May, 2001. http://stat.ethz.ch/~stahel/lognormal/bioscience.pdf\n\n Reiss, R.D. and Thomas, M., *Statistical Analysis of Extreme Values*,\n Basel: Birkhauser Verlag, 2001, pp. 31-32.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> mu, sigma = 3., 1. # mean and standard deviation\n >>> s = np.random.lognormal(mu, sigma, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 100, normed=True, align='mid')\n\n >>> x = np.linspace(min(bins), max(bins), 10000)\n >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))\n ... / (x * sigma * np.sqrt(2 * np.pi)))\n\n >>> plt.plot(x, pdf, linewidth=2, color='r')\n >>> plt.axis('tight')\n >>> plt.show()\n\n Demonstrate that taking the products of random samples from a uniform\n distribution can be fit well by a log-normal probability density function.\n\n >>> # Generate a thousand samples: each is the product of 100 random\n >>> # values, drawn from a uniform distribution.\n >>> b = []\n >>> for i in range(1000):\n ... a = 10. + np.random.random(100)\n ... b.append(np.product(a))\n\n >>> b = np.array(b) / np.min(b) # scale values to be positive\n >>> count, bins, ignored = plt.hist(b, 100, normed=True, align='center')\n >>> sigma = np.std(np.log(b))\n >>> mu = np.mean(np.log(b))\n\n >>> x = np.linspace(min(bins), max(bins), 10000)\n >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))\n ... / (x * sigma * np.sqrt(2 * np.pi)))\n\n >>> plt.plot(x, pdf, co""lor='r', linewidth=2)\n >>> plt.show()\n\n ";
-static char __pyx_k_249[] = "RandomState.rayleigh (line 3132)";
-static char __pyx_k_250[] = "\n rayleigh(scale=1.0, size=None)\n\n Draw samples from a Rayleigh distribution.\n\n The :math:`\\chi` and Weibull distributions are generalizations of the\n Rayleigh.\n\n Parameters\n ----------\n scale : scalar\n Scale, also equals the mode. Should be >= 0.\n size : int or tuple of ints, optional\n Shape of the output. Default is None, in which case a single\n value is returned.\n\n Notes\n -----\n The probability density function for the Rayleigh distribution is\n\n .. math:: P(x;scale) = \\frac{x}{scale^2}e^{\\frac{-x^2}{2 \\cdotp scale^2}}\n\n The Rayleigh distribution arises if the orthogonal components of the\n wind velocity are both Gaussian variables; the wind speed then follows\n a Rayleigh distribution. The Rayleigh distribution is used to model the\n expected output from wind turbines.\n\n References\n ----------\n .. [1] Brighton Webs Ltd., Rayleigh Distribution,\n http://www.brighton-webs.co.uk/distributions/rayleigh.asp\n .. [2] Wikipedia, \"Rayleigh distribution\"\n http://en.wikipedia.org/wiki/Rayleigh_distribution\n\n Examples\n --------\n Draw values from the distribution and plot the histogram\n\n >>> import matplotlib.pyplot as plt\n >>> values = plt.hist(np.random.rayleigh(3, 100000), bins=200, normed=True)\n\n Wave heights tend to follow a Rayleigh distribution. If the mean wave\n height is 1 meter, what fraction of waves are likely to be larger than 3\n meters?\n\n >>> meanvalue = 1\n >>> modevalue = np.sqrt(2 / np.pi) * meanvalue\n >>> s = np.random.rayleigh(modevalue, 1000000)\n\n The percentage of waves larger than 3 meters is:\n\n >>> 100.*sum(s>3)/1000000.\n 0.087300000000000003\n\n ";
-static char __pyx_k_251[] = "RandomState.wald (line 3204)";
-static char __pyx_k_252[] = "\n wald(mean, scale, size=None)\n\n Draw samples from a Wald, or Inverse Gaussian, distribution.\n\n As the scale approaches infinity, the distribution becomes more like a\n Gaussian.\n\n Some references claim that the Wald is an Inverse Gaussian with mean=1, but\n this is by no means universal.\n\n The Inverse Gaussian distribution was first studied in relationship to\n Brownian motion. In 1956 M.C.K. Tweedie used the name Inverse Gaussian\n because there is an inverse relationship between the time to cover a unit\n distance and distance covered in unit time.\n\n Parameters\n ----------\n mean : scalar\n Distribution mean, should be > 0.\n scale : scalar\n Scale parameter, should be >= 0.\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n\n Returns\n -------\n samples : ndarray or scalar\n Drawn sample, all greater than zero.\n\n Notes\n -----\n The probability density function for the Wald distribution is\n\n .. math:: P(x;mean,scale) = \\sqrt{\\frac{scale}{2\\pi x^3}}e^\n \\frac{-scale(x-mean)^2}{2\\cdotp mean^2x}\n\n As noted above, the Inverse Gaussian distribution first arose from attempts\n to model Brownian motion. It is also a competitor to the Weibull for use in\n reliability modeling and modeling stock returns and interest rate\n processes.\n\n References\n ----------\n .. [1] Brighton Webs Ltd., Wald Distribution,\n http://www.brighton-webs.co.uk/distributions/wald.asp\n .. [2] Chhikara, Raj S., and Folks, J. Leroy, \"The Inverse Gaussian\n Distribution: Theory, Methodology, and Applications\", CRC Press,\n 1988.\n .. [3] Wikipedia, \"Wald distribu""tion\"\n http://en.wikipedia.org/wiki/Wald_distribution\n\n Examples\n --------\n Draw values from the distribution and plot the histogram:\n\n >>> import matplotlib.pyplot as plt\n >>> h = plt.hist(np.random.wald(3, 2, 100000), bins=200, normed=True)\n >>> plt.show()\n\n ";
-static char __pyx_k_253[] = "RandomState.triangular (line 3290)";
-static char __pyx_k_254[] = "\n triangular(left, mode, right, size=None)\n\n Draw samples from the triangular distribution.\n\n The triangular distribution is a continuous probability distribution with\n lower limit left, peak at mode, and upper limit right. Unlike the other\n distributions, these parameters directly define the shape of the pdf.\n\n Parameters\n ----------\n left : scalar\n Lower limit.\n mode : scalar\n The value where the peak of the distribution occurs.\n The value should fulfill the condition ``left <= mode <= right``.\n right : scalar\n Upper limit, should be larger than `left`.\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n\n Returns\n -------\n samples : ndarray or scalar\n The returned samples all lie in the interval [left, right].\n\n Notes\n -----\n The probability density function for the Triangular distribution is\n\n .. math:: P(x;l, m, r) = \\begin{cases}\n \\frac{2(x-l)}{(r-l)(m-l)}& \\text{for $l \\leq x \\leq m$},\\\\\n \\frac{2(m-x)}{(r-l)(r-m)}& \\text{for $m \\leq x \\leq r$},\\\\\n 0& \\text{otherwise}.\n \\end{cases}\n\n The triangular distribution is often used in ill-defined problems where the\n underlying distribution is not known, but some knowledge of the limits and\n mode exists. Often it is used in simulations.\n\n References\n ----------\n .. [1] Wikipedia, \"Triangular distribution\"\n http://en.wikipedia.org/wiki/Triangular_distribution\n\n Examples\n --------\n Draw values from the distribution and plot the histogram:\n\n >>> import matplotlib.pyplot as plt\n >>> h = plt.hist(np.random.triangular(-3, 0, 8, 100000), bins=""200,\n ... normed=True)\n >>> plt.show()\n\n ";
-static char __pyx_k_255[] = "RandomState.binomial (line 3378)";
-static char __pyx_k_256[] = "\n binomial(n, p, size=None)\n\n Draw samples from a binomial distribution.\n\n Samples are drawn from a Binomial distribution with specified\n parameters, n trials and p probability of success where\n n is an integer > 0 and p is in the interval [0,1]. (n may be\n input as a float, but it is truncated to an integer in use)\n\n Parameters\n ----------\n n : float (but truncated to an integer)\n parameter, > 0.\n p : float\n parameter, >= 0 and <=1.\n size : {tuple, int}\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : {ndarray, scalar}\n where the values are all integers in [0, n].\n\n See Also\n --------\n scipy.stats.distributions.binom : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Binomial distribution is\n\n .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N},\n\n where :math:`n` is the number of trials, :math:`p` is the probability\n of success, and :math:`N` is the number of successes.\n\n When estimating the standard error of a proportion in a population by\n using a random sample, the normal distribution works well unless the\n product p*n <= 5, where p = population proportion estimate, and n =\n number of samples, in which case the binomial distribution is used\n instead. For example, a sample of 15 people shows 4 who are left\n handed, and 11 who are right handed. Then p = 4/15 = 27%. 0.27*15 = 4,\n so the binomial distribution should be used in this case.\n\n References\n ----------\n .. [1] Dalgaard, Peter, \"Introductory Statistics with R\",\n Springer-Verlag, 2002.\n "" .. [2] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n Fifth Edition, 2002.\n .. [3] Lentner, Marvin, \"Elementary Applied Statistics\", Bogden\n and Quigley, 1972.\n .. [4] Weisstein, Eric W. \"Binomial Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/BinomialDistribution.html\n .. [5] Wikipedia, \"Binomial-distribution\",\n http://en.wikipedia.org/wiki/Binomial_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> n, p = 10, .5 # number of trials, probability of each trial\n >>> s = np.random.binomial(n, p, 1000)\n >>> # result of flipping a coin 10 times, tested 1000 times.\n\n A real world example. A company drills 9 wild-cat oil exploration\n wells, each with an estimated probability of success of 0.1. All nine\n wells fail. What is the probability of that happening?\n\n Let's do 20,000 trials of the model, and count the number that\n generate zero positive results.\n\n >>> sum(np.random.binomial(9,0.1,20000)==0)/20000.\n # answer = 0.38885, or 38%.\n\n ";
-static char __pyx_k_257[] = "RandomState.negative_binomial (line 3486)";
-static char __pyx_k_258[] = "\n negative_binomial(n, p, size=None)\n\n Draw samples from a negative binomial distribution.\n\n Samples are drawn from a negative binomial distribution with specified\n parameters, `n` trials and `p` probability of success where `n` is an\n integer > 0 and `p` is in the interval [0, 1].\n\n Parameters\n ----------\n n : int\n Parameter, > 0.\n p : float\n Parameter, >= 0 and <=1.\n size : int or tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : int or ndarray of ints\n Drawn samples.\n\n Notes\n -----\n The probability density for the Negative Binomial distribution is\n\n .. math:: P(N;n,p) = \\binom{N+n-1}{n-1}p^{n}(1-p)^{N},\n\n where :math:`n-1` is the number of successes, :math:`p` is the probability\n of success, and :math:`N+n-1` is the number of trials.\n\n The negative binomial distribution gives the probability of n-1 successes\n and N failures in N+n-1 trials, and success on the (N+n)th trial.\n\n If one throws a die repeatedly until the third time a \"1\" appears, then the\n probability distribution of the number of non-\"1\"s that appear before the\n third \"1\" is a negative binomial distribution.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Negative Binomial Distribution.\" From\n MathWorld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/NegativeBinomialDistribution.html\n .. [2] Wikipedia, \"Negative binomial distribution\",\n http://en.wikipedia.org/wiki/Negative_binomial_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n A real world example. A company drills wild-cat oil exploration well""s, each\n with an estimated probability of success of 0.1. What is the probability\n of having one success for each successive well, that is, what is the\n probability of a single success after drilling 5 wells, after 6 wells,\n etc.?\n\n >>> s = np.random.negative_binomial(1, 0.1, 100000)\n >>> for i in range(1, 11):\n ... probability = sum(s<i) / 100000.\n ... print i, \"wells drilled, probability of one success =\", probability\n\n ";
-static char __pyx_k_259[] = "RandomState.poisson (line 3581)";
-static char __pyx_k_260[] = "\n poisson(lam=1.0, size=None)\n\n Draw samples from a Poisson distribution.\n\n The Poisson distribution is the limit of the Binomial\n distribution for large N.\n\n Parameters\n ----------\n lam : float\n Expectation of interval, should be >= 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Notes\n -----\n The Poisson distribution\n\n .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!}\n\n For events with an expected separation :math:`\\lambda` the Poisson\n distribution :math:`f(k; \\lambda)` describes the probability of\n :math:`k` events occurring within the observed interval :math:`\\lambda`.\n\n Because the output is limited to the range of the C long type, a\n ValueError is raised when `lam` is within 10 sigma of the maximum\n representable value.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Poisson Distribution.\" From MathWorld--A Wolfram\n Web Resource. http://mathworld.wolfram.com/PoissonDistribution.html\n .. [2] Wikipedia, \"Poisson distribution\",\n http://en.wikipedia.org/wiki/Poisson_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> import numpy as np\n >>> s = np.random.poisson(5, 10000)\n\n Display histogram of the sample:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 14, normed=True)\n >>> plt.show()\n\n ";
-static char __pyx_k_261[] = "RandomState.zipf (line 3652)";
-static char __pyx_k_262[] = "\n zipf(a, size=None)\n\n Draw samples from a Zipf distribution.\n\n Samples are drawn from a Zipf distribution with specified parameter\n `a` > 1.\n\n The Zipf distribution (also known as the zeta distribution) is a\n discrete probability distribution that satisfies Zipf's law: the\n frequency of an item is inversely proportional to its rank in a\n frequency table.\n\n Parameters\n ----------\n a : float > 1\n Distribution parameter.\n size : int or tuple of int, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn; a single integer is equivalent in\n its result to providing a mono-tuple, i.e., a 1-D array of length\n *size* is returned. The default is None, in which case a single\n scalar is returned.\n\n Returns\n -------\n samples : scalar or ndarray\n The returned samples are greater than or equal to one.\n\n See Also\n --------\n scipy.stats.distributions.zipf : probability density function,\n distribution, or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Zipf distribution is\n\n .. math:: p(x) = \\frac{x^{-a}}{\\zeta(a)},\n\n where :math:`\\zeta` is the Riemann Zeta function.\n\n It is named for the American linguist George Kingsley Zipf, who noted\n that the frequency of any word in a sample of a language is inversely\n proportional to its rank in the frequency table.\n\n References\n ----------\n Zipf, G. K., *Selected Studies of the Principle of Relative Frequency\n in Language*, Cambridge, MA: Harvard Univ. Press, 1932.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> a = 2. # parameter\n >>> s = np.random.zipf""(a, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> import scipy.special as sps\n >>> # Truncate s values at 50 so plot is interesting\n >>> count, bins, ignored = plt.hist(s[s<50], 50, normed=True)\n >>> x = np.arange(1., 50.)\n >>> y = x**(-a)/sps.zetac(a)\n >>> plt.plot(x, y/max(y), linewidth=2, color='r')\n >>> plt.show()\n\n ";
-static char __pyx_k_263[] = "RandomState.geometric (line 3740)";
-static char __pyx_k_264[] = "\n geometric(p, size=None)\n\n Draw samples from the geometric distribution.\n\n Bernoulli trials are experiments with one of two outcomes:\n success or failure (an example of such an experiment is flipping\n a coin). The geometric distribution models the number of trials\n that must be run in order to achieve success. It is therefore\n supported on the positive integers, ``k = 1, 2, ...``.\n\n The probability mass function of the geometric distribution is\n\n .. math:: f(k) = (1 - p)^{k - 1} p\n\n where `p` is the probability of success of an individual trial.\n\n Parameters\n ----------\n p : float\n The probability of success of an individual trial.\n size : tuple of ints\n Number of values to draw from the distribution. The output\n is shaped according to `size`.\n\n Returns\n -------\n out : ndarray\n Samples from the geometric distribution, shaped according to\n `size`.\n\n Examples\n --------\n Draw ten thousand values from the geometric distribution,\n with the probability of an individual success equal to 0.35:\n\n >>> z = np.random.geometric(p=0.35, size=10000)\n\n How many trials succeeded after a single run?\n\n >>> (z == 1).sum() / 10000.\n 0.34889999999999999 #random\n\n ";
-static char __pyx_k_265[] = "RandomState.hypergeometric (line 3806)";
-static char __pyx_k_266[] = "\n hypergeometric(ngood, nbad, nsample, size=None)\n\n Draw samples from a Hypergeometric distribution.\n\n Samples are drawn from a Hypergeometric distribution with specified\n parameters, ngood (ways to make a good selection), nbad (ways to make\n a bad selection), and nsample = number of items sampled, which is less\n than or equal to the sum ngood + nbad.\n\n Parameters\n ----------\n ngood : float (but truncated to an integer)\n parameter, > 0.\n nbad : float\n parameter, >= 0.\n nsample : float\n parameter, > 0 and <= ngood+nbad\n size : {tuple, int}\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : {ndarray, scalar}\n where the values are all integers in [0, n].\n\n See Also\n --------\n scipy.stats.distributions.hypergeom : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Hypergeometric distribution is\n\n .. math:: P(x) = \\frac{\\binom{m}{n}\\binom{N-m}{n-x}}{\\binom{N}{n}},\n\n where :math:`0 \\le x \\le m` and :math:`n+m-N \\le x \\le n`\n\n for P(x) the probability of x successes, n = ngood, m = nbad, and\n N = number of samples.\n\n Consider an urn with black and white marbles in it, ngood of them\n are black and nbad are white. If you draw nsample marbles without\n replacement, then the Hypergeometric distribution describes the\n distribution of black marbles in the drawn sample.\n\n Note that this distribution is very similar to the Binomial\n distribution, except that in this case, samples are drawn without\n replacement, whereas in the Binomial case samples are drawn wit""h\n replacement (or the sample space is infinite). As the sample space\n becomes large, this distribution approaches the Binomial.\n\n References\n ----------\n .. [1] Lentner, Marvin, \"Elementary Applied Statistics\", Bogden\n and Quigley, 1972.\n .. [2] Weisstein, Eric W. \"Hypergeometric Distribution.\" From\n MathWorld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/HypergeometricDistribution.html\n .. [3] Wikipedia, \"Hypergeometric-distribution\",\n http://en.wikipedia.org/wiki/Hypergeometric-distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> ngood, nbad, nsamp = 100, 2, 10\n # number of good, number of bad, and number of samples\n >>> s = np.random.hypergeometric(ngood, nbad, nsamp, 1000)\n >>> hist(s)\n # note that it is very unlikely to grab both bad items\n\n Suppose you have an urn with 15 white and 15 black marbles.\n If you pull 15 marbles at random, how likely is it that\n 12 or more of them are one color?\n\n >>> s = np.random.hypergeometric(15, 15, 15, 100000)\n >>> sum(s>=12)/100000. + sum(s<=3)/100000.\n # answer = 0.003 ... pretty unlikely!\n\n ";
-static char __pyx_k_267[] = "RandomState.logseries (line 3925)";
-static char __pyx_k_268[] = "\n logseries(p, size=None)\n\n Draw samples from a Logarithmic Series distribution.\n\n Samples are drawn from a Log Series distribution with specified\n parameter, p (probability, 0 < p < 1).\n\n Parameters\n ----------\n p : float\n Shape parameter (probability), 0 < p < 1.\n size : {tuple, int}\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : {ndarray, scalar}\n Drawn samples.\n\n See Also\n --------\n scipy.stats.distributions.logser : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Log Series distribution is\n\n .. math:: P(k) = \\frac{-p^k}{k \\ln(1-p)},\n\n where p = probability.\n\n The Log Series distribution is frequently used to represent species\n richness and occurrence, first proposed by Fisher, Corbet, and\n Williams in 1943 [2]. It may also be used to model the numbers of\n occupants seen in cars [3].\n\n References\n ----------\n .. [1] Buzas, Martin A.; Culver, Stephen J., Understanding regional\n species diversity through the log series distribution of\n occurrences: BIODIVERSITY RESEARCH Diversity & Distributions,\n Volume 5, Number 5, September 1999, pp. 187-195(9).\n .. [2] Fisher, R.A., A.S. Corbet, and C.B. Williams. 1943. The\n relation between the number of species and the number of\n individuals in a random sample of an animal population.\n Journal of Animal Ecology, 12:42-58.\n .. [3] D. J. Hand, F. Daly, D. Lunn, E. Ostrowski, A Handbook of Small\n Data Sets, CRC Press, 1994.\n .. [4] Wikipedia, \"Log""arithmic-distribution\",\n http://en.wikipedia.org/wiki/Logarithmic-distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> a = .6\n >>> s = np.random.logseries(a, 10000)\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s)\n\n # plot against distribution\n\n >>> def logseries(k, p):\n ... return -p**k/(k*np.log(1-p))\n >>> plt.plot(bins, logseries(bins, a)*count.max()/\n ... logseries(bins, a).max(), 'r')\n >>> plt.show()\n\n ";
-static char __pyx_k_269[] = "RandomState.multivariate_normal (line 4020)";
-static char __pyx_k_270[] = "\n multivariate_normal(mean, cov[, size])\n\n Draw random samples from a multivariate normal distribution.\n\n The multivariate normal, multinormal or Gaussian distribution is a\n generalization of the one-dimensional normal distribution to higher\n dimensions. Such a distribution is specified by its mean and\n covariance matrix. These parameters are analogous to the mean\n (average or \"center\") and variance (standard deviation, or \"width,\"\n squared) of the one-dimensional normal distribution.\n\n Parameters\n ----------\n mean : 1-D array_like, of length N\n Mean of the N-dimensional distribution.\n cov : 2-D array_like, of shape (N, N)\n Covariance matrix of the distribution. Must be symmetric and\n positive semi-definite for \"physically meaningful\" results.\n size : tuple of ints, optional\n Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are\n generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because\n each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.\n If no shape is specified, a single (`N`-D) sample is returned.\n\n Returns\n -------\n out : ndarray\n The drawn samples, of shape *size*, if that was provided. If not,\n the shape is ``(N,)``.\n\n In other words, each entry ``out[i,j,...,:]`` is an N-dimensional\n value drawn from the distribution.\n\n Notes\n -----\n The mean is a coordinate in N-dimensional space, which represents the\n location where samples are most likely to be generated. This is\n analogous to the peak of the bell curve for the one-dimensional or\n univariate normal distribution.\n\n Covariance indicates the level to which two variables vary together.\n From the multivariate normal distribution, we draw ""N-dimensional\n samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix\n element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`.\n The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its\n \"spread\").\n\n Instead of specifying the full covariance matrix, popular\n approximations include:\n\n - Spherical covariance (*cov* is a multiple of the identity matrix)\n - Diagonal covariance (*cov* has non-negative elements, and only on\n the diagonal)\n\n This geometrical property can be seen in two dimensions by plotting\n generated data-points:\n\n >>> mean = [0,0]\n >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis\n\n >>> import matplotlib.pyplot as plt\n >>> x,y = np.random.multivariate_normal(mean,cov,5000).T\n >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()\n\n Note that the covariance matrix must be non-negative definite.\n\n References\n ----------\n Papoulis, A., *Probability, Random Variables, and Stochastic Processes*,\n 3rd ed., New York: McGraw-Hill, 1991.\n\n Duda, R. O., Hart, P. E., and Stork, D. G., *Pattern Classification*,\n 2nd ed., New York: Wiley, 2001.\n\n Examples\n --------\n >>> mean = (1,2)\n >>> cov = [[1,0],[0,1]]\n >>> x = np.random.multivariate_normal(mean,cov,(3,3))\n >>> x.shape\n (3, 3, 2)\n\n The following is probably true, given that 0.6 is roughly twice the\n standard deviation:\n\n >>> print list( (x[0,0,:] - mean) < 0.6 )\n [True, True]\n\n ";
-static char __pyx_k_271[] = "RandomState.multinomial (line 4152)";
-static char __pyx_k_272[] = "\n multinomial(n, pvals, size=None)\n\n Draw samples from a multinomial distribution.\n\n The multinomial distribution is a multivariate generalisation of the\n binomial distribution. Take an experiment with one of ``p``\n possible outcomes. An example of such an experiment is throwing a die,\n where the outcome can be 1 through 6. Each sample drawn from the\n distribution represents `n` such experiments. Its values,\n ``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the outcome\n was ``i``.\n\n Parameters\n ----------\n n : int\n Number of experiments.\n pvals : sequence of floats, length p\n Probabilities of each of the ``p`` different outcomes. These\n should sum to 1 (however, the last element is always assumed to\n account for the remaining probability, as long as\n ``sum(pvals[:-1]) <= 1``).\n size : tuple of ints\n Given a `size` of ``(M, N, K)``, then ``M*N*K`` samples are drawn,\n and the output shape becomes ``(M, N, K, p)``, since each sample\n has shape ``(p,)``.\n\n Examples\n --------\n Throw a die 20 times:\n\n >>> np.random.multinomial(20, [1/6.]*6, size=1)\n array([[4, 1, 7, 5, 2, 1]])\n\n It landed 4 times on 1, once on 2, etc.\n\n Now, throw the die 20 times, and 20 times again:\n\n >>> np.random.multinomial(20, [1/6.]*6, size=2)\n array([[3, 4, 3, 3, 4, 3],\n [2, 4, 3, 4, 0, 7]])\n\n For the first run, we threw 3 times 1, 4 times 2, etc. For the second,\n we threw 2 times 1, 4 times 2, etc.\n\n A loaded die is more likely to land on number 6:\n\n >>> np.random.multinomial(100, [1/7.]*5)\n array([13, 16, 13, 16, 42])\n\n ";
-static char __pyx_k_273[] = "RandomState.dirichlet (line 4245)";
-static char __pyx_k_274[] = "\n dirichlet(alpha, size=None)\n\n Draw samples from the Dirichlet distribution.\n\n Draw `size` samples of dimension k from a Dirichlet distribution. A\n Dirichlet-distributed random variable can be seen as a multivariate\n generalization of a Beta distribution. The Dirichlet pdf is the conjugate\n prior of a multinomial in Bayesian inference.\n\n Parameters\n ----------\n alpha : array\n Parameter of the distribution (k dimension for sample of\n dimension k).\n size : int or tuple of ints, optional\n Number of samples to draw.\n\n Returns\n -------\n samples : ndarray,\n The drawn samples, of shape ``(size, k)``.\n\n Notes\n -----\n .. math:: X \\approx \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i}\n\n Uses the following property for computation: for each dimension,\n draw a random sample y_i from a standard gamma generator of shape\n `alpha_i`, then\n :math:`X = \\frac{1}{\\sum_{i=1}^k{y_i}} (y_1, \\ldots, y_k)` is\n Dirichlet distributed.\n\n References\n ----------\n .. [1] David MacKay, \"Information Theory, Inference and Learning\n Algorithms,\" chapter 23,\n http://www.inference.phy.cam.ac.uk/mackay/\n .. [2] Wikipedia, \"Dirichlet distribution\",\n http://en.wikipedia.org/wiki/Dirichlet_distribution\n\n Examples\n --------\n Taking an example cited in Wikipedia, this distribution can be used if\n one wanted to cut strings (each of initial length 1.0) into K pieces\n with different lengths, where each piece had, on average, a designated\n average length, but allowing some variation in the relative sizes of the\n pieces.\n\n >>> s = np.random.dirichlet((10, 5, 3), 20).transpose()\n\n >>> import matplotlib.pyplot as plt\n >>> plt.barh(range(20), s[0])\n >>> plt.barh(range(20), s[1], left=s[0], color='g')""\n >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')\n >>> plt.title(\"Lengths of Strings\")\n\n ";
-static char __pyx_k_275[] = "RandomState.shuffle (line 4361)";
-static char __pyx_k_276[] = "\n shuffle(x)\n\n Modify a sequence in-place by shuffling its contents.\n\n Parameters\n ----------\n x : array_like\n The array or list to be shuffled.\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> arr = np.arange(10)\n >>> np.random.shuffle(arr)\n >>> arr\n [1 7 5 2 9 4 3 6 0 8]\n\n This function only shuffles the array along the first index of a\n multi-dimensional array:\n\n >>> arr = np.arange(9).reshape((3, 3))\n >>> np.random.shuffle(arr)\n >>> arr\n array([[3, 4, 5],\n [6, 7, 8],\n [0, 1, 2]])\n\n ";
-static char __pyx_k_277[] = "RandomState.permutation (line 4419)";
-static char __pyx_k_278[] = "\n permutation(x)\n\n Randomly permute a sequence, or return a permuted range.\n\n If `x` is a multi-dimensional array, it is only shuffled along its\n first index.\n\n Parameters\n ----------\n x : int or array_like\n If `x` is an integer, randomly permute ``np.arange(x)``.\n If `x` is an array, make a copy and shuffle the elements\n randomly.\n\n Returns\n -------\n out : ndarray\n Permuted sequence or array range.\n\n Examples\n --------\n >>> np.random.permutation(10)\n array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6])\n\n >>> np.random.permutation([1, 4, 9, 12, 15])\n array([15, 1, 9, 4, 12])\n\n >>> arr = np.arange(9).reshape((3, 3))\n >>> np.random.permutation(arr)\n array([[6, 7, 8],\n [0, 1, 2],\n [3, 4, 5]])\n\n ";
+static char __pyx_k_112[] = "sigma <= 0";
+static char __pyx_k_114[] = "sigma <= 0.0";
+static char __pyx_k_118[] = "scale <= 0.0";
+static char __pyx_k_120[] = "mean <= 0";
+static char __pyx_k_123[] = "mean <= 0.0";
+static char __pyx_k_126[] = "left > mode";
+static char __pyx_k_128[] = "mode > right";
+static char __pyx_k_130[] = "left == right";
+static char __pyx_k_135[] = "n < 0";
+static char __pyx_k_137[] = "p < 0";
+static char __pyx_k_139[] = "p > 1";
+static char __pyx_k_144[] = "n <= 0";
+static char __pyx_k_152[] = "lam < 0";
+static char __pyx_k_154[] = "lam value too large";
+static char __pyx_k_157[] = "lam value too large.";
+static char __pyx_k_159[] = "a <= 1.0";
+static char __pyx_k_162[] = "p < 0.0";
+static char __pyx_k_164[] = "p > 1.0";
+static char __pyx_k_168[] = "ngood < 0";
+static char __pyx_k_170[] = "nbad < 0";
+static char __pyx_k_172[] = "nsample < 1";
+static char __pyx_k_174[] = "ngood + nbad < nsample";
+static char __pyx_k_180[] = "p <= 0.0";
+static char __pyx_k_182[] = "p >= 1.0";
+static char __pyx_k_186[] = "mean must be 1 dimensional";
+static char __pyx_k_188[] = "cov must be 2 dimensional and square";
+static char __pyx_k_190[] = "mean and cov must have same length";
+static char __pyx_k_193[] = "numpy.dual";
+static char __pyx_k_194[] = "sum(pvals[:-1]) > 1.0";
+static char __pyx_k_199[] = "standard_exponential";
+static char __pyx_k_200[] = "noncentral_chisquare";
+static char __pyx_k_201[] = "RandomState.random_sample (line 722)";
+static char __pyx_k_202[] = "\n random_sample(size=None)\n\n Return random floats in the half-open interval [0.0, 1.0).\n\n Results are from the \"continuous uniform\" distribution over the\n stated interval. To sample :math:`Unif[a, b), b > a` multiply\n the output of `random_sample` by `(b-a)` and add `a`::\n\n (b - a) * random_sample() + a\n\n Parameters\n ----------\n size : int or tuple of ints, optional\n Defines the shape of the returned array of random floats. If None\n (the default), returns a single float.\n\n Returns\n -------\n out : float or ndarray of floats\n Array of random floats of shape `size` (unless ``size=None``, in which\n case a single float is returned).\n\n Examples\n --------\n >>> np.random.random_sample()\n 0.47108547995356098\n >>> type(np.random.random_sample())\n <type 'float'>\n >>> np.random.random_sample((5,))\n array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428])\n\n Three-by-two array of random numbers from [-5, 0):\n\n >>> 5 * np.random.random_sample((3, 2)) - 5\n array([[-3.99149989, -0.52338984],\n [-2.99091858, -0.79479508],\n [-1.23204345, -1.75224494]])\n\n ";
+static char __pyx_k_203[] = "RandomState.tomaxint (line 765)";
+static char __pyx_k_204[] = "\n tomaxint(size=None)\n\n Random integers between 0 and ``sys.maxint``, inclusive.\n\n Return a sample of uniformly distributed random integers in the interval\n [0, ``sys.maxint``].\n\n Parameters\n ----------\n size : tuple of ints, int, optional\n Shape of output. If this is, for example, (m,n,k), m*n*k samples\n are generated. If no shape is specified, a single sample is\n returned.\n\n Returns\n -------\n out : ndarray\n Drawn samples, with shape `size`.\n\n See Also\n --------\n randint : Uniform sampling over a given half-open interval of integers.\n random_integers : Uniform sampling over a given closed interval of\n integers.\n\n Examples\n --------\n >>> RS = np.random.mtrand.RandomState() # need a RandomState object\n >>> RS.tomaxint((2,2,2))\n array([[[1170048599, 1600360186],\n [ 739731006, 1947757578]],\n [[1871712945, 752307660],\n [1601631370, 1479324245]]])\n >>> import sys\n >>> sys.maxint\n 2147483647\n >>> RS.tomaxint((2,2,2)) < sys.maxint\n array([[[ True, True],\n [ True, True]],\n [[ True, True],\n [ True, True]]], dtype=bool)\n\n ";
+static char __pyx_k_205[] = "RandomState.randint (line 812)";
+static char __pyx_k_206[] = "\n randint(low, high=None, size=None)\n\n Return random integers from `low` (inclusive) to `high` (exclusive).\n\n Return random integers from the \"discrete uniform\" distribution in the\n \"half-open\" interval [`low`, `high`). If `high` is None (the default),\n then results are from [0, `low`).\n\n Parameters\n ----------\n low : int\n Lowest (signed) integer to be drawn from the distribution (unless\n ``high=None``, in which case this parameter is the *highest* such\n integer).\n high : int, optional\n If provided, one above the largest (signed) integer to be drawn\n from the distribution (see above for behavior if ``high=None``).\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single int is\n returned.\n\n Returns\n -------\n out : int or ndarray of ints\n `size`-shaped array of random integers from the appropriate\n distribution, or a single such random int if `size` not provided.\n\n See Also\n --------\n random.random_integers : similar to `randint`, only for the closed\n interval [`low`, `high`], and 1 is the lowest value if `high` is\n omitted.\n\n Examples\n --------\n >>> np.random.randint(2, size=10)\n array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0])\n >>> np.random.randint(1, size=10)\n array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n\n Generate a 2 x 4 array of ints between 0 and 4, inclusive:\n\n >>> np.random.randint(5, size=(2, 4))\n array([[4, 0, 2, 1],\n [3, 2, 2, 0]])\n\n ";
+static char __pyx_k_207[] = "RandomState.bytes (line 892)";
+static char __pyx_k_208[] = "\n bytes(length)\n\n Return random bytes.\n\n Parameters\n ----------\n length : int\n Number of random bytes.\n\n Returns\n -------\n out : str\n String of length `length`.\n\n Examples\n --------\n >>> np.random.bytes(10)\n ' eh\\x85\\x022SZ\\xbf\\xa4' #random\n\n ";
+static char __pyx_k_209[] = "RandomState.choice (line 920)";
+static char __pyx_k_210[] = "\n choice(a, size=None, replace=True, p=None)\n\n Generates a random sample from a given 1-D array.\n\n .. versionadded:: 1.7.0\n\n Parameters\n ----------\n a : 1-D array-like or int\n If an ndarray, a random sample is generated from its elements.\n If an int, the random sample is generated as if a were np.arange(a)\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n replace : boolean, optional\n Whether the sample is with or without replacement\n p : 1-D array-like, optional\n The probabilities associated with each entry in a.\n If not given, the sample assumes a uniform distribution over all\n entries in a.\n\n Returns\n -------\n samples : 1-D ndarray, shape (size,)\n The generated random samples\n\n Raises\n ------\n ValueError\n If a is an int and less than zero, if a or p are not 1-dimensional,\n if a is an array-like of size 0, if p is not a vector of\n probabilities, if a and p have different lengths, or if\n replace=False and the sample size is greater than the population\n size\n\n See Also\n --------\n randint, shuffle, permutation\n\n Examples\n --------\n Generate a uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3)\n array([0, 3, 4])\n >>> #This is equivalent to np.random.randint(0,5,3)\n\n Generate a non-uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])\n array([3, 3, 0])\n\n Generate a uniform random sample from np.arange(5) of size 3 without\n replacement:\n\n >>> np.random.choice(5, 3, replace=False)\n array([3,1,0])\n "" >>> #This is equivalent to np.random.permutation(np.arange(5))[:3]\n\n Generate a non-uniform random sample from np.arange(5) of size\n 3 without replacement:\n\n >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])\n array([2, 3, 0])\n\n Any of the above can be repeated with an arbitrary array-like\n instead of just integers. For instance:\n\n >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']\n >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])\n array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],\n dtype='|S11')\n\n ";
+static char __pyx_k_211[] = "RandomState.uniform (line 1092)";
+static char __pyx_k_212[] = "\n uniform(low=0.0, high=1.0, size=1)\n\n Draw samples from a uniform distribution.\n\n Samples are uniformly distributed over the half-open interval\n ``[low, high)`` (includes low, but excludes high). In other words,\n any value within the given interval is equally likely to be drawn\n by `uniform`.\n\n Parameters\n ----------\n low : float, optional\n Lower boundary of the output interval. All values generated will be\n greater than or equal to low. The default value is 0.\n high : float\n Upper boundary of the output interval. All values generated will be\n less than high. The default value is 1.0.\n size : int or tuple of ints, optional\n Shape of output. If the given size is, for example, (m,n,k),\n m*n*k samples are generated. If no shape is specified, a single sample\n is returned.\n\n Returns\n -------\n out : ndarray\n Drawn samples, with shape `size`.\n\n See Also\n --------\n randint : Discrete uniform distribution, yielding integers.\n random_integers : Discrete uniform distribution over the closed\n interval ``[low, high]``.\n random_sample : Floats uniformly distributed over ``[0, 1)``.\n random : Alias for `random_sample`.\n rand : Convenience function that accepts dimensions as input, e.g.,\n ``rand(2,2)`` would generate a 2-by-2 array of floats,\n uniformly distributed over ``[0, 1)``.\n\n Notes\n -----\n The probability density function of the uniform distribution is\n\n .. math:: p(x) = \\frac{1}{b - a}\n\n anywhere within the interval ``[a, b)``, and zero elsewhere.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> s = np.random.uniform(-1,0,1000)\n\n All values are w""ithin the given interval:\n\n >>> np.all(s >= -1)\n True\n >>> np.all(s < 0)\n True\n\n Display the histogram of the samples, along with the\n probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 15, normed=True)\n >>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')\n >>> plt.show()\n\n ";
+static char __pyx_k_213[] = "RandomState.rand (line 1179)";
+static char __pyx_k_214[] = "\n rand(d0, d1, ..., dn)\n\n Random values in a given shape.\n\n Create an array of the given shape and populate it with\n random samples from a uniform distribution\n over ``[0, 1)``.\n\n Parameters\n ----------\n d0, d1, ..., dn : int, optional\n The dimensions of the returned array, should all be positive.\n If no argument is given a single Python float is returned.\n\n Returns\n -------\n out : ndarray, shape ``(d0, d1, ..., dn)``\n Random values.\n\n See Also\n --------\n random\n\n Notes\n -----\n This is a convenience function. If you want an interface that\n takes a shape-tuple as the first argument, refer to\n np.random.random_sample.\n\n Examples\n --------\n >>> np.random.rand(3,2)\n array([[ 0.14022471, 0.96360618], #random\n [ 0.37601032, 0.25528411], #random\n [ 0.49313049, 0.94909878]]) #random\n\n ";
+static char __pyx_k_215[] = "RandomState.randn (line 1223)";
+static char __pyx_k_216[] = "\n randn(d0, d1, ..., dn)\n\n Return a sample (or samples) from the \"standard normal\" distribution.\n\n If positive, int_like or int-convertible arguments are provided,\n `randn` generates an array of shape ``(d0, d1, ..., dn)``, filled\n with random floats sampled from a univariate \"normal\" (Gaussian)\n distribution of mean 0 and variance 1 (if any of the :math:`d_i` are\n floats, they are first converted to integers by truncation). A single\n float randomly sampled from the distribution is returned if no\n argument is provided.\n\n This is a convenience function. If you want an interface that takes a\n tuple as the first argument, use `numpy.random.standard_normal` instead.\n\n Parameters\n ----------\n d0, d1, ..., dn : int, optional\n The dimensions of the returned array, should be all positive.\n If no argument is given a single Python float is returned.\n\n Returns\n -------\n Z : ndarray or float\n A ``(d0, d1, ..., dn)``-shaped array of floating-point samples from\n the standard normal distribution, or a single such float if\n no parameters were supplied.\n\n See Also\n --------\n random.standard_normal : Similar, but takes a tuple as its argument.\n\n Notes\n -----\n For random samples from :math:`N(\\mu, \\sigma^2)`, use:\n\n ``sigma * np.random.randn(...) + mu``\n\n Examples\n --------\n >>> np.random.randn()\n 2.1923875335537315 #random\n\n Two-by-four array of samples from N(3, 6.25):\n\n >>> 2.5 * np.random.randn(2, 4) + 3\n array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], #random\n [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) #random\n\n ";
+static char __pyx_k_217[] = "RandomState.random_integers (line 1280)";
+static char __pyx_k_218[] = "\n random_integers(low, high=None, size=None)\n\n Return random integers between `low` and `high`, inclusive.\n\n Return random integers from the \"discrete uniform\" distribution in the\n closed interval [`low`, `high`]. If `high` is None (the default),\n then results are from [1, `low`].\n\n Parameters\n ----------\n low : int\n Lowest (signed) integer to be drawn from the distribution (unless\n ``high=None``, in which case this parameter is the *highest* such\n integer).\n high : int, optional\n If provided, the largest (signed) integer to be drawn from the\n distribution (see above for behavior if ``high=None``).\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single int is returned.\n\n Returns\n -------\n out : int or ndarray of ints\n `size`-shaped array of random integers from the appropriate\n distribution, or a single such random int if `size` not provided.\n\n See Also\n --------\n random.randint : Similar to `random_integers`, only for the half-open\n interval [`low`, `high`), and 0 is the lowest value if `high` is\n omitted.\n\n Notes\n -----\n To sample from N evenly spaced floating-point numbers between a and b,\n use::\n\n a + (b - a) * (np.random.random_integers(N) - 1) / (N - 1.)\n\n Examples\n --------\n >>> np.random.random_integers(5)\n 4\n >>> type(np.random.random_integers(5))\n <type 'int'>\n >>> np.random.random_integers(5, size=(3.,2.))\n array([[5, 4],\n [3, 3],\n [4, 5]])\n\n Choose five random numbers from the set of five evenly-spaced\n numbers between 0 and 2.5, inclusive (*i.e.*, from the set\n :math:`{0, 5/8, 10/8, 15/8, 20/8}`):\n""\n >>> 2.5 * (np.random.random_integers(5, size=(5,)) - 1) / 4.\n array([ 0.625, 1.25 , 0.625, 0.625, 2.5 ])\n\n Roll two six sided dice 1000 times and sum the results:\n\n >>> d1 = np.random.random_integers(1, 6, 1000)\n >>> d2 = np.random.random_integers(1, 6, 1000)\n >>> dsums = d1 + d2\n\n Display results as a histogram:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(dsums, 11, normed=True)\n >>> plt.show()\n\n ";
+static char __pyx_k_219[] = "RandomState.standard_normal (line 1358)";
+static char __pyx_k_220[] = "\n standard_normal(size=None)\n\n Returns samples from a Standard Normal distribution (mean=0, stdev=1).\n\n Parameters\n ----------\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n\n Returns\n -------\n out : float or ndarray\n Drawn samples.\n\n Examples\n --------\n >>> s = np.random.standard_normal(8000)\n >>> s\n array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, #random\n -0.38672696, -0.4685006 ]) #random\n >>> s.shape\n (8000,)\n >>> s = np.random.standard_normal(size=(3, 4, 2))\n >>> s.shape\n (3, 4, 2)\n\n ";
+static char __pyx_k_221[] = "RandomState.normal (line 1390)";
+static char __pyx_k_222[] = "\n normal(loc=0.0, scale=1.0, size=None)\n\n Draw random samples from a normal (Gaussian) distribution.\n\n The probability density function of the normal distribution, first\n derived by De Moivre and 200 years later by both Gauss and Laplace\n independently [2]_, is often called the bell curve because of\n its characteristic shape (see the example below).\n\n The normal distribution occurs often in nature. For example, it\n describes the commonly occurring distribution of samples influenced\n by a large number of tiny, random disturbances, each with its own\n unique distribution [2]_.\n\n Parameters\n ----------\n loc : float\n Mean (\"centre\") of the distribution.\n scale : float\n Standard deviation (spread or \"width\") of the distribution.\n size : tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n See Also\n --------\n scipy.stats.distributions.norm : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Gaussian distribution is\n\n .. math:: p(x) = \\frac{1}{\\sqrt{ 2 \\pi \\sigma^2 }}\n e^{ - \\frac{ (x - \\mu)^2 } {2 \\sigma^2} },\n\n where :math:`\\mu` is the mean and :math:`\\sigma` the standard deviation.\n The square of the standard deviation, :math:`\\sigma^2`, is called the\n variance.\n\n The function has its peak at the mean, and its \"spread\" increases with\n the standard deviation (the function reaches 0.607 times its maximum at\n :math:`x + \\sigma` and :math:`x - \\sigma` [2]_). This implies that\n `numpy.random.normal` is more likely to return samples lying close to the\n mean, rather than those far away.\n""\n References\n ----------\n .. [1] Wikipedia, \"Normal distribution\",\n http://en.wikipedia.org/wiki/Normal_distribution\n .. [2] P. R. Peebles Jr., \"Central Limit Theorem\" in \"Probability, Random\n Variables and Random Signal Principles\", 4th ed., 2001,\n pp. 51, 51, 125.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> mu, sigma = 0, 0.1 # mean and standard deviation\n >>> s = np.random.normal(mu, sigma, 1000)\n\n Verify the mean and the standard deviation:\n\n >>> abs(mu - np.mean(s)) < 0.01\n True\n\n >>> abs(sigma - np.std(s, ddof=1)) < 0.01\n True\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 30, normed=True)\n >>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *\n ... np.exp( - (bins - mu)**2 / (2 * sigma**2) ),\n ... linewidth=2, color='r')\n >>> plt.show()\n\n ";
+static char __pyx_k_223[] = "RandomState.standard_exponential (line 1603)";
+static char __pyx_k_224[] = "\n standard_exponential(size=None)\n\n Draw samples from the standard exponential distribution.\n\n `standard_exponential` is identical to the exponential distribution\n with a scale parameter of 1.\n\n Parameters\n ----------\n size : int or tuple of ints\n Shape of the output.\n\n Returns\n -------\n out : float or ndarray\n Drawn samples.\n\n Examples\n --------\n Output a 3x8000 array:\n\n >>> n = np.random.standard_exponential((3, 8000))\n\n ";
+static char __pyx_k_225[] = "RandomState.standard_gamma (line 1631)";
+static char __pyx_k_226[] = "\n standard_gamma(shape, size=None)\n\n Draw samples from a Standard Gamma distribution.\n\n Samples are drawn from a Gamma distribution with specified parameters,\n shape (sometimes designated \"k\") and scale=1.\n\n Parameters\n ----------\n shape : float\n Parameter, should be > 0.\n size : int or tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : ndarray or scalar\n The drawn samples.\n\n See Also\n --------\n scipy.stats.distributions.gamma : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Gamma distribution is\n\n .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},\n\n where :math:`k` is the shape and :math:`\\theta` the scale,\n and :math:`\\Gamma` is the Gamma function.\n\n The Gamma distribution is often used to model the times to failure of\n electronic components, and arises naturally in processes for which the\n waiting times between Poisson distributed events are relevant.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Gamma Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/GammaDistribution.html\n .. [2] Wikipedia, \"Gamma-distribution\",\n http://en.wikipedia.org/wiki/Gamma-distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> shape, scale = 2., 1. # mean and width\n >>> s = np.random.standard_gamma(shape, 1000000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt""\n >>> import scipy.special as sps\n >>> count, bins, ignored = plt.hist(s, 50, normed=True)\n >>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ \\\n ... (sps.gamma(shape) * scale**shape))\n >>> plt.plot(bins, y, linewidth=2, color='r')\n >>> plt.show()\n\n ";
+static char __pyx_k_227[] = "RandomState.gamma (line 1713)";
+static char __pyx_k_228[] = "\n gamma(shape, scale=1.0, size=None)\n\n Draw samples from a Gamma distribution.\n\n Samples are drawn from a Gamma distribution with specified parameters,\n `shape` (sometimes designated \"k\") and `scale` (sometimes designated\n \"theta\"), where both parameters are > 0.\n\n Parameters\n ----------\n shape : scalar > 0\n The shape of the gamma distribution.\n scale : scalar > 0, optional\n The scale of the gamma distribution. Default is equal to 1.\n size : shape_tuple, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n out : ndarray, float\n Returns one sample unless `size` parameter is specified.\n\n See Also\n --------\n scipy.stats.distributions.gamma : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Gamma distribution is\n\n .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},\n\n where :math:`k` is the shape and :math:`\\theta` the scale,\n and :math:`\\Gamma` is the Gamma function.\n\n The Gamma distribution is often used to model the times to failure of\n electronic components, and arises naturally in processes for which the\n waiting times between Poisson distributed events are relevant.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Gamma Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/GammaDistribution.html\n .. [2] Wikipedia, \"Gamma-distribution\",\n http://en.wikipedia.org/wiki/Gamma-distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> shape, scale = 2.,"" 2. # mean and dispersion\n >>> s = np.random.gamma(shape, scale, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> import scipy.special as sps\n >>> count, bins, ignored = plt.hist(s, 50, normed=True)\n >>> y = bins**(shape-1)*(np.exp(-bins/scale) /\n ... (sps.gamma(shape)*scale**shape))\n >>> plt.plot(bins, y, linewidth=2, color='r')\n >>> plt.show()\n\n ";
+static char __pyx_k_229[] = "RandomState.f (line 1804)";
+static char __pyx_k_230[] = "\n f(dfnum, dfden, size=None)\n\n Draw samples from an F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of freedom\n in denominator), where both parameters should be greater than zero.\n\n The random variate of the F distribution (also known as the\n Fisher distribution) is a continuous probability distribution\n that arises in ANOVA tests, and is the ratio of two chi-square\n variates.\n\n Parameters\n ----------\n dfnum : float\n Degrees of freedom in numerator. Should be greater than zero.\n dfden : float\n Degrees of freedom in denominator. Should be greater than zero.\n size : {tuple, int}, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``,\n then ``m * n * k`` samples are drawn. By default only one sample\n is returned.\n\n Returns\n -------\n samples : {ndarray, scalar}\n Samples from the Fisher distribution.\n\n See Also\n --------\n scipy.stats.distributions.f : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The F statistic is used to compare in-group variances to between-group\n variances. Calculating the distribution depends on the sampling, and\n so it is a function of the respective degrees of freedom in the\n problem. The variable `dfnum` is the number of samples minus one, the\n between-groups degrees of freedom, while `dfden` is the within-groups\n degrees of freedom, the sum of the number of samples in each group\n minus the number of groups.\n\n References\n ----------\n .. [1] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n Fifth Edition, 2002.""\n .. [2] Wikipedia, \"F-distribution\",\n http://en.wikipedia.org/wiki/F-distribution\n\n Examples\n --------\n An example from Glantz [1], pp 47-40.\n Two groups, children of diabetics (25 people) and children from people\n without diabetes (25 controls). Fasting blood glucose was measured,\n case group had a mean value of 86.1, controls had a mean value of\n 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these\n data consistent with the null hypothesis that the parents' diabetic\n status does not affect their children's blood glucose levels?\n Calculating the F statistic from the data gives a value of 36.01.\n\n Draw samples from the distribution:\n\n >>> dfnum = 1. # between group degrees of freedom\n >>> dfden = 48. # within groups degrees of freedom\n >>> s = np.random.f(dfnum, dfden, 1000)\n\n The lower bound for the top 1% of the samples is:\n\n >>> np.sort(s)[-10]\n 7.61988120985\n\n So there is about a 1% chance that the F statistic will exceed 7.62;\n the measured value is 36, so the null hypothesis is rejected at the 1%\n level.\n\n ";
+static char __pyx_k_231[] = "RandomState.noncentral_f (line 1906)";
+static char __pyx_k_232[] = "\n noncentral_f(dfnum, dfden, nonc, size=None)\n\n Draw samples from the noncentral F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of\n freedom in denominator), where both parameters > 1.\n `nonc` is the non-centrality parameter.\n\n Parameters\n ----------\n dfnum : int\n Parameter, should be > 1.\n dfden : int\n Parameter, should be > 1.\n nonc : float\n Parameter, should be >= 0.\n size : int or tuple of ints\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : scalar or ndarray\n Drawn samples.\n\n Notes\n -----\n When calculating the power of an experiment (power = probability of\n rejecting the null hypothesis when a specific alternative is true) the\n non-central F statistic becomes important. When the null hypothesis is\n true, the F statistic follows a central F distribution. When the null\n hypothesis is not true, then it follows a non-central F statistic.\n\n References\n ----------\n Weisstein, Eric W. \"Noncentral F-Distribution.\" From MathWorld--A Wolfram\n Web Resource. http://mathworld.wolfram.com/NoncentralF-Distribution.html\n\n Wikipedia, \"Noncentral F distribution\",\n http://en.wikipedia.org/wiki/Noncentral_F-distribution\n\n Examples\n --------\n In a study, testing for a specific alternative to the null hypothesis\n requires use of the Noncentral F distribution. We need to calculate the\n area in the tail of the distribution that exceeds the value of the F\n distribution for the null hypothesis. We'll plot the two probability\n distributions for comp""arison.\n\n >>> dfnum = 3 # between group deg of freedom\n >>> dfden = 20 # within groups degrees of freedom\n >>> nonc = 3.0\n >>> nc_vals = np.random.noncentral_f(dfnum, dfden, nonc, 1000000)\n >>> NF = np.histogram(nc_vals, bins=50, normed=True)\n >>> c_vals = np.random.f(dfnum, dfden, 1000000)\n >>> F = np.histogram(c_vals, bins=50, normed=True)\n >>> plt.plot(F[1][1:], F[0])\n >>> plt.plot(NF[1][1:], NF[0])\n >>> plt.show()\n\n ";
+static char __pyx_k_233[] = "RandomState.chisquare (line 2001)";
+static char __pyx_k_234[] = "\n chisquare(df, size=None)\n\n Draw samples from a chi-square distribution.\n\n When `df` independent random variables, each with standard normal\n distributions (mean 0, variance 1), are squared and summed, the\n resulting distribution is chi-square (see Notes). This distribution\n is often used in hypothesis testing.\n\n Parameters\n ----------\n df : int\n Number of degrees of freedom.\n size : tuple of ints, int, optional\n Size of the returned array. By default, a scalar is\n returned.\n\n Returns\n -------\n output : ndarray\n Samples drawn from the distribution, packed in a `size`-shaped\n array.\n\n Raises\n ------\n ValueError\n When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)\n is given.\n\n Notes\n -----\n The variable obtained by summing the squares of `df` independent,\n standard normally distributed random variables:\n\n .. math:: Q = \\sum_{i=1}^{\\mathtt{df}} X^2_i\n\n is chi-square distributed, denoted\n\n .. math:: Q \\sim \\chi^2_k.\n\n The probability density function of the chi-squared distribution is\n\n .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}\n x^{k/2 - 1} e^{-x/2},\n\n where :math:`\\Gamma` is the gamma function,\n\n .. math:: \\Gamma(x) = \\int_0^{\\infty} t^{x - 1} e^{-t} dt.\n\n References\n ----------\n `NIST/SEMATECH e-Handbook of Statistical Methods\n <http://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm>`_\n\n Examples\n --------\n >>> np.random.chisquare(2,4)\n array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272])\n\n ";
+static char __pyx_k_235[] = "RandomState.noncentral_chisquare (line 2079)";
+static char __pyx_k_236[] = "\n noncentral_chisquare(df, nonc, size=None)\n\n Draw samples from a noncentral chi-square distribution.\n\n The noncentral :math:`\\chi^2` distribution is a generalisation of\n the :math:`\\chi^2` distribution.\n\n Parameters\n ----------\n df : int\n Degrees of freedom, should be >= 1.\n nonc : float\n Non-centrality, should be > 0.\n size : int or tuple of ints\n Shape of the output.\n\n Notes\n -----\n The probability density function for the noncentral Chi-square distribution\n is\n\n .. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}\n \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}P_{Y_{df+2i}}(x),\n\n where :math:`Y_{q}` is the Chi-square with q degrees of freedom.\n\n In Holla (1970) [1]_, it is noted that the noncentral chi-square is useful\n in bombing and coverage problems, with the probability of killing the point\n target given by the noncentral chi-squared distribution.\n\n References\n ----------\n .. [1] Delhi, M.S. Holla, \"On a noncentral chi-square distribution in the\n analysis of weapon systems effectiveness\", Metrika, Volume 15,\n Number 1 / December, 1970.\n .. [2] Wikipedia, \"Noncentral chi-square distribution\"\n http://en.wikipedia.org/wiki/Noncentral_chi-square_distribution\n\n Examples\n --------\n Draw values from the distribution and plot the histogram\n\n >>> import matplotlib.pyplot as plt\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... bins=200, normed=True)\n >>> plt.show()\n\n Draw values from a noncentral chisquare with very small noncentrality,\n and compare to a chisquare.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, .0000001, 100000),\n "" ... bins=np.arange(0., 25, .1), normed=True)\n >>> values2 = plt.hist(np.random.chisquare(3, 100000),\n ... bins=np.arange(0., 25, .1), normed=True)\n >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')\n >>> plt.show()\n\n Demonstrate how large values of non-centrality lead to a more symmetric\n distribution.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... bins=200, normed=True)\n >>> plt.show()\n\n ";
+static char __pyx_k_237[] = "RandomState.standard_cauchy (line 2171)";
+static char __pyx_k_238[] = "\n standard_cauchy(size=None)\n\n Standard Cauchy distribution with mode = 0.\n\n Also known as the Lorentz distribution.\n\n Parameters\n ----------\n size : int or tuple of ints\n Shape of the output.\n\n Returns\n -------\n samples : ndarray or scalar\n The drawn samples.\n\n Notes\n -----\n The probability density function for the full Cauchy distribution is\n\n .. math:: P(x; x_0, \\gamma) = \\frac{1}{\\pi \\gamma \\bigl[ 1+\n (\\frac{x-x_0}{\\gamma})^2 \\bigr] }\n\n and the Standard Cauchy distribution just sets :math:`x_0=0` and\n :math:`\\gamma=1`.\n\n The Cauchy distribution arises in the solution to the driven harmonic\n oscillator problem, and also describes spectral line broadening. It\n also describes the distribution of values at which a line tilted at\n a random angle will cut the x axis.\n\n When studying hypothesis tests that assume normality, seeing how the\n tests perform on data from a Cauchy distribution is a good indicator of\n their sensitivity to a heavy-tailed distribution, since the Cauchy looks\n very much like a Gaussian distribution, but with heavier tails.\n\n References\n ----------\n .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, \"Cauchy\n Distribution\",\n http://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm\n .. [2] Weisstein, Eric W. \"Cauchy Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/CauchyDistribution.html\n .. [3] Wikipedia, \"Cauchy distribution\"\n http://en.wikipedia.org/wiki/Cauchy_distribution\n\n Examples\n --------\n Draw samples and plot the distribution:\n\n >>> s = np.random.standard_cauchy(1000000)\n >>> s = s[(s>-25) & (s<""25)] # truncate distribution so it plots well\n >>> import matplotlib.pyplot as plt\n >>> plt.hist(s, bins=100)\n >>> plt.show()\n\n ";
+static char __pyx_k_239[] = "RandomState.standard_t (line 2232)";
+static char __pyx_k_240[] = "\n standard_t(df, size=None)\n\n Standard Student's t distribution with df degrees of freedom.\n\n A special case of the hyperbolic distribution.\n As `df` gets large, the result resembles that of the standard normal\n distribution (`standard_normal`).\n\n Parameters\n ----------\n df : int\n Degrees of freedom, should be > 0.\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n\n Returns\n -------\n samples : ndarray or scalar\n Drawn samples.\n\n Notes\n -----\n The probability density function for the t distribution is\n\n .. math:: P(x, df) = \\frac{\\Gamma(\\frac{df+1}{2})}{\\sqrt{\\pi df}\n \\Gamma(\\frac{df}{2})}\\Bigl( 1+\\frac{x^2}{df} \\Bigr)^{-(df+1)/2}\n\n The t test is based on an assumption that the data come from a Normal\n distribution. The t test provides a way to test whether the sample mean\n (that is the mean calculated from the data) is a good estimate of the true\n mean.\n\n The derivation of the t-distribution was first published in 1908 by William\n Gosset while working for the Guinness Brewery in Dublin. Due to proprietary\n issues, he had to publish under a pseudonym, and so he used the name\n Student.\n\n References\n ----------\n .. [1] Dalgaard, Peter, \"Introductory Statistics With R\",\n Springer, 2002.\n .. [2] Wikipedia, \"Student's t-distribution\"\n http://en.wikipedia.org/wiki/Student's_t-distribution\n\n Examples\n --------\n From Dalgaard page 83 [1]_, suppose the daily energy intake for 11\n women in kJ is:\n\n >>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\\n ... 7515, 8230, 8770])\n\n Doe""s their energy intake deviate systematically from the recommended\n value of 7725 kJ?\n\n We have 10 degrees of freedom, so is the sample mean within 95% of the\n recommended value?\n\n >>> s = np.random.standard_t(10, size=100000)\n >>> np.mean(intake)\n 6753.636363636364\n >>> intake.std(ddof=1)\n 1142.1232221373727\n\n Calculate the t statistic, setting the ddof parameter to the unbiased\n value so the divisor in the standard deviation will be degrees of\n freedom, N-1.\n\n >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))\n >>> import matplotlib.pyplot as plt\n >>> h = plt.hist(s, bins=100, normed=True)\n\n For a one-sided t-test, how far out in the distribution does the t\n statistic appear?\n\n >>> np.sum(s<t) / float(len(s))\n 0.0090699999999999999 #random\n\n So the p-value is about 0.009, which is small enough to reject the null\n hypothesis at the 1% significance level.\n\n ";
+static char __pyx_k_241[] = "RandomState.vonmises (line 2333)";
+static char __pyx_k_242[] = "\n        vonmises(mu, kappa, size=None)\n\n        Draw samples from a von Mises distribution.\n\n        Samples are drawn from a von Mises distribution with specified mode\n        (mu) and dispersion (kappa), on the interval [-pi, pi].\n\n        The von Mises distribution (also known as the circular normal\n        distribution) is a continuous probability distribution on the unit\n        circle. It may be thought of as the circular analogue of the normal\n        distribution.\n\n        Parameters\n        ----------\n        mu : float\n            Mode (\"center\") of the distribution.\n        kappa : float\n            Dispersion of the distribution, has to be >=0.\n        size : int or tuple of int\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        Returns\n        -------\n        samples : scalar or ndarray\n            The returned samples, which are in the interval [-pi, pi].\n\n        See Also\n        --------\n        scipy.stats.distributions.vonmises : probability density function,\n            distribution, or cumulative density function, etc.\n\n        Notes\n        -----\n        The probability density for the von Mises distribution is\n\n        .. math:: p(x) = \\frac{e^{\\kappa\\cos(x-\\mu)}}{2\\pi I_0(\\kappa)},\n\n        where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion,\n        and :math:`I_0(\\kappa)` is the modified Bessel function of order 0.\n\n        The von Mises is named for Richard Edler von Mises, who was born in\n        Austria-Hungary, in what is now Ukraine. He fled to the United\n        States in 1939 and became a professor at Harvard. He worked in\n        probability theory, aerodynamics, fluid mechanics, and philosophy of\n        science.\n\n        References\n        ----------\n        Abramowitz, M. and Stegun, I. A. (ed.), *Handbook of Mathematical\n        Functions*, New York: Dover, 1965.\n\n       "" von Mises, R., *Mathematical Theory of Probability and Statistics*,\n        New York: Academic Press, 1964.\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> mu, kappa = 0.0, 4.0 # mean and dispersion\n        >>> s = np.random.vonmises(mu, kappa, 1000)\n\n        Display the histogram of the samples, along with\n        the probability density function:\n\n        >>> import matplotlib.pyplot as plt\n        >>> import scipy.special as sps\n        >>> count, bins, ignored = plt.hist(s, 50, normed=True)\n        >>> x = np.arange(-np.pi, np.pi, 2*np.pi/50.)\n        >>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*sps.i0(kappa))\n        >>> plt.plot(x, y/max(y), linewidth=2, color='r')\n        >>> plt.show()\n\n        ";
+static char __pyx_k_243[] = "RandomState.pareto (line 2427)";
+static char __pyx_k_244[] = "\n        pareto(a, size=None)\n\n        Draw samples from a Pareto II or Lomax distribution with specified shape.\n\n        The Lomax or Pareto II distribution is a shifted Pareto distribution. The\n        classical Pareto distribution can be obtained from the Lomax distribution\n        by adding the location parameter m, see below. The smallest value of the\n        Lomax distribution is zero while for the classical Pareto distribution it\n        is m, where the standard Pareto distribution has location m=1.\n        Lomax can also be considered as a simplified version of the Generalized\n        Pareto distribution (available in SciPy), with the scale set to one and\n        the location set to zero.\n\n        Pareto-distributed values are greater than zero, and the distribution is\n        unbounded above. It is also known as the \"80-20 rule\". In this\n        distribution, 80 percent of the weights are in the lowest 20 percent of\n        the range, while the other 20 percent fill the remaining 80 percent of\n        the range.\n\n        Parameters\n        ----------\n        a : float, > 0.\n            Shape of the distribution.\n        size : tuple of ints\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        See Also\n        --------\n        scipy.stats.distributions.lomax.pdf : probability density function,\n            distribution or cumulative density function, etc.\n        scipy.stats.distributions.genpareto.pdf : probability density function,\n            distribution or cumulative density function, etc.\n\n        Notes\n        -----\n        The probability density for the Pareto distribution is\n\n        .. math:: p(x) = \\frac{am^a}{x^{a+1}}\n\n        where :math:`a` is the shape and :math:`m` the location.\n\n        The Pareto distribution, named after the Italian economist Vilfredo Pareto,\n        is a power law probability distribution useful in many real world probl""ems.\n        Outside the field of economics it is generally referred to as the Bradford\n        distribution. Pareto developed the distribution to describe the\n        distribution of wealth in an economy. It has also found use in insurance,\n        web page access statistics, oil field sizes, and many other problems,\n        including the download frequency for projects in Sourceforge [1]_. It is\n        one of the so-called \"fat-tailed\" distributions.\n\n\n        References\n        ----------\n        .. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of\n               Sourceforge projects.\n        .. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.\n        .. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme\n               Values, Birkhauser Verlag, Basel, pp 23-30.\n        .. [4] Wikipedia, \"Pareto distribution\",\n               http://en.wikipedia.org/wiki/Pareto_distribution\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> a, m = 3., 1. # shape and mode\n        >>> s = np.random.pareto(a, 1000) + m\n\n        Display the histogram of the samples, along with\n        the probability density function:\n\n        >>> import matplotlib.pyplot as plt\n        >>> count, bins, ignored = plt.hist(s, 100, normed=True, align='center')\n        >>> fit = a*m**a/bins**(a+1)\n        >>> plt.plot(bins, max(count)*fit/max(fit),linewidth=2, color='r')\n        >>> plt.show()\n\n        ";
+static char __pyx_k_245[] = "RandomState.weibull (line 2523)";
+static char __pyx_k_246[] = "\n        weibull(a, size=None)\n\n        Weibull distribution.\n\n        Draw samples from a 1-parameter Weibull distribution with the given\n        shape parameter `a`.\n\n        .. math:: X = (-\\ln(U))^{1/a}\n\n        Here, U is drawn from the uniform distribution over (0,1].\n\n        The more common 2-parameter Weibull, including a scale parameter\n        :math:`\\lambda`, is just :math:`X = \\lambda(-\\ln(U))^{1/a}`.\n\n        Parameters\n        ----------\n        a : float\n            Shape of the distribution.\n        size : tuple of ints\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        See Also\n        --------\n        scipy.stats.distributions.weibull_max\n        scipy.stats.distributions.weibull_min\n        scipy.stats.distributions.genextreme\n        gumbel\n\n        Notes\n        -----\n        The Weibull (or Type III asymptotic extreme value distribution for smallest\n        values, SEV Type III, or Rosin-Rammler distribution) is one of a class of\n        Generalized Extreme Value (GEV) distributions used in modeling extreme\n        value problems. This class includes the Gumbel and Frechet distributions.\n\n        The probability density for the Weibull distribution is\n\n        .. math:: p(x) = \\frac{a}\n                 {\\lambda}(\\frac{x}{\\lambda})^{a-1}e^{-(x/\\lambda)^a},\n\n        where :math:`a` is the shape and :math:`\\lambda` the scale.\n\n        The function has its peak (the mode) at\n        :math:`\\lambda(\\frac{a-1}{a})^{1/a}`.\n\n        When ``a = 1``, the Weibull distribution reduces to the exponential\n        distribution.\n\n        References\n        ----------\n        .. [1] Waloddi Weibull, Professor, Royal Technical University, Stockholm,\n               1939 \"A Statistical Theory Of The Strength Of Materials\",\n               Ingeniorsvetenskapsakademiens Handlingar Nr 151, 1939,\n               General""stabens Litografiska Anstalts Forlag, Stockholm.\n        .. [2] Waloddi Weibull, 1951 \"A Statistical Distribution Function of Wide\n               Applicability\", Journal Of Applied Mechanics ASME Paper.\n        .. [3] Wikipedia, \"Weibull distribution\",\n               http://en.wikipedia.org/wiki/Weibull_distribution\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> a = 5. # shape\n        >>> s = np.random.weibull(a, 1000)\n\n        Display the histogram of the samples, along with\n        the probability density function:\n\n        >>> import matplotlib.pyplot as plt\n        >>> x = np.arange(1,100.)/50.\n        >>> def weib(x,n,a):\n        ...     return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a)\n\n        >>> count, bins, ignored = plt.hist(np.random.weibull(5.,1000))\n        >>> scale = count.max()/weib(x, 1., 5.).max()\n        >>> plt.plot(x, weib(x, 1., 5.)*scale)\n        >>> plt.show()\n\n        ";
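The transformation X = (-ln U)^(1/a) quoted in the docstring can be checked directly. A minimal sketch, assuming only that U is uniform on (0, 1]:

    import numpy as np

    a = 5.0
    u = 1.0 - np.random.random(100000)  # uniform on (0, 1], avoids log(0)
    x = (-np.log(u)) ** (1.0 / a)       # inverse-transform Weibull draw

    # Compare with the built-in generator; both means should be close
    # to the Weibull mean gamma(1 + 1/a), about 0.918 for a = 5.
    w = np.random.weibull(a, 100000)
    print(x.mean())
    print(w.mean())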
+static char __pyx_k_247[] = "RandomState.power (line 2623)";
+static char __pyx_k_248[] = "\n        power(a, size=None)\n\n        Draws samples in [0, 1] from a power distribution with positive\n        exponent a - 1.\n\n        Also known as the power function distribution.\n\n        Parameters\n        ----------\n        a : float\n            Parameter, > 0.\n        size : tuple of ints\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        Returns\n        -------\n        samples : {ndarray, scalar}\n            The returned samples lie in [0, 1].\n\n        Raises\n        ------\n        ValueError\n            If a < 1.\n\n        Notes\n        -----\n        The probability density function is\n\n        .. math:: P(x; a) = ax^{a-1}, 0 \\le x \\le 1, a>0.\n\n        The power function distribution is just the inverse of the Pareto\n        distribution. It may also be seen as a special case of the Beta\n        distribution.\n\n        It is used, for example, in modeling the over-reporting of insurance\n        claims.\n\n        References\n        ----------\n        .. [1] Christian Kleiber, Samuel Kotz, \"Statistical size distributions\n               in economics and actuarial sciences\", Wiley, 2003.\n        .. [2] Heckert, N. A. and Filliben, James J. (2003). NIST Handbook 148:\n               Dataplot Reference Manual, Volume 2: Let Subcommands and Library\n               Functions\", National Institute of Standards and Technology Handbook\n               Series, June 2003.\n               http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> a = 5. # shape\n        >>> samples = 1000\n        >>> s = np.random.power(a, samples)\n\n        Display the histogram of the samples, along with\n        the probability density function:\n\n        >>> import matplotlib.pyplot as plt\n        >>> count, bins, ignored = plt.hist(s, bins=""30)\n        >>> x = np.linspace(0, 1, 100)\n        >>> y = a*x**(a-1.)\n        >>> normed_y = samples*np.diff(bins)[0]*y\n        >>> plt.plot(x, normed_y)\n        >>> plt.show()\n\n        Compare the power function distribution to the inverse of the Pareto.\n\n        >>> from scipy import stats\n        >>> rvs = np.random.power(5, 1000000)\n        >>> rvsp = np.random.pareto(5, 1000000)\n        >>> xx = np.linspace(0,1,100)\n        >>> powpdf = stats.powerlaw.pdf(xx,5)\n\n        >>> plt.figure()\n        >>> plt.hist(rvs, bins=50, normed=True)\n        >>> plt.plot(xx,powpdf,'r-')\n        >>> plt.title('np.random.power(5)')\n\n        >>> plt.figure()\n        >>> plt.hist(1./(1.+rvsp), bins=50, normed=True)\n        >>> plt.plot(xx,powpdf,'r-')\n        >>> plt.title('inverse of 1 + np.random.pareto(5)')\n\n        ";
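The claim that the power function distribution is the inverse of the Pareto can also be checked without plots. A sketch, using the Lomax samples that np.random.pareto returns (so 1 + rvsp is a classical Pareto draw):

    import numpy as np

    a = 5.0
    rvs = np.random.power(a, 1000000)
    rvsp = np.random.pareto(a, 1000000)  # Lomax; 1 + rvsp is classical Pareto
    inv = 1.0 / (1.0 + rvsp)             # inverse of a Pareto draw

    # Both samples should share the power-law mean a / (a + 1),
    # i.e. 5/6, about 0.833, for a = 5.
    print(rvs.mean())
    print(inv.mean())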
+static char __pyx_k_249[] = "RandomState.laplace (line 2732)";
+static char __pyx_k_250[] = "\n        laplace(loc=0.0, scale=1.0, size=None)\n\n        Draw samples from the Laplace or double exponential distribution with\n        specified location (or mean) and scale (decay).\n\n        The Laplace distribution is similar to the Gaussian/normal distribution,\n        but is sharper at the peak and has fatter tails. It represents the\n        difference between two independent, identically distributed exponential\n        random variables.\n\n        Parameters\n        ----------\n        loc : float\n            The position, :math:`\\mu`, of the distribution peak.\n        scale : float\n            :math:`\\lambda`, the exponential decay.\n        size : int or tuple of ints, optional\n            Output shape. Default is None, in which case a single value is\n            returned.\n\n        Notes\n        -----\n        It has the probability density function\n\n        .. math:: f(x; \\mu, \\lambda) = \\frac{1}{2\\lambda}\n                  \\exp\\left(-\\frac{|x - \\mu|}{\\lambda}\\right).\n\n        The first law of Laplace, from 1774, states that the frequency of an error\n        can be expressed as an exponential function of the absolute magnitude of\n        the error, which leads to the Laplace distribution. For many problems in\n        Economics and Health sciences, this distribution seems to model the data\n        better than the standard Gaussian distribution.\n\n        References\n        ----------\n        .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). Handbook of Mathematical\n               Functions with Formulas, Graphs, and Mathematical Tables, 9th\n               printing. New York: Dover, 1972.\n\n        .. [2] The Laplace distribution and generalizations\n               By Samuel Kotz, Tomasz J. Kozubowski, Krzysztof Podgorski,\n               Birkhauser, 2001.\n\n        .. [3] Weisstein, Eric W. \"Laplace Distribution.\"\n               From MathWorld--A Wolfram Web Resource.\n               http://mathworld.wolfram.com/LaplaceDistribution.html\n\n        .. [4] Wikipedia, \"Laplace distribution\",\n               http://en.wikipedia.org/wik""i/Laplace_distribution\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> loc, scale = 0., 1.\n        >>> s = np.random.laplace(loc, scale, 1000)\n\n        Display the histogram of the samples, along with\n        the probability density function:\n\n        >>> import matplotlib.pyplot as plt\n        >>> count, bins, ignored = plt.hist(s, 30, normed=True)\n        >>> x = np.arange(-8., 8., .01)\n        >>> pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)\n        >>> plt.plot(x, pdf)\n\n        Plot Gaussian for comparison:\n\n        >>> g = (1/(scale * np.sqrt(2 * np.pi)) *\n        ...      np.exp( - (x - loc)**2 / (2 * scale**2) ))\n        >>> plt.plot(x,g)\n\n        ";
+static char __pyx_k_251[] = "RandomState.gumbel (line 2822)";
+static char __pyx_k_252[] = "\n        gumbel(loc=0.0, scale=1.0, size=None)\n\n        Gumbel distribution.\n\n        Draw samples from a Gumbel distribution with specified location and scale.\n        For more information on the Gumbel distribution, see Notes and References\n        below.\n\n        Parameters\n        ----------\n        loc : float\n            The location of the mode of the distribution.\n        scale : float\n            The scale parameter of the distribution.\n        size : tuple of ints\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        Returns\n        -------\n        out : ndarray\n            The samples\n\n        See Also\n        --------\n        scipy.stats.gumbel_l\n        scipy.stats.gumbel_r\n        scipy.stats.genextreme\n            probability density function, distribution, or cumulative density\n            function, etc. for each of the above\n        weibull\n\n        Notes\n        -----\n        The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme Value\n        Type I) distribution is one of a class of Generalized Extreme Value (GEV)\n        distributions used in modeling extreme value problems. The Gumbel is a\n        special case of the Extreme Value Type I distribution for maximums from\n        distributions with \"exponential-like\" tails.\n\n        The probability density for the Gumbel distribution is\n\n        .. math:: p(x) = \\frac{e^{-(x - \\mu)/ \\beta}}{\\beta} e^{ -e^{-(x - \\mu)/\n                  \\beta}},\n\n        where :math:`\\mu` is the mode, a location parameter, and :math:`\\beta` is\n        the scale parameter.\n\n        The Gumbel (named for German mathematician Emil Julius Gumbel) was used\n        very early in the hydrology literature, for modeling the occurrence of\n        flood events. It is also used for modeling maximum wind speed and rainfall\n        rates. It is a \"fat-tailed\" distribution - the ""probability of an event in\n        the tail of the distribution is larger than if one used a Gaussian, hence\n        the surprisingly frequent occurrence of 100-year floods. Floods were\n        initially modeled as a Gaussian process, which underestimated the frequency\n        of extreme events.\n\n\n        It is one of a class of extreme value distributions, the Generalized\n        Extreme Value (GEV) distributions, which also includes the Weibull and\n        Frechet.\n\n        The function has a mean of :math:`\\mu + 0.57721\\beta` and a variance of\n        :math:`\\frac{\\pi^2}{6}\\beta^2`.\n\n        References\n        ----------\n        Gumbel, E. J., *Statistics of Extremes*, New York: Columbia University\n        Press, 1958.\n\n        Reiss, R.-D. and Thomas, M., *Statistical Analysis of Extreme Values from\n        Insurance, Finance, Hydrology and Other Fields*, Basel: Birkhauser Verlag,\n        2001.\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> mu, beta = 0, 0.1 # location and scale\n        >>> s = np.random.gumbel(mu, beta, 1000)\n\n        Display the histogram of the samples, along with\n        the probability density function:\n\n        >>> import matplotlib.pyplot as plt\n        >>> count, bins, ignored = plt.hist(s, 30, normed=True)\n        >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)\n        ...          * np.exp( -np.exp( -(bins - mu) /beta) ),\n        ...          linewidth=2, color='r')\n        >>> plt.show()\n\n        Show how an extreme value distribution can arise from a Gaussian process\n        and compare to a Gaussian:\n\n        >>> means = []\n        >>> maxima = []\n        >>> for i in range(0,1000):\n        ...    a = np.random.normal(mu, beta, 1000)\n        ...    means.append(a.mean())\n        ...    maxima.append(a.max())\n        >>> count, bins, ignored = plt.hist(maxima, 30, normed=True)\n        >>> beta = np.std(maxima)*np.pi/np.sqrt(6)""\n        >>> mu = np.mean(maxima) - 0.57721*beta\n        >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)\n        ...          * np.exp(-np.exp(-(bins - mu)/beta)),\n        ...          linewidth=2, color='r')\n        >>> plt.plot(bins, 1/(beta * np.sqrt(2 * np.pi))\n        ...          * np.exp(-(bins - mu)**2 / (2 * beta**2)),\n        ...          linewidth=2, color='g')\n        >>> plt.show()\n\n        ";
+static char __pyx_k_253[] = "RandomState.logistic (line 2953)";
+static char __pyx_k_254[] = "\n        logistic(loc=0.0, scale=1.0, size=None)\n\n        Draw samples from a Logistic distribution.\n\n        Samples are drawn from a Logistic distribution with specified\n        parameters, loc (location or mean, also median), and scale (>0).\n\n        Parameters\n        ----------\n        loc : float\n            The position (mean) of the distribution.\n        scale : float > 0.\n            The scale of the distribution.\n        size : {tuple, int}\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        Returns\n        -------\n        samples : {ndarray, scalar}\n            Drawn samples.\n\n        See Also\n        --------\n        scipy.stats.distributions.logistic : probability density function,\n            distribution or cumulative density function, etc.\n\n        Notes\n        -----\n        The probability density for the Logistic distribution is\n\n        .. math:: P(x) = \\frac{e^{-(x-\\mu)/s}}{s(1+e^{-(x-\\mu)/s})^2},\n\n        where :math:`\\mu` = location and :math:`s` = scale.\n\n        The Logistic distribution is used in Extreme Value problems where it\n        can act as a mixture of Gumbel distributions, in Epidemiology, and by\n        the World Chess Federation (FIDE) where it is used in the Elo ranking\n        system, assuming the performance of each player is a logistically\n        distributed random variable.\n\n        References\n        ----------\n        .. [1] Reiss, R.-D. and Thomas M. (2001), Statistical Analysis of Extreme\n               Values, from Insurance, Finance, Hydrology and Other Fields,\n               Birkhauser Verlag, Basel, pp 132-133.\n        .. [2] Weisstein, Eric W. \"Logistic Distribution.\" From\n               MathWorld--A Wolfram Web Resource.\n               http://mathworld.wolfram.com/LogisticDistribution.html\n        .. [3] Wikipedia, \"Logistic-distribution\",\n               http://en.wikipedia.org/wiki/Logistic-distribution\n\n        Examples\n       "" --------\n        Draw samples from the distribution:\n\n        >>> loc, scale = 10, 1\n        >>> s = np.random.logistic(loc, scale, 10000)\n        >>> import matplotlib.pyplot as plt\n        >>> count, bins, ignored = plt.hist(s, bins=50)\n\n        # plot against distribution\n\n        >>> def logist(x, loc, scale):\n        ...     return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2)\n        >>> plt.plot(bins, logist(bins, loc, scale)*count.max()/\\\n        ... logist(bins, loc, scale).max())\n        >>> plt.show()\n\n        ";
+static char __pyx_k_255[] = "RandomState.lognormal (line 3041)";
+static char __pyx_k_256[] = "\n        lognormal(mean=0.0, sigma=1.0, size=None)\n\n        Return samples drawn from a log-normal distribution.\n\n        Draw samples from a log-normal distribution with specified mean,\n        standard deviation, and array shape. Note that the mean and standard\n        deviation are not the values for the distribution itself, but of the\n        underlying normal distribution it is derived from.\n\n        Parameters\n        ----------\n        mean : float\n            Mean value of the underlying normal distribution\n        sigma : float, > 0.\n            Standard deviation of the underlying normal distribution\n        size : tuple of ints\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        Returns\n        -------\n        samples : ndarray or float\n            The desired samples. An array of the same shape as `size` if given,\n            if `size` is None a float is returned.\n\n        See Also\n        --------\n        scipy.stats.lognorm : probability density function, distribution,\n            cumulative density function, etc.\n\n        Notes\n        -----\n        A variable `x` has a log-normal distribution if `log(x)` is normally\n        distributed. The probability density function for the log-normal\n        distribution is:\n\n        .. math:: p(x) = \\frac{1}{\\sigma x \\sqrt{2\\pi}}\n                  e^{(-\\frac{(ln(x)-\\mu)^2}{2\\sigma^2})}\n\n        where :math:`\\mu` is the mean and :math:`\\sigma` is the standard\n        deviation of the normally distributed logarithm of the variable.\n        A log-normal distribution results if a random variable is the *product*\n        of a large number of independent, identically-distributed variables in\n        the same way that a normal distribution results if the variable is the\n        *sum* of a large number of independent, identically-distributed\n        variables.\n\n        Reference""s\n        ----------\n        Limpert, E., Stahel, W. A., and Abbt, M., \"Log-normal Distributions\n        across the Sciences: Keys and Clues,\" *BioScience*, Vol. 51, No. 5,\n        May, 2001. http://stat.ethz.ch/~stahel/lognormal/bioscience.pdf\n\n        Reiss, R.D. and Thomas, M., *Statistical Analysis of Extreme Values*,\n        Basel: Birkhauser Verlag, 2001, pp. 31-32.\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> mu, sigma = 3., 1. # mean and standard deviation\n        >>> s = np.random.lognormal(mu, sigma, 1000)\n\n        Display the histogram of the samples, along with\n        the probability density function:\n\n        >>> import matplotlib.pyplot as plt\n        >>> count, bins, ignored = plt.hist(s, 100, normed=True, align='mid')\n\n        >>> x = np.linspace(min(bins), max(bins), 10000)\n        >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))\n        ...        / (x * sigma * np.sqrt(2 * np.pi)))\n\n        >>> plt.plot(x, pdf, linewidth=2, color='r')\n        >>> plt.axis('tight')\n        >>> plt.show()\n\n        Demonstrate that taking the products of random samples from a uniform\n        distribution can be fit well by a log-normal probability density function.\n\n        >>> # Generate a thousand samples: each is the product of 100 random\n        >>> # values, drawn from a uniform distribution.\n        >>> b = []\n        >>> for i in range(1000):\n        ...    a = 10. + np.random.random(100)\n        ...    b.append(np.product(a))\n\n        >>> b = np.array(b) / np.min(b) # scale values so the smallest is 1\n        >>> count, bins, ignored = plt.hist(b, 100, normed=True, align='center')\n        >>> sigma = np.std(np.log(b))\n        >>> mu = np.mean(np.log(b))\n\n        >>> x = np.linspace(min(bins), max(bins), 10000)\n        >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))\n        ...        / (x * sigma * np.sqrt(2 * np.pi)))\n\n        >>> plt.plot(x, pdf, co""lor='r', linewidth=2)\n        >>> plt.show()\n\n        ";
+static char __pyx_k_257[] = "RandomState.rayleigh (line 3162)";
+static char __pyx_k_258[] = "\n        rayleigh(scale=1.0, size=None)\n\n        Draw samples from a Rayleigh distribution.\n\n        The :math:`\\chi` and Weibull distributions are generalizations of the\n        Rayleigh.\n\n        Parameters\n        ----------\n        scale : scalar\n            Scale, also equals the mode. Should be >= 0.\n        size : int or tuple of ints, optional\n            Shape of the output. Default is None, in which case a single\n            value is returned.\n\n        Notes\n        -----\n        The probability density function for the Rayleigh distribution is\n\n        .. math:: P(x;scale) = \\frac{x}{scale^2}e^{\\frac{-x^2}{2 \\cdotp scale^2}}\n\n        The Rayleigh distribution arises, for example, if the orthogonal\n        components of the wind velocity are independent, zero-mean Gaussian\n        variables; the wind speed then follows a Rayleigh distribution. The\n        Rayleigh distribution is used to model the expected output from wind\n        turbines.\n\n        References\n        ----------\n        .. [1] Brighton Webs Ltd., Rayleigh Distribution,\n               http://www.brighton-webs.co.uk/distributions/rayleigh.asp\n        .. [2] Wikipedia, \"Rayleigh distribution\"\n               http://en.wikipedia.org/wiki/Rayleigh_distribution\n\n        Examples\n        --------\n        Draw values from the distribution and plot the histogram\n\n        >>> import matplotlib.pyplot as plt\n        >>> values = plt.hist(np.random.rayleigh(3, 100000), bins=200, normed=True)\n\n        Wave heights tend to follow a Rayleigh distribution. If the mean wave\n        height is 1 meter, what fraction of waves are likely to be larger than 3\n        meters?\n\n        >>> meanvalue = 1\n        >>> modevalue = np.sqrt(2 / np.pi) * meanvalue\n        >>> s = np.random.rayleigh(modevalue, 1000000)\n\n        The percentage of waves larger than 3 meters is:\n\n        >>> 100.*sum(s>3)/1000000.\n        0.087300000000000003\n\n        ";
+static char __pyx_k_259[] = "RandomState.wald (line 3234)";
+static char __pyx_k_260[] = "\n        wald(mean, scale, size=None)\n\n        Draw samples from a Wald, or Inverse Gaussian, distribution.\n\n        As the scale approaches infinity, the distribution becomes more like a\n        Gaussian.\n\n        Some references claim that the Wald is an Inverse Gaussian with mean=1, but\n        this is by no means universal.\n\n        The Inverse Gaussian distribution was first studied in relationship to\n        Brownian motion. In 1956 M.C.K. Tweedie used the name Inverse Gaussian\n        because there is an inverse relationship between the time to cover a unit\n        distance and distance covered in unit time.\n\n        Parameters\n        ----------\n        mean : scalar\n            Distribution mean, should be > 0.\n        scale : scalar\n            Scale parameter, should be >= 0.\n        size : int or tuple of ints, optional\n            Output shape. Default is None, in which case a single value is\n            returned.\n\n        Returns\n        -------\n        samples : ndarray or scalar\n            Drawn samples, all greater than zero.\n\n        Notes\n        -----\n        The probability density function for the Wald distribution is\n\n        .. math:: P(x;mean,scale) = \\sqrt{\\frac{scale}{2\\pi x^3}}e^{\n                  \\frac{-scale(x-mean)^2}{2\\cdotp mean^2x}}\n\n        As noted above, the Inverse Gaussian distribution first arose from attempts\n        to model Brownian motion. It is also a competitor to the Weibull for use in\n        reliability modeling and modeling stock returns and interest rate\n        processes.\n\n        References\n        ----------\n        .. [1] Brighton Webs Ltd., Wald Distribution,\n               http://www.brighton-webs.co.uk/distributions/wald.asp\n        .. [2] Chhikara, Raj S., and Folks, J. Leroy, \"The Inverse Gaussian\n               Distribution: Theory, Methodology, and Applications\", CRC Press,\n               1988.\n        .. [3] Wikipedia, \"Wald distribu""tion\"\n               http://en.wikipedia.org/wiki/Wald_distribution\n\n        Examples\n        --------\n        Draw values from the distribution and plot the histogram:\n\n        >>> import matplotlib.pyplot as plt\n        >>> h = plt.hist(np.random.wald(3, 2, 100000), bins=200, normed=True)\n        >>> plt.show()\n\n        ";
+static char __pyx_k_261[] = "RandomState.triangular (line 3320)";
+static char __pyx_k_262[] = "\n triangular(left, mode, right, size=None)\n\n Draw samples from the triangular distribution.\n\n The triangular distribution is a continuous probability distribution with\n lower limit left, peak at mode, and upper limit right. Unlike the other\n distributions, these parameters directly define the shape of the pdf.\n\n Parameters\n ----------\n left : scalar\n Lower limit.\n mode : scalar\n The value where the peak of the distribution occurs.\n The value should fulfill the condition ``left <= mode <= right``.\n right : scalar\n Upper limit, should be larger than `left`.\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n\n Returns\n -------\n samples : ndarray or scalar\n The returned samples all lie in the interval [left, right].\n\n Notes\n -----\n The probability density function for the Triangular distribution is\n\n .. math:: P(x;l, m, r) = \\begin{cases}\n \\frac{2(x-l)}{(r-l)(m-l)}& \\text{for $l \\leq x \\leq m$},\\\\\n \\frac{2(m-x)}{(r-l)(r-m)}& \\text{for $m \\leq x \\leq r$},\\\\\n 0& \\text{otherwise}.\n \\end{cases}\n\n The triangular distribution is often used in ill-defined problems where the\n underlying distribution is not known, but some knowledge of the limits and\n mode exists. Often it is used in simulations.\n\n References\n ----------\n .. [1] Wikipedia, \"Triangular distribution\"\n http://en.wikipedia.org/wiki/Triangular_distribution\n\n Examples\n --------\n Draw values from the distribution and plot the histogram:\n\n >>> import matplotlib.pyplot as plt\n >>> h = plt.hist(np.random.triangular(-3, 0, 8, 100000), bins=""200,\n ... normed=True)\n >>> plt.show()\n\n ";
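The piecewise density in the triangular docstring has a closed-form inverse CDF, so these variates can also be drawn by inverse transform. A sketch under the same l <= m <= r convention (an illustration, not necessarily how mtrand draws them):

    import numpy as np

    def triangular_inv(u, l, m, r):
        # Invert the piecewise-quadratic CDF of the triangular law.
        fc = (m - l) / (r - l)  # CDF value at the mode
        return np.where(u < fc,
                        l + np.sqrt(u * (r - l) * (m - l)),
                        r - np.sqrt((1 - u) * (r - l) * (r - m)))

    u = np.random.random(100000)
    s = triangular_inv(u, -3.0, 0.0, 8.0)
    t = np.random.triangular(-3, 0, 8, 100000)
    # Both means should be near (l + m + r) / 3, about 1.67 here.
    print(s.mean())
    print(t.mean())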
+static char __pyx_k_263[] = "RandomState.binomial (line 3408)";
+static char __pyx_k_264[] = "\n        binomial(n, p, size=None)\n\n        Draw samples from a binomial distribution.\n\n        Samples are drawn from a Binomial distribution with specified\n        parameters, n trials and p probability of success where\n        n is an integer >= 0 and p is in the interval [0,1]. (n may be\n        input as a float, but it is truncated to an integer in use)\n\n        Parameters\n        ----------\n        n : float (but truncated to an integer)\n            Parameter, >= 0.\n        p : float\n            Parameter, >= 0 and <= 1.\n        size : {tuple, int}\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        Returns\n        -------\n        samples : {ndarray, scalar}\n            where the values are all integers in [0, n].\n\n        See Also\n        --------\n        scipy.stats.distributions.binom : probability density function,\n            distribution or cumulative density function, etc.\n\n        Notes\n        -----\n        The probability density for the Binomial distribution is\n\n        .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N},\n\n        where :math:`n` is the number of trials, :math:`p` is the probability\n        of success, and :math:`N` is the number of successes.\n\n        When estimating the standard error of a proportion in a population by\n        using a random sample, the normal distribution works well unless the\n        product p*n <= 5, where p = population proportion estimate, and n =\n        number of samples, in which case the binomial distribution is used\n        instead. For example, a sample of 15 people shows 4 who are left\n        handed, and 11 who are right handed. Then p = 4/15 = 27%. 0.27*15 = 4,\n        so the binomial distribution should be used in this case.\n\n        References\n        ----------\n        .. [1] Dalgaard, Peter, \"Introductory Statistics with R\",\n               Springer-Verlag, 2002.""\n        .. [2] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n               Fifth Edition, 2002.\n        .. [3] Lentner, Marvin, \"Elementary Applied Statistics\", Bogden\n               and Quigley, 1972.\n        .. [4] Weisstein, Eric W. \"Binomial Distribution.\" From MathWorld--A\n               Wolfram Web Resource.\n               http://mathworld.wolfram.com/BinomialDistribution.html\n        .. [5] Wikipedia, \"Binomial-distribution\",\n               http://en.wikipedia.org/wiki/Binomial_distribution\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> n, p = 10, .5 # number of trials, probability of each trial\n        >>> s = np.random.binomial(n, p, 1000)\n        # result of flipping a coin 10 times, tested 1000 times.\n\n        A real world example. A company drills 9 wild-cat oil exploration\n        wells, each with an estimated probability of success of 0.1. All nine\n        wells fail. What is the probability of that happening?\n\n        Let's do 20,000 trials of the model, and count the number that\n        generate zero positive results.\n\n        >>> sum(np.random.binomial(9,0.1,20000)==0)/20000.\n        # answer = 0.38885, or 38%.\n\n        ";
+static char __pyx_k_265[] = "RandomState.negative_binomial (line 3516)";
+static char __pyx_k_266[] = "\n        negative_binomial(n, p, size=None)\n\n        Draw samples from a negative binomial distribution.\n\n        Samples are drawn from a negative binomial distribution with specified\n        parameters, `n` trials and `p` probability of success where `n` is an\n        integer > 0 and `p` is in the interval [0, 1].\n\n        Parameters\n        ----------\n        n : int\n            Parameter, > 0.\n        p : float\n            Parameter, >= 0 and <= 1.\n        size : int or tuple of ints\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        Returns\n        -------\n        samples : int or ndarray of ints\n            Drawn samples.\n\n        Notes\n        -----\n        The probability density for the Negative Binomial distribution is\n\n        .. math:: P(N;n,p) = \\binom{N+n-1}{n-1}p^{n}(1-p)^{N},\n\n        where :math:`n` is the number of successes, :math:`p` is the probability\n        of success, and :math:`N` is the number of failures.\n\n        The negative binomial distribution gives the probability of n-1 successes\n        and N failures in N+n-1 trials, and success on the (N+n)th trial.\n\n        If one throws a die repeatedly until the third time a \"1\" appears, then the\n        probability distribution of the number of non-\"1\"s that appear before the\n        third \"1\" is a negative binomial distribution.\n\n        References\n        ----------\n        .. [1] Weisstein, Eric W. \"Negative Binomial Distribution.\" From\n               MathWorld--A Wolfram Web Resource.\n               http://mathworld.wolfram.com/NegativeBinomialDistribution.html\n        .. [2] Wikipedia, \"Negative binomial distribution\",\n               http://en.wikipedia.org/wiki/Negative_binomial_distribution\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        A real world example. A company drills wild-cat oil exploration well""s, each\n        with an estimated probability of success of 0.1. What is the probability\n        of having one success for each successive well, that is, what is the\n        probability of a single success after drilling 5 wells, after 6 wells,\n        etc.?\n\n        >>> s = np.random.negative_binomial(1, 0.1, 100000)\n        >>> for i in range(1, 11):\n        ...    probability = sum(s<i) / 100000.\n        ...    print i, \"wells drilled, probability of one success =\", probability\n\n        ";
+static char __pyx_k_267[] = "RandomState.poisson (line 3611)";
+static char __pyx_k_268[] = "\n poisson(lam=1.0, size=None)\n\n Draw samples from a Poisson distribution.\n\n The Poisson distribution is the limit of the Binomial\n distribution for large N.\n\n Parameters\n ----------\n lam : float\n Expectation of interval, should be >= 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Notes\n -----\n The Poisson distribution\n\n .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!}\n\n For events with an expected separation :math:`\\lambda` the Poisson\n distribution :math:`f(k; \\lambda)` describes the probability of\n :math:`k` events occurring within the observed interval :math:`\\lambda`.\n\n Because the output is limited to the range of the C long type, a\n ValueError is raised when `lam` is within 10 sigma of the maximum\n representable value.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Poisson Distribution.\" From MathWorld--A Wolfram\n Web Resource. http://mathworld.wolfram.com/PoissonDistribution.html\n .. [2] Wikipedia, \"Poisson distribution\",\n http://en.wikipedia.org/wiki/Poisson_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> import numpy as np\n >>> s = np.random.poisson(5, 10000)\n\n Display histogram of the sample:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 14, normed=True)\n >>> plt.show()\n\n ";
+static char __pyx_k_269[] = "RandomState.zipf (line 3682)";
+static char __pyx_k_270[] = "\n        zipf(a, size=None)\n\n        Draw samples from a Zipf distribution.\n\n        Samples are drawn from a Zipf distribution with specified parameter\n        `a` > 1.\n\n        The Zipf distribution (also known as the zeta distribution) is a\n        discrete probability distribution that satisfies Zipf's law: the\n        frequency of an item is inversely proportional to its rank in a\n        frequency table.\n\n        Parameters\n        ----------\n        a : float > 1\n            Distribution parameter.\n        size : int or tuple of int, optional\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn; a single integer yields a 1-D\n            array of length *size*. The default is None, in which case a\n            single scalar is returned.\n\n        Returns\n        -------\n        samples : scalar or ndarray\n            The returned samples are greater than or equal to one.\n\n        See Also\n        --------\n        scipy.stats.distributions.zipf : probability density function,\n            distribution, or cumulative density function, etc.\n\n        Notes\n        -----\n        The probability density for the Zipf distribution is\n\n        .. math:: p(x) = \\frac{x^{-a}}{\\zeta(a)},\n\n        where :math:`\\zeta` is the Riemann Zeta function.\n\n        It is named for the American linguist George Kingsley Zipf, who noted\n        that the frequency of any word in a sample of a language is inversely\n        proportional to its rank in the frequency table.\n\n        References\n        ----------\n        Zipf, G. K., *Selected Studies of the Principle of Relative Frequency\n        in Language*, Cambridge, MA: Harvard Univ. Press, 1932.\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> a = 2. # parameter\n        >>> s = np.random.zipf""(a, 1000)\n\n        Display the histogram of the samples, along with\n        the probability density function:\n\n        >>> import matplotlib.pyplot as plt\n        >>> import scipy.special as sps\n\n        Truncate s values at 50 so the plot is interesting:\n\n        >>> count, bins, ignored = plt.hist(s[s<50], 50, normed=True)\n        >>> x = np.arange(1., 50.)\n        >>> y = x**(-a)/sps.zetac(a)\n        >>> plt.plot(x, y/max(y), linewidth=2, color='r')\n        >>> plt.show()\n\n        ";
+static char __pyx_k_271[] = "RandomState.geometric (line 3770)";
+static char __pyx_k_272[] = "\n geometric(p, size=None)\n\n Draw samples from the geometric distribution.\n\n Bernoulli trials are experiments with one of two outcomes:\n success or failure (an example of such an experiment is flipping\n a coin). The geometric distribution models the number of trials\n that must be run in order to achieve success. It is therefore\n supported on the positive integers, ``k = 1, 2, ...``.\n\n The probability mass function of the geometric distribution is\n\n .. math:: f(k) = (1 - p)^{k - 1} p\n\n where `p` is the probability of success of an individual trial.\n\n Parameters\n ----------\n p : float\n The probability of success of an individual trial.\n size : tuple of ints\n Number of values to draw from the distribution. The output\n is shaped according to `size`.\n\n Returns\n -------\n out : ndarray\n Samples from the geometric distribution, shaped according to\n `size`.\n\n Examples\n --------\n Draw ten thousand values from the geometric distribution,\n with the probability of an individual success equal to 0.35:\n\n >>> z = np.random.geometric(p=0.35, size=10000)\n\n How many trials succeeded after a single run?\n\n >>> (z == 1).sum() / 10000.\n 0.34889999999999999 #random\n\n ";
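The geometric law also has a simple closed-form sampler, which may make the support k = 1, 2, ... more concrete. A sketch via inversion (an illustration, not necessarily the method mtrand uses):

    import numpy as np

    p = 0.35
    u = 1.0 - np.random.random(10000)  # uniform on (0, 1]

    # Inverse CDF of the geometric law on {1, 2, ...}; the maximum
    # guards the measure-zero edge case u == 1.0, which maps to 0.
    k = np.maximum(np.ceil(np.log(u) / np.log(1.0 - p)), 1)

    z = np.random.geometric(p=0.35, size=10000)
    # Both means should be near 1/p, about 2.857.
    print(k.mean())
    print(z.mean())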
+static char __pyx_k_273[] = "RandomState.hypergeometric (line 3836)";
+static char __pyx_k_274[] = "\n        hypergeometric(ngood, nbad, nsample, size=None)\n\n        Draw samples from a Hypergeometric distribution.\n\n        Samples are drawn from a Hypergeometric distribution with specified\n        parameters, ngood (ways to make a good selection), nbad (ways to make\n        a bad selection), and nsample = number of items sampled, which is less\n        than or equal to the sum ngood + nbad.\n\n        Parameters\n        ----------\n        ngood : int or array_like\n            Number of ways to make a good selection. Must be nonnegative.\n        nbad : int or array_like\n            Number of ways to make a bad selection. Must be nonnegative.\n        nsample : int or array_like\n            Number of items sampled. Must be at least 1 and at most\n            ``ngood + nbad``.\n        size : int or tuple of int\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        Returns\n        -------\n        samples : ndarray or scalar\n            The values are all integers in [0, n].\n\n        See Also\n        --------\n        scipy.stats.distributions.hypergeom : probability density function,\n            distribution or cumulative density function, etc.\n\n        Notes\n        -----\n        The probability density for the Hypergeometric distribution is\n\n        .. math:: P(x) = \\frac{\\binom{n}{x}\\binom{m}{N-x}}{\\binom{n+m}{N}},\n\n        where :math:`0 \\le x \\le N` and :math:`N-m \\le x \\le n`\n\n        for P(x) the probability of x good selections, with n = ngood,\n        m = nbad, and N = nsample.\n\n        Consider an urn with black and white marbles in it: ngood of them\n        are black and nbad are white. If you draw nsample marbles without\n        replacement, then the Hypergeometric distribution describes the\n        distribution of black marbles in the drawn sample.\n\n        Note that this distribution is very similar to the Binomial\n        distrib""ution, except that in this case, samples are drawn without\n        replacement, whereas in the Binomial case samples are drawn with\n        replacement (or the sample space is infinite). As the sample space\n        becomes large, this distribution approaches the Binomial.\n\n        References\n        ----------\n        .. [1] Lentner, Marvin, \"Elementary Applied Statistics\", Bogden\n               and Quigley, 1972.\n        .. [2] Weisstein, Eric W. \"Hypergeometric Distribution.\" From\n               MathWorld--A Wolfram Web Resource.\n               http://mathworld.wolfram.com/HypergeometricDistribution.html\n        .. [3] Wikipedia, \"Hypergeometric-distribution\",\n               http://en.wikipedia.org/wiki/Hypergeometric-distribution\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> ngood, nbad, nsamp = 100, 2, 10\n        # number of good, number of bad, and number of samples\n        >>> s = np.random.hypergeometric(ngood, nbad, nsamp, 1000)\n        >>> import matplotlib.pyplot as plt\n        >>> plt.hist(s)\n        #   note that it is very unlikely to grab both bad items\n\n        Suppose you have an urn with 15 white and 15 black marbles.\n        If you pull 15 marbles at random, how likely is it that\n        12 or more of them are one color?\n\n        >>> s = np.random.hypergeometric(15, 15, 15, 100000)\n        >>> sum(s>=12)/100000. + sum(s<=3)/100000.\n        #   answer = 0.003 ... pretty unlikely!\n\n        ";
+static char __pyx_k_275[] = "RandomState.logseries (line 3955)";
+static char __pyx_k_276[] = "\n        logseries(p, size=None)\n\n        Draw samples from a Logarithmic Series distribution.\n\n        Samples are drawn from a Log Series distribution with specified\n        parameter, p (probability, 0 < p < 1).\n\n        Parameters\n        ----------\n        p : float\n            Shape parameter, 0 < p < 1.\n        size : {tuple, int}\n            Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        Returns\n        -------\n        samples : {ndarray, scalar}\n            Drawn samples; the values are positive integers.\n\n        See Also\n        --------\n        scipy.stats.distributions.logser : probability density function,\n            distribution or cumulative density function, etc.\n\n        Notes\n        -----\n        The probability density for the Log Series distribution is\n\n        .. math:: P(k) = \\frac{-p^k}{k \\ln(1-p)},\n\n        where p = probability.\n\n        The Log Series distribution is frequently used to represent species\n        richness and occurrence, first proposed by Fisher, Corbet, and\n        Williams in 1943 [2]. It may also be used to model the numbers of\n        occupants seen in cars [3].\n\n        References\n        ----------\n        .. [1] Buzas, Martin A.; Culver, Stephen J., Understanding regional\n               species diversity through the log series distribution of\n               occurrences: BIODIVERSITY RESEARCH Diversity & Distributions,\n               Volume 5, Number 5, September 1999 , pp. 187-195(9).\n        .. [2] Fisher, R.A., A.S. Corbet, and C.B. Williams. 1943. The\n               relation between the number of species and the number of\n               individuals in a random sample of an animal population.\n               Journal of Animal Ecology, 12:42-58.\n        .. [3] D. J. Hand, F. Daly, D. Lunn, E. Ostrowski, A Handbook of Small\n               Data Sets, CRC Press, 1994.\n        .. [4] Wikipedia, \"Log""arithmic-distribution\",\n               http://en.wikipedia.org/wiki/Logarithmic-distribution\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> a = .6\n        >>> s = np.random.logseries(a, 10000)\n        >>> import matplotlib.pyplot as plt\n        >>> count, bins, ignored = plt.hist(s)\n\n        # plot against distribution\n\n        >>> def logseries(k, p):\n        ...     return -p**k/(k*np.log(1-p))\n        >>> plt.plot(bins, logseries(bins, a)*count.max()/\\\n        ... logseries(bins, a).max(), 'r')\n        >>> plt.show()\n\n        ";
+static char __pyx_k_277[] = "RandomState.multivariate_normal (line 4050)";
+static char __pyx_k_278[] = "\n        multivariate_normal(mean, cov[, size])\n\n        Draw random samples from a multivariate normal distribution.\n\n        The multivariate normal, multinormal or Gaussian distribution is a\n        generalization of the one-dimensional normal distribution to higher\n        dimensions. Such a distribution is specified by its mean and\n        covariance matrix. These parameters are analogous to the mean\n        (average or \"center\") and variance (standard deviation, or \"width,\"\n        squared) of the one-dimensional normal distribution.\n\n        Parameters\n        ----------\n        mean : 1-D array_like, of length N\n            Mean of the N-dimensional distribution.\n        cov : 2-D array_like, of shape (N, N)\n            Covariance matrix of the distribution. Must be symmetric and\n            positive semi-definite for \"physically meaningful\" results.\n        size : int or tuple of ints, optional\n            Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are\n            generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because\n            each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.\n            If no shape is specified, a single (`N`-D) sample is returned.\n\n        Returns\n        -------\n        out : ndarray\n            The drawn samples, of shape *size*, if that was provided. If not,\n            the shape is ``(N,)``.\n\n            In other words, each entry ``out[i,j,...,:]`` is an N-dimensional\n            value drawn from the distribution.\n\n        Notes\n        -----\n        The mean is a coordinate in N-dimensional space, which represents the\n        location where samples are most likely to be generated. This is\n        analogous to the peak of the bell curve for the one-dimensional or\n        univariate normal distribution.\n\n        Covariance indicates the level to which two variables vary together.\n        From the multivariate normal distribution, w""e draw N-dimensional\n        samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix\n        element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`.\n        The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its\n        \"spread\").\n\n        Instead of specifying the full covariance matrix, popular\n        approximations include:\n\n          - Spherical covariance (*cov* is a multiple of the identity matrix)\n          - Diagonal covariance (*cov* has non-negative elements, and only on\n            the diagonal)\n\n        This geometrical property can be seen in two dimensions by plotting\n        generated data-points:\n\n        >>> mean = [0,0]\n        >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis\n\n        >>> import matplotlib.pyplot as plt\n        >>> x,y = np.random.multivariate_normal(mean,cov,5000).T\n        >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()\n\n        Note that the covariance matrix must be non-negative definite.\n\n        References\n        ----------\n        Papoulis, A., *Probability, Random Variables, and Stochastic Processes*,\n        3rd ed., New York: McGraw-Hill, 1991.\n\n        Duda, R. O., Hart, P. E., and Stork, D. G., *Pattern Classification*,\n        2nd ed., New York: Wiley, 2001.\n\n        Examples\n        --------\n        >>> mean = (1,2)\n        >>> cov = [[1,0],[0,1]]\n        >>> x = np.random.multivariate_normal(mean,cov,(3,3))\n        >>> x.shape\n        (3, 3, 2)\n\n        The following is more likely than not, since each component has unit\n        standard deviation:\n\n        >>> print list( (x[0,0,:] - mean) < 0.6 )\n        [True, True]\n\n        ";
+static char __pyx_k_279[] = "RandomState.multinomial (line 4182)";
+static char __pyx_k_280[] = "\n        multinomial(n, pvals, size=None)\n\n        Draw samples from a multinomial distribution.\n\n        The multinomial distribution is a multivariate generalisation of the\n        binomial distribution. Take an experiment with one of ``p``\n        possible outcomes. An example of such an experiment is throwing a die,\n        where the outcome can be 1 through 6. Each sample drawn from the\n        distribution represents `n` such experiments. Its values,\n        ``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the outcome\n        was ``i``.\n\n        Parameters\n        ----------\n        n : int\n            Number of experiments.\n        pvals : sequence of floats, length p\n            Probabilities of each of the ``p`` different outcomes. These\n            should sum to 1 (however, the last element is always assumed to\n            account for the remaining probability, as long as\n            ``sum(pvals[:-1]) <= 1)``.\n        size : tuple of ints\n            Given a `size` of ``(M, N, K)``, then ``M*N*K`` samples are drawn,\n            and the output shape becomes ``(M, N, K, p)``, since each sample\n            has shape ``(p,)``.\n\n        Examples\n        --------\n        Throw a die 20 times:\n\n        >>> np.random.multinomial(20, [1/6.]*6, size=1)\n        array([[4, 1, 7, 5, 2, 1]])\n\n        It landed 4 times on 1, once on 2, etc.\n\n        Now, throw the die 20 times, and 20 times again:\n\n        >>> np.random.multinomial(20, [1/6.]*6, size=2)\n        array([[3, 4, 3, 3, 4, 3],\n               [2, 4, 3, 4, 0, 7]])\n\n        For the first run, we threw 3 times 1, 4 times 2, etc. For the second,\n        we threw 2 times 1, 4 times 2, etc.\n\n        A die loaded toward its final face: only five probabilities are given\n        here, so the last outcome is assigned the remaining mass, 3/7:\n\n        >>> np.random.multinomial(100, [1/7.]*5)\n        array([13, 16, 13, 16, 42])\n\n        ";
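One common way to generate such draws is the conditional-binomial method: fill each category with a binomial draw from the trials that remain, renormalizing the probabilities as you go. The source shown here does not spell out mtrand's internal method, so treat this as a sketch of the technique rather than the implementation:

    import numpy as np

    def multinomial_once(n, pvals):
        # Conditional-binomial method: category j gets a Binomial draw
        # from the remaining trials, with its probability renormalized
        # by the probability mass not yet consumed.
        out = np.zeros(len(pvals), dtype=int)
        remaining_p = 1.0
        for j in range(len(pvals) - 1):
            out[j] = np.random.binomial(n, pvals[j] / remaining_p)
            n -= out[j]
            remaining_p -= pvals[j]
            if n == 0:
                break
        out[-1] = n  # the last category takes whatever trials remain
        return out

    print(multinomial_once(20, [1/6.]*6))  # six counts summing to 20

Note how this construction makes the docstring's remark literal: the last entry of pvals never needs to be looked at, because the final category simply absorbs the leftover trials.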
+static char __pyx_k_281[] = "RandomState.dirichlet (line 4275)";
+static char __pyx_k_282[] = "\n        dirichlet(alpha, size=None)\n\n        Draw samples from the Dirichlet distribution.\n\n        Draw `size` samples of dimension k from a Dirichlet distribution. A\n        Dirichlet-distributed random variable can be seen as a multivariate\n        generalization of a Beta distribution. The Dirichlet distribution is\n        the conjugate prior of the multinomial in Bayesian inference.\n\n        Parameters\n        ----------\n        alpha : array\n            Parameter of the distribution (length k for a sample of\n            dimension k).\n        size : int\n            Number of samples to draw.\n\n        Returns\n        -------\n        samples : ndarray\n            The drawn samples, of shape ``(size, len(alpha))``.\n\n        Notes\n        -----\n        .. math:: p(x) \\propto \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i}\n\n        Uses the following property for computation: for each dimension,\n        draw a random sample y_i from a standard gamma generator of shape\n        `alpha_i`, then\n        :math:`X = \\frac{1}{\\sum_{i=1}^k{y_i}} (y_1, \\ldots, y_k)` is\n        Dirichlet distributed.\n\n        References\n        ----------\n        .. [1] David MacKay, \"Information Theory, Inference and Learning\n               Algorithms,\" chapter 23,\n               http://www.inference.phy.cam.ac.uk/mackay/\n        .. [2] Wikipedia, \"Dirichlet distribution\",\n               http://en.wikipedia.org/wiki/Dirichlet_distribution\n\n        Examples\n        --------\n        Taking an example cited in Wikipedia, this distribution can be used if\n        one wanted to cut strings (each of initial length 1.0) into K pieces\n        with different lengths, where each piece had, on average, a designated\n        average length, but allowing some variation in the relative sizes of the\n        pieces.\n\n        >>> s = np.random.dirichlet((10, 5, 3), 20).transpose()\n\n        >>> import matplotlib.pyplot as plt\n        >>> plt.barh(range(20), s[0])\n        >>> plt.barh(range(20), s[1], left=s[0], color='g')""\n        >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')\n        >>> plt.title(\"Lengths of Strings\")\n\n        ";
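The gamma-normalization property stated in the Notes translates directly into code. A vectorized sketch of that computation:

    import numpy as np

    alpha = np.array([10., 5., 3.])
    size = 20

    # Draw one standard-gamma variate per component, then normalize
    # each row so it sums to one, as described in the Notes above.
    y = np.random.standard_gamma(alpha, size=(size, len(alpha)))
    x = y / y.sum(axis=1, keepdims=True)

    # Compare with the built-in generator; both column means should be
    # near alpha / alpha.sum() = [0.556, 0.278, 0.167].
    s = np.random.dirichlet(alpha, size)
    print(x.mean(axis=0))
    print(s.mean(axis=0))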
+static char __pyx_k_283[] = "RandomState.shuffle (line 4391)";
+static char __pyx_k_284[] = "\n shuffle(x)\n\n Modify a sequence in-place by shuffling its contents.\n\n Parameters\n ----------\n x : array_like\n The array or list to be shuffled.\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> arr = np.arange(10)\n >>> np.random.shuffle(arr)\n >>> arr\n [1 7 5 2 9 4 3 6 0 8]\n\n This function only shuffles the array along the first index of a\n multi-dimensional array:\n\n >>> arr = np.arange(9).reshape((3, 3))\n >>> np.random.shuffle(arr)\n >>> arr\n array([[3, 4, 5],\n [6, 7, 8],\n [0, 1, 2]])\n\n ";
+static char __pyx_k_285[] = "RandomState.permutation (line 4449)";
+static char __pyx_k_286[] = "\n permutation(x)\n\n Randomly permute a sequence, or return a permuted range.\n\n If `x` is a multi-dimensional array, it is only shuffled along its\n first index.\n\n Parameters\n ----------\n x : int or array_like\n If `x` is an integer, randomly permute ``np.arange(x)``.\n If `x` is an array, make a copy and shuffle the elements\n randomly.\n\n Returns\n -------\n out : ndarray\n Permuted sequence or array range.\n\n Examples\n --------\n >>> np.random.permutation(10)\n array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6])\n\n >>> np.random.permutation([1, 4, 9, 12, 15])\n array([15, 1, 9, 4, 12])\n\n >>> arr = np.arange(9).reshape((3, 3))\n >>> np.random.permutation(arr)\n array([[6, 7, 8],\n [0, 1, 2],\n [3, 4, 5]])\n\n ";
static char __pyx_k__df[] = "df";
static char __pyx_k__mu[] = "mu";
static char __pyx_k__np[] = "np";
@@ -983,7 +1091,8 @@ static char __pyx_k__svd[] = "svd";
static char __pyx_k__beta[] = "beta";
static char __pyx_k__copy[] = "copy";
static char __pyx_k__high[] = "high";
-static char __pyx_k__join[] = "join";
+static char __pyx_k__intp[] = "intp";
+static char __pyx_k__item[] = "item";
static char __pyx_k__left[] = "left";
static char __pyx_k__less[] = "less";
static char __pyx_k__mean[] = "mean";
@@ -991,10 +1100,12 @@ static char __pyx_k__mode[] = "mode";
static char __pyx_k__nbad[] = "nbad";
static char __pyx_k__ndim[] = "ndim";
static char __pyx_k__nonc[] = "nonc";
+static char __pyx_k__prod[] = "prod";
static char __pyx_k__rand[] = "rand";
static char __pyx_k__seed[] = "seed";
static char __pyx_k__side[] = "side";
static char __pyx_k__size[] = "size";
+static char __pyx_k__sort[] = "sort";
static char __pyx_k__sqrt[] = "sqrt";
static char __pyx_k__take[] = "take";
static char __pyx_k__uint[] = "uint";
@@ -1011,6 +1122,7 @@ static char __pyx_k__empty[] = "empty";
static char __pyx_k__equal[] = "equal";
static char __pyx_k__gamma[] = "gamma";
static char __pyx_k__iinfo[] = "iinfo";
+static char __pyx_k__index[] = "index";
static char __pyx_k__kappa[] = "kappa";
static char __pyx_k__ndmin[] = "ndmin";
static char __pyx_k__ngood[] = "ngood";
@@ -1018,6 +1130,7 @@ static char __pyx_k__numpy[] = "numpy";
static char __pyx_k__power[] = "power";
static char __pyx_k__pvals[] = "pvals";
static char __pyx_k__randn[] = "randn";
+static char __pyx_k__ravel[] = "ravel";
static char __pyx_k__right[] = "right";
static char __pyx_k__scale[] = "scale";
static char __pyx_k__shape[] = "shape";
@@ -1054,6 +1167,7 @@ static char __pyx_k__allclose[] = "allclose";
static char __pyx_k__binomial[] = "binomial";
static char __pyx_k__logistic[] = "logistic";
static char __pyx_k__multiply[] = "multiply";
+static char __pyx_k__operator[] = "operator";
static char __pyx_k__rayleigh[] = "rayleigh";
static char __pyx_k__subtract[] = "subtract";
static char __pyx_k__vonmises[] = "vonmises";
@@ -1066,6 +1180,7 @@ static char __pyx_k__lognormal[] = "lognormal";
static char __pyx_k__logseries[] = "logseries";
static char __pyx_k__set_state[] = "set_state";
static char __pyx_k__ValueError[] = "ValueError";
+static char __pyx_k____import__[] = "__import__";
static char __pyx_k__less_equal[] = "less_equal";
static char __pyx_k__standard_t[] = "standard_t";
static char __pyx_k__triangular[] = "triangular";
@@ -1073,6 +1188,7 @@ static char __pyx_k__exponential[] = "exponential";
static char __pyx_k__multinomial[] = "multinomial";
static char __pyx_k__permutation[] = "permutation";
static char __pyx_k__noncentral_f[] = "noncentral_f";
+static char __pyx_k__return_index[] = "return_index";
static char __pyx_k__searchsorted[] = "searchsorted";
static char __pyx_k__greater_equal[] = "greater_equal";
static char __pyx_k__random_sample[] = "random_sample";
@@ -1086,49 +1202,41 @@ static char __pyx_k__negative_binomial[] = "negative_binomial";
static char __pyx_k____RandomState_ctor[] = "__RandomState_ctor";
static char __pyx_k__multivariate_normal[] = "multivariate_normal";
static PyObject *__pyx_kp_s_1;
-static PyObject *__pyx_kp_s_107;
-static PyObject *__pyx_kp_s_109;
-static PyObject *__pyx_kp_s_11;
-static PyObject *__pyx_kp_s_113;
-static PyObject *__pyx_kp_s_115;
+static PyObject *__pyx_kp_s_112;
+static PyObject *__pyx_kp_s_114;
static PyObject *__pyx_kp_s_118;
-static PyObject *__pyx_kp_s_121;
+static PyObject *__pyx_kp_s_120;
static PyObject *__pyx_kp_s_123;
-static PyObject *__pyx_kp_s_125;
+static PyObject *__pyx_kp_s_126;
+static PyObject *__pyx_kp_s_128;
static PyObject *__pyx_kp_s_13;
static PyObject *__pyx_kp_s_130;
-static PyObject *__pyx_kp_s_132;
-static PyObject *__pyx_kp_s_134;
-static PyObject *__pyx_kp_s_146;
-static PyObject *__pyx_kp_s_148;
-static PyObject *__pyx_kp_s_151;
-static PyObject *__pyx_kp_s_153;
-static PyObject *__pyx_kp_s_156;
-static PyObject *__pyx_kp_s_158;
-static PyObject *__pyx_kp_s_16;
+static PyObject *__pyx_kp_s_135;
+static PyObject *__pyx_kp_s_137;
+static PyObject *__pyx_kp_s_139;
+static PyObject *__pyx_kp_s_144;
+static PyObject *__pyx_kp_s_15;
+static PyObject *__pyx_kp_s_152;
+static PyObject *__pyx_kp_s_154;
+static PyObject *__pyx_kp_s_157;
+static PyObject *__pyx_kp_s_159;
static PyObject *__pyx_kp_s_162;
static PyObject *__pyx_kp_s_164;
-static PyObject *__pyx_kp_s_166;
static PyObject *__pyx_kp_s_168;
+static PyObject *__pyx_kp_s_170;
+static PyObject *__pyx_kp_s_172;
static PyObject *__pyx_kp_s_174;
-static PyObject *__pyx_kp_s_176;
static PyObject *__pyx_kp_s_18;
static PyObject *__pyx_kp_s_180;
static PyObject *__pyx_kp_s_182;
-static PyObject *__pyx_kp_s_184;
-static PyObject *__pyx_n_s_186;
-static PyObject *__pyx_kp_s_187;
-static PyObject *__pyx_n_s_191;
-static PyObject *__pyx_n_s_192;
-static PyObject *__pyx_kp_u_193;
-static PyObject *__pyx_kp_u_194;
-static PyObject *__pyx_kp_u_195;
-static PyObject *__pyx_kp_u_196;
-static PyObject *__pyx_kp_u_197;
-static PyObject *__pyx_kp_u_198;
-static PyObject *__pyx_kp_u_199;
+static PyObject *__pyx_kp_s_186;
+static PyObject *__pyx_kp_s_188;
+static PyObject *__pyx_kp_s_190;
+static PyObject *__pyx_n_s_193;
+static PyObject *__pyx_kp_s_194;
+static PyObject *__pyx_n_s_199;
static PyObject *__pyx_kp_s_20;
-static PyObject *__pyx_kp_u_200;
+static PyObject *__pyx_n_s_200;
static PyObject *__pyx_kp_u_201;
static PyObject *__pyx_kp_u_202;
static PyObject *__pyx_kp_u_203;
@@ -1210,28 +1318,37 @@ static PyObject *__pyx_kp_u_275;
static PyObject *__pyx_kp_u_276;
static PyObject *__pyx_kp_u_277;
static PyObject *__pyx_kp_u_278;
+static PyObject *__pyx_kp_u_279;
static PyObject *__pyx_kp_s_28;
+static PyObject *__pyx_kp_u_280;
+static PyObject *__pyx_kp_u_281;
+static PyObject *__pyx_kp_u_282;
+static PyObject *__pyx_kp_u_283;
+static PyObject *__pyx_kp_u_284;
+static PyObject *__pyx_kp_u_285;
+static PyObject *__pyx_kp_u_286;
static PyObject *__pyx_kp_s_30;
-static PyObject *__pyx_kp_s_31;
static PyObject *__pyx_kp_s_32;
-static PyObject *__pyx_kp_s_33;
-static PyObject *__pyx_kp_s_39;
-static PyObject *__pyx_kp_s_42;
+static PyObject *__pyx_kp_s_34;
+static PyObject *__pyx_kp_s_36;
static PyObject *__pyx_kp_s_44;
-static PyObject *__pyx_kp_s_51;
-static PyObject *__pyx_kp_s_61;
-static PyObject *__pyx_kp_s_63;
-static PyObject *__pyx_kp_s_65;
+static PyObject *__pyx_kp_s_47;
+static PyObject *__pyx_kp_s_49;
+static PyObject *__pyx_kp_s_56;
+static PyObject *__pyx_kp_s_66;
static PyObject *__pyx_kp_s_68;
+static PyObject *__pyx_kp_s_70;
static PyObject *__pyx_kp_s_73;
-static PyObject *__pyx_kp_s_77;
-static PyObject *__pyx_kp_s_79;
+static PyObject *__pyx_kp_s_78;
+static PyObject *__pyx_kp_s_82;
static PyObject *__pyx_kp_s_84;
+static PyObject *__pyx_kp_s_89;
static PyObject *__pyx_kp_s_9;
static PyObject *__pyx_n_s__MT19937;
static PyObject *__pyx_n_s__TypeError;
static PyObject *__pyx_n_s__ValueError;
static PyObject *__pyx_n_s____RandomState_ctor;
+static PyObject *__pyx_n_s____import__;
static PyObject *__pyx_n_s____main__;
static PyObject *__pyx_n_s____test__;
static PyObject *__pyx_n_s___rand;
@@ -1273,9 +1390,11 @@ static PyObject *__pyx_n_s__gumbel;
static PyObject *__pyx_n_s__high;
static PyObject *__pyx_n_s__hypergeometric;
static PyObject *__pyx_n_s__iinfo;
+static PyObject *__pyx_n_s__index;
static PyObject *__pyx_n_s__int;
static PyObject *__pyx_n_s__integer;
-static PyObject *__pyx_n_s__join;
+static PyObject *__pyx_n_s__intp;
+static PyObject *__pyx_n_s__item;
static PyObject *__pyx_n_s__kappa;
static PyObject *__pyx_n_s__l;
static PyObject *__pyx_n_s__lam;
@@ -1308,12 +1427,14 @@ static PyObject *__pyx_n_s__normal;
static PyObject *__pyx_n_s__np;
static PyObject *__pyx_n_s__nsample;
static PyObject *__pyx_n_s__numpy;
+static PyObject *__pyx_n_s__operator;
static PyObject *__pyx_n_s__p;
static PyObject *__pyx_n_s__pareto;
static PyObject *__pyx_n_s__permutation;
static PyObject *__pyx_n_s__poisson;
static PyObject *__pyx_n_s__poisson_lam_max;
static PyObject *__pyx_n_s__power;
+static PyObject *__pyx_n_s__prod;
static PyObject *__pyx_n_s__pvals;
static PyObject *__pyx_n_s__rand;
static PyObject *__pyx_n_s__randint;
@@ -1321,9 +1442,11 @@ static PyObject *__pyx_n_s__randn;
static PyObject *__pyx_n_s__random;
static PyObject *__pyx_n_s__random_integers;
static PyObject *__pyx_n_s__random_sample;
+static PyObject *__pyx_n_s__ravel;
static PyObject *__pyx_n_s__rayleigh;
static PyObject *__pyx_n_s__reduce;
static PyObject *__pyx_n_s__replace;
+static PyObject *__pyx_n_s__return_index;
static PyObject *__pyx_n_s__right;
static PyObject *__pyx_n_s__scale;
static PyObject *__pyx_n_s__searchsorted;
@@ -1334,6 +1457,7 @@ static PyObject *__pyx_n_s__shuffle;
static PyObject *__pyx_n_s__side;
static PyObject *__pyx_n_s__sigma;
static PyObject *__pyx_n_s__size;
+static PyObject *__pyx_n_s__sort;
static PyObject *__pyx_n_s__sqrt;
static PyObject *__pyx_n_s__standard_cauchy;
static PyObject *__pyx_n_s__standard_gamma;
@@ -1355,25 +1479,27 @@ static PyObject *__pyx_n_s__zeros;
static PyObject *__pyx_n_s__zipf;
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
+static PyObject *__pyx_int_3;
+static PyObject *__pyx_int_5;
static PyObject *__pyx_int_10;
static PyObject *__pyx_int_624;
-static PyObject *__pyx_k_15;
-static PyObject *__pyx_k_35;
-static PyObject *__pyx_k_36;
-static PyObject *__pyx_k_37;
-static PyObject *__pyx_k_38;
-static PyObject *__pyx_k_48;
-static PyObject *__pyx_k_54;
-static PyObject *__pyx_k_93;
-static PyObject *__pyx_k_94;
-static PyObject *__pyx_k_97;
+static PyObject *__pyx_k_17;
+static PyObject *__pyx_k_40;
+static PyObject *__pyx_k_41;
+static PyObject *__pyx_k_42;
+static PyObject *__pyx_k_43;
+static PyObject *__pyx_k_53;
+static PyObject *__pyx_k_59;
static PyObject *__pyx_k_98;
-static PyObject *__pyx_k_101;
+static PyObject *__pyx_k_99;
static PyObject *__pyx_k_102;
-static PyObject *__pyx_k_105;
+static PyObject *__pyx_k_103;
static PyObject *__pyx_k_106;
+static PyObject *__pyx_k_107;
+static PyObject *__pyx_k_110;
static PyObject *__pyx_k_111;
-static PyObject *__pyx_k_145;
+static PyObject *__pyx_k_116;
+static PyObject *__pyx_k_151;
static PyObject *__pyx_k_tuple_2;
static PyObject *__pyx_k_tuple_3;
static PyObject *__pyx_k_tuple_4;
@@ -1381,120 +1507,128 @@ static PyObject *__pyx_k_tuple_5;
static PyObject *__pyx_k_tuple_6;
static PyObject *__pyx_k_tuple_7;
static PyObject *__pyx_k_tuple_8;
+static PyObject *__pyx_k_slice_11;
+static PyObject *__pyx_k_slice_12;
static PyObject *__pyx_k_tuple_10;
-static PyObject *__pyx_k_tuple_12;
static PyObject *__pyx_k_tuple_14;
-static PyObject *__pyx_k_tuple_17;
+static PyObject *__pyx_k_tuple_16;
static PyObject *__pyx_k_tuple_19;
static PyObject *__pyx_k_tuple_21;
static PyObject *__pyx_k_tuple_23;
static PyObject *__pyx_k_tuple_25;
static PyObject *__pyx_k_tuple_27;
static PyObject *__pyx_k_tuple_29;
-static PyObject *__pyx_k_tuple_34;
-static PyObject *__pyx_k_tuple_40;
-static PyObject *__pyx_k_tuple_41;
-static PyObject *__pyx_k_tuple_43;
+static PyObject *__pyx_k_tuple_31;
+static PyObject *__pyx_k_tuple_33;
+static PyObject *__pyx_k_tuple_35;
+static PyObject *__pyx_k_tuple_37;
+static PyObject *__pyx_k_tuple_38;
+static PyObject *__pyx_k_tuple_39;
static PyObject *__pyx_k_tuple_45;
static PyObject *__pyx_k_tuple_46;
-static PyObject *__pyx_k_tuple_47;
-static PyObject *__pyx_k_tuple_49;
+static PyObject *__pyx_k_tuple_48;
static PyObject *__pyx_k_tuple_50;
+static PyObject *__pyx_k_tuple_51;
static PyObject *__pyx_k_tuple_52;
-static PyObject *__pyx_k_tuple_53;
+static PyObject *__pyx_k_tuple_54;
static PyObject *__pyx_k_tuple_55;
-static PyObject *__pyx_k_tuple_56;
static PyObject *__pyx_k_tuple_57;
static PyObject *__pyx_k_tuple_58;
-static PyObject *__pyx_k_tuple_59;
static PyObject *__pyx_k_tuple_60;
+static PyObject *__pyx_k_tuple_61;
static PyObject *__pyx_k_tuple_62;
+static PyObject *__pyx_k_tuple_63;
static PyObject *__pyx_k_tuple_64;
-static PyObject *__pyx_k_tuple_66;
+static PyObject *__pyx_k_tuple_65;
static PyObject *__pyx_k_tuple_67;
static PyObject *__pyx_k_tuple_69;
-static PyObject *__pyx_k_tuple_70;
static PyObject *__pyx_k_tuple_71;
static PyObject *__pyx_k_tuple_72;
static PyObject *__pyx_k_tuple_74;
static PyObject *__pyx_k_tuple_75;
static PyObject *__pyx_k_tuple_76;
-static PyObject *__pyx_k_tuple_78;
+static PyObject *__pyx_k_tuple_77;
+static PyObject *__pyx_k_tuple_79;
static PyObject *__pyx_k_tuple_80;
static PyObject *__pyx_k_tuple_81;
-static PyObject *__pyx_k_tuple_82;
static PyObject *__pyx_k_tuple_83;
static PyObject *__pyx_k_tuple_85;
static PyObject *__pyx_k_tuple_86;
static PyObject *__pyx_k_tuple_87;
static PyObject *__pyx_k_tuple_88;
-static PyObject *__pyx_k_tuple_89;
static PyObject *__pyx_k_tuple_90;
static PyObject *__pyx_k_tuple_91;
static PyObject *__pyx_k_tuple_92;
+static PyObject *__pyx_k_tuple_93;
+static PyObject *__pyx_k_tuple_94;
static PyObject *__pyx_k_tuple_95;
static PyObject *__pyx_k_tuple_96;
-static PyObject *__pyx_k_tuple_99;
+static PyObject *__pyx_k_tuple_97;
+static PyObject *__pyx_k_slice_192;
+static PyObject *__pyx_k_slice_196;
static PyObject *__pyx_k_tuple_100;
-static PyObject *__pyx_k_tuple_103;
+static PyObject *__pyx_k_tuple_101;
static PyObject *__pyx_k_tuple_104;
+static PyObject *__pyx_k_tuple_105;
static PyObject *__pyx_k_tuple_108;
-static PyObject *__pyx_k_tuple_110;
-static PyObject *__pyx_k_tuple_112;
-static PyObject *__pyx_k_tuple_114;
-static PyObject *__pyx_k_tuple_116;
+static PyObject *__pyx_k_tuple_109;
+static PyObject *__pyx_k_tuple_113;
+static PyObject *__pyx_k_tuple_115;
static PyObject *__pyx_k_tuple_117;
static PyObject *__pyx_k_tuple_119;
-static PyObject *__pyx_k_tuple_120;
+static PyObject *__pyx_k_tuple_121;
static PyObject *__pyx_k_tuple_122;
static PyObject *__pyx_k_tuple_124;
-static PyObject *__pyx_k_tuple_126;
+static PyObject *__pyx_k_tuple_125;
static PyObject *__pyx_k_tuple_127;
-static PyObject *__pyx_k_tuple_128;
static PyObject *__pyx_k_tuple_129;
static PyObject *__pyx_k_tuple_131;
+static PyObject *__pyx_k_tuple_132;
static PyObject *__pyx_k_tuple_133;
-static PyObject *__pyx_k_tuple_135;
+static PyObject *__pyx_k_tuple_134;
static PyObject *__pyx_k_tuple_136;
-static PyObject *__pyx_k_tuple_137;
static PyObject *__pyx_k_tuple_138;
-static PyObject *__pyx_k_tuple_139;
static PyObject *__pyx_k_tuple_140;
static PyObject *__pyx_k_tuple_141;
static PyObject *__pyx_k_tuple_142;
static PyObject *__pyx_k_tuple_143;
-static PyObject *__pyx_k_tuple_144;
+static PyObject *__pyx_k_tuple_145;
+static PyObject *__pyx_k_tuple_146;
static PyObject *__pyx_k_tuple_147;
+static PyObject *__pyx_k_tuple_148;
static PyObject *__pyx_k_tuple_149;
static PyObject *__pyx_k_tuple_150;
-static PyObject *__pyx_k_tuple_152;
-static PyObject *__pyx_k_tuple_154;
+static PyObject *__pyx_k_tuple_153;
static PyObject *__pyx_k_tuple_155;
-static PyObject *__pyx_k_tuple_157;
-static PyObject *__pyx_k_tuple_159;
+static PyObject *__pyx_k_tuple_156;
+static PyObject *__pyx_k_tuple_158;
static PyObject *__pyx_k_tuple_160;
static PyObject *__pyx_k_tuple_161;
static PyObject *__pyx_k_tuple_163;
static PyObject *__pyx_k_tuple_165;
+static PyObject *__pyx_k_tuple_166;
static PyObject *__pyx_k_tuple_167;
static PyObject *__pyx_k_tuple_169;
-static PyObject *__pyx_k_tuple_170;
static PyObject *__pyx_k_tuple_171;
-static PyObject *__pyx_k_tuple_172;
static PyObject *__pyx_k_tuple_173;
static PyObject *__pyx_k_tuple_175;
+static PyObject *__pyx_k_tuple_176;
static PyObject *__pyx_k_tuple_177;
static PyObject *__pyx_k_tuple_178;
static PyObject *__pyx_k_tuple_179;
static PyObject *__pyx_k_tuple_181;
static PyObject *__pyx_k_tuple_183;
+static PyObject *__pyx_k_tuple_184;
static PyObject *__pyx_k_tuple_185;
-static PyObject *__pyx_k_tuple_188;
+static PyObject *__pyx_k_tuple_187;
static PyObject *__pyx_k_tuple_189;
-static PyObject *__pyx_k_tuple_190;
+static PyObject *__pyx_k_tuple_191;
+static PyObject *__pyx_k_tuple_195;
+static PyObject *__pyx_k_tuple_197;
+static PyObject *__pyx_k_tuple_198;
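/* The renumbered __pyx_kp_*, __pyx_k_* and __pyx_k_tuple_* declarations above
 * are Cython's module-level constant table: every string, int, tuple and
 * slice literal in mtrand.pyx is interned once at module init and reused.
 * Adding new literals (operator, __import__, index, intp, item, prod, ravel,
 * return_index, sort, ...) shifts every index after the insertion point,
 * which is why the whole table churns here.  The new __pyx_k_slice_* entries
 * are cached slice objects; roughly, a pyx expression such as a[1:] lowers
 * to a one-time constant (a sketch, not the generated names):
 *
 *     __pyx_k_slice_11 = PySlice_New(__pyx_int_1, Py_None, Py_None);
 *
 * so the slice is built once instead of on every call.
 */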
-/* "mtrand.pyx":128
- * import numpy as np
+/* "mtrand.pyx":129
+ * import operator
*
* cdef object cont0_array(rk_state *state, rk_cont0 func, object size): # <<<<<<<<<<<<<<
* cdef double *array_data
@@ -1518,7 +1652,7 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cont0_array", 0);
- /* "mtrand.pyx":134
+ /* "mtrand.pyx":135
* cdef npy_intp i
*
* if size is None: # <<<<<<<<<<<<<<
@@ -1528,7 +1662,7 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":135
+ /* "mtrand.pyx":136
*
* if size is None:
* return func(state) # <<<<<<<<<<<<<<
@@ -1536,7 +1670,7 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m
* array = <ndarray>np.empty(size, np.float64)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -1545,24 +1679,24 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m
}
/*else*/ {
- /* "mtrand.pyx":137
+ /* "mtrand.pyx":138
* return func(state)
* else:
* array = <ndarray>np.empty(size, np.float64) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -1570,15 +1704,17 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_4)));
- arrayObject = ((PyArrayObject *)__pyx_t_4);
+ __pyx_t_2 = __pyx_t_4;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":138
+ /* "mtrand.pyx":139
* else:
* array = <ndarray>np.empty(size, np.float64)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -1587,7 +1723,7 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":139
+ /* "mtrand.pyx":140
* array = <ndarray>np.empty(size, np.float64)
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -1596,7 +1732,7 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_array_data = ((double *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":140
+ /* "mtrand.pyx":141
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
@@ -1606,7 +1742,7 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_5 = __pyx_v_length;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_5; __pyx_v_i++) {
- /* "mtrand.pyx":141
+ /* "mtrand.pyx":142
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < length:
* array_data[i] = func(state) # <<<<<<<<<<<<<<
@@ -1616,7 +1752,7 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state);
}
- /* "mtrand.pyx":142
+ /* "mtrand.pyx":143
* for i from 0 <= i < length:
* array_data[i] = func(state)
* return array # <<<<<<<<<<<<<<
@@ -1645,7 +1781,7 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m
return __pyx_r;
}
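/* The recurring change in cont0_array above (and in the cont*_array helpers
 * below) is how the generated code stores a casted result: instead of
 * INCREF'ing through the (PyArrayObject *) cast and then DECREF'ing the
 * original temporary, newer Cython routes the value through a scratch
 * temporary and only casts at the final assignment.  Both forms are
 * refcount-neutral; a minimal sketch of the new handoff (names hypothetical):
 *
 *     PyObject *tmp = t4;          /* alias the owned reference            */
 *     Py_INCREF(tmp);              /* tmp takes its own reference          */
 *     Py_DECREF(t4); t4 = 0;       /* release the original temporary       */
 *     arr = (PyArrayObject *)tmp;  /* arr now owns the surviving reference */
 *     tmp = 0;
 *
 * The net effect is one owned reference held by the typed variable, with the
 * cast kept out of the refcounting macros.
 */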
-/* "mtrand.pyx":145
+/* "mtrand.pyx":146
*
*
* cdef object cont1_array_sc(rk_state *state, rk_cont1 func, object size, double a): # <<<<<<<<<<<<<<
@@ -1670,7 +1806,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cont1_array_sc", 0);
- /* "mtrand.pyx":151
+ /* "mtrand.pyx":152
* cdef npy_intp i
*
* if size is None: # <<<<<<<<<<<<<<
@@ -1680,7 +1816,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":152
+ /* "mtrand.pyx":153
*
* if size is None:
* return func(state, a) # <<<<<<<<<<<<<<
@@ -1688,7 +1824,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t
* array = <ndarray>np.empty(size, np.float64)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state, __pyx_v_a)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state, __pyx_v_a)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -1697,24 +1833,24 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t
}
/*else*/ {
- /* "mtrand.pyx":154
+ /* "mtrand.pyx":155
* return func(state, a)
* else:
* array = <ndarray>np.empty(size, np.float64) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -1722,15 +1858,17 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_4)));
- arrayObject = ((PyArrayObject *)__pyx_t_4);
+ __pyx_t_2 = __pyx_t_4;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":155
+ /* "mtrand.pyx":156
* else:
* array = <ndarray>np.empty(size, np.float64)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -1739,7 +1877,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":156
+ /* "mtrand.pyx":157
* array = <ndarray>np.empty(size, np.float64)
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -1748,7 +1886,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t
*/
__pyx_v_array_data = ((double *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":157
+ /* "mtrand.pyx":158
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
@@ -1758,7 +1896,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t
__pyx_t_5 = __pyx_v_length;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_5; __pyx_v_i++) {
- /* "mtrand.pyx":158
+ /* "mtrand.pyx":159
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < length:
* array_data[i] = func(state, a) # <<<<<<<<<<<<<<
@@ -1768,7 +1906,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_a);
}
- /* "mtrand.pyx":159
+ /* "mtrand.pyx":160
* for i from 0 <= i < length:
* array_data[i] = func(state, a)
* return array # <<<<<<<<<<<<<<
@@ -1797,7 +1935,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t
return __pyx_r;
}
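/* Two lookup helpers replace the older calls throughout this file:
 * __Pyx_GetName(__pyx_m, name) becomes __Pyx_GetModuleGlobalName(name), and
 * PyObject_GetAttr(o, name) becomes __Pyx_PyObject_GetAttrStr(o, name).
 * Both are Cython utility functions; in rough terms the module-global lookup
 * checks the module dict first and falls back to builtins (a sketch under
 * that assumption, using the __pyx_d/__pyx_b globals Cython defines):
 *
 *     PyObject *v = PyDict_GetItem(__pyx_d, name);   /* borrowed ref  */
 *     if (v) Py_INCREF(v);
 *     else   v = PyObject_GetAttr(__pyx_b, name);    /* builtins      */
 *
 * which avoids the attribute protocol on the module object for the common
 * np lookups in these hot helpers.
 */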
-/* "mtrand.pyx":161
+/* "mtrand.pyx":162
* return array
*
* cdef object cont1_array(rk_state *state, rk_cont1 func, object size, ndarray oa): # <<<<<<<<<<<<<<
@@ -1817,15 +1955,15 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
- npy_intp __pyx_t_3;
- PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ npy_intp __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cont1_array", 0);
- /* "mtrand.pyx":170
+ /* "mtrand.pyx":171
* cdef broadcast multi
*
* if size is None: # <<<<<<<<<<<<<<
@@ -1835,20 +1973,22 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":172
+ /* "mtrand.pyx":173
* if size is None:
* array = <ndarray>PyArray_SimpleNew(PyArray_NDIM(oa),
* PyArray_DIMS(oa) , NPY_DOUBLE) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array)
*/
- __pyx_t_2 = PyArray_SimpleNew(PyArray_NDIM(__pyx_v_oa), PyArray_DIMS(__pyx_v_oa), NPY_DOUBLE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_SimpleNew(PyArray_NDIM(__pyx_v_oa), PyArray_DIMS(__pyx_v_oa), NPY_DOUBLE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":173
+ /* "mtrand.pyx":174
* array = <ndarray>PyArray_SimpleNew(PyArray_NDIM(oa),
* PyArray_DIMS(oa) , NPY_DOUBLE)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -1857,7 +1997,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":174
+ /* "mtrand.pyx":175
* PyArray_DIMS(oa) , NPY_DOUBLE)
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -1866,30 +2006,32 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_array_data = ((double *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":175
+ /* "mtrand.pyx":176
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array)
* itera = <flatiter>PyArray_IterNew(<object>oa) # <<<<<<<<<<<<<<
* for i from 0 <= i < length:
* array_data[i] = func(state, (<double *>(itera.dataptr))[0])
*/
- __pyx_t_2 = PyArray_IterNew(((PyObject *)__pyx_v_oa)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayIterObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_IterNew(((PyObject *)__pyx_v_oa)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_itera = ((PyArrayIterObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":176
+ /* "mtrand.pyx":177
* array_data = <double *>PyArray_DATA(array)
* itera = <flatiter>PyArray_IterNew(<object>oa)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
* array_data[i] = func(state, (<double *>(itera.dataptr))[0])
* PyArray_ITER_NEXT(itera)
*/
- __pyx_t_3 = __pyx_v_length;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_length;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":177
+ /* "mtrand.pyx":178
* itera = <flatiter>PyArray_IterNew(<object>oa)
* for i from 0 <= i < length:
* array_data[i] = func(state, (<double *>(itera.dataptr))[0]) # <<<<<<<<<<<<<<
@@ -1898,7 +2040,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (((double *)__pyx_v_itera->dataptr)[0]));
- /* "mtrand.pyx":178
+ /* "mtrand.pyx":179
* for i from 0 <= i < length:
* array_data[i] = func(state, (<double *>(itera.dataptr))[0])
* PyArray_ITER_NEXT(itera) # <<<<<<<<<<<<<<
@@ -1911,24 +2053,24 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
}
/*else*/ {
- /* "mtrand.pyx":180
+ /* "mtrand.pyx":181
* PyArray_ITER_NEXT(itera)
* else:
* array = <ndarray>np.empty(size, np.float64) # <<<<<<<<<<<<<<
* array_data = <double *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(2, <void *>array,
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -1936,15 +2078,17 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_5)));
- arrayObject = ((PyArrayObject *)__pyx_t_5);
+ __pyx_t_2 = __pyx_t_5;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":181
+ /* "mtrand.pyx":182
* else:
* array = <ndarray>np.empty(size, np.float64)
* array_data = <double *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -1953,20 +2097,22 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_array_data = ((double *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":183
+ /* "mtrand.pyx":184
* array_data = <double *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(2, <void *>array,
* <void *>oa) # <<<<<<<<<<<<<<
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
*/
- __pyx_t_5 = PyArray_MultiIterNew(2, ((void *)arrayObject), ((void *)__pyx_v_oa)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 182; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_5)));
+ __pyx_t_2 = PyArray_MultiIterNew(2, ((void *)arrayObject), ((void *)__pyx_v_oa)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_5);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = 0;
- /* "mtrand.pyx":184
+ /* "mtrand.pyx":185
* multi = <broadcast>PyArray_MultiIterNew(2, <void *>array,
* <void *>oa)
* if (multi.size != PyArray_SIZE(array)): # <<<<<<<<<<<<<<
@@ -1976,33 +2122,33 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_1 = (__pyx_v_multi->size != PyArray_SIZE(arrayObject));
if (__pyx_t_1) {
- /* "mtrand.pyx":185
+ /* "mtrand.pyx":186
* <void *>oa)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":186
+ /* "mtrand.pyx":187
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, oa_data[0])
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":187
+ /* "mtrand.pyx":188
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -2011,7 +2157,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_oa_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":188
+ /* "mtrand.pyx":189
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, oa_data[0]) # <<<<<<<<<<<<<<
@@ -2020,7 +2166,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_oa_data[0]));
- /* "mtrand.pyx":189
+ /* "mtrand.pyx":190
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, oa_data[0])
* PyArray_MultiIter_NEXTi(multi, 1) # <<<<<<<<<<<<<<
@@ -2032,7 +2178,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
}
__pyx_L3:;
- /* "mtrand.pyx":190
+ /* "mtrand.pyx":191
* array_data[i] = func(state, oa_data[0])
* PyArray_MultiIter_NEXTi(multi, 1)
* return array # <<<<<<<<<<<<<<
@@ -2048,7 +2194,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("mtrand.cont1_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
@@ -2061,7 +2207,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m
return __pyx_r;
}
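/* cont1_array shows the broadcast path used whenever a parameter comes in as
 * an array: the output and the parameter array are walked together with a
 * multi-iterator, and an explicit size check guards a user-supplied size
 * that does not match the broadcast shape.  Stripped of the Cython
 * temporaries, the core of the else-branch is (a sketch, simplified names):
 *
 *     multi = (PyArrayMultiIterObject *)
 *                 PyArray_MultiIterNew(2, (void *)arr, (void *)oa);
 *     if (multi->size != PyArray_SIZE(arr)) {
 *         PyErr_SetString(PyExc_ValueError,
 *                         "size is not compatible with inputs");
 *         goto fail;
 *     }
 *     for (i = 0; i < multi->size; i++) {
 *         double *oa_data = (double *)PyArray_MultiIter_DATA(multi, 1);
 *         arr_data[i] = func(state, oa_data[0]);
 *         PyArray_MultiIter_NEXTi(multi, 1);   /* step operand 1 only */
 *     }
 *
 * Only operand 1 is stepped because the output is written through the flat
 * index i rather than through the iterator.
 */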
-/* "mtrand.pyx":192
+/* "mtrand.pyx":193
* return array
*
* cdef object cont2_array_sc(rk_state *state, rk_cont2 func, object size, double a, # <<<<<<<<<<<<<<
@@ -2086,7 +2232,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cont2_array_sc", 0);
- /* "mtrand.pyx":199
+ /* "mtrand.pyx":200
* cdef npy_intp i
*
* if size is None: # <<<<<<<<<<<<<<
@@ -2096,7 +2242,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":200
+ /* "mtrand.pyx":201
*
* if size is None:
* return func(state, a, b) # <<<<<<<<<<<<<<
@@ -2104,7 +2250,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t
* array = <ndarray>np.empty(size, np.float64)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state, __pyx_v_a, __pyx_v_b)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state, __pyx_v_a, __pyx_v_b)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -2113,24 +2259,24 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t
}
/*else*/ {
- /* "mtrand.pyx":202
+ /* "mtrand.pyx":203
* return func(state, a, b)
* else:
* array = <ndarray>np.empty(size, np.float64) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -2138,15 +2284,17 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_4)));
- arrayObject = ((PyArrayObject *)__pyx_t_4);
+ __pyx_t_2 = __pyx_t_4;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":203
+ /* "mtrand.pyx":204
* else:
* array = <ndarray>np.empty(size, np.float64)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -2155,7 +2303,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":204
+ /* "mtrand.pyx":205
* array = <ndarray>np.empty(size, np.float64)
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -2164,7 +2312,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t
*/
__pyx_v_array_data = ((double *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":205
+ /* "mtrand.pyx":206
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
@@ -2174,7 +2322,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t
__pyx_t_5 = __pyx_v_length;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_5; __pyx_v_i++) {
- /* "mtrand.pyx":206
+ /* "mtrand.pyx":207
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < length:
* array_data[i] = func(state, a, b) # <<<<<<<<<<<<<<
@@ -2184,7 +2332,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_a, __pyx_v_b);
}
- /* "mtrand.pyx":207
+ /* "mtrand.pyx":208
* for i from 0 <= i < length:
* array_data[i] = func(state, a, b)
* return array # <<<<<<<<<<<<<<
@@ -2213,7 +2361,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t
return __pyx_r;
}
-/* "mtrand.pyx":210
+/* "mtrand.pyx":211
*
*
* cdef object cont2_array(rk_state *state, rk_cont2 func, object size, # <<<<<<<<<<<<<<
@@ -2232,15 +2380,15 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
- npy_intp __pyx_t_3;
- PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ npy_intp __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cont2_array", 0);
- /* "mtrand.pyx":220
+ /* "mtrand.pyx":221
* cdef broadcast multi
*
* if size is None: # <<<<<<<<<<<<<<
@@ -2250,33 +2398,37 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":221
+ /* "mtrand.pyx":222
*
* if size is None:
* multi = <broadcast> PyArray_MultiIterNew(2, <void *>oa, <void *>ob) # <<<<<<<<<<<<<<
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE)
* array_data = <double *>PyArray_DATA(array)
*/
- __pyx_t_2 = PyArray_MultiIterNew(2, ((void *)__pyx_v_oa), ((void *)__pyx_v_ob)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_MultiIterNew(2, ((void *)__pyx_v_oa), ((void *)__pyx_v_ob)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_2)));
- __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":222
+ /* "mtrand.pyx":223
* if size is None:
* multi = <broadcast> PyArray_MultiIterNew(2, <void *>oa, <void *>ob)
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE) # <<<<<<<<<<<<<<
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < multi.size:
*/
- __pyx_t_2 = PyArray_SimpleNew(__pyx_v_multi->nd, __pyx_v_multi->dimensions, NPY_DOUBLE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_SimpleNew(__pyx_v_multi->nd, __pyx_v_multi->dimensions, NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
arrayObject = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":223
+ /* "mtrand.pyx":224
* multi = <broadcast> PyArray_MultiIterNew(2, <void *>oa, <void *>ob)
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE)
* array_data = <double *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -2285,17 +2437,17 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_array_data = ((double *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":224
+ /* "mtrand.pyx":225
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE)
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 0)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":225
+ /* "mtrand.pyx":226
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 0) # <<<<<<<<<<<<<<
@@ -2304,7 +2456,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_oa_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 0));
- /* "mtrand.pyx":226
+ /* "mtrand.pyx":227
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 0)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -2313,7 +2465,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_ob_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":227
+ /* "mtrand.pyx":228
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 0)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, oa_data[0], ob_data[0]) # <<<<<<<<<<<<<<
@@ -2322,7 +2474,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_oa_data[0]), (__pyx_v_ob_data[0]));
- /* "mtrand.pyx":228
+ /* "mtrand.pyx":229
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, oa_data[0], ob_data[0])
* PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<<
@@ -2335,24 +2487,24 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
}
/*else*/ {
- /* "mtrand.pyx":230
+ /* "mtrand.pyx":231
* PyArray_MultiIter_NEXT(multi)
* else:
* array = <ndarray>np.empty(size, np.float64) # <<<<<<<<<<<<<<
* array_data = <double *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>oa, <void *>ob)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -2360,15 +2512,17 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 231; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_5)));
- arrayObject = ((PyArrayObject *)__pyx_t_5);
+ __pyx_t_2 = __pyx_t_5;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":231
+ /* "mtrand.pyx":232
* else:
* array = <ndarray>np.empty(size, np.float64)
* array_data = <double *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -2377,20 +2531,22 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_array_data = ((double *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":232
+ /* "mtrand.pyx":233
* array = <ndarray>np.empty(size, np.float64)
* array_data = <double *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>oa, <void *>ob) # <<<<<<<<<<<<<<
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
*/
- __pyx_t_5 = PyArray_MultiIterNew(3, ((void *)arrayObject), ((void *)__pyx_v_oa), ((void *)__pyx_v_ob)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_5)));
+ __pyx_t_2 = PyArray_MultiIterNew(3, ((void *)arrayObject), ((void *)__pyx_v_oa), ((void *)__pyx_v_ob)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_5);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = 0;
- /* "mtrand.pyx":233
+ /* "mtrand.pyx":234
* array_data = <double *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>oa, <void *>ob)
* if (multi.size != PyArray_SIZE(array)): # <<<<<<<<<<<<<<
@@ -2400,33 +2556,33 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_1 = (__pyx_v_multi->size != PyArray_SIZE(arrayObject));
if (__pyx_t_1) {
- /* "mtrand.pyx":234
+ /* "mtrand.pyx":235
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>oa, <void *>ob)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":235
+ /* "mtrand.pyx":236
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 2)
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":236
+ /* "mtrand.pyx":237
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -2435,7 +2591,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_oa_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":237
+ /* "mtrand.pyx":238
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 2) # <<<<<<<<<<<<<<
@@ -2444,7 +2600,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_ob_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 2));
- /* "mtrand.pyx":238
+ /* "mtrand.pyx":239
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 2)
* array_data[i] = func(state, oa_data[0], ob_data[0]) # <<<<<<<<<<<<<<
@@ -2453,7 +2609,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_oa_data[0]), (__pyx_v_ob_data[0]));
- /* "mtrand.pyx":239
+ /* "mtrand.pyx":240
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 2)
* array_data[i] = func(state, oa_data[0], ob_data[0])
* PyArray_MultiIter_NEXTi(multi, 1) # <<<<<<<<<<<<<<
@@ -2462,7 +2618,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
PyArray_MultiIter_NEXTi(__pyx_v_multi, 1);
- /* "mtrand.pyx":240
+ /* "mtrand.pyx":241
* array_data[i] = func(state, oa_data[0], ob_data[0])
* PyArray_MultiIter_NEXTi(multi, 1)
* PyArray_MultiIter_NEXTi(multi, 2) # <<<<<<<<<<<<<<
@@ -2474,7 +2630,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
}
__pyx_L3:;
- /* "mtrand.pyx":241
+ /* "mtrand.pyx":242
* PyArray_MultiIter_NEXTi(multi, 1)
* PyArray_MultiIter_NEXTi(multi, 2)
* return array # <<<<<<<<<<<<<<
@@ -2490,7 +2646,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("mtrand.cont2_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
@@ -2502,7 +2658,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m
return __pyx_r;
}
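
The hunk above closes cont2_array. One mechanical change this regeneration makes throughout the file is a renumbering of Cython's temporaries: the npy_intp loop bound for `for i from 0 <= i < multi.size` moves from __pyx_t_3 to __pyx_t_4 so that __pyx_t_3 can hold a PyObject *, which is why the error path now XDECREFs __pyx_t_3 where it used to XDECREF __pyx_t_4. A schematic of the declaration swap, distilled from these hunks (the cont3_array diff below shows it verbatim):

    /* before regeneration */
    npy_intp  __pyx_t_3;          /* loop bound */
    PyObject *__pyx_t_4 = NULL;   /* object temp, XDECREF'd on the error path */

    /* after regeneration */
    PyObject *__pyx_t_3 = NULL;   /* object temp, XDECREF'd on the error path */
    npy_intp  __pyx_t_4;          /* loop bound */
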
-/* "mtrand.pyx":243
+/* "mtrand.pyx":244
* return array
*
* cdef object cont3_array_sc(rk_state *state, rk_cont3 func, object size, double a, # <<<<<<<<<<<<<<
@@ -2527,7 +2683,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cont3_array_sc", 0);
- /* "mtrand.pyx":251
+ /* "mtrand.pyx":252
* cdef npy_intp i
*
* if size is None: # <<<<<<<<<<<<<<
@@ -2537,7 +2693,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":252
+ /* "mtrand.pyx":253
*
* if size is None:
* return func(state, a, b, c) # <<<<<<<<<<<<<<
@@ -2545,7 +2701,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t
* array = <ndarray>np.empty(size, np.float64)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state, __pyx_v_a, __pyx_v_b, __pyx_v_c)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 252; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(__pyx_v_func(__pyx_v_state, __pyx_v_a, __pyx_v_b, __pyx_v_c)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -2554,24 +2710,24 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t
}
/*else*/ {
- /* "mtrand.pyx":254
+ /* "mtrand.pyx":255
* return func(state, a, b, c)
* else:
* array = <ndarray>np.empty(size, np.float64) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 254; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 254; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 254; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 254; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 254; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -2579,15 +2735,17 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 254; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_4)));
- arrayObject = ((PyArrayObject *)__pyx_t_4);
+ __pyx_t_2 = __pyx_t_4;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":255
+ /* "mtrand.pyx":256
* else:
* array = <ndarray>np.empty(size, np.float64)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -2596,7 +2754,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":256
+ /* "mtrand.pyx":257
* array = <ndarray>np.empty(size, np.float64)
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -2605,7 +2763,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t
*/
__pyx_v_array_data = ((double *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":257
+ /* "mtrand.pyx":258
* length = PyArray_SIZE(array)
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
@@ -2615,7 +2773,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t
__pyx_t_5 = __pyx_v_length;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_5; __pyx_v_i++) {
- /* "mtrand.pyx":258
+ /* "mtrand.pyx":259
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < length:
* array_data[i] = func(state, a, b, c) # <<<<<<<<<<<<<<
@@ -2625,7 +2783,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_a, __pyx_v_b, __pyx_v_c);
}
- /* "mtrand.pyx":259
+ /* "mtrand.pyx":260
* for i from 0 <= i < length:
* array_data[i] = func(state, a, b, c)
* return array # <<<<<<<<<<<<<<
@@ -2654,7 +2812,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t
return __pyx_r;
}
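
cont3_array_sc shows the second recurring substitution: in every expansion of `array = <ndarray>np.empty(size, np.float64)` the module lookup `__Pyx_GetName(__pyx_m, __pyx_n_s__np)` becomes `__Pyx_GetModuleGlobalName(__pyx_n_s__np)`, and the attribute fetch `PyObject_GetAttr(...)` becomes `__Pyx_PyObject_GetAttrStr(...)`. Both replacements are Cython-internal helpers defined elsewhere in this generated file; judging from their use here, the first specializes the globals-then-builtins lookup for a module-level name and the second specializes getattr for an interned string. Reduced to the two calls that change (error checks omitted, declarations as in the surrounding function):

    /* before: generic lookups */
    __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np);
    __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty);

    /* after: specialized Cython helpers */
    __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np);
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty);
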
-/* "mtrand.pyx":261
+/* "mtrand.pyx":262
* return array
*
* cdef object cont3_array(rk_state *state, rk_cont3 func, object size, ndarray oa, # <<<<<<<<<<<<<<
@@ -2674,15 +2832,15 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
- npy_intp __pyx_t_3;
- PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ npy_intp __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cont3_array", 0);
- /* "mtrand.pyx":273
+ /* "mtrand.pyx":274
* cdef broadcast multi
*
* if size is None: # <<<<<<<<<<<<<<
@@ -2692,33 +2850,37 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":274
+ /* "mtrand.pyx":275
*
* if size is None:
* multi = <broadcast> PyArray_MultiIterNew(3, <void *>oa, <void *>ob, <void *>oc) # <<<<<<<<<<<<<<
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE)
* array_data = <double *>PyArray_DATA(array)
*/
- __pyx_t_2 = PyArray_MultiIterNew(3, ((void *)__pyx_v_oa), ((void *)__pyx_v_ob), ((void *)__pyx_v_oc)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 274; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_MultiIterNew(3, ((void *)__pyx_v_oa), ((void *)__pyx_v_ob), ((void *)__pyx_v_oc)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_2)));
- __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":275
+ /* "mtrand.pyx":276
* if size is None:
* multi = <broadcast> PyArray_MultiIterNew(3, <void *>oa, <void *>ob, <void *>oc)
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE) # <<<<<<<<<<<<<<
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < multi.size:
*/
- __pyx_t_2 = PyArray_SimpleNew(__pyx_v_multi->nd, __pyx_v_multi->dimensions, NPY_DOUBLE); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_SimpleNew(__pyx_v_multi->nd, __pyx_v_multi->dimensions, NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
arrayObject = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":276
+ /* "mtrand.pyx":277
* multi = <broadcast> PyArray_MultiIterNew(3, <void *>oa, <void *>ob, <void *>oc)
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE)
* array_data = <double *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -2727,17 +2889,17 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_array_data = ((double *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":277
+ /* "mtrand.pyx":278
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE)
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 0)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":278
+ /* "mtrand.pyx":279
* array_data = <double *>PyArray_DATA(array)
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 0) # <<<<<<<<<<<<<<
@@ -2746,7 +2908,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_oa_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 0));
- /* "mtrand.pyx":279
+ /* "mtrand.pyx":280
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 0)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -2755,7 +2917,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_ob_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":280
+ /* "mtrand.pyx":281
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 0)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* oc_data = <double *>PyArray_MultiIter_DATA(multi, 2) # <<<<<<<<<<<<<<
@@ -2764,7 +2926,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_oc_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 2));
- /* "mtrand.pyx":281
+ /* "mtrand.pyx":282
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* oc_data = <double *>PyArray_MultiIter_DATA(multi, 2)
* array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0]) # <<<<<<<<<<<<<<
@@ -2773,7 +2935,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_oa_data[0]), (__pyx_v_ob_data[0]), (__pyx_v_oc_data[0]));
- /* "mtrand.pyx":282
+ /* "mtrand.pyx":283
* oc_data = <double *>PyArray_MultiIter_DATA(multi, 2)
* array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0])
* PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<<
@@ -2786,24 +2948,24 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
}
/*else*/ {
- /* "mtrand.pyx":284
+ /* "mtrand.pyx":285
* PyArray_MultiIter_NEXT(multi)
* else:
* array = <ndarray>np.empty(size, np.float64) # <<<<<<<<<<<<<<
* array_data = <double *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(4, <void*>array, <void *>oa,
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -2811,15 +2973,17 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_5)));
- arrayObject = ((PyArrayObject *)__pyx_t_5);
+ __pyx_t_2 = __pyx_t_5;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":285
+ /* "mtrand.pyx":286
* else:
* array = <ndarray>np.empty(size, np.float64)
* array_data = <double *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -2828,20 +2992,22 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_array_data = ((double *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":287
+ /* "mtrand.pyx":288
* array_data = <double *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(4, <void*>array, <void *>oa,
* <void *>ob, <void *>oc) # <<<<<<<<<<<<<<
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
*/
- __pyx_t_5 = PyArray_MultiIterNew(4, ((void *)arrayObject), ((void *)__pyx_v_oa), ((void *)__pyx_v_ob), ((void *)__pyx_v_oc)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_5)));
+ __pyx_t_2 = PyArray_MultiIterNew(4, ((void *)arrayObject), ((void *)__pyx_v_oa), ((void *)__pyx_v_ob), ((void *)__pyx_v_oc)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 287; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_5);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = 0;
- /* "mtrand.pyx":288
+ /* "mtrand.pyx":289
* multi = <broadcast>PyArray_MultiIterNew(4, <void*>array, <void *>oa,
* <void *>ob, <void *>oc)
* if (multi.size != PyArray_SIZE(array)): # <<<<<<<<<<<<<<
@@ -2851,33 +3017,33 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_1 = (__pyx_v_multi->size != PyArray_SIZE(arrayObject));
if (__pyx_t_1) {
- /* "mtrand.pyx":289
+ /* "mtrand.pyx":290
* <void *>ob, <void *>oc)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":290
+ /* "mtrand.pyx":291
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 2)
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":291
+ /* "mtrand.pyx":292
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -2886,7 +3052,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_oa_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":292
+ /* "mtrand.pyx":293
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 2) # <<<<<<<<<<<<<<
@@ -2895,7 +3061,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_ob_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 2));
- /* "mtrand.pyx":293
+ /* "mtrand.pyx":294
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 2)
* oc_data = <double *>PyArray_MultiIter_DATA(multi, 3) # <<<<<<<<<<<<<<
@@ -2904,7 +3070,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_oc_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 3));
- /* "mtrand.pyx":294
+ /* "mtrand.pyx":295
* ob_data = <double *>PyArray_MultiIter_DATA(multi, 2)
* oc_data = <double *>PyArray_MultiIter_DATA(multi, 3)
* array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0]) # <<<<<<<<<<<<<<
@@ -2913,7 +3079,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_oa_data[0]), (__pyx_v_ob_data[0]), (__pyx_v_oc_data[0]));
- /* "mtrand.pyx":295
+ /* "mtrand.pyx":296
* oc_data = <double *>PyArray_MultiIter_DATA(multi, 3)
* array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0])
* PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<<
@@ -2925,7 +3091,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
}
__pyx_L3:;
- /* "mtrand.pyx":296
+ /* "mtrand.pyx":297
* array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0])
* PyArray_MultiIter_NEXT(multi)
* return array # <<<<<<<<<<<<<<
@@ -2941,7 +3107,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("mtrand.cont3_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
@@ -2953,7 +3119,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m
return __pyx_r;
}
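
The third pattern, visible twice in cont3_array above (once for multi, once for array), is how a `<broadcast>` or `<ndarray>` cast takes ownership of a fresh reference. The old output INCREF'd through the target cast and assigned the variable before dropping the source temp; the new output first aliases the pointer into a second temp, balances the refcount on the alias, and only then assigns the cast result and clears the temp. The net reference count is identical; the apparent point is that the owned reference always lives in a tracked temporary, so the RefNanny accounting and the error unwinding see a consistent owner at every step. Schematic only, with T standing for PyArrayObject or PyArrayMultiIterObject:

    /* before */
    __Pyx_INCREF(((PyObject *)((T *)__pyx_t_2)));
    __pyx_v_x = ((T *)__pyx_t_2);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

    /* after */
    __pyx_t_3 = __pyx_t_2;           /* alias the fresh object      */
    __Pyx_INCREF(__pyx_t_3);         /* own it through the alias    */
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_v_x = ((T *)__pyx_t_3);    /* variable takes the owned ref */
    __pyx_t_3 = 0;
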
-/* "mtrand.pyx":298
+/* "mtrand.pyx":299
* return array
*
* cdef object disc0_array(rk_state *state, rk_disc0 func, object size): # <<<<<<<<<<<<<<
@@ -2978,7 +3144,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("disc0_array", 0);
- /* "mtrand.pyx":304
+ /* "mtrand.pyx":305
* cdef npy_intp i
*
* if size is None: # <<<<<<<<<<<<<<
@@ -2988,7 +3154,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":305
+ /* "mtrand.pyx":306
*
* if size is None:
* return func(state) # <<<<<<<<<<<<<<
@@ -2996,7 +3162,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m
* array = <ndarray>np.empty(size, int)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 305; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 306; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -3005,19 +3171,19 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m
}
/*else*/ {
- /* "mtrand.pyx":307
+ /* "mtrand.pyx":308
* return func(state)
* else:
* array = <ndarray>np.empty(size, int) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 307; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 307; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 307; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -3025,15 +3191,17 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
__Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 307; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_4)));
- arrayObject = ((PyArrayObject *)__pyx_t_4);
+ __pyx_t_2 = __pyx_t_4;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":308
+ /* "mtrand.pyx":309
* else:
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -3042,7 +3210,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":309
+ /* "mtrand.pyx":310
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -3051,7 +3219,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":310
+ /* "mtrand.pyx":311
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
@@ -3061,7 +3229,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_5 = __pyx_v_length;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_5; __pyx_v_i++) {
- /* "mtrand.pyx":311
+ /* "mtrand.pyx":312
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length:
* array_data[i] = func(state) # <<<<<<<<<<<<<<
@@ -3071,7 +3239,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state);
}
- /* "mtrand.pyx":312
+ /* "mtrand.pyx":313
* for i from 0 <= i < length:
* array_data[i] = func(state)
* return array # <<<<<<<<<<<<<<
@@ -3100,7 +3268,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m
return __pyx_r;
}
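
disc0_array and the other integer-valued helpers allocate with `np.empty(size, int)` and return scalars through PyInt_FromLong; in the generated C the builtin `int` is the borrowed &PyInt_Type, INCREF'd before being packed into the argument tuple. These are the Python 2 spellings; presumably the compatibility #defines earlier in this generated file map them onto PyLong for Python 3 builds. The tuple construction, unchanged between the two versions of these hunks:

    __pyx_t_2 = PyTuple_New(2);                        /* args for np.empty(size, int) */
    __Pyx_INCREF(__pyx_v_size);
    PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);      /* SET_ITEM steals the new ref  */
    __Pyx_INCREF((PyObject *)(&PyInt_Type));
    PyTuple_SET_ITEM(__pyx_t_2, 1, (PyObject *)(&PyInt_Type));
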
-/* "mtrand.pyx":314
+/* "mtrand.pyx":315
* return array
*
* cdef object discnp_array_sc(rk_state *state, rk_discnp func, object size, long n, double p): # <<<<<<<<<<<<<<
@@ -3125,7 +3293,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("discnp_array_sc", 0);
- /* "mtrand.pyx":320
+ /* "mtrand.pyx":321
* cdef npy_intp i
*
* if size is None: # <<<<<<<<<<<<<<
@@ -3135,7 +3303,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":321
+ /* "mtrand.pyx":322
*
* if size is None:
* return func(state, n, p) # <<<<<<<<<<<<<<
@@ -3143,7 +3311,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_
* array = <ndarray>np.empty(size, int)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_p)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 321; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_p)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 322; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -3152,19 +3320,19 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_
}
/*else*/ {
- /* "mtrand.pyx":323
+ /* "mtrand.pyx":324
* return func(state, n, p)
* else:
* array = <ndarray>np.empty(size, int) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -3172,15 +3340,17 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
__Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_4)));
- arrayObject = ((PyArrayObject *)__pyx_t_4);
+ __pyx_t_2 = __pyx_t_4;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":324
+ /* "mtrand.pyx":325
* else:
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -3189,7 +3359,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":325
+ /* "mtrand.pyx":326
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -3198,7 +3368,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":326
+ /* "mtrand.pyx":327
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
@@ -3208,7 +3378,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_
__pyx_t_5 = __pyx_v_length;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_5; __pyx_v_i++) {
- /* "mtrand.pyx":327
+ /* "mtrand.pyx":328
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length:
* array_data[i] = func(state, n, p) # <<<<<<<<<<<<<<
@@ -3218,7 +3388,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_p);
}
- /* "mtrand.pyx":328
+ /* "mtrand.pyx":329
* for i from 0 <= i < length:
* array_data[i] = func(state, n, p)
* return array # <<<<<<<<<<<<<<
@@ -3247,7 +3417,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_
return __pyx_r;
}
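
Independent of the codegen changes, every source marker and traceback line number in this stretch moves up by exactly one ("mtrand.pyx":234 becomes 235, 321 becomes 322, 398 becomes 399, and so on), consistent with a single line having been added above these definitions in mtrand.pyx. The markers and the traceback numbers move in lockstep because both come from the same source mapping: each statement's error branch records the .pyx line before jumping to the cleanup label, and the traceback is built from that record. The fixed shape, with N a placeholder for the line number that shifts:

    /* per-statement error branch: only N changes between the two versions */
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = N; __pyx_clineno = __LINE__; goto __pyx_L1_error;}

    /* cleanup label, once per function */
    __Pyx_AddTraceback("mtrand.discnp_array_sc", __pyx_clineno, __pyx_lineno, __pyx_filename);
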
-/* "mtrand.pyx":330
+/* "mtrand.pyx":331
* return array
*
* cdef object discnp_array(rk_state *state, rk_discnp func, object size, ndarray on, ndarray op): # <<<<<<<<<<<<<<
@@ -3266,15 +3436,15 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
- npy_intp __pyx_t_3;
- PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ npy_intp __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("discnp_array", 0);
- /* "mtrand.pyx":339
+ /* "mtrand.pyx":340
* cdef broadcast multi
*
* if size is None: # <<<<<<<<<<<<<<
@@ -3284,33 +3454,37 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":340
+ /* "mtrand.pyx":341
*
* if size is None:
* multi = <broadcast> PyArray_MultiIterNew(2, <void *>on, <void *>op) # <<<<<<<<<<<<<<
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG)
* array_data = <long *>PyArray_DATA(array)
*/
- __pyx_t_2 = PyArray_MultiIterNew(2, ((void *)__pyx_v_on), ((void *)__pyx_v_op)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_MultiIterNew(2, ((void *)__pyx_v_on), ((void *)__pyx_v_op)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_2)));
- __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":341
+ /* "mtrand.pyx":342
* if size is None:
* multi = <broadcast> PyArray_MultiIterNew(2, <void *>on, <void *>op)
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) # <<<<<<<<<<<<<<
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < multi.size:
*/
- __pyx_t_2 = PyArray_SimpleNew(__pyx_v_multi->nd, __pyx_v_multi->dimensions, NPY_LONG); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 341; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_SimpleNew(__pyx_v_multi->nd, __pyx_v_multi->dimensions, NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
arrayObject = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":342
+ /* "mtrand.pyx":343
* multi = <broadcast> PyArray_MultiIterNew(2, <void *>on, <void *>op)
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -3319,17 +3493,17 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":343
+ /* "mtrand.pyx":344
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG)
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* on_data = <long *>PyArray_MultiIter_DATA(multi, 0)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":344
+ /* "mtrand.pyx":345
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 0) # <<<<<<<<<<<<<<
@@ -3338,7 +3512,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_on_data = ((long *)PyArray_MultiIter_DATA(__pyx_v_multi, 0));
- /* "mtrand.pyx":345
+ /* "mtrand.pyx":346
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 0)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -3347,7 +3521,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_op_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":346
+ /* "mtrand.pyx":347
* on_data = <long *>PyArray_MultiIter_DATA(multi, 0)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, on_data[0], op_data[0]) # <<<<<<<<<<<<<<
@@ -3356,7 +3530,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_on_data[0]), (__pyx_v_op_data[0]));
- /* "mtrand.pyx":347
+ /* "mtrand.pyx":348
* op_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, on_data[0], op_data[0])
* PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<<
@@ -3369,19 +3543,19 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
}
/*else*/ {
- /* "mtrand.pyx":349
+ /* "mtrand.pyx":350
* PyArray_MultiIter_NEXT(multi)
* else:
* array = <ndarray>np.empty(size, int) # <<<<<<<<<<<<<<
* array_data = <long *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>on, <void *>op)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -3389,15 +3563,17 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
__Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 349; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_5)));
- arrayObject = ((PyArrayObject *)__pyx_t_5);
+ __pyx_t_2 = __pyx_t_5;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":350
+ /* "mtrand.pyx":351
* else:
* array = <ndarray>np.empty(size, int)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -3406,20 +3582,22 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":351
+ /* "mtrand.pyx":352
* array = <ndarray>np.empty(size, int)
* array_data = <long *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>on, <void *>op) # <<<<<<<<<<<<<<
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
*/
- __pyx_t_5 = PyArray_MultiIterNew(3, ((void *)arrayObject), ((void *)__pyx_v_on), ((void *)__pyx_v_op)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_5)));
+ __pyx_t_2 = PyArray_MultiIterNew(3, ((void *)arrayObject), ((void *)__pyx_v_on), ((void *)__pyx_v_op)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 352; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_5);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = 0;
- /* "mtrand.pyx":352
+ /* "mtrand.pyx":353
* array_data = <long *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>on, <void *>op)
* if (multi.size != PyArray_SIZE(array)): # <<<<<<<<<<<<<<
@@ -3429,33 +3607,33 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
__pyx_t_1 = (__pyx_v_multi->size != PyArray_SIZE(arrayObject));
if (__pyx_t_1) {
- /* "mtrand.pyx":353
+ /* "mtrand.pyx":354
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>on, <void *>op)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_5), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_5), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 354; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 354; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":354
+ /* "mtrand.pyx":355
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 2)
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":355
+ /* "mtrand.pyx":356
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -3464,7 +3642,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_on_data = ((long *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":356
+ /* "mtrand.pyx":357
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 2) # <<<<<<<<<<<<<<
@@ -3473,7 +3651,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_op_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 2));
- /* "mtrand.pyx":357
+ /* "mtrand.pyx":358
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 2)
* array_data[i] = func(state, on_data[0], op_data[0]) # <<<<<<<<<<<<<<
@@ -3482,7 +3660,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_on_data[0]), (__pyx_v_op_data[0]));
- /* "mtrand.pyx":358
+ /* "mtrand.pyx":359
* op_data = <double *>PyArray_MultiIter_DATA(multi, 2)
* array_data[i] = func(state, on_data[0], op_data[0])
* PyArray_MultiIter_NEXTi(multi, 1) # <<<<<<<<<<<<<<
@@ -3491,7 +3669,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
*/
PyArray_MultiIter_NEXTi(__pyx_v_multi, 1);
- /* "mtrand.pyx":359
+ /* "mtrand.pyx":360
* array_data[i] = func(state, on_data[0], op_data[0])
* PyArray_MultiIter_NEXTi(multi, 1)
* PyArray_MultiIter_NEXTi(multi, 2) # <<<<<<<<<<<<<<
@@ -3503,7 +3681,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
}
__pyx_L3:;
- /* "mtrand.pyx":361
+ /* "mtrand.pyx":362
* PyArray_MultiIter_NEXTi(multi, 2)
*
* return array # <<<<<<<<<<<<<<
@@ -3519,7 +3697,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("mtrand.discnp_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
@@ -3531,7 +3709,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6
return __pyx_r;
}
-/* "mtrand.pyx":363
+/* "mtrand.pyx":364
* return array
*
* cdef object discdd_array_sc(rk_state *state, rk_discdd func, object size, double n, double p): # <<<<<<<<<<<<<<
@@ -3556,7 +3734,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("discdd_array_sc", 0);
- /* "mtrand.pyx":369
+ /* "mtrand.pyx":370
* cdef npy_intp i
*
* if size is None: # <<<<<<<<<<<<<<
@@ -3566,7 +3744,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":370
+ /* "mtrand.pyx":371
*
* if size is None:
* return func(state, n, p) # <<<<<<<<<<<<<<
@@ -3574,7 +3752,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_
* array = <ndarray>np.empty(size, int)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_p)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_p)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -3583,19 +3761,19 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_
}
/*else*/ {
- /* "mtrand.pyx":372
+ /* "mtrand.pyx":373
* return func(state, n, p)
* else:
* array = <ndarray>np.empty(size, int) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -3603,15 +3781,17 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
__Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_4)));
- arrayObject = ((PyArrayObject *)__pyx_t_4);
+ __pyx_t_2 = __pyx_t_4;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":373
+ /* "mtrand.pyx":374
* else:
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -3620,7 +3800,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":374
+ /* "mtrand.pyx":375
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -3629,7 +3809,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":375
+ /* "mtrand.pyx":376
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
@@ -3639,7 +3819,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_
__pyx_t_5 = __pyx_v_length;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_5; __pyx_v_i++) {
- /* "mtrand.pyx":376
+ /* "mtrand.pyx":377
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length:
* array_data[i] = func(state, n, p) # <<<<<<<<<<<<<<
@@ -3649,7 +3829,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_p);
}
- /* "mtrand.pyx":377
+ /* "mtrand.pyx":378
* for i from 0 <= i < length:
* array_data[i] = func(state, n, p)
* return array # <<<<<<<<<<<<<<
@@ -3678,7 +3858,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_
return __pyx_r;
}
-/* "mtrand.pyx":379
+/* "mtrand.pyx":380
* return array
*
* cdef object discdd_array(rk_state *state, rk_discdd func, object size, ndarray on, ndarray op): # <<<<<<<<<<<<<<
@@ -3697,15 +3877,15 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
- npy_intp __pyx_t_3;
- PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ npy_intp __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("discdd_array", 0);
- /* "mtrand.pyx":388
+ /* "mtrand.pyx":389
* cdef broadcast multi
*
* if size is None: # <<<<<<<<<<<<<<
@@ -3715,33 +3895,37 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":389
+ /* "mtrand.pyx":390
*
* if size is None:
* multi = <broadcast> PyArray_MultiIterNew(2, <void *>on, <void *>op) # <<<<<<<<<<<<<<
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG)
* array_data = <long *>PyArray_DATA(array)
*/
- __pyx_t_2 = PyArray_MultiIterNew(2, ((void *)__pyx_v_on), ((void *)__pyx_v_op)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 389; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_MultiIterNew(2, ((void *)__pyx_v_on), ((void *)__pyx_v_op)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_2)));
- __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":390
+ /* "mtrand.pyx":391
* if size is None:
* multi = <broadcast> PyArray_MultiIterNew(2, <void *>on, <void *>op)
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) # <<<<<<<<<<<<<<
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < multi.size:
*/
- __pyx_t_2 = PyArray_SimpleNew(__pyx_v_multi->nd, __pyx_v_multi->dimensions, NPY_LONG); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_SimpleNew(__pyx_v_multi->nd, __pyx_v_multi->dimensions, NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
arrayObject = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":391
+ /* "mtrand.pyx":392
* multi = <broadcast> PyArray_MultiIterNew(2, <void *>on, <void *>op)
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -3750,17 +3934,17 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":392
+ /* "mtrand.pyx":393
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG)
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* on_data = <double *>PyArray_MultiIter_DATA(multi, 0)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":393
+ /* "mtrand.pyx":394
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < multi.size:
* on_data = <double *>PyArray_MultiIter_DATA(multi, 0) # <<<<<<<<<<<<<<
@@ -3769,7 +3953,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_on_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 0));
- /* "mtrand.pyx":394
+ /* "mtrand.pyx":395
* for i from 0 <= i < multi.size:
* on_data = <double *>PyArray_MultiIter_DATA(multi, 0)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -3778,7 +3962,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_op_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":395
+ /* "mtrand.pyx":396
* on_data = <double *>PyArray_MultiIter_DATA(multi, 0)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, on_data[0], op_data[0]) # <<<<<<<<<<<<<<
@@ -3787,7 +3971,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_on_data[0]), (__pyx_v_op_data[0]));
- /* "mtrand.pyx":396
+ /* "mtrand.pyx":397
* op_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, on_data[0], op_data[0])
* PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<<
@@ -3800,19 +3984,19 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
}
/*else*/ {
- /* "mtrand.pyx":398
+ /* "mtrand.pyx":399
* PyArray_MultiIter_NEXT(multi)
* else:
* array = <ndarray>np.empty(size, int) # <<<<<<<<<<<<<<
* array_data = <long *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>on, <void *>op)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -3820,15 +4004,17 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
__Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_5)));
- arrayObject = ((PyArrayObject *)__pyx_t_5);
+ __pyx_t_2 = __pyx_t_5;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":399
+ /* "mtrand.pyx":400
* else:
* array = <ndarray>np.empty(size, int)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -3837,20 +4023,22 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":400
+ /* "mtrand.pyx":401
* array = <ndarray>np.empty(size, int)
* array_data = <long *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>on, <void *>op) # <<<<<<<<<<<<<<
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
*/
- __pyx_t_5 = PyArray_MultiIterNew(3, ((void *)arrayObject), ((void *)__pyx_v_on), ((void *)__pyx_v_op)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_5)));
+ __pyx_t_2 = PyArray_MultiIterNew(3, ((void *)arrayObject), ((void *)__pyx_v_on), ((void *)__pyx_v_op)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_5);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = 0;
- /* "mtrand.pyx":401
+ /* "mtrand.pyx":402
* array_data = <long *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>on, <void *>op)
* if (multi.size != PyArray_SIZE(array)): # <<<<<<<<<<<<<<
@@ -3860,33 +4048,33 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
__pyx_t_1 = (__pyx_v_multi->size != PyArray_SIZE(arrayObject));
if (__pyx_t_1) {
- /* "mtrand.pyx":402
+ /* "mtrand.pyx":403
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>on, <void *>op)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* on_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":403
+ /* "mtrand.pyx":404
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* on_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 2)
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":404
+ /* "mtrand.pyx":405
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size:
* on_data = <double *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -3895,7 +4083,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_on_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":405
+ /* "mtrand.pyx":406
* for i from 0 <= i < multi.size:
* on_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 2) # <<<<<<<<<<<<<<
@@ -3904,7 +4092,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
*/
__pyx_v_op_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 2));
- /* "mtrand.pyx":406
+ /* "mtrand.pyx":407
* on_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* op_data = <double *>PyArray_MultiIter_DATA(multi, 2)
* array_data[i] = func(state, on_data[0], op_data[0]) # <<<<<<<<<<<<<<
@@ -3913,7 +4101,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_on_data[0]), (__pyx_v_op_data[0]));
- /* "mtrand.pyx":407
+ /* "mtrand.pyx":408
* op_data = <double *>PyArray_MultiIter_DATA(multi, 2)
* array_data[i] = func(state, on_data[0], op_data[0])
* PyArray_MultiIter_NEXTi(multi, 1) # <<<<<<<<<<<<<<
@@ -3922,7 +4110,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
*/
PyArray_MultiIter_NEXTi(__pyx_v_multi, 1);
- /* "mtrand.pyx":408
+ /* "mtrand.pyx":409
* array_data[i] = func(state, on_data[0], op_data[0])
* PyArray_MultiIter_NEXTi(multi, 1)
* PyArray_MultiIter_NEXTi(multi, 2) # <<<<<<<<<<<<<<
@@ -3934,7 +4122,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
}
__pyx_L3:;
- /* "mtrand.pyx":410
+ /* "mtrand.pyx":411
* PyArray_MultiIter_NEXTi(multi, 2)
*
* return array # <<<<<<<<<<<<<<
@@ -3950,7 +4138,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("mtrand.discdd_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
@@ -3962,7 +4150,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6
return __pyx_r;
}
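/* Sketch (not generated code): every hunk in this diff makes one of two
 * mechanical changes, and the first is the refcount/cast pattern seen
 * above. The old codegen INCREF'd through a cast and dropped the temporary
 * afterwards; the new codegen aliases the owned reference into a fresh
 * PyObject* temporary, INCREFs the alias, releases the original, and does
 * the typed assignment last. Net refcounts are identical; the cast is just
 * separated from the refcount traffic. Hypothetical helper names below,
 * plain CPython API: */
static void assign_typed_old(PyObject *tmp, PyArrayObject **out)
{
    Py_INCREF((PyObject *)((PyArrayObject *)tmp)); /* incref via the cast  */
    *out = (PyArrayObject *)tmp;
    Py_DECREF(tmp);                                /* drop original owner  */
}
static void assign_typed_new(PyObject *tmp, PyArrayObject **out)
{
    PyObject *alias = tmp;         /* untyped alias of the owned reference */
    Py_INCREF(alias);              /* take our own reference first         */
    Py_DECREF(tmp);                /* release the incoming reference       */
    *out = (PyArrayObject *)alias; /* typed assignment happens last        */
}
/* Either way *out ends up owning exactly one reference. */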
-/* "mtrand.pyx":412
+/* "mtrand.pyx":413
* return array
*
* cdef object discnmN_array_sc(rk_state *state, rk_discnmN func, object size, # <<<<<<<<<<<<<<
@@ -3987,7 +4175,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("discnmN_array_sc", 0);
- /* "mtrand.pyx":419
+ /* "mtrand.pyx":420
* cdef npy_intp i
*
* if size is None: # <<<<<<<<<<<<<<
@@ -3997,7 +4185,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":420
+ /* "mtrand.pyx":421
*
* if size is None:
* return func(state, n, m, N) # <<<<<<<<<<<<<<
@@ -4005,7 +4193,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx
* array = <ndarray>np.empty(size, int)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_m, __pyx_v_N)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_m, __pyx_v_N)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 421; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -4014,19 +4202,19 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx
}
/*else*/ {
- /* "mtrand.pyx":422
+ /* "mtrand.pyx":423
* return func(state, n, m, N)
* else:
* array = <ndarray>np.empty(size, int) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -4034,15 +4222,17 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
__Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 422; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_4)));
- arrayObject = ((PyArrayObject *)__pyx_t_4);
+ __pyx_t_2 = __pyx_t_4;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":423
+ /* "mtrand.pyx":424
* else:
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -4051,7 +4241,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":424
+ /* "mtrand.pyx":425
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -4060,7 +4250,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":425
+ /* "mtrand.pyx":426
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
@@ -4070,7 +4260,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx
__pyx_t_5 = __pyx_v_length;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_5; __pyx_v_i++) {
- /* "mtrand.pyx":426
+ /* "mtrand.pyx":427
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length:
* array_data[i] = func(state, n, m, N) # <<<<<<<<<<<<<<
@@ -4080,7 +4270,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_m, __pyx_v_N);
}
- /* "mtrand.pyx":427
+ /* "mtrand.pyx":428
* for i from 0 <= i < length:
* array_data[i] = func(state, n, m, N)
* return array # <<<<<<<<<<<<<<
@@ -4109,7 +4299,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx
return __pyx_r;
}
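/* Sketch (not generated code): discnmN_array_sc above is the all-scalar
 * path -- size=None returns one draw via PyInt_FromLong(func(state, n, m,
 * N)), any other size allocates np.empty(size, int) and fills it in a
 * flat loop. That loop reduces to this hypothetical helper, assuming an
 * initialized rk_state: */
static void fill_discnmN_sc(rk_state *state,
                            long (*func)(rk_state *, long, long, long),
                            long n, long m, long N,
                            long *array_data, npy_intp length)
{
    npy_intp i;
    for (i = 0; i < length; i++) {
        array_data[i] = func(state, n, m, N); /* independent draw per slot */
    }
}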
-/* "mtrand.pyx":429
+/* "mtrand.pyx":430
* return array
*
* cdef object discnmN_array(rk_state *state, rk_discnmN func, object size, # <<<<<<<<<<<<<<
@@ -4129,15 +4319,15 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
- npy_intp __pyx_t_3;
- PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ npy_intp __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("discnmN_array", 0);
- /* "mtrand.pyx":440
+ /* "mtrand.pyx":441
* cdef broadcast multi
*
* if size is None: # <<<<<<<<<<<<<<
@@ -4147,33 +4337,37 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":441
+ /* "mtrand.pyx":442
*
* if size is None:
* multi = <broadcast> PyArray_MultiIterNew(3, <void *>on, <void *>om, <void *>oN) # <<<<<<<<<<<<<<
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG)
* array_data = <long *>PyArray_DATA(array)
*/
- __pyx_t_2 = PyArray_MultiIterNew(3, ((void *)__pyx_v_on), ((void *)__pyx_v_om), ((void *)__pyx_v_oN)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 441; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_MultiIterNew(3, ((void *)__pyx_v_on), ((void *)__pyx_v_om), ((void *)__pyx_v_oN)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 442; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_2)));
- __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":442
+ /* "mtrand.pyx":443
* if size is None:
* multi = <broadcast> PyArray_MultiIterNew(3, <void *>on, <void *>om, <void *>oN)
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) # <<<<<<<<<<<<<<
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < multi.size:
*/
- __pyx_t_2 = PyArray_SimpleNew(__pyx_v_multi->nd, __pyx_v_multi->dimensions, NPY_LONG); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 442; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_SimpleNew(__pyx_v_multi->nd, __pyx_v_multi->dimensions, NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
arrayObject = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":443
+ /* "mtrand.pyx":444
* multi = <broadcast> PyArray_MultiIterNew(3, <void *>on, <void *>om, <void *>oN)
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -4182,17 +4376,17 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":444
+ /* "mtrand.pyx":445
* array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG)
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* on_data = <long *>PyArray_MultiIter_DATA(multi, 0)
* om_data = <long *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":445
+ /* "mtrand.pyx":446
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 0) # <<<<<<<<<<<<<<
@@ -4201,7 +4395,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
*/
__pyx_v_on_data = ((long *)PyArray_MultiIter_DATA(__pyx_v_multi, 0));
- /* "mtrand.pyx":446
+ /* "mtrand.pyx":447
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 0)
* om_data = <long *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -4210,7 +4404,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
*/
__pyx_v_om_data = ((long *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":447
+ /* "mtrand.pyx":448
* on_data = <long *>PyArray_MultiIter_DATA(multi, 0)
* om_data = <long *>PyArray_MultiIter_DATA(multi, 1)
* oN_data = <long *>PyArray_MultiIter_DATA(multi, 2) # <<<<<<<<<<<<<<
@@ -4219,7 +4413,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
*/
__pyx_v_oN_data = ((long *)PyArray_MultiIter_DATA(__pyx_v_multi, 2));
- /* "mtrand.pyx":448
+ /* "mtrand.pyx":449
* om_data = <long *>PyArray_MultiIter_DATA(multi, 1)
* oN_data = <long *>PyArray_MultiIter_DATA(multi, 2)
* array_data[i] = func(state, on_data[0], om_data[0], oN_data[0]) # <<<<<<<<<<<<<<
@@ -4228,7 +4422,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_on_data[0]), (__pyx_v_om_data[0]), (__pyx_v_oN_data[0]));
- /* "mtrand.pyx":449
+ /* "mtrand.pyx":450
* oN_data = <long *>PyArray_MultiIter_DATA(multi, 2)
* array_data[i] = func(state, on_data[0], om_data[0], oN_data[0])
* PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<<
@@ -4241,19 +4435,19 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
}
/*else*/ {
- /* "mtrand.pyx":451
+ /* "mtrand.pyx":452
* PyArray_MultiIter_NEXT(multi)
* else:
* array = <ndarray>np.empty(size, int) # <<<<<<<<<<<<<<
* array_data = <long *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(4, <void*>array, <void *>on, <void *>om,
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 452; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 452; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 452; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -4261,15 +4455,17 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
__Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 452; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_5)));
- arrayObject = ((PyArrayObject *)__pyx_t_5);
+ __pyx_t_2 = __pyx_t_5;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":452
+ /* "mtrand.pyx":453
* else:
* array = <ndarray>np.empty(size, int)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -4278,20 +4474,22 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":454
+ /* "mtrand.pyx":455
* array_data = <long *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(4, <void*>array, <void *>on, <void *>om,
* <void *>oN) # <<<<<<<<<<<<<<
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
*/
- __pyx_t_5 = PyArray_MultiIterNew(4, ((void *)arrayObject), ((void *)__pyx_v_on), ((void *)__pyx_v_om), ((void *)__pyx_v_oN)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_5)));
+ __pyx_t_2 = PyArray_MultiIterNew(4, ((void *)arrayObject), ((void *)__pyx_v_on), ((void *)__pyx_v_om), ((void *)__pyx_v_oN)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_5);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = 0;
- /* "mtrand.pyx":455
+ /* "mtrand.pyx":456
* multi = <broadcast>PyArray_MultiIterNew(4, <void*>array, <void *>on, <void *>om,
* <void *>oN)
* if (multi.size != PyArray_SIZE(array)): # <<<<<<<<<<<<<<
@@ -4301,33 +4499,33 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
__pyx_t_1 = (__pyx_v_multi->size != PyArray_SIZE(arrayObject));
if (__pyx_t_1) {
- /* "mtrand.pyx":456
+ /* "mtrand.pyx":457
* <void *>oN)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_7), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_7), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 457; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 457; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":457
+ /* "mtrand.pyx":458
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1)
* om_data = <long *>PyArray_MultiIter_DATA(multi, 2)
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":458
+ /* "mtrand.pyx":459
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -4336,7 +4534,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
*/
__pyx_v_on_data = ((long *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":459
+ /* "mtrand.pyx":460
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1)
* om_data = <long *>PyArray_MultiIter_DATA(multi, 2) # <<<<<<<<<<<<<<
@@ -4345,7 +4543,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
*/
__pyx_v_om_data = ((long *)PyArray_MultiIter_DATA(__pyx_v_multi, 2));
- /* "mtrand.pyx":460
+ /* "mtrand.pyx":461
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1)
* om_data = <long *>PyArray_MultiIter_DATA(multi, 2)
* oN_data = <long *>PyArray_MultiIter_DATA(multi, 3) # <<<<<<<<<<<<<<
@@ -4354,7 +4552,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
*/
__pyx_v_oN_data = ((long *)PyArray_MultiIter_DATA(__pyx_v_multi, 3));
- /* "mtrand.pyx":461
+ /* "mtrand.pyx":462
* om_data = <long *>PyArray_MultiIter_DATA(multi, 2)
* oN_data = <long *>PyArray_MultiIter_DATA(multi, 3)
* array_data[i] = func(state, on_data[0], om_data[0], oN_data[0]) # <<<<<<<<<<<<<<
@@ -4363,7 +4561,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_on_data[0]), (__pyx_v_om_data[0]), (__pyx_v_oN_data[0]));
- /* "mtrand.pyx":462
+ /* "mtrand.pyx":463
* oN_data = <long *>PyArray_MultiIter_DATA(multi, 3)
* array_data[i] = func(state, on_data[0], om_data[0], oN_data[0])
* PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<<
@@ -4375,7 +4573,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
}
__pyx_L3:;
- /* "mtrand.pyx":464
+ /* "mtrand.pyx":465
* PyArray_MultiIter_NEXT(multi)
*
* return array # <<<<<<<<<<<<<<
@@ -4391,7 +4589,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("mtrand.discnmN_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
@@ -4403,7 +4601,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_
return __pyx_r;
}
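/* Sketch (not generated code): discnmN_array above is the broadcast
 * variant. With size=None, PyArray_MultiIterNew(3, on, om, oN) computes
 * the broadcast shape and a matching NPY_LONG result is allocated from
 * multi.nd/multi.dimensions; with an explicit size, the output array is
 * prepended as operand 0 and multi.size is checked against
 * PyArray_SIZE(array). The size=None loop reduces to this hypothetical
 * helper (error handling elided): */
static void fill_broadcast3(PyArrayMultiIterObject *multi, rk_state *state,
                            long (*func)(rk_state *, long, long, long),
                            long *out)
{
    npy_intp i;
    for (i = 0; i < multi->size; i++) {
        long *on = (long *)PyArray_MultiIter_DATA(multi, 0);
        long *om = (long *)PyArray_MultiIter_DATA(multi, 1);
        long *oN = (long *)PyArray_MultiIter_DATA(multi, 2);
        out[i] = func(state, on[0], om[0], oN[0]);
        PyArray_MultiIter_NEXT(multi); /* advance all operands in lockstep */
    }
}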
-/* "mtrand.pyx":466
+/* "mtrand.pyx":467
* return array
*
* cdef object discd_array_sc(rk_state *state, rk_discd func, object size, double a): # <<<<<<<<<<<<<<
@@ -4428,7 +4626,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("discd_array_sc", 0);
- /* "mtrand.pyx":472
+ /* "mtrand.pyx":473
* cdef npy_intp i
*
* if size is None: # <<<<<<<<<<<<<<
@@ -4438,7 +4636,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":473
+ /* "mtrand.pyx":474
*
* if size is None:
* return func(state, a) # <<<<<<<<<<<<<<
@@ -4446,7 +4644,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t
* array = <ndarray>np.empty(size, int)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state, __pyx_v_a)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 473; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_func(__pyx_v_state, __pyx_v_a)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 474; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -4455,19 +4653,19 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t
}
/*else*/ {
- /* "mtrand.pyx":475
+ /* "mtrand.pyx":476
* return func(state, a)
* else:
* array = <ndarray>np.empty(size, int) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 475; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 476; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 475; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 476; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 475; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 476; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -4475,15 +4673,17 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
__Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 475; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 476; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_4)));
- arrayObject = ((PyArrayObject *)__pyx_t_4);
+ __pyx_t_2 = __pyx_t_4;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":476
+ /* "mtrand.pyx":477
* else:
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -4492,7 +4692,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":477
+ /* "mtrand.pyx":478
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -4501,7 +4701,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":478
+ /* "mtrand.pyx":479
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
@@ -4511,7 +4711,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t
__pyx_t_5 = __pyx_v_length;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_5; __pyx_v_i++) {
- /* "mtrand.pyx":479
+ /* "mtrand.pyx":480
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length:
* array_data[i] = func(state, a) # <<<<<<<<<<<<<<
@@ -4521,7 +4721,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_a);
}
- /* "mtrand.pyx":480
+ /* "mtrand.pyx":481
* for i from 0 <= i < length:
* array_data[i] = func(state, a)
* return array # <<<<<<<<<<<<<<
@@ -4550,7 +4750,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t
return __pyx_r;
}
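/* Sketch (not generated code): the second mechanical change in this diff
 * is the np.empty lookup above -- __Pyx_GetName(__pyx_m, ...) becomes
 * __Pyx_GetModuleGlobalName(...) and PyObject_GetAttr becomes
 * __Pyx_PyObject_GetAttrStr, newer Cython helpers for module-global and
 * interned-string attribute access. In plain CPython API the call still
 * reduces to the following (illustrative names; PyInt_Type is spelled as
 * in the generated code and maps to PyLong_Type on Python 3): */
static PyObject *call_np_empty(PyObject *np_module, PyObject *size)
{
    PyObject *empty, *args, *result;
    empty = PyObject_GetAttrString(np_module, "empty");
    if (empty == NULL) return NULL;
    args = PyTuple_Pack(2, size, (PyObject *)&PyInt_Type); /* (size, int) */
    if (args == NULL) { Py_DECREF(empty); return NULL; }
    result = PyObject_Call(empty, args, NULL); /* np.empty(size, int) */
    Py_DECREF(args);
    Py_DECREF(empty);
    return result; /* new reference to the freshly allocated ndarray */
}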
-/* "mtrand.pyx":482
+/* "mtrand.pyx":483
* return array
*
* cdef object discd_array(rk_state *state, rk_discd func, object size, ndarray oa): # <<<<<<<<<<<<<<
@@ -4570,15 +4770,15 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
- npy_intp __pyx_t_3;
- PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ npy_intp __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("discd_array", 0);
- /* "mtrand.pyx":491
+ /* "mtrand.pyx":492
* cdef flatiter itera
*
* if size is None: # <<<<<<<<<<<<<<
@@ -4588,20 +4788,22 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":493
+ /* "mtrand.pyx":494
* if size is None:
* array = <ndarray>PyArray_SimpleNew(PyArray_NDIM(oa),
* PyArray_DIMS(oa), NPY_LONG) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
*/
- __pyx_t_2 = PyArray_SimpleNew(PyArray_NDIM(__pyx_v_oa), PyArray_DIMS(__pyx_v_oa), NPY_LONG); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 492; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_SimpleNew(PyArray_NDIM(__pyx_v_oa), PyArray_DIMS(__pyx_v_oa), NPY_LONG); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 493; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":494
+ /* "mtrand.pyx":495
* array = <ndarray>PyArray_SimpleNew(PyArray_NDIM(oa),
* PyArray_DIMS(oa), NPY_LONG)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -4610,7 +4812,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":495
+ /* "mtrand.pyx":496
* PyArray_DIMS(oa), NPY_LONG)
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -4619,30 +4821,32 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":496
+ /* "mtrand.pyx":497
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
* itera = <flatiter>PyArray_IterNew(<object>oa) # <<<<<<<<<<<<<<
* for i from 0 <= i < length:
* array_data[i] = func(state, (<double *>(itera.dataptr))[0])
*/
- __pyx_t_2 = PyArray_IterNew(((PyObject *)__pyx_v_oa)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 496; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayIterObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_IterNew(((PyObject *)__pyx_v_oa)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_itera = ((PyArrayIterObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":497
+ /* "mtrand.pyx":498
* array_data = <long *>PyArray_DATA(array)
* itera = <flatiter>PyArray_IterNew(<object>oa)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
* array_data[i] = func(state, (<double *>(itera.dataptr))[0])
* PyArray_ITER_NEXT(itera)
*/
- __pyx_t_3 = __pyx_v_length;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_length;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":498
+ /* "mtrand.pyx":499
* itera = <flatiter>PyArray_IterNew(<object>oa)
* for i from 0 <= i < length:
* array_data[i] = func(state, (<double *>(itera.dataptr))[0]) # <<<<<<<<<<<<<<
@@ -4651,7 +4855,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (((double *)__pyx_v_itera->dataptr)[0]));
- /* "mtrand.pyx":499
+ /* "mtrand.pyx":500
* for i from 0 <= i < length:
* array_data[i] = func(state, (<double *>(itera.dataptr))[0])
* PyArray_ITER_NEXT(itera) # <<<<<<<<<<<<<<
@@ -4664,19 +4868,19 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
}
/*else*/ {
- /* "mtrand.pyx":501
+ /* "mtrand.pyx":502
* PyArray_ITER_NEXT(itera)
* else:
* array = <ndarray>np.empty(size, int) # <<<<<<<<<<<<<<
* array_data = <long *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(2, <void *>array, <void *>oa)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
@@ -4684,15 +4888,17 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
__Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_5)));
- arrayObject = ((PyArrayObject *)__pyx_t_5);
+ __pyx_t_2 = __pyx_t_5;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":502
+ /* "mtrand.pyx":503
* else:
* array = <ndarray>np.empty(size, int)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -4701,20 +4907,22 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":503
+ /* "mtrand.pyx":504
* array = <ndarray>np.empty(size, int)
* array_data = <long *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(2, <void *>array, <void *>oa) # <<<<<<<<<<<<<<
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
*/
- __pyx_t_5 = PyArray_MultiIterNew(2, ((void *)arrayObject), ((void *)__pyx_v_oa)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_INCREF(((PyObject *)((PyArrayMultiIterObject *)__pyx_t_5)));
+ __pyx_t_2 = PyArray_MultiIterNew(2, ((void *)arrayObject), ((void *)__pyx_v_oa)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_5 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_multi = ((PyArrayMultiIterObject *)__pyx_t_5);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = 0;
- /* "mtrand.pyx":504
+ /* "mtrand.pyx":505
* array_data = <long *>PyArray_DATA(array)
* multi = <broadcast>PyArray_MultiIterNew(2, <void *>array, <void *>oa)
* if (multi.size != PyArray_SIZE(array)): # <<<<<<<<<<<<<<
@@ -4724,33 +4932,33 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
__pyx_t_1 = (__pyx_v_multi->size != PyArray_SIZE(arrayObject));
if (__pyx_t_1) {
- /* "mtrand.pyx":505
+ /* "mtrand.pyx":506
* multi = <broadcast>PyArray_MultiIterNew(2, <void *>array, <void *>oa)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":506
+ /* "mtrand.pyx":507
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size: # <<<<<<<<<<<<<<
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, oa_data[0])
*/
- __pyx_t_3 = __pyx_v_multi->size;
- for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_3; __pyx_v_i++) {
+ __pyx_t_4 = __pyx_v_multi->size;
+ for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_4; __pyx_v_i++) {
- /* "mtrand.pyx":507
+ /* "mtrand.pyx":508
* raise ValueError("size is not compatible with inputs")
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1) # <<<<<<<<<<<<<<
@@ -4759,7 +4967,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
__pyx_v_oa_data = ((double *)PyArray_MultiIter_DATA(__pyx_v_multi, 1));
- /* "mtrand.pyx":508
+ /* "mtrand.pyx":509
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, oa_data[0]) # <<<<<<<<<<<<<<
@@ -4768,7 +4976,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
*/
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_oa_data[0]));
- /* "mtrand.pyx":509
+ /* "mtrand.pyx":510
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
* array_data[i] = func(state, oa_data[0])
* PyArray_MultiIter_NEXTi(multi, 1) # <<<<<<<<<<<<<<
@@ -4780,7 +4988,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
}
__pyx_L3:;
- /* "mtrand.pyx":510
+ /* "mtrand.pyx":511
* array_data[i] = func(state, oa_data[0])
* PyArray_MultiIter_NEXTi(multi, 1)
* return array # <<<<<<<<<<<<<<
@@ -4796,7 +5004,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("mtrand.discd_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
@@ -4809,7 +5017,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m
return __pyx_r;
}
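/* Sketch (not generated code): discd_array above takes an array-valued
 * parameter oa. With size=None the result copies oa's shape and oa is
 * walked with a flat iterator (PyArray_IterNew); with an explicit size, a
 * two-operand multi-iterator broadcasts the output against oa. The
 * flatiter loop reduces to this hypothetical helper: */
static void fill_from_flatiter(PyArrayIterObject *itera, rk_state *state,
                               long (*func)(rk_state *, double),
                               long *out, npy_intp length)
{
    npy_intp i;
    for (i = 0; i < length; i++) {
        out[i] = func(state, ((double *)itera->dataptr)[0]);
        PyArray_ITER_NEXT(itera); /* step to the next element of oa */
    }
}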
-/* "mtrand.pyx":512
+/* "mtrand.pyx":513
* return array
*
* cdef double kahan_sum(double *darr, npy_intp n): # <<<<<<<<<<<<<<
@@ -4828,7 +5036,7 @@ static double __pyx_f_6mtrand_kahan_sum(double *__pyx_v_darr, npy_intp __pyx_v_n
npy_intp __pyx_t_1;
__Pyx_RefNannySetupContext("kahan_sum", 0);
- /* "mtrand.pyx":515
+ /* "mtrand.pyx":516
* cdef double c, y, t, sum
* cdef npy_intp i
* sum = darr[0] # <<<<<<<<<<<<<<
@@ -4837,7 +5045,7 @@ static double __pyx_f_6mtrand_kahan_sum(double *__pyx_v_darr, npy_intp __pyx_v_n
*/
__pyx_v_sum = (__pyx_v_darr[0]);
- /* "mtrand.pyx":516
+ /* "mtrand.pyx":517
* cdef npy_intp i
* sum = darr[0]
* c = 0.0 # <<<<<<<<<<<<<<
@@ -4846,7 +5054,7 @@ static double __pyx_f_6mtrand_kahan_sum(double *__pyx_v_darr, npy_intp __pyx_v_n
*/
__pyx_v_c = 0.0;
- /* "mtrand.pyx":517
+ /* "mtrand.pyx":518
* sum = darr[0]
* c = 0.0
* for i from 1 <= i < n: # <<<<<<<<<<<<<<
@@ -4856,7 +5064,7 @@ static double __pyx_f_6mtrand_kahan_sum(double *__pyx_v_darr, npy_intp __pyx_v_n
__pyx_t_1 = __pyx_v_n;
for (__pyx_v_i = 1; __pyx_v_i < __pyx_t_1; __pyx_v_i++) {
- /* "mtrand.pyx":518
+ /* "mtrand.pyx":519
* c = 0.0
* for i from 1 <= i < n:
* y = darr[i] - c # <<<<<<<<<<<<<<
@@ -4865,7 +5073,7 @@ static double __pyx_f_6mtrand_kahan_sum(double *__pyx_v_darr, npy_intp __pyx_v_n
*/
__pyx_v_y = ((__pyx_v_darr[__pyx_v_i]) - __pyx_v_c);
- /* "mtrand.pyx":519
+ /* "mtrand.pyx":520
* for i from 1 <= i < n:
* y = darr[i] - c
* t = sum + y # <<<<<<<<<<<<<<
@@ -4874,7 +5082,7 @@ static double __pyx_f_6mtrand_kahan_sum(double *__pyx_v_darr, npy_intp __pyx_v_n
*/
__pyx_v_t = (__pyx_v_sum + __pyx_v_y);
- /* "mtrand.pyx":520
+ /* "mtrand.pyx":521
* y = darr[i] - c
* t = sum + y
* c = (t-sum) - y # <<<<<<<<<<<<<<
@@ -4883,7 +5091,7 @@ static double __pyx_f_6mtrand_kahan_sum(double *__pyx_v_darr, npy_intp __pyx_v_n
*/
__pyx_v_c = ((__pyx_v_t - __pyx_v_sum) - __pyx_v_y);
- /* "mtrand.pyx":521
+ /* "mtrand.pyx":522
* t = sum + y
* c = (t-sum) - y
* sum = t # <<<<<<<<<<<<<<
@@ -4893,7 +5101,7 @@ static double __pyx_f_6mtrand_kahan_sum(double *__pyx_v_darr, npy_intp __pyx_v_n
__pyx_v_sum = __pyx_v_t;
}
- /* "mtrand.pyx":522
+ /* "mtrand.pyx":523
* c = (t-sum) - y
* sum = t
* return sum # <<<<<<<<<<<<<<
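/* Sketch (not generated code): kahan_sum above is textbook compensated
 * (Kahan) summation -- c accumulates the low-order bits lost in sum + y
 * and feeds them back into the next term; mtrand uses it where plain
 * left-to-right summation of many small doubles would drift (e.g. when
 * totalling multinomial probabilities). A self-contained equivalent,
 * assuming n >= 1 exactly as the generated code does: */
static double kahan_sum_sketch(const double *darr, npy_intp n)
{
    double sum = darr[0]; /* seed the total with the first element   */
    double c = 0.0;       /* running compensation for lost low bits  */
    npy_intp i;
    for (i = 1; i < n; i++) {
        double y = darr[i] - c; /* correct the incoming term         */
        double t = sum + y;     /* low bits of y may be lost here... */
        c = (t - sum) - y;      /* ...and are recovered into c       */
        sum = t;
    }
    return sum;
}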
@@ -4913,6 +5121,9 @@ static double __pyx_f_6mtrand_kahan_sum(double *__pyx_v_darr, npy_intp __pyx_v_n
static int __pyx_pw_6mtrand_11RandomState_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_6mtrand_11RandomState_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_seed = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
@@ -4920,7 +5131,7 @@ static int __pyx_pw_6mtrand_11RandomState_1__init__(PyObject *__pyx_v_self, PyOb
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__seed,0};
PyObject* values[1] = {0};
- /* "mtrand.pyx":560
+ /* "mtrand.pyx":561
* poisson_lam_max = np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10
*
* def __init__(self, seed=None): # <<<<<<<<<<<<<<
@@ -4945,7 +5156,7 @@ static int __pyx_pw_6mtrand_11RandomState_1__init__(PyObject *__pyx_v_self, PyOb
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -4958,7 +5169,7 @@ static int __pyx_pw_6mtrand_11RandomState_1__init__(PyObject *__pyx_v_self, PyOb
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 560; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 561; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -4980,7 +5191,7 @@ static int __pyx_pf_6mtrand_11RandomState___init__(struct __pyx_obj_6mtrand_Rand
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__init__", 0);
- /* "mtrand.pyx":561
+ /* "mtrand.pyx":562
*
* def __init__(self, seed=None):
* self.internal_state = <rk_state*>PyMem_Malloc(sizeof(rk_state)) # <<<<<<<<<<<<<<
@@ -4989,21 +5200,21 @@ static int __pyx_pf_6mtrand_11RandomState___init__(struct __pyx_obj_6mtrand_Rand
*/
__pyx_v_self->internal_state = ((rk_state *)PyMem_Malloc((sizeof(rk_state))));
- /* "mtrand.pyx":563
+ /* "mtrand.pyx":564
* self.internal_state = <rk_state*>PyMem_Malloc(sizeof(rk_state))
*
* self.seed(seed) # <<<<<<<<<<<<<<
*
* def __dealloc__(self):
*/
- __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__seed); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__seed); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_seed);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_seed);
__Pyx_GIVEREF(__pyx_v_seed);
- __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
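/* Sketch (not generated code): besides the __Pyx_PyObject_GetAttrStr
 * switch, the wrapper hunks above also gain local __pyx_lineno /
 * __pyx_filename / __pyx_clineno declarations, which newer Cython emits
 * per wrapper for error reporting. The __init__ hunk above and the
 * __dealloc__ hunks just below reduce to a malloc/free lifecycle around
 * rk_state; hypothetical names below, with a NULL check the generated
 * code does not perform: */
typedef struct { rk_state *internal_state; } randomstate_sketch;
static int randomstate_init_sketch(randomstate_sketch *self)
{
    self->internal_state = (rk_state *)PyMem_Malloc(sizeof(rk_state));
    if (self->internal_state == NULL) return -1; /* added check */
    return 0; /* caller then invokes self.seed(seed), as above */
}
static void randomstate_dealloc_sketch(randomstate_sketch *self)
{
    if (self->internal_state != NULL) {
        PyMem_Free(self->internal_state);
        self->internal_state = NULL; /* guard against double free */
    }
}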
@@ -5031,7 +5242,7 @@ static void __pyx_pw_6mtrand_11RandomState_3__dealloc__(PyObject *__pyx_v_self)
__Pyx_RefNannyFinishContext();
}
-/* "mtrand.pyx":565
+/* "mtrand.pyx":566
* self.seed(seed)
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
@@ -5044,7 +5255,7 @@ static void __pyx_pf_6mtrand_11RandomState_2__dealloc__(struct __pyx_obj_6mtrand
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
- /* "mtrand.pyx":566
+ /* "mtrand.pyx":567
*
* def __dealloc__(self):
* if self.internal_state != NULL: # <<<<<<<<<<<<<<
@@ -5054,7 +5265,7 @@ static void __pyx_pf_6mtrand_11RandomState_2__dealloc__(struct __pyx_obj_6mtrand
__pyx_t_1 = (__pyx_v_self->internal_state != NULL);
if (__pyx_t_1) {
- /* "mtrand.pyx":567
+ /* "mtrand.pyx":568
* def __dealloc__(self):
* if self.internal_state != NULL:
* PyMem_Free(self.internal_state) # <<<<<<<<<<<<<<
@@ -5063,7 +5274,7 @@ static void __pyx_pf_6mtrand_11RandomState_2__dealloc__(struct __pyx_obj_6mtrand
*/
PyMem_Free(__pyx_v_self->internal_state);
- /* "mtrand.pyx":568
+ /* "mtrand.pyx":569
* if self.internal_state != NULL:
* PyMem_Free(self.internal_state)
* self.internal_state = NULL # <<<<<<<<<<<<<<
@@ -5083,6 +5294,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_5seed(PyObject *__pyx_v_self, Py
static char __pyx_doc_6mtrand_11RandomState_4seed[] = "\n seed(seed=None)\n\n Seed the generator.\n\n This method is called when `RandomState` is initialized. It can be\n called again to re-seed the generator. For details, see `RandomState`.\n\n Parameters\n ----------\n seed : int or array_like, optional\n Seed for `RandomState`.\n\n See Also\n --------\n RandomState\n\n ";
static PyObject *__pyx_pw_6mtrand_11RandomState_5seed(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_seed = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("seed (wrapper)", 0);
@@ -5090,7 +5304,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_5seed(PyObject *__pyx_v_self, Py
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__seed,0};
PyObject* values[1] = {0};
- /* "mtrand.pyx":570
+ /* "mtrand.pyx":571
* self.internal_state = NULL
*
* def seed(self, seed=None): # <<<<<<<<<<<<<<
@@ -5115,7 +5329,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_5seed(PyObject *__pyx_v_self, Py
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "seed") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "seed") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -5128,7 +5342,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_5seed(PyObject *__pyx_v_self, Py
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("seed", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 570; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("seed", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 571; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.seed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -5154,7 +5368,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_4seed(struct __pyx_obj_6mtrand_R
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("seed", 0);
- /* "mtrand.pyx":591
+ /* "mtrand.pyx":592
* cdef rk_error errcode
* cdef ndarray obj "arrayObject_obj"
* if seed is None: # <<<<<<<<<<<<<<
@@ -5164,7 +5378,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_4seed(struct __pyx_obj_6mtrand_R
__pyx_t_1 = (__pyx_v_seed == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":592
+ /* "mtrand.pyx":593
* cdef ndarray obj "arrayObject_obj"
* if seed is None:
* errcode = rk_randomseed(self.internal_state) # <<<<<<<<<<<<<<
@@ -5175,7 +5389,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_4seed(struct __pyx_obj_6mtrand_R
goto __pyx_L3;
}
- /* "mtrand.pyx":593
+ /* "mtrand.pyx":594
* if seed is None:
* errcode = rk_randomseed(self.internal_state)
* elif type(seed) is int: # <<<<<<<<<<<<<<
@@ -5185,79 +5399,81 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_4seed(struct __pyx_obj_6mtrand_R
__pyx_t_1 = (((PyObject *)Py_TYPE(__pyx_v_seed)) == ((PyObject *)((PyObject*)(&PyInt_Type))));
if (__pyx_t_1) {
- /* "mtrand.pyx":594
+ /* "mtrand.pyx":595
* errcode = rk_randomseed(self.internal_state)
* elif type(seed) is int:
* rk_seed(seed, self.internal_state) # <<<<<<<<<<<<<<
* elif isinstance(seed, np.integer):
* iseed = int(seed)
*/
- __pyx_t_2 = __Pyx_PyInt_AsUnsignedLong(__pyx_v_seed); if (unlikely((__pyx_t_2 == (unsigned long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyInt_AsUnsignedLong(__pyx_v_seed); if (unlikely((__pyx_t_2 == (unsigned long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
rk_seed(__pyx_t_2, __pyx_v_self->internal_state);
goto __pyx_L3;
}
- /* "mtrand.pyx":595
+ /* "mtrand.pyx":596
* elif type(seed) is int:
* rk_seed(seed, self.internal_state)
* elif isinstance(seed, np.integer): # <<<<<<<<<<<<<<
* iseed = int(seed)
* rk_seed(iseed, self.internal_state)
*/
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__integer); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__integer); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_1 = PyObject_IsInstance(__pyx_v_seed, __pyx_t_4); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_IsInstance(__pyx_v_seed, __pyx_t_4); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":596
+ /* "mtrand.pyx":597
* rk_seed(seed, self.internal_state)
* elif isinstance(seed, np.integer):
* iseed = int(seed) # <<<<<<<<<<<<<<
* rk_seed(iseed, self.internal_state)
* else:
*/
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_v_seed);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_seed);
__Pyx_GIVEREF(__pyx_v_seed);
- __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PyInt_Type))), ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 596; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PyInt_Type))), ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
__pyx_v_iseed = __pyx_t_3;
__pyx_t_3 = 0;
- /* "mtrand.pyx":597
+ /* "mtrand.pyx":598
* elif isinstance(seed, np.integer):
* iseed = int(seed)
* rk_seed(iseed, self.internal_state) # <<<<<<<<<<<<<<
* else:
* obj = <ndarray>PyArray_ContiguousFromObject(seed, NPY_LONG, 1, 1)
*/
- __pyx_t_2 = __Pyx_PyInt_AsUnsignedLong(__pyx_v_iseed); if (unlikely((__pyx_t_2 == (unsigned long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyInt_AsUnsignedLong(__pyx_v_iseed); if (unlikely((__pyx_t_2 == (unsigned long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 598; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
rk_seed(__pyx_t_2, __pyx_v_self->internal_state);
goto __pyx_L3;
}
/*else*/ {
- /* "mtrand.pyx":599
+ /* "mtrand.pyx":600
* rk_seed(iseed, self.internal_state)
* else:
* obj = <ndarray>PyArray_ContiguousFromObject(seed, NPY_LONG, 1, 1) # <<<<<<<<<<<<<<
* init_by_array(self.internal_state, <unsigned long *>PyArray_DATA(obj),
* PyArray_DIM(obj, 0))
*/
- __pyx_t_3 = PyArray_ContiguousFromObject(__pyx_v_seed, NPY_LONG, 1, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyArray_ContiguousFromObject(__pyx_v_seed, NPY_LONG, 1, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_3)));
- arrayObject_obj = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_4 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ arrayObject_obj = ((PyArrayObject *)__pyx_t_4);
+ __pyx_t_4 = 0;
- /* "mtrand.pyx":601
+ /* "mtrand.pyx":602
* obj = <ndarray>PyArray_ContiguousFromObject(seed, NPY_LONG, 1, 1)
* init_by_array(self.internal_state, <unsigned long *>PyArray_DATA(obj),
* PyArray_DIM(obj, 0)) # <<<<<<<<<<<<<<
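
The other recurring rewrite shows up just above: where the old output INCREF'd the cast expression directly (`__Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_3)))`), the new output copies into a fresh temporary, takes the reference on that, drops the original, and only then assigns through the cast. The net reference count is unchanged; the reshuffle appears to keep refnanny's bookkeeping attached to one plain pointer at a time. A toy refcount model of the new ordering (stand-in incref/decref, not CPython's):

#include <assert.h>
#include <stdio.h>

typedef struct { int refcnt; } Obj;
static void incref(Obj *o) { o->refcnt++; }
static void decref(Obj *o) { o->refcnt--; }

int main(void)
{
    Obj o = { 1 };              /* a new reference, as from a constructor */
    Obj *t3 = &o;

    /* New pattern: temp copy, incref the copy, decref the original,
     * then assign through the cast. Same net count as the old
     * incref-the-cast form, just one tracked pointer per step. */
    Obj *t4 = t3;
    incref(t4);
    decref(t3); t3 = NULL;
    Obj *target = t4;           /* arrayObject_obj = (PyArrayObject *)t4; */
    t4 = NULL;

    assert(target->refcnt == 1);
    printf("refcount preserved: %d\n", target->refcnt);
    return 0;
}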
@@ -5295,7 +5511,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_7get_state(PyObject *__pyx_v_sel
return __pyx_r;
}
-/* "mtrand.pyx":603
+/* "mtrand.pyx":604
* PyArray_DIM(obj, 0))
*
* def get_state(self): # <<<<<<<<<<<<<<
@@ -5316,24 +5532,24 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_6get_state(struct __pyx_obj_6mtr
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_state", 0);
- /* "mtrand.pyx":634
+ /* "mtrand.pyx":635
* """
* cdef ndarray state "arrayObject_state"
* state = <ndarray>np.empty(624, np.uint) # <<<<<<<<<<<<<<
* memcpy(<void*>PyArray_DATA(state), <void*>(self.internal_state.key), 624*sizeof(long))
* state = <ndarray>np.asarray(state, np.uint32)
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 634; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 634; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__empty); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 634; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__uint); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 634; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__uint); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 634; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_int_624);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_int_624);
@@ -5341,15 +5557,17 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_6get_state(struct __pyx_obj_6mtr
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
- __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 634; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_3)));
- arrayObject_state = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_1 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ arrayObject_state = ((PyArrayObject *)__pyx_t_1);
+ __pyx_t_1 = 0;
- /* "mtrand.pyx":635
+ /* "mtrand.pyx":636
* cdef ndarray state "arrayObject_state"
* state = <ndarray>np.empty(624, np.uint)
* memcpy(<void*>PyArray_DATA(state), <void*>(self.internal_state.key), 624*sizeof(long)) # <<<<<<<<<<<<<<
@@ -5358,41 +5576,43 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_6get_state(struct __pyx_obj_6mtr
*/
memcpy(((void *)PyArray_DATA(arrayObject_state)), ((void *)__pyx_v_self->internal_state->key), (624 * (sizeof(long))));
- /* "mtrand.pyx":636
+ /* "mtrand.pyx":637
* state = <ndarray>np.empty(624, np.uint)
* memcpy(<void*>PyArray_DATA(state), <void*>(self.internal_state.key), 624*sizeof(long))
* state = <ndarray>np.asarray(state, np.uint32) # <<<<<<<<<<<<<<
* return ('MT19937', state, self.internal_state.pos,
* self.internal_state.has_gauss, self.internal_state.gauss)
*/
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__asarray); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__asarray); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__uint32); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__uint32); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)arrayObject_state));
- PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)arrayObject_state));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)arrayObject_state));
__Pyx_GIVEREF(((PyObject *)arrayObject_state));
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __Pyx_DECREF(((PyObject *)arrayObject_state));
- arrayObject_state = ((PyArrayObject *)__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __pyx_t_1 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)arrayObject_state));
+ arrayObject_state = ((PyArrayObject *)__pyx_t_1);
+ __pyx_t_1 = 0;
- /* "mtrand.pyx":637
+ /* "mtrand.pyx":638
* memcpy(<void*>PyArray_DATA(state), <void*>(self.internal_state.key), 624*sizeof(long))
* state = <ndarray>np.asarray(state, np.uint32)
* return ('MT19937', state, self.internal_state.pos, # <<<<<<<<<<<<<<
@@ -5400,21 +5620,21 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_6get_state(struct __pyx_obj_6mtr
*
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = PyInt_FromLong(__pyx_v_self->internal_state->pos); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = PyInt_FromLong(__pyx_v_self->internal_state->pos); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
- /* "mtrand.pyx":638
+ /* "mtrand.pyx":639
* state = <ndarray>np.asarray(state, np.uint32)
* return ('MT19937', state, self.internal_state.pos,
* self.internal_state.has_gauss, self.internal_state.gauss) # <<<<<<<<<<<<<<
*
* def set_state(self, state):
*/
- __pyx_t_3 = PyInt_FromLong(__pyx_v_self->internal_state->has_gauss); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyInt_FromLong(__pyx_v_self->internal_state->has_gauss); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 639; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyFloat_FromDouble(__pyx_v_self->internal_state->gauss); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 639; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->internal_state->gauss); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyTuple_New(5); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(5); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_n_s__MT19937));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_n_s__MT19937));
@@ -5422,15 +5642,15 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_6get_state(struct __pyx_obj_6mtr
__Pyx_INCREF(((PyObject *)arrayObject_state));
PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)arrayObject_state));
__Pyx_GIVEREF(((PyObject *)arrayObject_state));
- PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
- PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_4, 4, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
- PyTuple_SET_ITEM(__pyx_t_4, 4, __pyx_t_1);
- __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
- __pyx_t_1 = 0;
__pyx_r = ((PyObject *)__pyx_t_4);
__pyx_t_4 = 0;
goto __pyx_L0;
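
The get_state() hunks above mostly renumber temporaries, but the shape of the result is worth keeping in view: a 5-tuple ('MT19937', key, pos, has_gauss, gauss) assembled with PyTuple_SET_ITEM, which steals each reference (hence the paired __Pyx_GIVEREF calls). A compilable sketch of that assembly against the current C API, with placeholder values and current-API names standing in for the Python-2-era PyString/PyInt calls in the diff; error checks elided:

#include <Python.h>

int main(void)
{
    Py_Initialize();
    PyObject *t = PyTuple_New(5);
    if (t != NULL) {
        /* PyTuple_SET_ITEM steals each reference, so no DECREF of the
         * items is needed afterwards. Values are placeholders only. */
        PyTuple_SET_ITEM(t, 0, PyUnicode_FromString("MT19937"));
        PyTuple_SET_ITEM(t, 1, PyList_New(0));           /* key stand-in */
        PyTuple_SET_ITEM(t, 2, PyLong_FromLong(624));    /* pos */
        PyTuple_SET_ITEM(t, 3, PyLong_FromLong(0));      /* has_gauss */
        PyTuple_SET_ITEM(t, 4, PyFloat_FromDouble(0.0)); /* gauss */
        Py_DECREF(t);
    }
    Py_Finalize();
    return 0;
}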
@@ -5463,7 +5683,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_9set_state(PyObject *__pyx_v_sel
return __pyx_r;
}
-/* "mtrand.pyx":640
+/* "mtrand.pyx":641
* self.internal_state.has_gauss, self.internal_state.gauss)
*
* def set_state(self, state): # <<<<<<<<<<<<<<
@@ -5491,60 +5711,61 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
- double __pyx_t_12;
+ PyObject *__pyx_t_12 = NULL;
+ double __pyx_t_13;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("set_state", 0);
- /* "mtrand.pyx":689
+ /* "mtrand.pyx":690
* cdef ndarray obj "arrayObject_obj"
* cdef int pos
* algorithm_name = state[0] # <<<<<<<<<<<<<<
* if algorithm_name != 'MT19937':
* raise ValueError("algorithm must be 'MT19937'")
*/
- __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_state, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 689; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_state, 0, sizeof(long), PyInt_FromLong, 0, 0, 1); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_algorithm_name = __pyx_t_1;
__pyx_t_1 = 0;
- /* "mtrand.pyx":690
+ /* "mtrand.pyx":691
* cdef int pos
* algorithm_name = state[0]
* if algorithm_name != 'MT19937': # <<<<<<<<<<<<<<
* raise ValueError("algorithm must be 'MT19937'")
* key, pos = state[1:3]
*/
- __pyx_t_1 = PyObject_RichCompare(__pyx_v_algorithm_name, ((PyObject *)__pyx_n_s__MT19937), Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_RichCompare(__pyx_v_algorithm_name, ((PyObject *)__pyx_n_s__MT19937), Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (__pyx_t_2) {
- /* "mtrand.pyx":691
+ /* "mtrand.pyx":692
* algorithm_name = state[0]
* if algorithm_name != 'MT19937':
* raise ValueError("algorithm must be 'MT19937'") # <<<<<<<<<<<<<<
* key, pos = state[1:3]
* if len(state) == 3:
*/
- __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L3;
}
__pyx_L3:;
- /* "mtrand.pyx":692
+ /* "mtrand.pyx":693
* if algorithm_name != 'MT19937':
* raise ValueError("algorithm must be 'MT19937'")
* key, pos = state[1:3] # <<<<<<<<<<<<<<
* if len(state) == 3:
* has_gauss = 0
*/
- __pyx_t_1 = __Pyx_PySequence_GetSlice(__pyx_v_state, 1, 3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_state, 1, 3, NULL, NULL, &__pyx_k_slice_11, 1, 1, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) {
PyObject* sequence = __pyx_t_1;
@@ -5556,7 +5777,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
if (likely(PyTuple_CheckExact(sequence))) {
@@ -5569,14 +5790,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
- __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
{
Py_ssize_t index = -1;
- __pyx_t_5 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext;
@@ -5584,7 +5807,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__Pyx_GOTREF(__pyx_t_3);
index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L4_unpacking_failed;
__Pyx_GOTREF(__pyx_t_4);
- if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_6 = NULL;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L5_unpacking_done;
@@ -5592,27 +5815,27 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_L5_unpacking_done:;
}
- __pyx_t_7 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_7 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_key = __pyx_t_3;
__pyx_t_3 = 0;
__pyx_v_pos = __pyx_t_7;
- /* "mtrand.pyx":693
+ /* "mtrand.pyx":694
* raise ValueError("algorithm must be 'MT19937'")
* key, pos = state[1:3]
* if len(state) == 3: # <<<<<<<<<<<<<<
* has_gauss = 0
* cached_gaussian = 0.0
*/
- __pyx_t_8 = PyObject_Length(__pyx_v_state); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_8 = PyObject_Length(__pyx_v_state); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 694; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_2 = (__pyx_t_8 == 3);
if (__pyx_t_2) {
- /* "mtrand.pyx":694
+ /* "mtrand.pyx":695
* key, pos = state[1:3]
* if len(state) == 3:
* has_gauss = 0 # <<<<<<<<<<<<<<
@@ -5622,14 +5845,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__Pyx_INCREF(__pyx_int_0);
__pyx_v_has_gauss = __pyx_int_0;
- /* "mtrand.pyx":695
+ /* "mtrand.pyx":696
* if len(state) == 3:
* has_gauss = 0
* cached_gaussian = 0.0 # <<<<<<<<<<<<<<
* else:
* has_gauss, cached_gaussian = state[3:5]
*/
- __pyx_t_1 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 695; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 696; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_cached_gaussian = __pyx_t_1;
__pyx_t_1 = 0;
@@ -5637,14 +5860,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
}
/*else*/ {
- /* "mtrand.pyx":697
+ /* "mtrand.pyx":698
* cached_gaussian = 0.0
* else:
* has_gauss, cached_gaussian = state[3:5] # <<<<<<<<<<<<<<
* try:
* obj = <ndarray>PyArray_ContiguousFromObject(key, NPY_ULONG, 1, 1)
*/
- __pyx_t_1 = __Pyx_PySequence_GetSlice(__pyx_v_state, 3, 5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_state, 3, 5, NULL, NULL, &__pyx_k_slice_12, 1, 1, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) {
PyObject* sequence = __pyx_t_1;
@@ -5656,7 +5879,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
if (likely(PyTuple_CheckExact(sequence))) {
@@ -5669,14 +5892,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_3);
#else
- __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
{
Py_ssize_t index = -1;
- __pyx_t_5 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext;
@@ -5684,7 +5909,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__Pyx_GOTREF(__pyx_t_4);
index = 1; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L7_unpacking_failed;
__Pyx_GOTREF(__pyx_t_3);
- if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_6 = NULL;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L8_unpacking_done;
@@ -5692,7 +5917,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_L8_unpacking_done:;
}
__pyx_v_has_gauss = __pyx_t_4;
@@ -5702,7 +5927,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
}
__pyx_L6:;
- /* "mtrand.pyx":698
+ /* "mtrand.pyx":699
* else:
* has_gauss, cached_gaussian = state[3:5]
* try: # <<<<<<<<<<<<<<
@@ -5716,18 +5941,20 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__Pyx_XGOTREF(__pyx_t_11);
/*try:*/ {
- /* "mtrand.pyx":699
+ /* "mtrand.pyx":700
* has_gauss, cached_gaussian = state[3:5]
* try:
* obj = <ndarray>PyArray_ContiguousFromObject(key, NPY_ULONG, 1, 1) # <<<<<<<<<<<<<<
* except TypeError:
* # compatibility -- could be an older pickle
*/
- __pyx_t_1 = PyArray_ContiguousFromObject(__pyx_v_key, NPY_ULONG, 1, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 699; __pyx_clineno = __LINE__; goto __pyx_L9_error;}
+ __pyx_t_1 = PyArray_ContiguousFromObject(__pyx_v_key, NPY_ULONG, 1, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 700; __pyx_clineno = __LINE__; goto __pyx_L9_error;}
__Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_1)));
- arrayObject_obj = ((PyArrayObject *)__pyx_t_1);
+ __pyx_t_3 = __pyx_t_1;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ arrayObject_obj = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
}
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
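
The try/except being rewritten here implements the compatibility path the .pyx comment spells out: the key is first converted as NPY_ULONG, and a TypeError (e.g. from an older pickle that stored signed longs) triggers a retry as NPY_LONG. A plain-C caricature of that control flow, with a stand-in converter and pointer identity in place of a real dtype check:

#include <stdio.h>
#include <stddef.h>

typedef struct { const char *dtype; } Array;

/* Stand-in for PyArray_ContiguousFromObject: pretend the stored pickle
 * only converts cleanly to "long", returning NULL otherwise. */
static Array *contiguous_from(const char *stored, const char *want)
{
    static Array a;
    if (stored != want) return NULL;    /* simulated TypeError */
    a.dtype = want;
    return &a;
}

int main(void)
{
    const char *ulong = "ulong", *slong = "long";
    const char *stored = slong;         /* an "older pickle" */

    Array *obj = contiguous_from(stored, ulong);
    if (!obj)                           /* except TypeError: retry signed */
        obj = contiguous_from(stored, slong);

    printf("key converted as %s\n", obj ? obj->dtype : "(failed)");
    return obj ? 0 : 1;
}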
@@ -5736,10 +5963,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__pyx_L9_error:;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
- /* "mtrand.pyx":700
+ /* "mtrand.pyx":701
* try:
* obj = <ndarray>PyArray_ContiguousFromObject(key, NPY_ULONG, 1, 1)
* except TypeError: # <<<<<<<<<<<<<<
@@ -5749,26 +5976,28 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__pyx_t_7 = PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_7) {
__Pyx_AddTraceback("mtrand.RandomState.set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
- if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_3, &__pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 700; __pyx_clineno = __LINE__; goto __pyx_L11_except_error;}
- __Pyx_GOTREF(__pyx_t_1);
+ if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_1, &__pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 701; __pyx_clineno = __LINE__; goto __pyx_L11_except_error;}
__Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_t_4);
- /* "mtrand.pyx":702
+ /* "mtrand.pyx":703
* except TypeError:
* # compatibility -- could be an older pickle
* obj = <ndarray>PyArray_ContiguousFromObject(key, NPY_LONG, 1, 1) # <<<<<<<<<<<<<<
* if PyArray_DIM(obj, 0) != 624:
* raise ValueError("state must be 624 longs")
*/
- __pyx_t_5 = PyArray_ContiguousFromObject(__pyx_v_key, NPY_LONG, 1, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L11_except_error;}
+ __pyx_t_5 = PyArray_ContiguousFromObject(__pyx_v_key, NPY_LONG, 1, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L11_except_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_5)));
- __Pyx_XDECREF(((PyObject *)arrayObject_obj));
- arrayObject_obj = ((PyArrayObject *)__pyx_t_5);
+ __pyx_t_12 = __pyx_t_5;
+ __Pyx_INCREF(__pyx_t_12);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_XDECREF(((PyObject *)arrayObject_obj));
+ arrayObject_obj = ((PyArrayObject *)__pyx_t_12);
+ __pyx_t_12 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
goto __pyx_L10_exception_handled;
}
@@ -5786,7 +6015,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__pyx_L16_try_end:;
}
- /* "mtrand.pyx":703
+ /* "mtrand.pyx":704
* # compatibility -- could be an older pickle
* obj = <ndarray>PyArray_ContiguousFromObject(key, NPY_LONG, 1, 1)
* if PyArray_DIM(obj, 0) != 624: # <<<<<<<<<<<<<<
@@ -5796,23 +6025,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__pyx_t_2 = (PyArray_DIM(arrayObject_obj, 0) != 624);
if (__pyx_t_2) {
- /* "mtrand.pyx":704
+ /* "mtrand.pyx":705
* obj = <ndarray>PyArray_ContiguousFromObject(key, NPY_LONG, 1, 1)
* if PyArray_DIM(obj, 0) != 624:
* raise ValueError("state must be 624 longs") # <<<<<<<<<<<<<<
* memcpy(<void*>(self.internal_state.key), <void*>PyArray_DATA(obj), 624*sizeof(long))
* self.internal_state.pos = pos
*/
- __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L19;
}
__pyx_L19:;
- /* "mtrand.pyx":705
+ /* "mtrand.pyx":706
* if PyArray_DIM(obj, 0) != 624:
* raise ValueError("state must be 624 longs")
* memcpy(<void*>(self.internal_state.key), <void*>PyArray_DATA(obj), 624*sizeof(long)) # <<<<<<<<<<<<<<
@@ -5821,7 +6050,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
*/
memcpy(((void *)__pyx_v_self->internal_state->key), ((void *)PyArray_DATA(arrayObject_obj)), (624 * (sizeof(long))));
- /* "mtrand.pyx":706
+ /* "mtrand.pyx":707
* raise ValueError("state must be 624 longs")
* memcpy(<void*>(self.internal_state.key), <void*>PyArray_DATA(obj), 624*sizeof(long))
* self.internal_state.pos = pos # <<<<<<<<<<<<<<
@@ -5830,25 +6059,25 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
*/
__pyx_v_self->internal_state->pos = __pyx_v_pos;
- /* "mtrand.pyx":707
+ /* "mtrand.pyx":708
* memcpy(<void*>(self.internal_state.key), <void*>PyArray_DATA(obj), 624*sizeof(long))
* self.internal_state.pos = pos
* self.internal_state.has_gauss = has_gauss # <<<<<<<<<<<<<<
* self.internal_state.gauss = cached_gaussian
*
*/
- __pyx_t_7 = __Pyx_PyInt_AsInt(__pyx_v_has_gauss); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_7 = __Pyx_PyInt_AsInt(__pyx_v_has_gauss); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_self->internal_state->has_gauss = __pyx_t_7;
- /* "mtrand.pyx":708
+ /* "mtrand.pyx":709
* self.internal_state.pos = pos
* self.internal_state.has_gauss = has_gauss
* self.internal_state.gauss = cached_gaussian # <<<<<<<<<<<<<<
*
* # Pickling support:
*/
- __pyx_t_12 = __pyx_PyFloat_AsDouble(__pyx_v_cached_gaussian); if (unlikely((__pyx_t_12 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_v_self->internal_state->gauss = __pyx_t_12;
+ __pyx_t_13 = __pyx_PyFloat_AsDouble(__pyx_v_cached_gaussian); if (unlikely((__pyx_t_13 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_self->internal_state->gauss = __pyx_t_13;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
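
The conversions just above lean on a C-API idiom worth naming: -1 is both a legal result and the error sentinel for PyFloat_AsDouble and the __Pyx_PyInt_As* helpers, so the generated code always pairs the comparison with PyErr_Occurred() to disambiguate. A minimal embedded-interpreter demonstration:

#include <Python.h>

int main(void)
{
    Py_Initialize();
    /* -1.0 is a perfectly valid float value, so the sentinel alone
     * proves nothing; only a pending exception makes it a real error. */
    PyObject *f = PyFloat_FromDouble(-1.0);
    double g = PyFloat_AsDouble(f);
    int failed = (g == -1.0 && PyErr_Occurred() != NULL);
    printf("value=%g, real error=%s\n", g, failed ? "yes" : "no");
    Py_DECREF(f);
    Py_Finalize();
    return 0;
}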
@@ -5857,6 +6086,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_12);
__Pyx_AddTraceback("mtrand.RandomState.set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
@@ -5881,7 +6111,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_11__getstate__(PyObject *__pyx_v
return __pyx_r;
}
-/* "mtrand.pyx":711
+/* "mtrand.pyx":712
*
* # Pickling support:
* def __getstate__(self): # <<<<<<<<<<<<<<
@@ -5899,7 +6129,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_10__getstate__(struct __pyx_obj_
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getstate__", 0);
- /* "mtrand.pyx":712
+ /* "mtrand.pyx":713
* # Pickling support:
* def __getstate__(self):
* return self.get_state() # <<<<<<<<<<<<<<
@@ -5907,9 +6137,9 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_10__getstate__(struct __pyx_obj_
* def __setstate__(self, state):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__get_state); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 712; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__get_state); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 713; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 712; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 713; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
@@ -5940,7 +6170,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_13__setstate__(PyObject *__pyx_v
return __pyx_r;
}
-/* "mtrand.pyx":714
+/* "mtrand.pyx":715
* return self.get_state()
*
* def __setstate__(self, state): # <<<<<<<<<<<<<<
@@ -5959,21 +6189,21 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_12__setstate__(struct __pyx_obj_
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate__", 0);
- /* "mtrand.pyx":715
+ /* "mtrand.pyx":716
*
* def __setstate__(self, state):
* self.set_state(state) # <<<<<<<<<<<<<<
*
* def __reduce__(self):
*/
- __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__set_state); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 715; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__set_state); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 715; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
- __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 715; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
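
A change threaded through every method in this diff swaps PyObject_GetAttr for __Pyx_PyObject_GetAttrStr. The Cython helper is assumed to dispatch straight through the type's tp_getattro slot with an already-interned name, skipping a layer of generic dispatch; a simplified sketch of that idea (not the actual Cython helper):

#include <Python.h>

static PyObject *getattr_str(PyObject *o, PyObject *name)
{
    /* Assumed fast path: call the type's slot directly with an
     * interned name object; fall back to the generic lookup. */
    getattrofunc f = Py_TYPE(o)->tp_getattro;
    if (f != NULL)
        return f(o, name);
    return PyObject_GetAttr(o, name);
}

int main(void)
{
    Py_Initialize();
    PyObject *name = PyUnicode_InternFromString("upper");
    PyObject *s = PyUnicode_FromString("mt19937");
    PyObject *m = getattr_str(s, name);
    int ok = (m != NULL && PyCallable_Check(m));
    Py_XDECREF(m); Py_DECREF(s); Py_DECREF(name);
    Py_Finalize();
    return ok ? 0 : 1;
}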
@@ -6004,7 +6234,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_15__reduce__(PyObject *__pyx_v_s
return __pyx_r;
}
-/* "mtrand.pyx":717
+/* "mtrand.pyx":718
* self.set_state(state)
*
* def __reduce__(self): # <<<<<<<<<<<<<<
@@ -6023,7 +6253,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_14__reduce__(struct __pyx_obj_6m
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce__", 0);
- /* "mtrand.pyx":718
+ /* "mtrand.pyx":719
*
* def __reduce__(self):
* return (np.random.__RandomState_ctor, (), self.get_state()) # <<<<<<<<<<<<<<
@@ -6031,20 +6261,20 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_14__reduce__(struct __pyx_obj_6m
* # Basic distributions:
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__random); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__random); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s____RandomState_ctor); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s____RandomState_ctor); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__get_state); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__get_state); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
@@ -6078,6 +6308,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_17random_sample(PyObject *__pyx_
static char __pyx_doc_6mtrand_11RandomState_16random_sample[] = "\n random_sample(size=None)\n\n Return random floats in the half-open interval [0.0, 1.0).\n\n Results are from the \"continuous uniform\" distribution over the\n stated interval. To sample :math:`Unif[a, b), b > a` multiply\n the output of `random_sample` by `(b-a)` and add `a`::\n\n (b - a) * random_sample() + a\n\n Parameters\n ----------\n size : int or tuple of ints, optional\n Defines the shape of the returned array of random floats. If None\n (the default), returns a single float.\n\n Returns\n -------\n out : float or ndarray of floats\n Array of random floats of shape `size` (unless ``size=None``, in which\n case a single float is returned).\n\n Examples\n --------\n >>> np.random.random_sample()\n 0.47108547995356098\n >>> type(np.random.random_sample())\n <type 'float'>\n >>> np.random.random_sample((5,))\n array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428])\n\n Three-by-two array of random numbers from [-5, 0):\n\n >>> 5 * np.random.random_sample((3, 2)) - 5\n array([[-3.99149989, -0.52338984],\n [-2.99091858, -0.79479508],\n [-1.23204345, -1.75224494]])\n\n ";
static PyObject *__pyx_pw_6mtrand_11RandomState_17random_sample(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("random_sample (wrapper)", 0);
@@ -6085,7 +6318,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_17random_sample(PyObject *__pyx_
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__size,0};
PyObject* values[1] = {0};
- /* "mtrand.pyx":721
+ /* "mtrand.pyx":722
*
* # Basic distributions:
* def random_sample(self, size=None): # <<<<<<<<<<<<<<
@@ -6110,7 +6343,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_17random_sample(PyObject *__pyx_
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "random_sample") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "random_sample") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 722; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -6123,7 +6356,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_17random_sample(PyObject *__pyx_
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("random_sample", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("random_sample", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 722; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.random_sample", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -6143,7 +6376,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_16random_sample(struct __pyx_obj
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("random_sample", 0);
- /* "mtrand.pyx":762
+ /* "mtrand.pyx":763
*
* """
* return cont0_array(self.internal_state, rk_double, size) # <<<<<<<<<<<<<<
@@ -6151,7 +6384,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_16random_sample(struct __pyx_obj
* def tomaxint(self, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_double, __pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 762; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_double, __pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 763; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
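
random_sample is one line of real work: delegate to cont0_array with the rk_double sampler and the requested size. A self-contained caricature of that call shape, with a toy LCG standing in for the Mersenne Twister behind rk_double:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t s; } rk_state_stub;

/* Toy sampler in [0.0, 1.0); the real rk_double draws from MT19937. */
static double rk_double_stub(rk_state_stub *st)
{
    st->s = st->s * 1103515245u + 12345u;
    return (st->s >> 8) / (double)(1u << 24);
}

/* cont0_array shape: one state, one nullary double sampler, n slots. */
static void cont0_fill(rk_state_stub *st, double (*f)(rk_state_stub *),
                       double *out, size_t n)
{
    for (size_t i = 0; i < n; i++)
        out[i] = f(st);
}

int main(void)
{
    rk_state_stub st = { 42u };
    double buf[5];
    cont0_fill(&st, rk_double_stub, buf, 5);
    for (int i = 0; i < 5; i++)
        printf("%f\n", buf[i]);
    return 0;
}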
@@ -6174,6 +6407,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_19tomaxint(PyObject *__pyx_v_sel
static char __pyx_doc_6mtrand_11RandomState_18tomaxint[] = "\n tomaxint(size=None)\n\n Random integers between 0 and ``sys.maxint``, inclusive.\n\n Return a sample of uniformly distributed random integers in the interval\n [0, ``sys.maxint``].\n\n Parameters\n ----------\n size : tuple of ints, int, optional\n Shape of output. If this is, for example, (m,n,k), m*n*k samples\n are generated. If no shape is specified, a single sample is\n returned.\n\n Returns\n -------\n out : ndarray\n Drawn samples, with shape `size`.\n\n See Also\n --------\n randint : Uniform sampling over a given half-open interval of integers.\n random_integers : Uniform sampling over a given closed interval of\n integers.\n\n Examples\n --------\n >>> RS = np.random.mtrand.RandomState() # need a RandomState object\n >>> RS.tomaxint((2,2,2))\n array([[[1170048599, 1600360186],\n [ 739731006, 1947757578]],\n [[1871712945, 752307660],\n [1601631370, 1479324245]]])\n >>> import sys\n >>> sys.maxint\n 2147483647\n >>> RS.tomaxint((2,2,2)) < sys.maxint\n array([[[ True, True],\n [ True, True]],\n [[ True, True],\n [ True, True]]], dtype=bool)\n\n ";
static PyObject *__pyx_pw_6mtrand_11RandomState_19tomaxint(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("tomaxint (wrapper)", 0);
@@ -6181,7 +6417,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_19tomaxint(PyObject *__pyx_v_sel
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__size,0};
PyObject* values[1] = {0};
- /* "mtrand.pyx":764
+ /* "mtrand.pyx":765
* return cont0_array(self.internal_state, rk_double, size)
*
* def tomaxint(self, size=None): # <<<<<<<<<<<<<<
@@ -6206,7 +6442,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_19tomaxint(PyObject *__pyx_v_sel
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "tomaxint") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 764; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "tomaxint") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -6219,7 +6455,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_19tomaxint(PyObject *__pyx_v_sel
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("tomaxint", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 764; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("tomaxint", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.tomaxint", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -6239,7 +6475,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_18tomaxint(struct __pyx_obj_6mtr
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("tomaxint", 0);
- /* "mtrand.pyx":809
+ /* "mtrand.pyx":810
*
* """
* return disc0_array(self.internal_state, rk_long, size) # <<<<<<<<<<<<<<
@@ -6247,7 +6483,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_18tomaxint(struct __pyx_obj_6mtr
* def randint(self, low, high=None, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __pyx_f_6mtrand_disc0_array(__pyx_v_self->internal_state, rk_long, __pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __pyx_f_6mtrand_disc0_array(__pyx_v_self->internal_state, rk_long, __pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 810; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
@@ -6272,6 +6508,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_21randint(PyObject *__pyx_v_self
PyObject *__pyx_v_low = 0;
PyObject *__pyx_v_high = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("randint (wrapper)", 0);
@@ -6279,7 +6518,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_21randint(PyObject *__pyx_v_self
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__low,&__pyx_n_s__high,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- /* "mtrand.pyx":811
+ /* "mtrand.pyx":812
* return disc0_array(self.internal_state, rk_long, size)
*
* def randint(self, low, high=None, size=None): # <<<<<<<<<<<<<<
@@ -6315,7 +6554,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_21randint(PyObject *__pyx_v_self
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "randint") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 811; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "randint") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -6332,7 +6571,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_21randint(PyObject *__pyx_v_self
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("randint", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 811; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("randint", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 812; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.randint", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -6365,7 +6604,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("randint", 0);
- /* "mtrand.pyx":868
+ /* "mtrand.pyx":869
* cdef npy_intp i
*
* if high is None: # <<<<<<<<<<<<<<
@@ -6375,7 +6614,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
__pyx_t_1 = (__pyx_v_high == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":869
+ /* "mtrand.pyx":870
*
* if high is None:
* lo = 0 # <<<<<<<<<<<<<<
@@ -6384,42 +6623,42 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
*/
__pyx_v_lo = 0;
- /* "mtrand.pyx":870
+ /* "mtrand.pyx":871
* if high is None:
* lo = 0
* hi = low # <<<<<<<<<<<<<<
* else:
* lo = low
*/
- __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_v_low); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_v_low); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 871; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_hi = __pyx_t_2;
goto __pyx_L3;
}
/*else*/ {
- /* "mtrand.pyx":872
+ /* "mtrand.pyx":873
* hi = low
* else:
* lo = low # <<<<<<<<<<<<<<
* hi = high
*
*/
- __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_v_low); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_v_low); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_lo = __pyx_t_2;
- /* "mtrand.pyx":873
+ /* "mtrand.pyx":874
* else:
* lo = low
* hi = high # <<<<<<<<<<<<<<
*
* if lo >= hi :
*/
- __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_v_high); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_v_high); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 874; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_hi = __pyx_t_2;
}
__pyx_L3:;
- /* "mtrand.pyx":875
+ /* "mtrand.pyx":876
* hi = high
*
* if lo >= hi : # <<<<<<<<<<<<<<
@@ -6429,23 +6668,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
__pyx_t_1 = (__pyx_v_lo >= __pyx_v_hi);
if (__pyx_t_1) {
- /* "mtrand.pyx":876
+ /* "mtrand.pyx":877
*
* if lo >= hi :
* raise ValueError("low >= high") # <<<<<<<<<<<<<<
*
* diff = <unsigned long>hi - <unsigned long>lo - 1UL
*/
- __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_16), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":878
+ /* "mtrand.pyx":879
* raise ValueError("low >= high")
*
* diff = <unsigned long>hi - <unsigned long>lo - 1UL # <<<<<<<<<<<<<<
@@ -6454,7 +6693,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
*/
__pyx_v_diff = ((((unsigned long)__pyx_v_hi) - ((unsigned long)__pyx_v_lo)) - 1UL);
- /* "mtrand.pyx":879
+ /* "mtrand.pyx":880
*
* diff = <unsigned long>hi - <unsigned long>lo - 1UL
* if size is None: # <<<<<<<<<<<<<<
@@ -6464,7 +6703,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
__pyx_t_1 = (__pyx_v_size == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":880
+ /* "mtrand.pyx":881
* diff = <unsigned long>hi - <unsigned long>lo - 1UL
* if size is None:
* rv = lo + <long>rk_interval(diff, self. internal_state) # <<<<<<<<<<<<<<
@@ -6473,7 +6712,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
*/
__pyx_v_rv = (__pyx_v_lo + ((long)rk_interval(__pyx_v_diff, __pyx_v_self->internal_state)));
- /* "mtrand.pyx":881
+ /* "mtrand.pyx":882
* if size is None:
* rv = lo + <long>rk_interval(diff, self. internal_state)
* return rv # <<<<<<<<<<<<<<
@@ -6481,7 +6720,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
* array = <ndarray>np.empty(size, int)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_3 = PyInt_FromLong(__pyx_v_rv); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 881; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyInt_FromLong(__pyx_v_rv); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 882; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
@@ -6490,19 +6729,19 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
}
/*else*/ {
- /* "mtrand.pyx":883
+ /* "mtrand.pyx":884
* return rv
* else:
* array = <ndarray>np.empty(size, int) # <<<<<<<<<<<<<<
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
*/
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 883; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 883; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 883; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_size);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_size);
@@ -6510,15 +6749,17 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
__Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 883; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_5)));
- arrayObject = ((PyArrayObject *)__pyx_t_5);
+ __pyx_t_3 = __pyx_t_5;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ arrayObject = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":884
+ /* "mtrand.pyx":885
* else:
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array) # <<<<<<<<<<<<<<
@@ -6527,7 +6768,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
*/
__pyx_v_length = PyArray_SIZE(arrayObject);
- /* "mtrand.pyx":885
+ /* "mtrand.pyx":886
* array = <ndarray>np.empty(size, int)
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array) # <<<<<<<<<<<<<<
@@ -6536,7 +6777,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
*/
__pyx_v_array_data = ((long *)PyArray_DATA(arrayObject));
- /* "mtrand.pyx":886
+ /* "mtrand.pyx":887
* length = PyArray_SIZE(array)
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length: # <<<<<<<<<<<<<<
@@ -6546,7 +6787,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
__pyx_t_6 = __pyx_v_length;
for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_6; __pyx_v_i++) {
- /* "mtrand.pyx":887
+ /* "mtrand.pyx":888
* array_data = <long *>PyArray_DATA(array)
* for i from 0 <= i < length:
* rv = lo + <long>rk_interval(diff, self. internal_state) # <<<<<<<<<<<<<<
@@ -6555,7 +6796,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
*/
__pyx_v_rv = (__pyx_v_lo + ((long)rk_interval(__pyx_v_diff, __pyx_v_self->internal_state)));
- /* "mtrand.pyx":888
+ /* "mtrand.pyx":889
* for i from 0 <= i < length:
* rv = lo + <long>rk_interval(diff, self. internal_state)
* array_data[i] = rv # <<<<<<<<<<<<<<
@@ -6565,7 +6806,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra
(__pyx_v_array_data[__pyx_v_i]) = __pyx_v_rv;
}
- /* "mtrand.pyx":889
+ /* "mtrand.pyx":890
* rv = lo + <long>rk_interval(diff, self. internal_state)
* array_data[i] = rv
* return array # <<<<<<<<<<<<<<
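Read in sequence, the mtrand.pyx fragments quoted through this hunk spell out randint's full control flow: normalize (low, high) to a (lo, hi) pair, validate lo < hi, derive the closed interval width, then either draw a single value or fill a freshly allocated array element by element. A minimal pure-Python sketch of that logic, with a stand-in rng_interval callable in place of the C-level rk_interval (an illustration under those assumptions, not the shipped implementation):

    import numpy as np

    def randint_sketch(rng_interval, low, high=None, size=None):
        # Mirrors the quoted Cython: high=None means the range is [0, low).
        if high is None:
            lo, hi = 0, int(low)
        else:
            lo, hi = int(low), int(high)
        if lo >= hi:
            raise ValueError("low >= high")
        diff = hi - lo - 1              # rk_interval draws uniformly on [0, diff]
        if size is None:
            return lo + rng_interval(diff)
        out = np.empty(size, int)
        flat = out.ravel()              # contiguous view; fill it as the C loop does
        for i in range(flat.size):
            flat[i] = lo + rng_interval(diff)
        return out

For instance, with rng_interval = lambda d: random.randint(0, d), randint_sketch(rng_interval, 5, size=(2, 3)) covers the same value range as np.random.randint(0, 5, (2, 3)).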
@@ -6599,11 +6840,14 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_23bytes(PyObject *__pyx_v_self,
static char __pyx_doc_6mtrand_11RandomState_22bytes[] = "\n bytes(length)\n\n Return random bytes.\n\n Parameters\n ----------\n length : int\n Number of random bytes.\n\n Returns\n -------\n out : str\n String of length `length`.\n\n Examples\n --------\n >>> np.random.bytes(10)\n ' eh\\x85\\x022SZ\\xbf\\xa4' #random\n\n ";
static PyObject *__pyx_pw_6mtrand_11RandomState_23bytes(PyObject *__pyx_v_self, PyObject *__pyx_arg_length) {
npy_intp __pyx_v_length;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("bytes (wrapper)", 0);
assert(__pyx_arg_length); {
- __pyx_v_length = __Pyx_PyInt_from_py_npy_intp(__pyx_arg_length); if (unlikely((__pyx_v_length == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 891; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_v_length = __Pyx_PyInt_from_py_npy_intp(__pyx_arg_length); if (unlikely((__pyx_v_length == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 892; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
@@ -6616,7 +6860,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_23bytes(PyObject *__pyx_v_self,
return __pyx_r;
}
-/* "mtrand.pyx":891
+/* "mtrand.pyx":892
* return array
*
* def bytes(self, npy_intp length): # <<<<<<<<<<<<<<
@@ -6635,19 +6879,19 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_22bytes(struct __pyx_obj_6mtrand
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("bytes", 0);
- /* "mtrand.pyx":914
+ /* "mtrand.pyx":915
* """
* cdef void *bytes
* bytestring = empty_py_bytes(length, &bytes) # <<<<<<<<<<<<<<
* rk_fill(bytes, length, self.internal_state)
* return bytestring
*/
- __pyx_t_1 = empty_py_bytes(__pyx_v_length, (&__pyx_v_bytes)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = empty_py_bytes(__pyx_v_length, (&__pyx_v_bytes)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 915; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytestring = __pyx_t_1;
__pyx_t_1 = 0;
- /* "mtrand.pyx":915
+ /* "mtrand.pyx":916
* cdef void *bytes
* bytestring = empty_py_bytes(length, &bytes)
* rk_fill(bytes, length, self.internal_state) # <<<<<<<<<<<<<<
@@ -6656,7 +6900,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_22bytes(struct __pyx_obj_6mtrand
*/
rk_fill(__pyx_v_bytes, __pyx_v_length, __pyx_v_self->internal_state);
- /* "mtrand.pyx":916
+ /* "mtrand.pyx":917
* bytestring = empty_py_bytes(length, &bytes)
* rk_fill(bytes, length, self.internal_state)
* return bytestring # <<<<<<<<<<<<<<
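As the quoted source shows, bytes() is a thin two-step wrapper: allocate a Python bytes object of the requested length, then let the generator state fill it in place via rk_fill. A hedged Python equivalent, with rng_fill standing in for the C fill routine (illustration only):

    def bytes_sketch(rng_fill, length):
        # empty_py_bytes(length, &bytes) + rk_fill(...) from the hunk above:
        buf = bytearray(length)   # writable buffer of `length` bytes
        rng_fill(buf)             # generator writes `length` random bytes in place
        return bytes(buf)

Allocating first and filling in place avoids an extra copy, which matters for large length values.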
@@ -6683,28 +6927,31 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_22bytes(struct __pyx_obj_6mtrand
/* Python wrapper */
static PyObject *__pyx_pw_6mtrand_11RandomState_25choice(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6mtrand_11RandomState_24choice[] = "\n choice(a, size=1, replace=True, p=None)\n\n Generates a random sample from a given 1-D array\n\n .. versionadded:: 1.7.0\n\n Parameters\n -----------\n a : 1-D array-like or int\n If an ndarray, a random sample is generated from its elements.\n If an int, the random sample is generated as if a was np.arange(n)\n size : int\n Positive integer, the size of the sample.\n replace : boolean, optional\n Whether the sample is with or without replacement\n p : 1-D array-like, optional\n The probabilities associated with each entry in a.\n If not given the sample assumes a uniform distribtion over all\n entries in a.\n\n Returns\n --------\n samples : 1-D ndarray, shape (size,)\n The generated random samples\n\n Raises\n -------\n ValueError\n If a is an int and less than zero, if a or p are not 1-dimensional,\n if a is an array-like of size 0, if p is not a vector of\n probabilities, if a and p have different lengths, or if\n replace=False and the sample size is greater than the population\n size\n\n See Also\n ---------\n randint, shuffle, permutation\n\n Examples\n ---------\n Generate a uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3)\n array([0, 3, 4])\n >>> #This is equivalent to np.random.randint(0,5,3)\n\n Generate a non-uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])\n array([3, 3, 0])\n\n Generate a uniform random sample from np.arange(5) of size 3 without\n replacement:\n\n >>> np.random.choice(5, 3, replace=False)\n array([3,1,0])\n >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]\n\n "" Generate a non-uniform random sample from np.arange(5) of size\n 3 without replacement:\n\n >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])\n array([2, 3, 0])\n\n Any of the above can be repeated with an arbitrary array-like\n instead of just integers. For instance:\n\n >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']\n >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])\n array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],\n dtype='|S11')\n\n ";
+static char __pyx_doc_6mtrand_11RandomState_24choice[] = "\n choice(a, size=None, replace=True, p=None)\n\n Generates a random sample from a given 1-D array\n\n .. versionadded:: 1.7.0\n\n Parameters\n -----------\n a : 1-D array-like or int\n If an ndarray, a random sample is generated from its elements.\n If an int, the random sample is generated as if a was np.arange(n)\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n replace : boolean, optional\n Whether the sample is with or without replacement\n p : 1-D array-like, optional\n The probabilities associated with each entry in a.\n If not given the sample assumes a uniform distribution over all\n entries in a.\n\n Returns\n --------\n samples : 1-D ndarray, shape (size,)\n The generated random samples\n\n Raises\n -------\n ValueError\n If a is an int and less than zero, if a or p are not 1-dimensional,\n if a is an array-like of size 0, if p is not a vector of\n probabilities, if a and p have different lengths, or if\n replace=False and the sample size is greater than the population\n size\n\n See Also\n ---------\n randint, shuffle, permutation\n\n Examples\n ---------\n Generate a uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3)\n array([0, 3, 4])\n >>> #This is equivalent to np.random.randint(0,5,3)\n\n Generate a non-uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])\n array([3, 3, 0])\n\n Generate a uniform random sample from np.arange(5) of size 3 without\n replacement:\n\n >>> np.random.choice(5, 3, replace=False)\n array([3,1,0])\n "" >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]\n\n Generate a non-uniform random sample from np.arange(5) of size\n 3 without replacement:\n\n >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])\n array([2, 3, 0])\n\n Any of the above can be repeated with an arbitrary array-like\n instead of just integers. For instance:\n\n >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']\n >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])\n array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],\n dtype='|S11')\n\n ";
static PyObject *__pyx_pw_6mtrand_11RandomState_25choice(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_a = 0;
PyObject *__pyx_v_size = 0;
PyObject *__pyx_v_replace = 0;
PyObject *__pyx_v_p = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("choice (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__a,&__pyx_n_s__size,&__pyx_n_s__replace,&__pyx_n_s__p,0};
PyObject* values[4] = {0,0,0,0};
- values[1] = ((PyObject *)__pyx_int_1);
- values[2] = __pyx_k_15;
- /* "mtrand.pyx":919
+ /* "mtrand.pyx":920
*
*
- * def choice(self, a, size=1, replace=True, p=None): # <<<<<<<<<<<<<<
+ * def choice(self, a, size=None, replace=True, p=None): # <<<<<<<<<<<<<<
* """
- * choice(a, size=1, replace=True, p=None)
+ * choice(a, size=None, replace=True, p=None)
*/
+ values[1] = ((PyObject *)Py_None);
+ values[2] = __pyx_k_17;
values[3] = ((PyObject *)Py_None);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
@@ -6739,7 +6986,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_25choice(PyObject *__pyx_v_self,
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "choice") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 919; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "choice") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 920; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -6758,7 +7005,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_25choice(PyObject *__pyx_v_self,
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("choice", 0, 1, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 919; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("choice", 0, 1, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 920; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.choice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -6771,897 +7018,1184 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_25choice(PyObject *__pyx_v_self,
static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtrand_RandomState *__pyx_v_self, PyObject *__pyx_v_a, PyObject *__pyx_v_size, PyObject *__pyx_v_replace, PyObject *__pyx_v_p) {
PyObject *__pyx_v_pop_size = NULL;
+ PyObject *__pyx_v_shape = NULL;
PyObject *__pyx_v_cdf = NULL;
PyObject *__pyx_v_uniform_samples = NULL;
PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_n_uniq = NULL;
PyObject *__pyx_v_found = NULL;
+ PyObject *__pyx_v_flat_found = NULL;
PyObject *__pyx_v_x = NULL;
PyObject *__pyx_v_new = NULL;
+ CYTHON_UNUSED PyObject *__pyx_v__ = NULL;
+ PyObject *__pyx_v_unique_indices = NULL;
+ PyObject *__pyx_v_res = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
- int __pyx_t_2;
+ PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
- PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
- int __pyx_t_7;
- Py_ssize_t __pyx_t_8;
- Py_ssize_t __pyx_t_9;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ int __pyx_t_9;
+ PyObject *__pyx_t_10 = NULL;
+ int __pyx_t_11;
+ PyObject *(*__pyx_t_12)(PyObject *);
+ int __pyx_t_13;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("choice", 0);
__Pyx_INCREF(__pyx_v_a);
+ __Pyx_INCREF(__pyx_v_size);
__Pyx_INCREF(__pyx_v_p);
- /* "mtrand.pyx":996
+ /* "mtrand.pyx":998
*
* # Format and Verify input
- * if isinstance(a, int): # <<<<<<<<<<<<<<
- * if a > 0:
- * pop_size = a #population size
+ * a = np.array(a, copy=False) # <<<<<<<<<<<<<<
+ * if a.ndim == 0:
+ * try:
*/
- __pyx_t_1 = ((PyObject *)((PyObject*)(&PyInt_Type)));
- __Pyx_INCREF(__pyx_t_1);
- __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_a, __pyx_t_1);
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (__pyx_t_2) {
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_a);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_a);
+ __Pyx_GIVEREF(__pyx_v_a);
+ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __pyx_t_4 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__copy), __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_a);
+ __pyx_v_a = __pyx_t_4;
+ __pyx_t_4 = 0;
- /* "mtrand.pyx":997
+ /* "mtrand.pyx":999
* # Format and Verify input
- * if isinstance(a, int):
- * if a > 0: # <<<<<<<<<<<<<<
- * pop_size = a #population size
- * else:
+ * a = np.array(a, copy=False)
+ * if a.ndim == 0: # <<<<<<<<<<<<<<
+ * try:
+ * # __index__ must return an integer by python rules.
*/
- __pyx_t_1 = PyObject_RichCompare(__pyx_v_a, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 997; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 997; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (__pyx_t_2) {
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s__ndim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 999; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_int_0, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 999; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 999; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_5) {
- /* "mtrand.pyx":998
- * if isinstance(a, int):
- * if a > 0:
- * pop_size = a #population size # <<<<<<<<<<<<<<
- * else:
+ /* "mtrand.pyx":1000
+ * a = np.array(a, copy=False)
+ * if a.ndim == 0:
+ * try: # <<<<<<<<<<<<<<
+ * # __index__ must return an integer by python rules.
+ * pop_size = operator.index(a.item())
+ */
+ {
+ __Pyx_ExceptionSave(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
+ __Pyx_XGOTREF(__pyx_t_6);
+ __Pyx_XGOTREF(__pyx_t_7);
+ __Pyx_XGOTREF(__pyx_t_8);
+ /*try:*/ {
+
+ /* "mtrand.pyx":1002
+ * try:
+ * # __index__ must return an integer by python rules.
+ * pop_size = operator.index(a.item()) # <<<<<<<<<<<<<<
+ * except TypeError:
+ * raise ValueError("a must be 1-dimensional or an integer")
+ */
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__operator); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__index); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s__item); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __pyx_v_pop_size = __pyx_t_1;
+ __pyx_t_1 = 0;
+ }
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ goto __pyx_L11_try_end;
+ __pyx_L4_error:;
+ __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "mtrand.pyx":1003
+ * # __index__ must return an integer by python rules.
+ * pop_size = operator.index(a.item())
+ * except TypeError: # <<<<<<<<<<<<<<
+ * raise ValueError("a must be 1-dimensional or an integer")
+ * if pop_size <= 0:
+ */
+ __pyx_t_9 = PyErr_ExceptionMatches(__pyx_builtin_TypeError);
+ if (__pyx_t_9) {
+ __Pyx_AddTraceback("mtrand.RandomState.choice", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_3, &__pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_t_4);
+
+ /* "mtrand.pyx":1004
+ * pop_size = operator.index(a.item())
+ * except TypeError:
+ * raise ValueError("a must be 1-dimensional or an integer") # <<<<<<<<<<<<<<
+ * if pop_size <= 0:
* raise ValueError("a must be greater than 0")
*/
- __Pyx_INCREF(__pyx_v_a);
- __pyx_v_pop_size = __pyx_v_a;
- goto __pyx_L4;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_19), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1004; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1004; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ goto __pyx_L5_exception_handled;
+ }
+ __pyx_L6_except_error:;
+ __Pyx_XGIVEREF(__pyx_t_6);
+ __Pyx_XGIVEREF(__pyx_t_7);
+ __Pyx_XGIVEREF(__pyx_t_8);
+ __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8);
+ goto __pyx_L1_error;
+ __pyx_L5_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_t_6);
+ __Pyx_XGIVEREF(__pyx_t_7);
+ __Pyx_XGIVEREF(__pyx_t_8);
+ __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8);
+ __pyx_L11_try_end:;
}
- /*else*/ {
- /* "mtrand.pyx":1000
- * pop_size = a #population size
- * else:
+ /* "mtrand.pyx":1005
+ * except TypeError:
+ * raise ValueError("a must be 1-dimensional or an integer")
+ * if pop_size <= 0: # <<<<<<<<<<<<<<
+ * raise ValueError("a must be greater than 0")
+ * elif a.ndim != 1:
+ */
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_pop_size, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1005; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1005; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_5) {
+
+ /* "mtrand.pyx":1006
+ * raise ValueError("a must be 1-dimensional or an integer")
+ * if pop_size <= 0:
* raise ValueError("a must be greater than 0") # <<<<<<<<<<<<<<
- * else:
- * a = np.array(a, ndmin=1, copy=0)
+ * elif a.ndim != 1:
+ * raise ValueError("a must be 1-dimensional")
*/
- __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_17), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_Raise(__pyx_t_1, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_21), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1006; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1006; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L14;
}
- __pyx_L4:;
+ __pyx_L14:;
goto __pyx_L3;
}
- /*else*/ {
- /* "mtrand.pyx":1002
+ /* "mtrand.pyx":1007
+ * if pop_size <= 0:
* raise ValueError("a must be greater than 0")
+ * elif a.ndim != 1: # <<<<<<<<<<<<<<
+ * raise ValueError("a must be 1-dimensional")
* else:
- * a = np.array(a, ndmin=1, copy=0) # <<<<<<<<<<<<<<
- * if a.ndim != 1:
- * raise ValueError("a must be 1-dimensional")
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(__pyx_v_a);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_a);
- __Pyx_GIVEREF(__pyx_v_a);
- __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_4));
- if (PyDict_SetItem(__pyx_t_4, ((PyObject *)__pyx_n_s__ndmin), __pyx_int_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (PyDict_SetItem(__pyx_t_4, ((PyObject *)__pyx_n_s__copy), __pyx_int_0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __Pyx_DECREF(__pyx_v_a);
- __pyx_v_a = __pyx_t_5;
- __pyx_t_5 = 0;
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s__ndim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_int_1, Py_NE); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_5) {
- /* "mtrand.pyx":1003
+ /* "mtrand.pyx":1008
+ * raise ValueError("a must be greater than 0")
+ * elif a.ndim != 1:
+ * raise ValueError("a must be 1-dimensional") # <<<<<<<<<<<<<<
* else:
- * a = np.array(a, ndmin=1, copy=0)
- * if a.ndim != 1: # <<<<<<<<<<<<<<
- * raise ValueError("a must be 1-dimensional")
- * pop_size = a.size
- */
- __pyx_t_5 = PyObject_GetAttr(__pyx_v_a, __pyx_n_s__ndim); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __pyx_t_4 = PyObject_RichCompare(__pyx_t_5, __pyx_int_1, Py_NE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1003; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (__pyx_t_2) {
-
- /* "mtrand.pyx":1004
- * a = np.array(a, ndmin=1, copy=0)
- * if a.ndim != 1:
- * raise ValueError("a must be 1-dimensional") # <<<<<<<<<<<<<<
- * pop_size = a.size
- * if pop_size is 0:
+ * pop_size = a.shape[0]
*/
- __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_19), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1004; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_Raise(__pyx_t_4, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1004; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- goto __pyx_L5;
- }
- __pyx_L5:;
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_23), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1008; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1008; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L3;
+ }
+ /*else*/ {
- /* "mtrand.pyx":1005
- * if a.ndim != 1:
- * raise ValueError("a must be 1-dimensional")
- * pop_size = a.size # <<<<<<<<<<<<<<
+ /* "mtrand.pyx":1010
+ * raise ValueError("a must be 1-dimensional")
+ * else:
+ * pop_size = a.shape[0] # <<<<<<<<<<<<<<
* if pop_size is 0:
* raise ValueError("a must be non-empty")
*/
- __pyx_t_4 = PyObject_GetAttr(__pyx_v_a, __pyx_n_s__size); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1005; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_3, 0, sizeof(long), PyInt_FromLong, 0, 0, 1); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_pop_size = __pyx_t_4;
__pyx_t_4 = 0;
- /* "mtrand.pyx":1006
- * raise ValueError("a must be 1-dimensional")
- * pop_size = a.size
+ /* "mtrand.pyx":1011
+ * else:
+ * pop_size = a.shape[0]
* if pop_size is 0: # <<<<<<<<<<<<<<
* raise ValueError("a must be non-empty")
*
*/
- __pyx_t_2 = (__pyx_v_pop_size == __pyx_int_0);
- if (__pyx_t_2) {
+ __pyx_t_5 = (__pyx_v_pop_size == __pyx_int_0);
+ if (__pyx_t_5) {
- /* "mtrand.pyx":1007
- * pop_size = a.size
+ /* "mtrand.pyx":1012
+ * pop_size = a.shape[0]
* if pop_size is 0:
* raise ValueError("a must be non-empty") # <<<<<<<<<<<<<<
*
* if None != p:
*/
- __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_21), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_25), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1012; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- goto __pyx_L6;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1012; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L15;
}
- __pyx_L6:;
+ __pyx_L15:;
}
__pyx_L3:;
- /* "mtrand.pyx":1009
+ /* "mtrand.pyx":1014
* raise ValueError("a must be non-empty")
*
* if None != p: # <<<<<<<<<<<<<<
- * p = np.array(p, dtype=np.double, ndmin=1, copy=0)
+ * p = np.array(p, dtype=np.double, ndmin=1, copy=False)
* if p.ndim != 1:
*/
- __pyx_t_4 = PyObject_RichCompare(Py_None, __pyx_v_p, Py_NE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1009; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1009; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_RichCompare(Py_None, __pyx_v_p, Py_NE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1014; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1014; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (__pyx_t_2) {
+ if (__pyx_t_5) {
- /* "mtrand.pyx":1010
+ /* "mtrand.pyx":1015
*
* if None != p:
- * p = np.array(p, dtype=np.double, ndmin=1, copy=0) # <<<<<<<<<<<<<<
+ * p = np.array(p, dtype=np.double, ndmin=1, copy=False) # <<<<<<<<<<<<<<
* if p.ndim != 1:
* raise ValueError("p must be 1-dimensional")
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__array); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__array); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_v_p);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_p);
__Pyx_GIVEREF(__pyx_v_p);
- __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_1));
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_6 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__double); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__double); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), __pyx_t_10) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__ndmin), __pyx_int_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_10 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__copy), __pyx_t_10) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_10 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), __pyx_t_6) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__ndmin), __pyx_int_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__copy), __pyx_int_0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_6 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_v_p);
- __pyx_v_p = __pyx_t_6;
- __pyx_t_6 = 0;
+ __pyx_v_p = __pyx_t_10;
+ __pyx_t_10 = 0;
- /* "mtrand.pyx":1011
+ /* "mtrand.pyx":1016
* if None != p:
- * p = np.array(p, dtype=np.double, ndmin=1, copy=0)
+ * p = np.array(p, dtype=np.double, ndmin=1, copy=False)
* if p.ndim != 1: # <<<<<<<<<<<<<<
* raise ValueError("p must be 1-dimensional")
* if p.size != pop_size:
*/
- __pyx_t_6 = PyObject_GetAttr(__pyx_v_p, __pyx_n_s__ndim); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1011; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_1 = PyObject_RichCompare(__pyx_t_6, __pyx_int_1, Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1011; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1011; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s__ndim); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1016; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_1 = PyObject_RichCompare(__pyx_t_10, __pyx_int_1, Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1016; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1016; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (__pyx_t_2) {
+ if (__pyx_t_5) {
- /* "mtrand.pyx":1012
- * p = np.array(p, dtype=np.double, ndmin=1, copy=0)
+ /* "mtrand.pyx":1017
+ * p = np.array(p, dtype=np.double, ndmin=1, copy=False)
* if p.ndim != 1:
* raise ValueError("p must be 1-dimensional") # <<<<<<<<<<<<<<
* if p.size != pop_size:
* raise ValueError("a and p must have same size")
*/
- __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_23), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1012; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_27), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1017; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1012; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- goto __pyx_L8;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1017; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L17;
}
- __pyx_L8:;
+ __pyx_L17:;
- /* "mtrand.pyx":1013
+ /* "mtrand.pyx":1018
* if p.ndim != 1:
* raise ValueError("p must be 1-dimensional")
* if p.size != pop_size: # <<<<<<<<<<<<<<
* raise ValueError("a and p must have same size")
* if np.any(p < 0):
*/
- __pyx_t_1 = PyObject_GetAttr(__pyx_v_p, __pyx_n_s__size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s__size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1018; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_6 = PyObject_RichCompare(__pyx_t_1, __pyx_v_pop_size, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_10 = PyObject_RichCompare(__pyx_t_1, __pyx_v_pop_size, Py_NE); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1018; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- if (__pyx_t_2) {
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1018; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ if (__pyx_t_5) {
- /* "mtrand.pyx":1014
+ /* "mtrand.pyx":1019
* raise ValueError("p must be 1-dimensional")
* if p.size != pop_size:
* raise ValueError("a and p must have same size") # <<<<<<<<<<<<<<
* if np.any(p < 0):
* raise ValueError("probabilities are not non-negative")
*/
- __pyx_t_6 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_25), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1014; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_Raise(__pyx_t_6, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1014; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- goto __pyx_L9;
+ __pyx_t_10 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_29), NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1019; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_Raise(__pyx_t_10, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1019; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L18;
}
- __pyx_L9:;
+ __pyx_L18:;
- /* "mtrand.pyx":1015
+ /* "mtrand.pyx":1020
* if p.size != pop_size:
* raise ValueError("a and p must have same size")
* if np.any(p < 0): # <<<<<<<<<<<<<<
* raise ValueError("probabilities are not non-negative")
* if not np.allclose(p.sum(), 1):
*/
- __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__any); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s__any); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __pyx_t_6 = PyObject_RichCompare(__pyx_v_p, __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_10 = PyObject_RichCompare(__pyx_v_p, __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6);
- __Pyx_GIVEREF(__pyx_t_6);
- __pyx_t_6 = 0;
- __pyx_t_6 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_10);
+ __pyx_t_10 = 0;
+ __pyx_t_10 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- if (__pyx_t_2) {
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1020; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ if (__pyx_t_5) {
- /* "mtrand.pyx":1016
+ /* "mtrand.pyx":1021
* raise ValueError("a and p must have same size")
* if np.any(p < 0):
* raise ValueError("probabilities are not non-negative") # <<<<<<<<<<<<<<
* if not np.allclose(p.sum(), 1):
* raise ValueError("probabilities do not sum to 1")
*/
- __pyx_t_6 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_27), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1016; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_Raise(__pyx_t_6, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1016; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- goto __pyx_L10;
+ __pyx_t_10 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_31), NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1021; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_Raise(__pyx_t_10, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1021; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L19;
}
- __pyx_L10:;
+ __pyx_L19:;
- /* "mtrand.pyx":1017
+ /* "mtrand.pyx":1022
* if np.any(p < 0):
* raise ValueError("probabilities are not non-negative")
* if not np.allclose(p.sum(), 1): # <<<<<<<<<<<<<<
* raise ValueError("probabilities do not sum to 1")
*
*/
- __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1017; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__allclose); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1017; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s__allclose); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __pyx_t_6 = PyObject_GetAttr(__pyx_v_p, __pyx_n_s__sum); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1017; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_1 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1017; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s__sum); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_1 = PyObject_Call(__pyx_t_10, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1017; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_INCREF(__pyx_int_1);
- PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_1);
+ PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
__pyx_t_1 = 0;
- __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1017; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_10), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0;
- __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1017; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0;
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_7 = (!__pyx_t_2);
- if (__pyx_t_7) {
+ __pyx_t_11 = (!__pyx_t_5);
+ if (__pyx_t_11) {
- /* "mtrand.pyx":1018
+ /* "mtrand.pyx":1023
* raise ValueError("probabilities are not non-negative")
* if not np.allclose(p.sum(), 1):
* raise ValueError("probabilities do not sum to 1") # <<<<<<<<<<<<<<
*
- * # Actual sampling
+ * shape = size
*/
- __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_29), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1018; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_33), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1023; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1018; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- goto __pyx_L11;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1023; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L20;
}
- __pyx_L11:;
- goto __pyx_L7;
+ __pyx_L20:;
+ goto __pyx_L16;
}
- __pyx_L7:;
+ __pyx_L16:;
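
(Most of the churn in the hunk above is mechanical Cython regeneration: temporaries and labels are renumbered (__pyx_t_6 -> __pyx_t_10, __pyx_L10 -> __pyx_L19), __Pyx_GetName becomes __Pyx_GetModuleGlobalName, PyObject_GetAttr becomes __Pyx_PyObject_GetAttrStr, and the prebuilt constant tuples shift index (__pyx_k_tuple_27 -> __pyx_k_tuple_31). The validation logic itself is unchanged; per the quoted mtrand.pyx lines it is simply:

    if np.any(p < 0):
        raise ValueError("probabilities are not non-negative")
    if not np.allclose(p.sum(), 1):
        raise ValueError("probabilities do not sum to 1")
)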
- /* "mtrand.pyx":1021
+ /* "mtrand.pyx":1025
+ * raise ValueError("probabilities do not sum to 1")
+ *
+ * shape = size # <<<<<<<<<<<<<<
+ * if shape is not None:
+ * size = np.prod(shape, dtype=np.intp)
+ */
+ __Pyx_INCREF(__pyx_v_size);
+ __pyx_v_shape = __pyx_v_size;
+
+ /* "mtrand.pyx":1026
+ *
+ * shape = size
+ * if shape is not None: # <<<<<<<<<<<<<<
+ * size = np.prod(shape, dtype=np.intp)
+ * else:
+ */
+ __pyx_t_11 = (__pyx_v_shape != Py_None);
+ if (__pyx_t_11) {
+
+ /* "mtrand.pyx":1027
+ * shape = size
+ * if shape is not None:
+ * size = np.prod(shape, dtype=np.intp) # <<<<<<<<<<<<<<
+ * else:
+ * size = 1
+ */
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__prod); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_shape);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape);
+ __Pyx_GIVEREF(__pyx_v_shape);
+ __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__intp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PyDict_SetItem(__pyx_t_4, ((PyObject *)__pyx_n_s__dtype), __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_10, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1027; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_size);
+ __pyx_v_size = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L21;
+ }
+ /*else*/ {
+
+ /* "mtrand.pyx":1029
+ * size = np.prod(shape, dtype=np.intp)
+ * else:
+ * size = 1 # <<<<<<<<<<<<<<
+ *
+ * # Actual sampling
+ */
+ __Pyx_INCREF(__pyx_int_1);
+ __Pyx_DECREF(__pyx_v_size);
+ __pyx_v_size = __pyx_int_1;
+ }
+ __pyx_L21:;
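
(First substantive change: the requested output shape is saved in a new local, shape, and size is collapsed to the total number of draws, which is what lets choice accept tuple shapes. From the quoted source:

    shape = size
    if shape is not None:
        size = np.prod(shape, dtype=np.intp)
    else:
        size = 1
)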
+
+ /* "mtrand.pyx":1032
*
* # Actual sampling
* if replace: # <<<<<<<<<<<<<<
* if None != p:
* cdf = p.cumsum()
*/
- __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_v_replace); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1021; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (__pyx_t_7) {
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_v_replace); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1032; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__pyx_t_11) {
- /* "mtrand.pyx":1022
+ /* "mtrand.pyx":1033
* # Actual sampling
* if replace:
* if None != p: # <<<<<<<<<<<<<<
* cdf = p.cumsum()
* cdf /= cdf[-1]
*/
- __pyx_t_1 = PyObject_RichCompare(Py_None, __pyx_v_p, Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1022; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (__pyx_t_7) {
+ __pyx_t_2 = PyObject_RichCompare(Py_None, __pyx_v_p, Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1033; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1033; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (__pyx_t_11) {
- /* "mtrand.pyx":1023
+ /* "mtrand.pyx":1034
* if replace:
* if None != p:
* cdf = p.cumsum() # <<<<<<<<<<<<<<
* cdf /= cdf[-1]
- * uniform_samples = np.random.random(size)
+ * uniform_samples = self.random_sample(shape)
*/
- __pyx_t_1 = PyObject_GetAttr(__pyx_v_p, __pyx_n_s__cumsum); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1023; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_6 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1023; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_v_cdf = __pyx_t_6;
- __pyx_t_6 = 0;
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s__cumsum); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1034; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1034; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_cdf = __pyx_t_4;
+ __pyx_t_4 = 0;
- /* "mtrand.pyx":1024
+ /* "mtrand.pyx":1035
* if None != p:
* cdf = p.cumsum()
* cdf /= cdf[-1] # <<<<<<<<<<<<<<
- * uniform_samples = np.random.random(size)
+ * uniform_samples = self.random_sample(shape)
* idx = cdf.searchsorted(uniform_samples, side='right')
*/
- __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_cdf, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_6) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1024; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_1 = __Pyx_PyNumber_InPlaceDivide(__pyx_v_cdf, __pyx_t_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1024; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_cdf, -1, sizeof(long), PyInt_FromLong, 0, 1, 1); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_2 = __Pyx_PyNumber_InPlaceDivide(__pyx_v_cdf, __pyx_t_4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_v_cdf);
- __pyx_v_cdf = __pyx_t_1;
- __pyx_t_1 = 0;
+ __pyx_v_cdf = __pyx_t_2;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":1025
+ /* "mtrand.pyx":1036
* cdf = p.cumsum()
* cdf /= cdf[-1]
- * uniform_samples = np.random.random(size) # <<<<<<<<<<<<<<
+ * uniform_samples = self.random_sample(shape) # <<<<<<<<<<<<<<
* idx = cdf.searchsorted(uniform_samples, side='right')
- * else:
+ * idx = np.array(idx, copy=False) # searchsorted returns a scalar
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1025; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_6 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__random); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1025; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__random); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1025; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1025; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_INCREF(__pyx_v_size);
- PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_size);
- __Pyx_GIVEREF(__pyx_v_size);
- __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1025; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__random_sample); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1036; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1036; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0;
- __pyx_v_uniform_samples = __pyx_t_4;
- __pyx_t_4 = 0;
+ __Pyx_INCREF(__pyx_v_shape);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_shape);
+ __Pyx_GIVEREF(__pyx_v_shape);
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1036; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __pyx_v_uniform_samples = __pyx_t_1;
+ __pyx_t_1 = 0;
- /* "mtrand.pyx":1026
+ /* "mtrand.pyx":1037
* cdf /= cdf[-1]
- * uniform_samples = np.random.random(size)
+ * uniform_samples = self.random_sample(shape)
* idx = cdf.searchsorted(uniform_samples, side='right') # <<<<<<<<<<<<<<
+ * idx = np.array(idx, copy=False) # searchsorted returns a scalar
* else:
- * idx = self.randint(0, pop_size, size=size)
*/
- __pyx_t_4 = PyObject_GetAttr(__pyx_v_cdf, __pyx_n_s__searchsorted); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1026; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_cdf, __pyx_n_s__searchsorted); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1026; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_uniform_samples);
- PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_uniform_samples);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_uniform_samples);
__Pyx_GIVEREF(__pyx_v_uniform_samples);
- __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1026; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_1));
- if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__side), ((PyObject *)__pyx_n_s__right)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1026; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_6), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1026; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
- __pyx_v_idx = __pyx_t_5;
- __pyx_t_5 = 0;
- goto __pyx_L13;
+ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__side), ((PyObject *)__pyx_n_s__right)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_10 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_v_idx = __pyx_t_10;
+ __pyx_t_10 = 0;
+
+ /* "mtrand.pyx":1038
+ * uniform_samples = self.random_sample(shape)
+ * idx = cdf.searchsorted(uniform_samples, side='right')
+ * idx = np.array(idx, copy=False) # searchsorted returns a scalar # <<<<<<<<<<<<<<
+ * else:
+ * idx = self.randint(0, pop_size, size=shape)
+ */
+ __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_INCREF(__pyx_v_idx);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_v_idx);
+ __Pyx_GIVEREF(__pyx_v_idx);
+ __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_4));
+ __pyx_t_1 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_t_4, ((PyObject *)__pyx_n_s__copy), __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_10), ((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_idx);
+ __pyx_v_idx = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L23;
}
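
(Two changes in this weighted, replace=True path: the uniform draws now come from self.random_sample(shape) instead of np.random.random(size), presumably so the draw uses this RandomState's own stream and accepts a tuple shape, and the result of searchsorted is wrapped as

    idx = np.array(idx, copy=False)

because, as the quoted comment notes, searchsorted returns a scalar when shape is None, and wrapping it lets the later code treat idx uniformly as an array.)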
/*else*/ {
- /* "mtrand.pyx":1028
- * idx = cdf.searchsorted(uniform_samples, side='right')
+ /* "mtrand.pyx":1040
+ * idx = np.array(idx, copy=False) # searchsorted returns a scalar
* else:
- * idx = self.randint(0, pop_size, size=size) # <<<<<<<<<<<<<<
+ * idx = self.randint(0, pop_size, size=shape) # <<<<<<<<<<<<<<
* else:
* if size > pop_size:
*/
- __pyx_t_5 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__randint); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1028; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1028; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__randint); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1040; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1040; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_int_0);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
__Pyx_INCREF(__pyx_v_pop_size);
- PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_pop_size);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_pop_size);
__Pyx_GIVEREF(__pyx_v_pop_size);
- __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1028; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_6));
- if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__size), __pyx_v_size) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1028; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_4 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1028; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0;
- __pyx_v_idx = __pyx_t_4;
- __pyx_t_4 = 0;
+ __pyx_t_10 = PyDict_New(); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1040; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_10));
+ if (PyDict_SetItem(__pyx_t_10, ((PyObject *)__pyx_n_s__size), __pyx_v_shape) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1040; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_10)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1040; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0;
+ __pyx_v_idx = __pyx_t_2;
+ __pyx_t_2 = 0;
}
- __pyx_L13:;
- goto __pyx_L12;
+ __pyx_L23:;
+ goto __pyx_L22;
}
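
(In the unweighted sub-branch the only change is size=size -> size=shape in the randint call, so the indices come back already in the requested shape:

    idx = self.randint(0, pop_size, size=shape)
)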
/*else*/ {
- /* "mtrand.pyx":1030
- * idx = self.randint(0, pop_size, size=size)
+ /* "mtrand.pyx":1042
+ * idx = self.randint(0, pop_size, size=shape)
* else:
* if size > pop_size: # <<<<<<<<<<<<<<
- * raise ValueError(''.join(["Cannot take a larger sample than ",
- * "population when 'replace=False'"]))
+ * raise ValueError("Cannot take a larger sample than "
+ * "population when 'replace=False'")
*/
- __pyx_t_4 = PyObject_RichCompare(__pyx_v_size, __pyx_v_pop_size, Py_GT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1030; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1030; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (__pyx_t_7) {
+ __pyx_t_2 = PyObject_RichCompare(__pyx_v_size, __pyx_v_pop_size, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (__pyx_t_11) {
- /* "mtrand.pyx":1031
+ /* "mtrand.pyx":1043
* else:
* if size > pop_size:
- * raise ValueError(''.join(["Cannot take a larger sample than ", # <<<<<<<<<<<<<<
- * "population when 'replace=False'"]))
+ * raise ValueError("Cannot take a larger sample than " # <<<<<<<<<<<<<<
+ * "population when 'replace=False'")
*
*/
- __pyx_t_4 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_30), __pyx_n_s__join); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1031; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_6 = PyList_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1031; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_31));
- PyList_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_kp_s_31));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_31));
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_32));
- PyList_SET_ITEM(__pyx_t_6, 1, ((PyObject *)__pyx_kp_s_32));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_32));
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1031; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_6));
- __Pyx_GIVEREF(((PyObject *)__pyx_t_6));
- __pyx_t_6 = 0;
- __pyx_t_6 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1031; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1031; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6);
- __Pyx_GIVEREF(__pyx_t_6);
- __pyx_t_6 = 0;
- __pyx_t_6 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1031; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
- __Pyx_Raise(__pyx_t_6, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1031; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- goto __pyx_L14;
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_35), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L24;
}
- __pyx_L14:;
+ __pyx_L24:;
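
(The error construction changes too: the old source built the message at runtime with ''.join([...]), which the old C faithfully reproduced as a list, two string constants, and a join call; the new source uses adjacent string literals, concatenated at compile time, so the whole raise collapses to a single prebuilt argument tuple (__pyx_k_tuple_35):

    raise ValueError("Cannot take a larger sample than "
                     "population when 'replace=False'")
)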
- /* "mtrand.pyx":1034
- * "population when 'replace=False'"]))
+ /* "mtrand.pyx":1046
+ * "population when 'replace=False'")
*
* if None != p: # <<<<<<<<<<<<<<
* if np.sum(p > 0) < size:
* raise ValueError("Fewer non-zero entries in p than size")
*/
- __pyx_t_6 = PyObject_RichCompare(Py_None, __pyx_v_p, Py_NE); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1034; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1034; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- if (__pyx_t_7) {
+ __pyx_t_2 = PyObject_RichCompare(Py_None, __pyx_v_p, Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (__pyx_t_11) {
- /* "mtrand.pyx":1035
+ /* "mtrand.pyx":1047
*
* if None != p:
* if np.sum(p > 0) < size: # <<<<<<<<<<<<<<
* raise ValueError("Fewer non-zero entries in p than size")
* n_uniq = 0
*/
- __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__sum); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __pyx_t_6 = PyObject_RichCompare(__pyx_v_p, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__sum); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_RichCompare(__pyx_v_p, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6);
- __Pyx_GIVEREF(__pyx_t_6);
- __pyx_t_6 = 0;
- __pyx_t_6 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_10, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyObject_RichCompare(__pyx_t_6, __pyx_v_size, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_v_size, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (__pyx_t_7) {
+ if (__pyx_t_11) {
- /* "mtrand.pyx":1036
+ /* "mtrand.pyx":1048
* if None != p:
* if np.sum(p > 0) < size:
* raise ValueError("Fewer non-zero entries in p than size") # <<<<<<<<<<<<<<
* n_uniq = 0
* p = p.copy()
*/
- __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_34), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1036; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_37), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1036; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- goto __pyx_L16;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L26;
}
- __pyx_L16:;
+ __pyx_L26:;
- /* "mtrand.pyx":1037
+ /* "mtrand.pyx":1049
* if np.sum(p > 0) < size:
* raise ValueError("Fewer non-zero entries in p than size")
* n_uniq = 0 # <<<<<<<<<<<<<<
* p = p.copy()
- * found = np.zeros(size, dtype=np.int)
+ * found = np.zeros(shape, dtype=np.int)
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_v_n_uniq = __pyx_int_0;
- /* "mtrand.pyx":1038
+ /* "mtrand.pyx":1050
* raise ValueError("Fewer non-zero entries in p than size")
* n_uniq = 0
* p = p.copy() # <<<<<<<<<<<<<<
- * found = np.zeros(size, dtype=np.int)
- * while n_uniq < size:
+ * found = np.zeros(shape, dtype=np.int)
+ * flat_found = found.ravel()
*/
- __pyx_t_4 = PyObject_GetAttr(__pyx_v_p, __pyx_n_s__copy); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s__copy); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1050; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_6 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1050; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_v_p);
- __pyx_v_p = __pyx_t_6;
- __pyx_t_6 = 0;
+ __pyx_v_p = __pyx_t_2;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":1039
+ /* "mtrand.pyx":1051
* n_uniq = 0
* p = p.copy()
- * found = np.zeros(size, dtype=np.int) # <<<<<<<<<<<<<<
+ * found = np.zeros(shape, dtype=np.int) # <<<<<<<<<<<<<<
+ * flat_found = found.ravel()
* while n_uniq < size:
- * x = self.rand(size - n_uniq)
*/
- __pyx_t_6 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_6, __pyx_n_s__zeros); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1051; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1051; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_INCREF(__pyx_v_size);
- PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_size);
- __Pyx_GIVEREF(__pyx_v_size);
- __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_1));
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__int); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1051; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v_shape);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
+ __Pyx_GIVEREF(__pyx_v_shape);
+ __pyx_t_10 = PyDict_New(); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1051; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_10));
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1051; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__int); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1051; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_n_s__dtype), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (PyDict_SetItem(__pyx_t_10, ((PyObject *)__pyx_n_s__dtype), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1051; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_6), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_10)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1051; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0;
__pyx_v_found = __pyx_t_3;
__pyx_t_3 = 0;
- /* "mtrand.pyx":1040
+ /* "mtrand.pyx":1052
* p = p.copy()
- * found = np.zeros(size, dtype=np.int)
+ * found = np.zeros(shape, dtype=np.int)
+ * flat_found = found.ravel() # <<<<<<<<<<<<<<
+ * while n_uniq < size:
+ * x = self.rand(size - n_uniq)
+ */
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_found, __pyx_n_s__ravel); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1052; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_10 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1052; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_flat_found = __pyx_t_10;
+ __pyx_t_10 = 0;
+
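(For replace=False with weights, found is now allocated with the full requested shape, and the bookkeeping below goes through flat_found, a flat view of it, so the rejection loop can keep using 1-D slices. Reassembling the quoted mtrand.pyx lines from this hunk, the new loop is approximately:

    n_uniq = 0
    p = p.copy()
    found = np.zeros(shape, dtype=np.int)
    flat_found = found.ravel()
    while n_uniq < size:
        x = self.rand(size - n_uniq)
        if n_uniq > 0:
            p[flat_found[0:n_uniq]] = 0          # mask out already-drawn entries
        cdf = np.cumsum(p)
        cdf /= cdf[-1]
        new = cdf.searchsorted(x, side='right')
        _, unique_indices = np.unique(new, return_index=True)
        unique_indices.sort()
        new = new.take(unique_indices)           # dedupe, preserving draw order
        flat_found[n_uniq:n_uniq + new.size] = new
        n_uniq += new.size
    idx = found
)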
+ /* "mtrand.pyx":1053
+ * found = np.zeros(shape, dtype=np.int)
+ * flat_found = found.ravel()
* while n_uniq < size: # <<<<<<<<<<<<<<
* x = self.rand(size - n_uniq)
* if n_uniq > 0:
*/
while (1) {
- __pyx_t_3 = PyObject_RichCompare(__pyx_v_n_uniq, __pyx_v_size, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1040; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1040; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- if (!__pyx_t_7) break;
+ __pyx_t_10 = PyObject_RichCompare(__pyx_v_n_uniq, __pyx_v_size, Py_LT); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1053; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1053; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ if (!__pyx_t_11) break;
- /* "mtrand.pyx":1041
- * found = np.zeros(size, dtype=np.int)
+ /* "mtrand.pyx":1054
+ * flat_found = found.ravel()
* while n_uniq < size:
* x = self.rand(size - n_uniq) # <<<<<<<<<<<<<<
* if n_uniq > 0:
- * p[found[0:n_uniq]] = 0
+ * p[flat_found[0:n_uniq]] = 0
*/
- __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__rand); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1041; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_10 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__rand); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_3 = PyNumber_Subtract(__pyx_v_size, __pyx_v_n_uniq); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_1 = PyNumber_Subtract(__pyx_v_size, __pyx_v_n_uniq); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1041; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1041; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
- __Pyx_GIVEREF(__pyx_t_1);
- __pyx_t_1 = 0;
- __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1041; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_10, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_v_x);
- __pyx_v_x = __pyx_t_1;
- __pyx_t_1 = 0;
+ __pyx_v_x = __pyx_t_3;
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":1042
+ /* "mtrand.pyx":1055
* while n_uniq < size:
* x = self.rand(size - n_uniq)
* if n_uniq > 0: # <<<<<<<<<<<<<<
- * p[found[0:n_uniq]] = 0
+ * p[flat_found[0:n_uniq]] = 0
* cdf = np.cumsum(p)
*/
- __pyx_t_1 = PyObject_RichCompare(__pyx_v_n_uniq, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1042; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (__pyx_t_7) {
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_n_uniq, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1055; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1055; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_11) {
- /* "mtrand.pyx":1043
+ /* "mtrand.pyx":1056
* x = self.rand(size - n_uniq)
* if n_uniq > 0:
- * p[found[0:n_uniq]] = 0 # <<<<<<<<<<<<<<
+ * p[flat_found[0:n_uniq]] = 0 # <<<<<<<<<<<<<<
* cdf = np.cumsum(p)
* cdf /= cdf[-1]
*/
- __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_v_n_uniq); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_1 = __Pyx_PySequence_GetSlice(__pyx_v_found, 0, __pyx_t_8); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- if (PyObject_SetItem(__pyx_v_p, __pyx_t_1, __pyx_int_0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- goto __pyx_L19;
+ __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_v_flat_found, 0, 0, NULL, &__pyx_v_n_uniq, NULL, 1, 0, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1056; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ if (PyObject_SetItem(__pyx_v_p, __pyx_t_3, __pyx_int_0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1056; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ goto __pyx_L29;
}
- __pyx_L19:;
+ __pyx_L29:;
- /* "mtrand.pyx":1044
+ /* "mtrand.pyx":1057
* if n_uniq > 0:
- * p[found[0:n_uniq]] = 0
+ * p[flat_found[0:n_uniq]] = 0
* cdf = np.cumsum(p) # <<<<<<<<<<<<<<
* cdf /= cdf[-1]
* new = cdf.searchsorted(x, side='right')
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_6 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__cumsum); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1057; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__cumsum); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1057; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1057; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_p);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_p);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_p);
__Pyx_GIVEREF(__pyx_v_p);
- __pyx_t_3 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1044; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
+ __pyx_t_10 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1057; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_v_cdf);
- __pyx_v_cdf = __pyx_t_3;
- __pyx_t_3 = 0;
+ __pyx_v_cdf = __pyx_t_10;
+ __pyx_t_10 = 0;
- /* "mtrand.pyx":1045
- * p[found[0:n_uniq]] = 0
+ /* "mtrand.pyx":1058
+ * p[flat_found[0:n_uniq]] = 0
* cdf = np.cumsum(p)
* cdf /= cdf[-1] # <<<<<<<<<<<<<<
* new = cdf.searchsorted(x, side='right')
- * new = np.unique(new)
+ * _, unique_indices = np.unique(new, return_index=True)
*/
- __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_cdf, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_10 = __Pyx_GetItemInt(__pyx_v_cdf, -1, sizeof(long), PyInt_FromLong, 0, 1, 1); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1058; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_3 = __Pyx_PyNumber_InPlaceDivide(__pyx_v_cdf, __pyx_t_10); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1058; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_1 = __Pyx_PyNumber_InPlaceDivide(__pyx_v_cdf, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__Pyx_DECREF(__pyx_v_cdf);
- __pyx_v_cdf = __pyx_t_1;
- __pyx_t_1 = 0;
+ __pyx_v_cdf = __pyx_t_3;
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":1046
+ /* "mtrand.pyx":1059
* cdf = np.cumsum(p)
* cdf /= cdf[-1]
* new = cdf.searchsorted(x, side='right') # <<<<<<<<<<<<<<
- * new = np.unique(new)
- * found[n_uniq:n_uniq + new.size] = new
+ * _, unique_indices = np.unique(new, return_index=True)
+ * unique_indices.sort()
*/
- __pyx_t_1 = PyObject_GetAttr(__pyx_v_cdf, __pyx_n_s__searchsorted); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_cdf, __pyx_n_s__searchsorted); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
__Pyx_INCREF(__pyx_v_x);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_x);
+ PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_v_x);
__Pyx_GIVEREF(__pyx_v_x);
- __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_6));
- if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__side), ((PyObject *)__pyx_n_s__right)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_2));
+ if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__side), ((PyObject *)__pyx_n_s__right)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_10), ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1059; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_v_new);
__pyx_v_new = __pyx_t_4;
__pyx_t_4 = 0;
- /* "mtrand.pyx":1047
+ /* "mtrand.pyx":1060
* cdf /= cdf[-1]
* new = cdf.searchsorted(x, side='right')
- * new = np.unique(new) # <<<<<<<<<<<<<<
- * found[n_uniq:n_uniq + new.size] = new
- * n_uniq += new.size
+ * _, unique_indices = np.unique(new, return_index=True) # <<<<<<<<<<<<<<
+ * unique_indices.sort()
+ * new = new.take(unique_indices)
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_6 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__unique); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__unique); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_v_new);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_new);
__Pyx_GIVEREF(__pyx_v_new);
- __pyx_t_3 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_10 = PyDict_New(); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_10));
+ __pyx_t_3 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ if (PyDict_SetItem(__pyx_t_10, ((PyObject *)__pyx_n_s__return_index), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), ((PyObject *)__pyx_t_10)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __Pyx_DECREF(__pyx_v_new);
- __pyx_v_new = __pyx_t_3;
- __pyx_t_3 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0;
+ if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) {
+ PyObject* sequence = __pyx_t_3;
+ #if CYTHON_COMPILING_IN_CPYTHON
+ Py_ssize_t size = Py_SIZE(sequence);
+ #else
+ Py_ssize_t size = PySequence_Size(sequence);
+ #endif
+ if (unlikely(size != 2)) {
+ if (size > 2) __Pyx_RaiseTooManyValuesError(2);
+ else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ #if CYTHON_COMPILING_IN_CPYTHON
+ if (likely(PyTuple_CheckExact(sequence))) {
+ __pyx_t_10 = PyTuple_GET_ITEM(sequence, 0);
+ __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
+ } else {
+ __pyx_t_10 = PyList_GET_ITEM(sequence, 0);
+ __pyx_t_4 = PyList_GET_ITEM(sequence, 1);
+ }
+ __Pyx_INCREF(__pyx_t_10);
+ __Pyx_INCREF(__pyx_t_4);
+ #else
+ __pyx_t_10 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ #endif
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ } else
+ {
+ Py_ssize_t index = -1;
+ __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_12 = Py_TYPE(__pyx_t_2)->tp_iternext;
+ index = 0; __pyx_t_10 = __pyx_t_12(__pyx_t_2); if (unlikely(!__pyx_t_10)) goto __pyx_L30_unpacking_failed;
+ __Pyx_GOTREF(__pyx_t_10);
+ index = 1; __pyx_t_4 = __pyx_t_12(__pyx_t_2); if (unlikely(!__pyx_t_4)) goto __pyx_L30_unpacking_failed;
+ __Pyx_GOTREF(__pyx_t_4);
+ if (__Pyx_IternextUnpackEndCheck(__pyx_t_12(__pyx_t_2), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_12 = NULL;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ goto __pyx_L31_unpacking_done;
+ __pyx_L30_unpacking_failed:;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_12 = NULL;
+ if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1060; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_L31_unpacking_done:;
+ }
+ __Pyx_XDECREF(__pyx_v__);
+ __pyx_v__ = __pyx_t_10;
+ __pyx_t_10 = 0;
+ __Pyx_XDECREF(__pyx_v_unique_indices);
+ __pyx_v_unique_indices = __pyx_t_4;
+ __pyx_t_4 = 0;
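
(The block above is Cython's standard sequence-unpacking machinery for the 2-tuple returned by np.unique(..., return_index=True): a fast path that indexes a tuple or list directly under CPython, and a generic-iterator fallback that raises ValueError if the right-hand side yields anything other than exactly two items.)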
- /* "mtrand.pyx":1048
+ /* "mtrand.pyx":1061
* new = cdf.searchsorted(x, side='right')
- * new = np.unique(new)
- * found[n_uniq:n_uniq + new.size] = new # <<<<<<<<<<<<<<
- * n_uniq += new.size
- * idx = found
+ * _, unique_indices = np.unique(new, return_index=True)
+ * unique_indices.sort() # <<<<<<<<<<<<<<
+ * new = new.take(unique_indices)
+ * flat_found[n_uniq:n_uniq + new.size] = new
*/
- __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_v_n_uniq); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_3 = PyObject_GetAttr(__pyx_v_new, __pyx_n_s__size); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_unique_indices, __pyx_n_s__sort); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1061; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyNumber_Add(__pyx_v_n_uniq, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1061; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_4); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (__Pyx_PySequence_SetSlice(__pyx_v_found, __pyx_t_8, __pyx_t_9, __pyx_v_new) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- /* "mtrand.pyx":1049
- * new = np.unique(new)
- * found[n_uniq:n_uniq + new.size] = new
+ /* "mtrand.pyx":1062
+ * _, unique_indices = np.unique(new, return_index=True)
+ * unique_indices.sort()
+ * new = new.take(unique_indices) # <<<<<<<<<<<<<<
+ * flat_found[n_uniq:n_uniq + new.size] = new
+ * n_uniq += new.size
+ */
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_new, __pyx_n_s__take); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1062; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1062; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_unique_indices);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_unique_indices);
+ __Pyx_GIVEREF(__pyx_v_unique_indices);
+ __pyx_t_10 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1062; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_v_new);
+ __pyx_v_new = __pyx_t_10;
+ __pyx_t_10 = 0;
+
+ /* "mtrand.pyx":1063
+ * unique_indices.sort()
+ * new = new.take(unique_indices)
+ * flat_found[n_uniq:n_uniq + new.size] = new # <<<<<<<<<<<<<<
+ * n_uniq += new.size
+ * idx = found
+ */
+ __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_new, __pyx_n_s__size); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1063; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_3 = PyNumber_Add(__pyx_v_n_uniq, __pyx_t_10); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1063; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ if (__Pyx_PyObject_SetSlice(__pyx_v_flat_found, __pyx_v_new, 0, 0, &__pyx_v_n_uniq, &__pyx_t_3, NULL, 0, 0, 1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1063; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
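/* [editorial note] The slice store above changed protocol: the old code
   coerced both bounds with __Pyx_PyIndex_AsSsize_t and used the sequence
   slice API, whereas __Pyx_PyObject_SetSlice can pass the bound objects
   through. On its generic path that is essentially the following
   (hypothetical helper): */
#include <Python.h>
static int set_slice(PyObject *target, PyObject *start, PyObject *stop,
                     PyObject *value)          /* target[start:stop] = value */
{
    PyObject *sl = PySlice_New(start, stop, Py_None);
    if (sl == NULL) return -1;
    int rc = PyObject_SetItem(target, sl, value);
    Py_DECREF(sl);
    return rc;
}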
+
+ /* "mtrand.pyx":1064
+ * new = new.take(unique_indices)
+ * flat_found[n_uniq:n_uniq + new.size] = new
* n_uniq += new.size # <<<<<<<<<<<<<<
* idx = found
* else:
*/
- __pyx_t_4 = PyObject_GetAttr(__pyx_v_new, __pyx_n_s__size); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1049; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_n_uniq, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1049; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_new, __pyx_n_s__size); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1064; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_n_uniq, __pyx_t_3); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1064; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_v_n_uniq);
- __pyx_v_n_uniq = __pyx_t_3;
- __pyx_t_3 = 0;
+ __pyx_v_n_uniq = __pyx_t_10;
+ __pyx_t_10 = 0;
}
- /* "mtrand.pyx":1050
- * found[n_uniq:n_uniq + new.size] = new
+ /* "mtrand.pyx":1065
+ * flat_found[n_uniq:n_uniq + new.size] = new
* n_uniq += new.size
* idx = found # <<<<<<<<<<<<<<
* else:
@@ -7669,112 +8203,244 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran
*/
__Pyx_INCREF(__pyx_v_found);
__pyx_v_idx = __pyx_v_found;
- goto __pyx_L15;
+ goto __pyx_L25;
}
/*else*/ {
- /* "mtrand.pyx":1052
+ /* "mtrand.pyx":1067
* idx = found
* else:
* idx = self.permutation(pop_size)[:size] # <<<<<<<<<<<<<<
- *
- * #Use samples as indices for a if a is array-like
+ * if shape is not None:
+ * idx.shape = shape
*/
- __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__permutation); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1052; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_10 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__permutation); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1052; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_v_pop_size);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_pop_size);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_pop_size);
__Pyx_GIVEREF(__pyx_v_pop_size);
- __pyx_t_6 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1052; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_v_size); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1052; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_4 = __Pyx_PySequence_GetSlice(__pyx_t_6, 0, __pyx_t_9); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1052; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_10, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- __pyx_v_idx = __pyx_t_4;
- __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyObject_GetSlice(__pyx_t_4, 0, 0, NULL, &__pyx_v_size, NULL, 0, 0, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1067; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_v_idx = __pyx_t_3;
+ __pyx_t_3 = 0;
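/* [editorial note] Same story for reads: `self.permutation(pop_size)[:size]`
   now goes through __Pyx_PyObject_GetSlice with `size` passed as an object
   bound instead of being forced through Py_ssize_t first. Generic-path
   sketch (hypothetical helper): */
#include <Python.h>
static PyObject *get_head_slice(PyObject *obj, PyObject *stop) /* obj[:stop] */
{
    PyObject *sl = PySlice_New(Py_None, stop, Py_None);
    if (sl == NULL) return NULL;
    PyObject *res = PyObject_GetItem(obj, sl);
    Py_DECREF(sl);
    return res;
}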
+
+ /* "mtrand.pyx":1068
+ * else:
+ * idx = self.permutation(pop_size)[:size]
+ * if shape is not None: # <<<<<<<<<<<<<<
+ * idx.shape = shape
+ *
+ */
+ __pyx_t_11 = (__pyx_v_shape != Py_None);
+ if (__pyx_t_11) {
+
+ /* "mtrand.pyx":1069
+ * idx = self.permutation(pop_size)[:size]
+ * if shape is not None:
+ * idx.shape = shape # <<<<<<<<<<<<<<
+ *
+ * if shape is None and isinstance(idx, np.ndarray):
+ */
+ if (__Pyx_PyObject_SetAttrStr(__pyx_v_idx, __pyx_n_s__shape, __pyx_v_shape) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1069; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ goto __pyx_L32;
+ }
+ __pyx_L32:;
}
- __pyx_L15:;
+ __pyx_L25:;
+ }
+ __pyx_L22:;
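/* [editorial note] `idx.shape = shape` above compiles to a plain attribute
   store; ndarray's shape setter performs the in-place reshape and raises if
   the element counts disagree. Equivalent call (hypothetical helper): */
#include <Python.h>
static int set_shape(PyObject *idx, PyObject *shape)
{
    return PyObject_SetAttrString(idx, "shape", shape); /* idx.shape = shape */
}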
+
+ /* "mtrand.pyx":1071
+ * idx.shape = shape
+ *
+ * if shape is None and isinstance(idx, np.ndarray): # <<<<<<<<<<<<<<
+ * # In most cases a scalar will have been made an array
+ * idx = idx.item(0)
+ */
+ __pyx_t_11 = (__pyx_v_shape == Py_None);
+ if (__pyx_t_11) {
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__ndarray); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_5 = PyObject_IsInstance(__pyx_v_idx, __pyx_t_4); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1071; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_13 = __pyx_t_5;
+ } else {
+ __pyx_t_13 = __pyx_t_11;
}
- __pyx_L12:;
+ if (__pyx_t_13) {
- /* "mtrand.pyx":1055
+ /* "mtrand.pyx":1073
+ * if shape is None and isinstance(idx, np.ndarray):
+ * # In most cases a scalar will have been made an array
+ * idx = idx.item(0) # <<<<<<<<<<<<<<
*
* #Use samples as indices for a if a is array-like
- * if isinstance(a, int): # <<<<<<<<<<<<<<
+ */
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_idx, __pyx_n_s__item); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1073; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_k_tuple_38), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1073; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_v_idx);
+ __pyx_v_idx = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L33;
+ }
+ __pyx_L33:;
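/* [editorial note] The condition above is a short-circuit `and`: the
   isinstance() half only runs once `shape is None` has already held, which is
   why the generated code funnels the two halves through separate temporaries.
   Condensed equivalent (hypothetical helper; `ndarray_type` is np.ndarray
   fetched by the caller; returns 1, 0, or -1 on error): */
#include <Python.h>
static int is_scalar_case(PyObject *shape, PyObject *idx, PyObject *ndarray_type)
{
    if (shape != Py_None) return 0;                /* left operand false */
    return PyObject_IsInstance(idx, ndarray_type); /* right operand */
}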
+
+ /* "mtrand.pyx":1076
+ *
+ * #Use samples as indices for a if a is array-like
+ * if a.ndim == 0: # <<<<<<<<<<<<<<
* return idx
- * else:
+ *
*/
- __pyx_t_4 = ((PyObject *)((PyObject*)(&PyInt_Type)));
- __Pyx_INCREF(__pyx_t_4);
- __pyx_t_7 = __Pyx_TypeCheck(__pyx_v_a, __pyx_t_4);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s__ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_int_0, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_13 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (__pyx_t_7) {
+ if (__pyx_t_13) {
- /* "mtrand.pyx":1056
+ /* "mtrand.pyx":1077
* #Use samples as indices for a if a is array-like
- * if isinstance(a, int):
+ * if a.ndim == 0:
* return idx # <<<<<<<<<<<<<<
- * else:
- * return a.take(idx)
+ *
+ * if shape is not None and idx.ndim == 0:
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_idx);
__pyx_r = __pyx_v_idx;
goto __pyx_L0;
- goto __pyx_L20;
+ goto __pyx_L34;
}
- /*else*/ {
+ __pyx_L34:;
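/* [editorial note] The scalar test changed meaning here: the old code asked
   `isinstance(a, int)` via a C type check against PyInt_Type, the new code
   asks `a.ndim == 0`, so any 0-d array short-circuits out. A Python-3
   flavoured sketch of that comparison (hypothetical helper): */
#include <Python.h>
static int ndim_is_zero(PyObject *a)
{
    PyObject *ndim = PyObject_GetAttrString(a, "ndim");
    if (ndim == NULL) return -1;
    PyObject *zero = PyLong_FromLong(0);
    int rc = (zero != NULL) ? PyObject_RichCompareBool(ndim, zero, Py_EQ) : -1;
    Py_DECREF(ndim);
    Py_XDECREF(zero);
    return rc;                                     /* 1, 0, or -1 on error */
}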
- /* "mtrand.pyx":1058
+ /* "mtrand.pyx":1079
* return idx
- * else:
- * return a.take(idx) # <<<<<<<<<<<<<<
- *
*
+ * if shape is not None and idx.ndim == 0: # <<<<<<<<<<<<<<
+ * # If size == () then the user requested a 0-d array as opposed to
+ * # a scalar object when size is None. However a[idx] is always a
*/
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_4 = PyObject_GetAttr(__pyx_v_a, __pyx_n_s__take); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1058; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_13 = (__pyx_v_shape != Py_None);
+ if (__pyx_t_13) {
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_idx, __pyx_n_s__ndim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1079; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1058; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_6);
- __Pyx_INCREF(__pyx_v_idx);
- PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_idx);
- __Pyx_GIVEREF(__pyx_v_idx);
- __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1058; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_int_0, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1079; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_11 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1079; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_5 = __pyx_t_11;
+ } else {
+ __pyx_t_5 = __pyx_t_13;
+ }
+ if (__pyx_t_5) {
+
+ /* "mtrand.pyx":1085
+ * # array, taking into account that np.array(item) may not work
+ * # for object arrays.
+ * res = np.empty((), dtype=a.dtype) # <<<<<<<<<<<<<<
+ * res[()] = a[idx]
+ * return res
+ */
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1085; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1085; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1085; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(((PyObject *)__pyx_t_3));
+ __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s__dtype); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1085; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__dtype), __pyx_t_10) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1085; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __pyx_t_10 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_k_tuple_39), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1085; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0;
- __pyx_r = __pyx_t_3;
- __pyx_t_3 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __pyx_v_res = __pyx_t_10;
+ __pyx_t_10 = 0;
+
+ /* "mtrand.pyx":1086
+ * # for object arrays.
+ * res = np.empty((), dtype=a.dtype)
+ * res[()] = a[idx] # <<<<<<<<<<<<<<
+ * return res
+ *
+ */
+ __pyx_t_10 = PyObject_GetItem(__pyx_v_a, __pyx_v_idx); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1086; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ if (PyObject_SetItem(__pyx_v_res, ((PyObject *)__pyx_empty_tuple), __pyx_t_10) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1086; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+
+ /* "mtrand.pyx":1087
+ * res = np.empty((), dtype=a.dtype)
+ * res[()] = a[idx]
+ * return res # <<<<<<<<<<<<<<
+ *
+ * return a[idx]
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_res);
+ __pyx_r = __pyx_v_res;
goto __pyx_L0;
+ goto __pyx_L35;
}
- __pyx_L20:;
+ __pyx_L35:;
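/* [editorial note] The branch above implements the 0-d return path spelled
   out in the pyx comments: `res = np.empty((), dtype=a.dtype)` followed by
   `res[()] = a[idx]`, which works even for object dtype where np.array(item)
   may not. The C-level essence of `res[()] = value` (hypothetical helper): */
#include <Python.h>
static int store_0d(PyObject *res, PyObject *value)
{
    PyObject *key = PyTuple_New(0);            /* the () index */
    if (key == NULL) return -1;
    int rc = PyObject_SetItem(res, key, value);
    Py_DECREF(key);
    return rc;
}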
+
+ /* "mtrand.pyx":1089
+ * return res
+ *
+ * return a[idx] # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_10 = PyObject_GetItem(__pyx_v_a, __pyx_v_idx); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1089; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_10);
+ __pyx_r = __pyx_t_10;
+ __pyx_t_10 = 0;
+ goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("mtrand.RandomState.choice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_pop_size);
+ __Pyx_XDECREF(__pyx_v_shape);
__Pyx_XDECREF(__pyx_v_cdf);
__Pyx_XDECREF(__pyx_v_uniform_samples);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_n_uniq);
__Pyx_XDECREF(__pyx_v_found);
+ __Pyx_XDECREF(__pyx_v_flat_found);
__Pyx_XDECREF(__pyx_v_x);
__Pyx_XDECREF(__pyx_v_new);
+ __Pyx_XDECREF(__pyx_v__);
+ __Pyx_XDECREF(__pyx_v_unique_indices);
+ __Pyx_XDECREF(__pyx_v_res);
__Pyx_XDECREF(__pyx_v_a);
+ __Pyx_XDECREF(__pyx_v_size);
__Pyx_XDECREF(__pyx_v_p);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
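/* [editorial note] The enlarged cleanup list above uses __Pyx_XDECREF because
   the newly added locals (shape, flat_found, _, unique_indices, res, size)
   may still be NULL on early-error exits; the X-variants are the
   no-op-on-NULL forms, exactly like Py_XDECREF versus Py_DECREF in the plain
   C API. */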
@@ -7788,16 +8454,19 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_27uniform(PyObject *__pyx_v_self
PyObject *__pyx_v_low = 0;
PyObject *__pyx_v_high = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
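/* [editorial note] These three locals are traceback bookkeeping: each error
   branch in the wrapper records its source position here before jumping to
   __pyx_L3_error, where __Pyx_AddTraceback() reports it. Declaring them
   per-wrapper looks like newer-Cython codegen; treat that as an assumption,
   not something this diff states. */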
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("uniform (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__low,&__pyx_n_s__high,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- values[0] = __pyx_k_35;
- values[1] = __pyx_k_36;
+ values[0] = __pyx_k_40;
+ values[1] = __pyx_k_41;
- /* "mtrand.pyx":1061
+ /* "mtrand.pyx":1092
*
*
* def uniform(self, low=0.0, high=1.0, size=None): # <<<<<<<<<<<<<<
@@ -7834,7 +8503,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_27uniform(PyObject *__pyx_v_self
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "uniform") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1061; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "uniform") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1092; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -7851,7 +8520,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_27uniform(PyObject *__pyx_v_self
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("uniform", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1061; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("uniform", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1092; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.uniform", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -7880,7 +8549,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("uniform", 0);
- /* "mtrand.pyx":1135
+ /* "mtrand.pyx":1166
* cdef object temp
*
* flow = PyFloat_AsDouble(low) # <<<<<<<<<<<<<<
@@ -7889,7 +8558,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra
*/
__pyx_v_flow = PyFloat_AsDouble(__pyx_v_low);
- /* "mtrand.pyx":1136
+ /* "mtrand.pyx":1167
*
* flow = PyFloat_AsDouble(low)
* fhigh = PyFloat_AsDouble(high) # <<<<<<<<<<<<<<
@@ -7898,7 +8567,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra
*/
__pyx_v_fhigh = PyFloat_AsDouble(__pyx_v_high);
- /* "mtrand.pyx":1137
+ /* "mtrand.pyx":1168
* flow = PyFloat_AsDouble(low)
* fhigh = PyFloat_AsDouble(high)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -7908,7 +8577,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":1138
+ /* "mtrand.pyx":1169
* fhigh = PyFloat_AsDouble(high)
* if not PyErr_Occurred():
* return cont2_array_sc(self.internal_state, rk_uniform, size, flow, fhigh-flow) # <<<<<<<<<<<<<<
@@ -7916,7 +8585,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra
* olow = <ndarray>PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_uniform, __pyx_v_size, __pyx_v_flow, (__pyx_v_fhigh - __pyx_v_flow)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_uniform, __pyx_v_size, __pyx_v_flow, (__pyx_v_fhigh - __pyx_v_flow)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1169; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -7925,7 +8594,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra
}
__pyx_L3:;
- /* "mtrand.pyx":1139
+ /* "mtrand.pyx":1170
* if not PyErr_Occurred():
* return cont2_array_sc(self.internal_state, rk_uniform, size, flow, fhigh-flow)
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -7934,45 +8603,49 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra
*/
PyErr_Clear();
- /* "mtrand.pyx":1140
+ /* "mtrand.pyx":1171
* return cont2_array_sc(self.internal_state, rk_uniform, size, flow, fhigh-flow)
* PyErr_Clear()
* olow = <ndarray>PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* ohigh = <ndarray>PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* temp = np.subtract(ohigh, olow)
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_low, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_low, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1171; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_olow = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_olow = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":1141
+ /* "mtrand.pyx":1172
* PyErr_Clear()
* olow = <ndarray>PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* ohigh = <ndarray>PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* temp = np.subtract(ohigh, olow)
* Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_high, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_high, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_ohigh = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
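/* [editorial note] The rewritten `<ndarray>` casts above look busier but are
   reference-neutral relative to the old form: both end with the variable
   owning exactly one new strong reference to the array. The pattern stripped
   of temporaries (hypothetical helper; assumes the numpy headers are on the
   include path): */
#include <Python.h>
#include <numpy/arrayobject.h>
static void bind_ndarray(PyArrayObject **slot, PyObject *obj)
{
    Py_INCREF(obj);                 /* the variable's strong reference */
    *slot = (PyArrayObject *)obj;   /* the <ndarray> cast is just a C cast */
}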
- /* "mtrand.pyx":1142
+ /* "mtrand.pyx":1173
* olow = <ndarray>PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* ohigh = <ndarray>PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* temp = np.subtract(ohigh, olow) # <<<<<<<<<<<<<<
* Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting
* # rules because EnsureArray steals a reference
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__subtract); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__subtract); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_ohigh));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_ohigh));
@@ -7980,14 +8653,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra
__Pyx_INCREF(((PyObject *)__pyx_v_olow));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_olow));
__Pyx_GIVEREF(((PyObject *)__pyx_v_olow));
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1173; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
__pyx_v_temp = __pyx_t_4;
__pyx_t_4 = 0;
- /* "mtrand.pyx":1143
+ /* "mtrand.pyx":1174
* ohigh = <ndarray>PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* temp = np.subtract(ohigh, olow)
* Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting # <<<<<<<<<<<<<<
@@ -7996,20 +8669,22 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra
*/
Py_INCREF(__pyx_v_temp);
- /* "mtrand.pyx":1145
+ /* "mtrand.pyx":1176
* Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting
* # rules because EnsureArray steals a reference
* odiff = <ndarray>PyArray_EnsureArray(temp) # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_uniform, size, olow, odiff)
*
*/
- __pyx_t_4 = PyArray_EnsureArray(__pyx_v_temp); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyArray_EnsureArray(__pyx_v_temp); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1176; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_4)));
- __pyx_v_odiff = ((PyArrayObject *)__pyx_t_4);
+ __pyx_t_2 = __pyx_t_4;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_v_odiff = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
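/* [editorial note] The Py_INCREF(temp) above exists because, as the pyx
   comment says, PyArray_EnsureArray steals a reference to its argument; the
   extra reference keeps `temp` alive for the function's own cleanup. Safe
   calling pattern (hypothetical helper; assumes the numpy C API has been
   initialised via import_array()): */
#include <Python.h>
#include <numpy/arrayobject.h>
static PyObject *ensure_array_keep(PyObject *obj)
{
    Py_INCREF(obj);                  /* pre-pay the reference EnsureArray steals */
    return PyArray_EnsureArray(obj); /* caller still owns its original ref */
}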
- /* "mtrand.pyx":1146
+ /* "mtrand.pyx":1177
* # rules because EnsureArray steals a reference
* odiff = <ndarray>PyArray_EnsureArray(temp)
* return cont2_array(self.internal_state, rk_uniform, size, olow, odiff) # <<<<<<<<<<<<<<
@@ -8017,10 +8692,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra
* def rand(self, *args):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_4 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_uniform, __pyx_v_size, __pyx_v_olow, __pyx_v_odiff); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_r = __pyx_t_4;
- __pyx_t_4 = 0;
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_uniform, __pyx_v_size, __pyx_v_olow, __pyx_v_odiff); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1177; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
@@ -8058,7 +8733,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_29rand(PyObject *__pyx_v_self, P
return __pyx_r;
}
-/* "mtrand.pyx":1148
+/* "mtrand.pyx":1179
* return cont2_array(self.internal_state, rk_uniform, size, olow, odiff)
*
* def rand(self, *args): # <<<<<<<<<<<<<<
@@ -8079,18 +8754,18 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_28rand(struct __pyx_obj_6mtrand_
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("rand", 0);
- /* "mtrand.pyx":1187
+ /* "mtrand.pyx":1218
*
* """
* if len(args) == 0: # <<<<<<<<<<<<<<
* return self.random_sample()
* else:
*/
- __pyx_t_1 = PyTuple_GET_SIZE(((PyObject *)__pyx_v_args)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1187; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyTuple_GET_SIZE(((PyObject *)__pyx_v_args)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_2 = (__pyx_t_1 == 0);
if (__pyx_t_2) {
- /* "mtrand.pyx":1188
+ /* "mtrand.pyx":1219
* """
* if len(args) == 0:
* return self.random_sample() # <<<<<<<<<<<<<<
@@ -8098,9 +8773,9 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_28rand(struct __pyx_obj_6mtrand_
* return self.random_sample(size=args)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__random_sample); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__random_sample); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1188; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_4;
@@ -8110,7 +8785,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_28rand(struct __pyx_obj_6mtrand_
}
/*else*/ {
- /* "mtrand.pyx":1190
+ /* "mtrand.pyx":1221
* return self.random_sample()
* else:
* return self.random_sample(size=args) # <<<<<<<<<<<<<<
@@ -8118,12 +8793,12 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_28rand(struct __pyx_obj_6mtrand_
* def randn(self, *args):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_4 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__random_sample); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__random_sample); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1221; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1221; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_3));
- if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__size), ((PyObject *)__pyx_v_args)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_empty_tuple), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__size), ((PyObject *)__pyx_v_args)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1221; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_empty_tuple), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1221; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
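/* [editorial note] `self.random_sample(size=args)` compiles to a keyword-only
   call: an empty positional tuple plus a one-entry kwargs dict handed to
   PyObject_Call. Stand-alone equivalent (hypothetical helper): */
#include <Python.h>
static PyObject *call_kw_only(PyObject *func, const char *kw, PyObject *val)
{
    PyObject *res = NULL;
    PyObject *args = PyTuple_New(0);
    PyObject *kwargs = Py_BuildValue("{s:O}", kw, val);
    if (args && kwargs)
        res = PyObject_Call(func, args, kwargs);   /* func(**{kw: val}) */
    Py_XDECREF(args);
    Py_XDECREF(kwargs);
    return res;
}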
@@ -8164,7 +8839,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_31randn(PyObject *__pyx_v_self,
return __pyx_r;
}
-/* "mtrand.pyx":1192
+/* "mtrand.pyx":1223
* return self.random_sample(size=args)
*
* def randn(self, *args): # <<<<<<<<<<<<<<
@@ -8185,18 +8860,18 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_30randn(struct __pyx_obj_6mtrand
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("randn", 0);
- /* "mtrand.pyx":1244
+ /* "mtrand.pyx":1275
*
* """
* if len(args) == 0: # <<<<<<<<<<<<<<
* return self.standard_normal()
* else:
*/
- __pyx_t_1 = PyTuple_GET_SIZE(((PyObject *)__pyx_v_args)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1244; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyTuple_GET_SIZE(((PyObject *)__pyx_v_args)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_2 = (__pyx_t_1 == 0);
if (__pyx_t_2) {
- /* "mtrand.pyx":1245
+ /* "mtrand.pyx":1276
* """
* if len(args) == 0:
* return self.standard_normal() # <<<<<<<<<<<<<<
@@ -8204,9 +8879,9 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_30randn(struct __pyx_obj_6mtrand
* return self.standard_normal(args)
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__standard_normal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__standard_normal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_4;
@@ -8216,7 +8891,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_30randn(struct __pyx_obj_6mtrand
}
/*else*/ {
- /* "mtrand.pyx":1247
+ /* "mtrand.pyx":1278
* return self.standard_normal()
* else:
* return self.standard_normal(args) # <<<<<<<<<<<<<<
@@ -8224,14 +8899,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_30randn(struct __pyx_obj_6mtrand
* def random_integers(self, low, high=None, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_4 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__standard_normal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__standard_normal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_args));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_args));
__Pyx_GIVEREF(((PyObject *)__pyx_v_args));
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
@@ -8262,6 +8937,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_33random_integers(PyObject *__py
PyObject *__pyx_v_low = 0;
PyObject *__pyx_v_high = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("random_integers (wrapper)", 0);
@@ -8269,7 +8947,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_33random_integers(PyObject *__py
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__low,&__pyx_n_s__high,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- /* "mtrand.pyx":1249
+ /* "mtrand.pyx":1280
* return self.standard_normal(args)
*
* def random_integers(self, low, high=None, size=None): # <<<<<<<<<<<<<<
@@ -8305,7 +8983,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_33random_integers(PyObject *__py
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "random_integers") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1249; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "random_integers") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1280; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -8322,7 +9000,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_33random_integers(PyObject *__py
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("random_integers", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1249; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("random_integers", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1280; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.random_integers", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -8347,7 +9025,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_32random_integers(struct __pyx_o
__Pyx_INCREF(__pyx_v_low);
__Pyx_INCREF(__pyx_v_high);
- /* "mtrand.pyx":1321
+ /* "mtrand.pyx":1352
*
* """
* if high is None: # <<<<<<<<<<<<<<
@@ -8357,7 +9035,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_32random_integers(struct __pyx_o
__pyx_t_1 = (__pyx_v_high == Py_None);
if (__pyx_t_1) {
- /* "mtrand.pyx":1322
+ /* "mtrand.pyx":1353
* """
* if high is None:
* high = low # <<<<<<<<<<<<<<
@@ -8368,7 +9046,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_32random_integers(struct __pyx_o
__Pyx_DECREF(__pyx_v_high);
__pyx_v_high = __pyx_v_low;
- /* "mtrand.pyx":1323
+ /* "mtrand.pyx":1354
* if high is None:
* high = low
* low = 1 # <<<<<<<<<<<<<<
@@ -8382,7 +9060,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_32random_integers(struct __pyx_o
}
__pyx_L3:;
- /* "mtrand.pyx":1324
+ /* "mtrand.pyx":1355
* high = low
* low = 1
* return self.randint(low, high+1, size) # <<<<<<<<<<<<<<
@@ -8390,11 +9068,11 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_32random_integers(struct __pyx_o
* # Complicated, continuous distributions:
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__randint); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__randint); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyNumber_Add(__pyx_v_high, __pyx_int_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyNumber_Add(__pyx_v_high, __pyx_int_1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_v_low);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_low);
@@ -8405,7 +9083,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_32random_integers(struct __pyx_o
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_size);
__Pyx_GIVEREF(__pyx_v_size);
__pyx_t_3 = 0;
- __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
@@ -8434,6 +9112,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_35standard_normal(PyObject *__py
static char __pyx_doc_6mtrand_11RandomState_34standard_normal[] = "\n standard_normal(size=None)\n\n Returns samples from a Standard Normal distribution (mean=0, stdev=1).\n\n Parameters\n ----------\n size : int or tuple of ints, optional\n Output shape. Default is None, in which case a single value is\n returned.\n\n Returns\n -------\n out : float or ndarray\n Drawn samples.\n\n Examples\n --------\n >>> s = np.random.standard_normal(8000)\n >>> s\n array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, #random\n -0.38672696, -0.4685006 ]) #random\n >>> s.shape\n (8000,)\n >>> s = np.random.standard_normal(size=(3, 4, 2))\n >>> s.shape\n (3, 4, 2)\n\n ";
static PyObject *__pyx_pw_6mtrand_11RandomState_35standard_normal(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("standard_normal (wrapper)", 0);
@@ -8441,7 +9122,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_35standard_normal(PyObject *__py
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__size,0};
PyObject* values[1] = {0};
- /* "mtrand.pyx":1327
+ /* "mtrand.pyx":1358
*
* # Complicated, continuous distributions:
* def standard_normal(self, size=None): # <<<<<<<<<<<<<<
@@ -8466,7 +9147,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_35standard_normal(PyObject *__py
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_normal") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1327; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_normal") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1358; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -8479,7 +9160,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_35standard_normal(PyObject *__py
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("standard_normal", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1327; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("standard_normal", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1358; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.standard_normal", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -8499,7 +9180,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_34standard_normal(struct __pyx_o
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("standard_normal", 0);
- /* "mtrand.pyx":1357
+ /* "mtrand.pyx":1388
*
* """
* return cont0_array(self.internal_state, rk_gauss, size) # <<<<<<<<<<<<<<
@@ -8507,7 +9188,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_34standard_normal(struct __pyx_o
* def normal(self, loc=0.0, scale=1.0, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_gauss, __pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_gauss, __pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1388; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
@@ -8532,16 +9213,19 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_37normal(PyObject *__pyx_v_self,
PyObject *__pyx_v_loc = 0;
PyObject *__pyx_v_scale = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("normal (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__loc,&__pyx_n_s__scale,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- values[0] = __pyx_k_37;
- values[1] = __pyx_k_38;
+ values[0] = __pyx_k_42;
+ values[1] = __pyx_k_43;
- /* "mtrand.pyx":1359
+ /* "mtrand.pyx":1390
* return cont0_array(self.internal_state, rk_gauss, size)
*
* def normal(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<<
@@ -8578,7 +9262,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_37normal(PyObject *__pyx_v_self,
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "normal") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1359; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "normal") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -8595,7 +9279,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_37normal(PyObject *__pyx_v_self,
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("normal", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1359; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("normal", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.normal", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -8623,7 +9307,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("normal", 0);
- /* "mtrand.pyx":1444
+ /* "mtrand.pyx":1475
* cdef double floc, fscale
*
* floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<<
@@ -8632,7 +9316,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran
*/
__pyx_v_floc = PyFloat_AsDouble(__pyx_v_loc);
- /* "mtrand.pyx":1445
+ /* "mtrand.pyx":1476
*
* floc = PyFloat_AsDouble(loc)
* fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<<
@@ -8641,7 +9325,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran
*/
__pyx_v_fscale = PyFloat_AsDouble(__pyx_v_scale);
- /* "mtrand.pyx":1446
+ /* "mtrand.pyx":1477
* floc = PyFloat_AsDouble(loc)
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -8651,7 +9335,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":1447
+ /* "mtrand.pyx":1478
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred():
* if fscale <= 0: # <<<<<<<<<<<<<<
@@ -8661,23 +9345,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran
__pyx_t_1 = (__pyx_v_fscale <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1448
+ /* "mtrand.pyx":1479
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_normal, size, floc, fscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_40), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1448; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_45), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1479; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1448; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1479; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":1449
+ /* "mtrand.pyx":1480
* if fscale <= 0:
* raise ValueError("scale <= 0")
* return cont2_array_sc(self.internal_state, rk_normal, size, floc, fscale) # <<<<<<<<<<<<<<
@@ -8685,7 +9369,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_normal, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1449; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_normal, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -8694,7 +9378,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran
}
__pyx_L3:;
- /* "mtrand.pyx":1451
+ /* "mtrand.pyx":1482
* return cont2_array_sc(self.internal_state, rk_normal, size, floc, fscale)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -8703,50 +9387,54 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran
*/
PyErr_Clear();
- /* "mtrand.pyx":1453
+ /* "mtrand.pyx":1484
* PyErr_Clear()
*
* oloc = <ndarray>PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1453; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1484; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oloc = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oloc = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":1454
+ /* "mtrand.pyx":1485
*
* oloc = <ndarray>PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oscale, 0)):
* raise ValueError("scale <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1485; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_oscale = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":1455
+ /* "mtrand.pyx":1486
* oloc = <ndarray>PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0)): # <<<<<<<<<<<<<<
* raise ValueError("scale <= 0")
* return cont2_array(self.internal_state, rk_normal, size, oloc, oscale)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_oscale));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_oscale));
@@ -8754,40 +9442,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran
__Pyx_INCREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1456
+ /* "mtrand.pyx":1487
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_normal, size, oloc, oscale)
*
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_41), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1456; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_46), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1487; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1456; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1487; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":1457
+ /* "mtrand.pyx":1488
* if np.any(np.less_equal(oscale, 0)):
* raise ValueError("scale <= 0")
* return cont2_array(self.internal_state, rk_normal, size, oloc, oscale) # <<<<<<<<<<<<<<
@@ -8795,7 +9483,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran
* def beta(self, a, b, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_5 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_normal, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1457; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_normal, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1488; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
@@ -8820,11 +9508,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran
/* Python wrapper */
static PyObject *__pyx_pw_6mtrand_11RandomState_39beta(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6mtrand_11RandomState_38beta[] = "\n beta(a, b, size=None)\n\n The Beta distribution over ``[0, 1]``.\n\n The Beta distribution is a special case of the Dirichlet distribution,\n and is related to the Gamma distribution. It has the probability\n distribution function\n\n .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1}\n (1 - x)^{\\beta - 1},\n\n where the normalisation, B, is the beta function,\n\n .. math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1}\n (1 - t)^{\\beta - 1} dt.\n\n It is often seen in Bayesian inference and order statistics.\n\n Parameters\n ----------\n a : float\n Alpha, non-negative.\n b : float\n Beta, non-negative.\n size : tuple of ints, optional\n The number of samples to draw. The ouput is packed according to\n the size given.\n\n Returns\n -------\n out : ndarray\n Array of the given shape, containing values drawn from a\n Beta distribution.\n\n ";
+static char __pyx_doc_6mtrand_11RandomState_38beta[] = "\n beta(a, b, size=None)\n\n The Beta distribution over ``[0, 1]``.\n\n The Beta distribution is a special case of the Dirichlet distribution,\n and is related to the Gamma distribution. It has the probability\n distribution function\n\n .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1}\n (1 - x)^{\\beta - 1},\n\n where the normalisation, B, is the beta function,\n\n .. math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1}\n (1 - t)^{\\beta - 1} dt.\n\n It is often seen in Bayesian inference and order statistics.\n\n Parameters\n ----------\n a : float\n Alpha, non-negative.\n b : float\n Beta, non-negative.\n size : tuple of ints, optional\n The number of samples to draw. The output is packed according to\n the size given.\n\n Returns\n -------\n out : ndarray\n Array of the given shape, containing values drawn from a\n Beta distribution.\n\n ";
static PyObject *__pyx_pw_6mtrand_11RandomState_39beta(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_a = 0;
PyObject *__pyx_v_b = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("beta (wrapper)", 0);
@@ -8832,7 +9523,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_39beta(PyObject *__pyx_v_self, P
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__a,&__pyx_n_s__b,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- /* "mtrand.pyx":1459
+ /* "mtrand.pyx":1490
* return cont2_array(self.internal_state, rk_normal, size, oloc, oscale)
*
* def beta(self, a, b, size=None): # <<<<<<<<<<<<<<
@@ -8858,7 +9549,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_39beta(PyObject *__pyx_v_self, P
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__b)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("beta", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1459; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("beta", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1490; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
@@ -8867,7 +9558,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_39beta(PyObject *__pyx_v_self, P
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "beta") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1459; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "beta") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1490; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -8884,7 +9575,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_39beta(PyObject *__pyx_v_self, P
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("beta", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1459; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("beta", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1490; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.beta", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -8912,7 +9603,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("beta", 0);
- /* "mtrand.pyx":1499
+ /* "mtrand.pyx":1530
* cdef double fa, fb
*
* fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<<
@@ -8921,7 +9612,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
*/
__pyx_v_fa = PyFloat_AsDouble(__pyx_v_a);
- /* "mtrand.pyx":1500
+ /* "mtrand.pyx":1531
*
* fa = PyFloat_AsDouble(a)
* fb = PyFloat_AsDouble(b) # <<<<<<<<<<<<<<
@@ -8930,7 +9621,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
*/
__pyx_v_fb = PyFloat_AsDouble(__pyx_v_b);
- /* "mtrand.pyx":1501
+ /* "mtrand.pyx":1532
* fa = PyFloat_AsDouble(a)
* fb = PyFloat_AsDouble(b)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -8940,7 +9631,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":1502
+ /* "mtrand.pyx":1533
* fb = PyFloat_AsDouble(b)
* if not PyErr_Occurred():
* if fa <= 0: # <<<<<<<<<<<<<<
@@ -8950,23 +9641,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
__pyx_t_1 = (__pyx_v_fa <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1503
+ /* "mtrand.pyx":1534
* if not PyErr_Occurred():
* if fa <= 0:
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* if fb <= 0:
* raise ValueError("b <= 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_43), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_48), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1534; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1534; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":1504
+ /* "mtrand.pyx":1535
* if fa <= 0:
* raise ValueError("a <= 0")
* if fb <= 0: # <<<<<<<<<<<<<<
@@ -8976,23 +9667,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
__pyx_t_1 = (__pyx_v_fb <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1505
+ /* "mtrand.pyx":1536
* raise ValueError("a <= 0")
* if fb <= 0:
* raise ValueError("b <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_45), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_50), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1536; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1536; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":1506
+ /* "mtrand.pyx":1537
* if fb <= 0:
* raise ValueError("b <= 0")
* return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb) # <<<<<<<<<<<<<<
@@ -9000,7 +9691,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_beta, __pyx_v_size, __pyx_v_fa, __pyx_v_fb); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_beta, __pyx_v_size, __pyx_v_fa, __pyx_v_fb); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1537; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -9009,7 +9700,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
}
__pyx_L3:;
- /* "mtrand.pyx":1508
+ /* "mtrand.pyx":1539
* return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -9018,50 +9709,54 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
*/
PyErr_Clear();
- /* "mtrand.pyx":1510
+ /* "mtrand.pyx":1541
* PyErr_Clear()
*
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* ob = <ndarray>PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1541; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oa = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oa = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":1511
+ /* "mtrand.pyx":1542
*
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* ob = <ndarray>PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oa, 0)):
* raise ValueError("a <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_b, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_b, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1542; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_ob = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":1512
+ /* "mtrand.pyx":1543
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* ob = <ndarray>PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0)): # <<<<<<<<<<<<<<
* raise ValueError("a <= 0")
* if np.any(np.less_equal(ob, 0)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_oa));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_oa));
@@ -9069,57 +9764,57 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
__Pyx_INCREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1543; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1513
+ /* "mtrand.pyx":1544
* ob = <ndarray>PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0)):
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* if np.any(np.less_equal(ob, 0)):
* raise ValueError("b <= 0")
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_46), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_51), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1544; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1544; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":1514
+ /* "mtrand.pyx":1545
* if np.any(np.less_equal(oa, 0)):
* raise ValueError("a <= 0")
* if np.any(np.less_equal(ob, 0)): # <<<<<<<<<<<<<<
* raise ValueError("b <= 0")
* return cont2_array(self.internal_state, rk_beta, size, oa, ob)
*/
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1545; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1545; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1545; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1545; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1545; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_ob));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_ob));
@@ -9127,40 +9822,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
__Pyx_INCREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1545; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1545; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1545; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1545; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1515
+ /* "mtrand.pyx":1546
* raise ValueError("a <= 0")
* if np.any(np.less_equal(ob, 0)):
* raise ValueError("b <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_beta, size, oa, ob)
*
*/
- __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_47), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1515; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_52), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1546; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1515; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1546; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":1516
+ /* "mtrand.pyx":1547
* if np.any(np.less_equal(ob, 0)):
* raise ValueError("b <= 0")
* return cont2_array(self.internal_state, rk_beta, size, oa, ob) # <<<<<<<<<<<<<<
@@ -9168,7 +9863,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_
* def exponential(self, scale=1.0, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_4 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_beta, __pyx_v_size, __pyx_v_oa, __pyx_v_ob); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1516; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_beta, __pyx_v_size, __pyx_v_oa, __pyx_v_ob); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1547; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
@@ -9197,15 +9892,18 @@ static char __pyx_doc_6mtrand_11RandomState_40exponential[] = "\n exponen
static PyObject *__pyx_pw_6mtrand_11RandomState_41exponential(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_scale = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("exponential (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__scale,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- values[0] = __pyx_k_48;
+ values[0] = __pyx_k_53;
- /* "mtrand.pyx":1518
+ /* "mtrand.pyx":1549
* return cont2_array(self.internal_state, rk_beta, size, oa, ob)
*
* def exponential(self, scale=1.0, size=None): # <<<<<<<<<<<<<<
@@ -9236,7 +9934,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_41exponential(PyObject *__pyx_v_
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "exponential") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1518; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "exponential") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1549; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -9251,7 +9949,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_41exponential(PyObject *__pyx_v_
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("exponential", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1518; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("exponential", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1549; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.exponential", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -9277,7 +9975,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("exponential", 0);
- /* "mtrand.pyx":1559
+ /* "mtrand.pyx":1590
* cdef double fscale
*
* fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<<
@@ -9286,7 +9984,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6
*/
__pyx_v_fscale = PyFloat_AsDouble(__pyx_v_scale);
- /* "mtrand.pyx":1560
+ /* "mtrand.pyx":1591
*
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -9296,7 +9994,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":1561
+ /* "mtrand.pyx":1592
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred():
* if fscale <= 0: # <<<<<<<<<<<<<<
@@ -9306,23 +10004,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6
__pyx_t_1 = (__pyx_v_fscale <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1562
+ /* "mtrand.pyx":1593
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_exponential, size, fscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_49), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_54), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1593; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1593; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":1563
+ /* "mtrand.pyx":1594
* if fscale <= 0:
* raise ValueError("scale <= 0")
* return cont1_array_sc(self.internal_state, rk_exponential, size, fscale) # <<<<<<<<<<<<<<
@@ -9330,7 +10028,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_exponential, __pyx_v_size, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_exponential, __pyx_v_size, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -9339,7 +10037,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6
}
__pyx_L3:;
- /* "mtrand.pyx":1565
+ /* "mtrand.pyx":1596
* return cont1_array_sc(self.internal_state, rk_exponential, size, fscale)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -9348,80 +10046,82 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6
*/
PyErr_Clear();
- /* "mtrand.pyx":1567
+ /* "mtrand.pyx":1598
* PyErr_Clear()
*
* oscale = <ndarray> PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1567; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1598; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oscale = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oscale = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":1568
+ /* "mtrand.pyx":1599
*
* oscale = <ndarray> PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("scale <= 0")
* return cont1_array(self.internal_state, rk_exponential, size, oscale)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_oscale));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_oscale));
__Pyx_GIVEREF(((PyObject *)__pyx_v_oscale));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1599; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1569
+ /* "mtrand.pyx":1600
* oscale = <ndarray> PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_exponential, size, oscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_50), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_55), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1600; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1600; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":1570
+ /* "mtrand.pyx":1601
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0")
* return cont1_array(self.internal_state, rk_exponential, size, oscale) # <<<<<<<<<<<<<<
@@ -9429,10 +10129,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6
* def standard_exponential(self, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_exponential, __pyx_v_size, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1570; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_exponential, __pyx_v_size, __pyx_v_oscale); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1601; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
@@ -9456,6 +10156,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_43standard_exponential(PyObject
static char __pyx_doc_6mtrand_11RandomState_42standard_exponential[] = "\n standard_exponential(size=None)\n\n Draw samples from the standard exponential distribution.\n\n `standard_exponential` is identical to the exponential distribution\n with a scale parameter of 1.\n\n Parameters\n ----------\n size : int or tuple of ints\n Shape of the output.\n\n Returns\n -------\n out : float or ndarray\n Drawn samples.\n\n Examples\n --------\n Output a 3x8000 array:\n\n >>> n = np.random.standard_exponential((3, 8000))\n\n ";
static PyObject *__pyx_pw_6mtrand_11RandomState_43standard_exponential(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("standard_exponential (wrapper)", 0);
@@ -9463,7 +10166,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_43standard_exponential(PyObject
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__size,0};
PyObject* values[1] = {0};
- /* "mtrand.pyx":1572
+ /* "mtrand.pyx":1603
* return cont1_array(self.internal_state, rk_exponential, size, oscale)
*
* def standard_exponential(self, size=None): # <<<<<<<<<<<<<<
@@ -9488,7 +10191,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_43standard_exponential(PyObject
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_exponential") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1572; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_exponential") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1603; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -9501,7 +10204,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_43standard_exponential(PyObject
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("standard_exponential", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1572; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("standard_exponential", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1603; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.standard_exponential", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -9521,7 +10224,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_42standard_exponential(struct __
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("standard_exponential", 0);
- /* "mtrand.pyx":1598
+ /* "mtrand.pyx":1629
*
* """
* return cont0_array(self.internal_state, rk_standard_exponential, size) # <<<<<<<<<<<<<<
@@ -9529,7 +10232,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_42standard_exponential(struct __
* def standard_gamma(self, shape, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_standard_exponential, __pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1598; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_standard_exponential, __pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1629; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
@@ -9553,6 +10256,9 @@ static char __pyx_doc_6mtrand_11RandomState_44standard_gamma[] = "\n stan
static PyObject *__pyx_pw_6mtrand_11RandomState_45standard_gamma(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("standard_gamma (wrapper)", 0);
@@ -9560,7 +10266,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_45standard_gamma(PyObject *__pyx
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__shape,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- /* "mtrand.pyx":1600
+ /* "mtrand.pyx":1631
* return cont0_array(self.internal_state, rk_standard_exponential, size)
*
* def standard_gamma(self, shape, size=None): # <<<<<<<<<<<<<<
@@ -9589,7 +10295,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_45standard_gamma(PyObject *__pyx
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_gamma") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1600; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_gamma") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1631; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -9604,7 +10310,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_45standard_gamma(PyObject *__pyx
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("standard_gamma", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1600; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("standard_gamma", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1631; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.standard_gamma", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -9630,7 +10336,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("standard_gamma", 0);
- /* "mtrand.pyx":1670
+ /* "mtrand.pyx":1701
* cdef double fshape
*
* fshape = PyFloat_AsDouble(shape) # <<<<<<<<<<<<<<
@@ -9639,7 +10345,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob
*/
__pyx_v_fshape = PyFloat_AsDouble(__pyx_v_shape);
- /* "mtrand.pyx":1671
+ /* "mtrand.pyx":1702
*
* fshape = PyFloat_AsDouble(shape)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -9649,7 +10355,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":1672
+ /* "mtrand.pyx":1703
* fshape = PyFloat_AsDouble(shape)
* if not PyErr_Occurred():
* if fshape <= 0: # <<<<<<<<<<<<<<
@@ -9659,23 +10365,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob
__pyx_t_1 = (__pyx_v_fshape <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1673
+ /* "mtrand.pyx":1704
* if not PyErr_Occurred():
* if fshape <= 0:
* raise ValueError("shape <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_standard_gamma, size, fshape)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_52), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1673; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_57), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1673; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":1674
+ /* "mtrand.pyx":1705
* if fshape <= 0:
* raise ValueError("shape <= 0")
* return cont1_array_sc(self.internal_state, rk_standard_gamma, size, fshape) # <<<<<<<<<<<<<<
@@ -9683,7 +10389,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_standard_gamma, __pyx_v_size, __pyx_v_fshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1674; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_standard_gamma, __pyx_v_size, __pyx_v_fshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -9692,7 +10398,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob
}
__pyx_L3:;
- /* "mtrand.pyx":1676
+ /* "mtrand.pyx":1707
* return cont1_array_sc(self.internal_state, rk_standard_gamma, size, fshape)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -9701,80 +10407,82 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob
*/
PyErr_Clear();
- /* "mtrand.pyx":1677
+ /* "mtrand.pyx":1708
*
* PyErr_Clear()
* oshape = <ndarray> PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oshape, 0.0)):
* raise ValueError("shape <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1708; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oshape = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oshape = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":1678
+ /* "mtrand.pyx":1709
* PyErr_Clear()
* oshape = <ndarray> PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oshape, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("shape <= 0")
* return cont1_array(self.internal_state, rk_standard_gamma, size, oshape)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_oshape));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_oshape));
__Pyx_GIVEREF(((PyObject *)__pyx_v_oshape));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1709; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1679
+ /* "mtrand.pyx":1710
* oshape = <ndarray> PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oshape, 0.0)):
* raise ValueError("shape <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_standard_gamma, size, oshape)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_53), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_58), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":1680
+ /* "mtrand.pyx":1711
* if np.any(np.less_equal(oshape, 0.0)):
* raise ValueError("shape <= 0")
* return cont1_array(self.internal_state, rk_standard_gamma, size, oshape) # <<<<<<<<<<<<<<
@@ -9782,10 +10490,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob
* def gamma(self, shape, scale=1.0, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_standard_gamma, __pyx_v_size, __pyx_v_oshape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1680; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_standard_gamma, __pyx_v_size, __pyx_v_oshape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
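/* An illustrative aside, not part of the diff: the {__pyx_filename = ...;
 * __pyx_lineno = ...; goto __pyx_L1_error;} blocks repeated above are
 * Cython's goto-based error handling -- record where the failure happened,
 * jump to a single cleanup label, and let __Pyx_AddTraceback() splice the
 * recorded mtrand.pyx position into the Python traceback. A minimal
 * hand-written sketch of the same idiom (my_call_twice is hypothetical): */
#include <Python.h>

static PyObject *my_call_twice(PyObject *func, PyObject *arg)
{
    PyObject *tmp = NULL, *result = NULL;
    int my_lineno = 0;

    tmp = PyObject_CallFunctionObjArgs(func, arg, NULL);
    if (tmp == NULL) { my_lineno = __LINE__; goto my_error; }
    result = PyObject_CallFunctionObjArgs(func, tmp, NULL);
    if (result == NULL) { my_lineno = __LINE__; goto my_error; }
    Py_DECREF(tmp);
    return result;                         /* new reference */
my_error:
    Py_XDECREF(tmp);                       /* Py_XDECREF tolerates NULL, like __Pyx_XDECREF */
    (void)my_lineno;                       /* generated code would pass this to __Pyx_AddTraceback() */
    return NULL;
}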
@@ -9811,15 +10519,18 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_47gamma(PyObject *__pyx_v_self,
PyObject *__pyx_v_shape = 0;
PyObject *__pyx_v_scale = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("gamma (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__shape,&__pyx_n_s__scale,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- values[1] = __pyx_k_54;
+ values[1] = __pyx_k_59;
- /* "mtrand.pyx":1682
+ /* "mtrand.pyx":1713
* return cont1_array(self.internal_state, rk_standard_gamma, size, oshape)
*
* def gamma(self, shape, scale=1.0, size=None): # <<<<<<<<<<<<<<
@@ -9854,7 +10565,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_47gamma(PyObject *__pyx_v_self,
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gamma") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1682; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gamma") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1713; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -9871,7 +10582,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_47gamma(PyObject *__pyx_v_self,
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("gamma", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1682; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("gamma", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1713; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.gamma", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -9899,7 +10610,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("gamma", 0);
- /* "mtrand.pyx":1755
+ /* "mtrand.pyx":1786
* cdef double fshape, fscale
*
* fshape = PyFloat_AsDouble(shape) # <<<<<<<<<<<<<<
@@ -9908,7 +10619,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
*/
__pyx_v_fshape = PyFloat_AsDouble(__pyx_v_shape);
- /* "mtrand.pyx":1756
+ /* "mtrand.pyx":1787
*
* fshape = PyFloat_AsDouble(shape)
* fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<<
@@ -9917,7 +10628,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
*/
__pyx_v_fscale = PyFloat_AsDouble(__pyx_v_scale);
- /* "mtrand.pyx":1757
+ /* "mtrand.pyx":1788
* fshape = PyFloat_AsDouble(shape)
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -9927,7 +10638,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":1758
+ /* "mtrand.pyx":1789
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred():
* if fshape <= 0: # <<<<<<<<<<<<<<
@@ -9937,23 +10648,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
__pyx_t_1 = (__pyx_v_fshape <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1759
+ /* "mtrand.pyx":1790
* if not PyErr_Occurred():
* if fshape <= 0:
* raise ValueError("shape <= 0") # <<<<<<<<<<<<<<
* if fscale <= 0:
* raise ValueError("scale <= 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_55), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1759; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_60), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1759; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":1760
+ /* "mtrand.pyx":1791
* if fshape <= 0:
* raise ValueError("shape <= 0")
* if fscale <= 0: # <<<<<<<<<<<<<<
@@ -9963,23 +10674,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
__pyx_t_1 = (__pyx_v_fscale <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1761
+ /* "mtrand.pyx":1792
* raise ValueError("shape <= 0")
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, fscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_56), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1761; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_61), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1761; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":1762
+ /* "mtrand.pyx":1793
* if fscale <= 0:
* raise ValueError("scale <= 0")
* return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, fscale) # <<<<<<<<<<<<<<
@@ -9987,7 +10698,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_gamma, __pyx_v_size, __pyx_v_fshape, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1762; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_gamma, __pyx_v_size, __pyx_v_fshape, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1793; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
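/* An illustrative aside, not part of the diff: this hunk compiles the scalar
 * fast path of RandomState.gamma() shown in the pyx comments above.
 * PyFloat_AsDouble() returns -1.0 and sets an exception for non-numeric
 * input, so PyErr_Occurred() decides between the scalar and array code
 * paths, and PyErr_Clear() discards the probe failure before falling back.
 * A hand-written sketch of that probe (my_gamma_args_as_scalars is
 * hypothetical): */
#include <Python.h>

/* Returns 1 if both arguments are usable scalars (validated), 0 to fall back
 * to the array path, -1 with a ValueError set on a domain error. */
static int my_gamma_args_as_scalars(PyObject *shape, PyObject *scale,
                                    double *fshape, double *fscale)
{
    *fshape = PyFloat_AsDouble(shape);
    *fscale = PyFloat_AsDouble(scale);
    if (PyErr_Occurred()) {
        PyErr_Clear();                     /* not scalars: use the array path */
        return 0;
    }
    if (*fshape <= 0.0) { PyErr_SetString(PyExc_ValueError, "shape <= 0"); return -1; }
    if (*fscale <= 0.0) { PyErr_SetString(PyExc_ValueError, "scale <= 0"); return -1; }
    return 1;
}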
@@ -9996,7 +10707,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
}
__pyx_L3:;
- /* "mtrand.pyx":1764
+ /* "mtrand.pyx":1795
* return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, fscale)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -10005,52 +10716,56 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
*/
PyErr_Clear();
- /* "mtrand.pyx":1765
+ /* "mtrand.pyx":1796
*
* PyErr_Clear()
* oshape = <ndarray>PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oshape, 0.0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1765; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oshape = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oshape = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":1766
+ /* "mtrand.pyx":1797
* PyErr_Clear()
* oshape = <ndarray>PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oshape, 0.0)):
* raise ValueError("shape <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_oscale = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
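/* An illustrative aside, not part of the diff: the rewritten <ndarray> cast
 * above is only a pointer cast plus a reference transfer. PyArray_FROM_OTF()
 * already returns a new reference to an aligned NPY_DOUBLE array (converting
 * or copying only when necessary), so one owned reference is all the caller
 * needs -- hence the new INCREF-alias/DECREF-temporary sequence replacing
 * the old double-cast INCREF. A sketch, assuming import_array() was called
 * at module init (my_as_aligned_double is hypothetical): */
#include <Python.h>
#include <numpy/arrayobject.h>

static PyArrayObject *my_as_aligned_double(PyObject *obj)
{
    PyObject *tmp = PyArray_FROM_OTF(obj, NPY_DOUBLE, NPY_ARRAY_ALIGNED);
    if (tmp == NULL)
        return NULL;
    return (PyArrayObject *)tmp;           /* caller owns this single reference */
}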
- /* "mtrand.pyx":1767
+ /* "mtrand.pyx":1798
* oshape = <ndarray>PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oshape, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("shape <= 0")
* if np.any(np.less_equal(oscale, 0.0)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_oshape));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_oshape));
@@ -10058,59 +10773,59 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1768
+ /* "mtrand.pyx":1799
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oshape, 0.0)):
* raise ValueError("shape <= 0") # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_57), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1768; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_62), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1768; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
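/* An illustrative aside, not part of the diff: the temporaries above build
 * the call np.any(np.less_equal(oshape, 0.0)) by hand; PyTuple_SET_ITEM
 * steals a reference, which is why each use is paired with __Pyx_GIVEREF
 * and the temporary is zeroed afterwards. The same check written against
 * the plain C API (my_any_nonpositive is hypothetical): */
#include <Python.h>

/* Returns 1 if np.any(np.less_equal(o, 0.0)) is true, 0 if false, -1 on error. */
static int my_any_nonpositive(PyObject *np_module, PyObject *o)
{
    int res = -1;
    PyObject *le = NULL, *any = NULL, *mask = NULL, *truth = NULL;

    le  = PyObject_GetAttrString(np_module, "less_equal");
    any = PyObject_GetAttrString(np_module, "any");
    if (le == NULL || any == NULL) goto my_done;
    mask = PyObject_CallFunction(le, "Od", o, 0.0);          /* np.less_equal(o, 0.0) */
    if (mask == NULL) goto my_done;
    truth = PyObject_CallFunctionObjArgs(any, mask, NULL);   /* np.any(mask) */
    if (truth == NULL) goto my_done;
    res = PyObject_IsTrue(truth);          /* -1 on error, like __Pyx_PyObject_IsTrue */
my_done:
    Py_XDECREF(le); Py_XDECREF(any); Py_XDECREF(mask); Py_XDECREF(truth);
    return res;
}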
- /* "mtrand.pyx":1769
+ /* "mtrand.pyx":1800
* if np.any(np.less_equal(oshape, 0.0)):
* raise ValueError("shape <= 0")
* if np.any(np.less_equal(oscale, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("scale <= 0")
* return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_oscale));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_oscale));
@@ -10118,40 +10833,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1770
+ /* "mtrand.pyx":1801
* raise ValueError("shape <= 0")
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_58), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1770; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_63), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1770; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":1771
+ /* "mtrand.pyx":1802
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0")
* return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale) # <<<<<<<<<<<<<<
@@ -10159,7 +10874,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
* def f(self, dfnum, dfden, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_gamma, __pyx_v_size, __pyx_v_oshape, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1771; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_gamma, __pyx_v_size, __pyx_v_oshape, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -10184,11 +10899,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand
/* Python wrapper */
static PyObject *__pyx_pw_6mtrand_11RandomState_49f(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6mtrand_11RandomState_48f[] = "\n f(dfnum, dfden, size=None)\n\n Draw samples from an F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of freedom\n in denominator), where both parameters should be greater than zero.\n\n The random variate of the F distribution (also known as the\n Fisher distribution) is a continuous probability distribution\n that arises in ANOVA tests, and is the ratio of two chi-square\n variates.\n\n Parameters\n ----------\n dfnum : float\n Degrees of freedom in numerator. Should be greater than zero.\n dfden : float\n Degrees of freedom in denominator. Should be greater than zero.\n size : {tuple, int}, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``,\n then ``m * n * k`` samples are drawn. By default only one sample\n is returned.\n\n Returns\n -------\n samples : {ndarray, scalar}\n Samples from the Fisher distribution.\n\n See Also\n --------\n scipy.stats.distributions.f : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n\n The F statistic is used to compare in-group variances to between-group\n variances. Calculating the distribution depends on the sampling, and\n so it is a function of the respective degrees of freedom in the\n problem. The variable `dfnum` is the number of samples minus one, the\n between-groups degrees of freedom, while `dfden` is the within-groups\n degrees of freedom, the sum of the number of samples in each group\n minus the number of groups.\n\n References\n ----------\n .. [1] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n Fifth Edition, 2002.""\n .. [2] Wikipedia, \"F-distribution\",\n http://en.wikipedia.org/wiki/F-distribution\n\n Examples\n --------\n An example from Glantz[1], pp 47-40.\n Two groups, children of diabetics (25 people) and children from people\n without diabetes (25 controls). Fasting blood glucose was measured,\n case group had a mean value of 86.1, controls had a mean value of\n 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these\n data consistent with the null hypothesis that the parents' diabetic\n status does not affect their children's blood glucose levels?\n Calculating the F statistic from the data gives a value of 36.01.\n\n Draw samples from the distribution:\n\n >>> dfnum = 1. # between group degrees of freedom\n >>> dfden = 48. # within groups degrees of freedom\n >>> s = np.random.f(dfnum, dfden, 1000)\n\n The lower bound for the top 1% of the samples is:\n\n >>> sort(s)[-10]\n 7.61988120985\n\n So there is about a 1% chance that the F statistic will exceed 7.62,\n the measured value is 36, so the null hypothesis is rejected at the 1%\n level.\n\n ";
+static char __pyx_doc_6mtrand_11RandomState_48f[] = "\n f(dfnum, dfden, size=None)\n\n Draw samples from an F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of freedom\n in denominator), where both parameters should be greater than zero.\n\n The random variate of the F distribution (also known as the\n Fisher distribution) is a continuous probability distribution\n that arises in ANOVA tests, and is the ratio of two chi-square\n variates.\n\n Parameters\n ----------\n dfnum : float\n Degrees of freedom in numerator. Should be greater than zero.\n dfden : float\n Degrees of freedom in denominator. Should be greater than zero.\n size : {tuple, int}, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``,\n then ``m * n * k`` samples are drawn. By default only one sample\n is returned.\n\n Returns\n -------\n samples : {ndarray, scalar}\n Samples from the Fisher distribution.\n\n See Also\n --------\n scipy.stats.distributions.f : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The F statistic is used to compare in-group variances to between-group\n variances. Calculating the distribution depends on the sampling, and\n so it is a function of the respective degrees of freedom in the\n problem. The variable `dfnum` is the number of samples minus one, the\n between-groups degrees of freedom, while `dfden` is the within-groups\n degrees of freedom, the sum of the number of samples in each group\n minus the number of groups.\n\n References\n ----------\n .. [1] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n Fifth Edition, 2002.""\n .. [2] Wikipedia, \"F-distribution\",\n http://en.wikipedia.org/wiki/F-distribution\n\n Examples\n --------\n An example from Glantz[1], pp 47-40.\n Two groups, children of diabetics (25 people) and children from people\n without diabetes (25 controls). Fasting blood glucose was measured,\n case group had a mean value of 86.1, controls had a mean value of\n 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these\n data consistent with the null hypothesis that the parents' diabetic\n status does not affect their children's blood glucose levels?\n Calculating the F statistic from the data gives a value of 36.01.\n\n Draw samples from the distribution:\n\n >>> dfnum = 1. # between group degrees of freedom\n >>> dfden = 48. # within groups degrees of freedom\n >>> s = np.random.f(dfnum, dfden, 1000)\n\n The lower bound for the top 1% of the samples is:\n\n >>> sort(s)[-10]\n 7.61988120985\n\n So there is about a 1% chance that the F statistic will exceed 7.62,\n the measured value is 36, so the null hypothesis is rejected at the 1%\n level.\n\n ";
static PyObject *__pyx_pw_6mtrand_11RandomState_49f(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_dfnum = 0;
PyObject *__pyx_v_dfden = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("f (wrapper)", 0);
@@ -10196,7 +10914,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_49f(PyObject *__pyx_v_self, PyOb
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__dfnum,&__pyx_n_s__dfden,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- /* "mtrand.pyx":1773
+ /* "mtrand.pyx":1804
* return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale)
*
* def f(self, dfnum, dfden, size=None): # <<<<<<<<<<<<<<
@@ -10222,7 +10940,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_49f(PyObject *__pyx_v_self, PyOb
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dfden)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("f", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1773; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("f", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1804; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
@@ -10231,7 +10949,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_49f(PyObject *__pyx_v_self, PyOb
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "f") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1773; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "f") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1804; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -10248,7 +10966,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_49f(PyObject *__pyx_v_self, PyOb
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("f", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1773; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("f", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1804; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.f", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -10276,7 +10994,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("f", 0);
- /* "mtrand.pyx":1857
+ /* "mtrand.pyx":1887
* cdef double fdfnum, fdfden
*
* fdfnum = PyFloat_AsDouble(dfnum) # <<<<<<<<<<<<<<
@@ -10285,7 +11003,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
*/
__pyx_v_fdfnum = PyFloat_AsDouble(__pyx_v_dfnum);
- /* "mtrand.pyx":1858
+ /* "mtrand.pyx":1888
*
* fdfnum = PyFloat_AsDouble(dfnum)
* fdfden = PyFloat_AsDouble(dfden) # <<<<<<<<<<<<<<
@@ -10294,7 +11012,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
*/
__pyx_v_fdfden = PyFloat_AsDouble(__pyx_v_dfden);
- /* "mtrand.pyx":1859
+ /* "mtrand.pyx":1889
* fdfnum = PyFloat_AsDouble(dfnum)
* fdfden = PyFloat_AsDouble(dfden)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -10304,7 +11022,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":1860
+ /* "mtrand.pyx":1890
* fdfden = PyFloat_AsDouble(dfden)
* if not PyErr_Occurred():
* if fdfnum <= 0: # <<<<<<<<<<<<<<
@@ -10314,23 +11032,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
__pyx_t_1 = (__pyx_v_fdfnum <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1861
+ /* "mtrand.pyx":1891
* if not PyErr_Occurred():
* if fdfnum <= 0:
* raise ValueError("shape <= 0") # <<<<<<<<<<<<<<
* if fdfden <= 0:
* raise ValueError("scale <= 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_59), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1861; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_64), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1891; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1861; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1891; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":1862
+ /* "mtrand.pyx":1892
* if fdfnum <= 0:
* raise ValueError("shape <= 0")
* if fdfden <= 0: # <<<<<<<<<<<<<<
@@ -10340,23 +11058,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
__pyx_t_1 = (__pyx_v_fdfden <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1863
+ /* "mtrand.pyx":1893
* raise ValueError("shape <= 0")
* if fdfden <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, fdfden)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_60), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1863; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_65), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1863; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":1864
+ /* "mtrand.pyx":1894
* if fdfden <= 0:
* raise ValueError("scale <= 0")
* return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, fdfden) # <<<<<<<<<<<<<<
@@ -10364,7 +11082,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_f, __pyx_v_size, __pyx_v_fdfnum, __pyx_v_fdfden); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1864; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_f, __pyx_v_size, __pyx_v_fdfnum, __pyx_v_fdfden); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1894; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -10373,7 +11091,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
}
__pyx_L3:;
- /* "mtrand.pyx":1866
+ /* "mtrand.pyx":1896
* return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, fdfden)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -10382,52 +11100,56 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
*/
PyErr_Clear();
- /* "mtrand.pyx":1868
+ /* "mtrand.pyx":1898
* PyErr_Clear()
*
* odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odfnum, 0.0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1868; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1898; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_odfnum = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_odfnum = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":1869
+ /* "mtrand.pyx":1899
*
* odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(odfnum, 0.0)):
* raise ValueError("dfnum <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1869; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1899; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_odfden = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":1870
+ /* "mtrand.pyx":1900
* odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odfnum, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("dfnum <= 0")
* if np.any(np.less_equal(odfden, 0.0)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_odfnum));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_odfnum));
@@ -10435,59 +11157,59 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1870; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1871
+ /* "mtrand.pyx":1901
* odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odfnum, 0.0)):
* raise ValueError("dfnum <= 0") # <<<<<<<<<<<<<<
* if np.any(np.less_equal(odfden, 0.0)):
* raise ValueError("dfden <= 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_62), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1871; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_67), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1901; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1871; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1901; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":1872
+ /* "mtrand.pyx":1902
* if np.any(np.less_equal(odfnum, 0.0)):
* raise ValueError("dfnum <= 0")
* if np.any(np.less_equal(odfden, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("dfden <= 0")
* return cont2_array(self.internal_state, rk_f, size, odfnum, odfden)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_odfden));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_odfden));
@@ -10495,40 +11217,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1872; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1873
+ /* "mtrand.pyx":1903
* raise ValueError("dfnum <= 0")
* if np.any(np.less_equal(odfden, 0.0)):
* raise ValueError("dfden <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_f, size, odfnum, odfden)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_64), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_69), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1903; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1903; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":1874
+ /* "mtrand.pyx":1904
* if np.any(np.less_equal(odfden, 0.0)):
* raise ValueError("dfden <= 0")
* return cont2_array(self.internal_state, rk_f, size, odfnum, odfden) # <<<<<<<<<<<<<<
@@ -10536,7 +11258,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran
* def noncentral_f(self, dfnum, dfden, nonc, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_f, __pyx_v_size, __pyx_v_odfnum, __pyx_v_odfden); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1874; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_f, __pyx_v_size, __pyx_v_odfnum, __pyx_v_odfden); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1904; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -10567,6 +11289,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_51noncentral_f(PyObject *__pyx_v
PyObject *__pyx_v_dfden = 0;
PyObject *__pyx_v_nonc = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("noncentral_f (wrapper)", 0);
@@ -10574,7 +11299,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_51noncentral_f(PyObject *__pyx_v
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__dfnum,&__pyx_n_s__dfden,&__pyx_n_s__nonc,&__pyx_n_s__size,0};
PyObject* values[4] = {0,0,0,0};
- /* "mtrand.pyx":1876
+ /* "mtrand.pyx":1906
* return cont2_array(self.internal_state, rk_f, size, odfnum, odfden)
*
* def noncentral_f(self, dfnum, dfden, nonc, size=None): # <<<<<<<<<<<<<<
@@ -10601,12 +11326,12 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_51noncentral_f(PyObject *__pyx_v
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dfden)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1876; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1906; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__nonc)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1876; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1906; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
if (kw_args > 0) {
@@ -10615,7 +11340,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_51noncentral_f(PyObject *__pyx_v
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "noncentral_f") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1876; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "noncentral_f") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1906; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -10634,7 +11359,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_51noncentral_f(PyObject *__pyx_v
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1876; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1906; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.noncentral_f", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
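/* An illustrative aside, not part of the diff: the unpacking code above is
 * an unrolled equivalent of PyArg_ParseTupleAndKeywords() -- required and
 * optional slots, keyword lookup, and a TypeError naming the offending
 * argument count (__Pyx_RaiseArgtupleInvalid). The same signature written
 * against the stock C API (my_noncentral_f is hypothetical and its body is
 * elided): */
#include <Python.h>

static PyObject *my_noncentral_f(PyObject *self, PyObject *args, PyObject *kwds)
{
    static char *kwlist[] = {"dfnum", "dfden", "nonc", "size", NULL};
    PyObject *dfnum, *dfden, *nonc, *size = Py_None;

    /* "OOO|O": three required object arguments, then an optional size */
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOO|O:noncentral_f", kwlist,
                                     &dfnum, &dfden, &nonc, &size))
        return NULL;
    (void)self;
    Py_RETURN_NONE;                        /* body elided in this sketch */
}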
@@ -10664,7 +11389,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("noncentral_f", 0);
- /* "mtrand.pyx":1943
+ /* "mtrand.pyx":1973
* cdef double fdfnum, fdfden, fnonc
*
* fdfnum = PyFloat_AsDouble(dfnum) # <<<<<<<<<<<<<<
@@ -10673,7 +11398,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
*/
__pyx_v_fdfnum = PyFloat_AsDouble(__pyx_v_dfnum);
- /* "mtrand.pyx":1944
+ /* "mtrand.pyx":1974
*
* fdfnum = PyFloat_AsDouble(dfnum)
* fdfden = PyFloat_AsDouble(dfden) # <<<<<<<<<<<<<<
@@ -10682,7 +11407,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
*/
__pyx_v_fdfden = PyFloat_AsDouble(__pyx_v_dfden);
- /* "mtrand.pyx":1945
+ /* "mtrand.pyx":1975
* fdfnum = PyFloat_AsDouble(dfnum)
* fdfden = PyFloat_AsDouble(dfden)
* fnonc = PyFloat_AsDouble(nonc) # <<<<<<<<<<<<<<
@@ -10691,7 +11416,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
*/
__pyx_v_fnonc = PyFloat_AsDouble(__pyx_v_nonc);
- /* "mtrand.pyx":1946
+ /* "mtrand.pyx":1976
* fdfden = PyFloat_AsDouble(dfden)
* fnonc = PyFloat_AsDouble(nonc)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -10701,7 +11426,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":1947
+ /* "mtrand.pyx":1977
* fnonc = PyFloat_AsDouble(nonc)
* if not PyErr_Occurred():
* if fdfnum <= 1: # <<<<<<<<<<<<<<
@@ -10711,23 +11436,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
__pyx_t_1 = (__pyx_v_fdfnum <= 1.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1948
+ /* "mtrand.pyx":1978
* if not PyErr_Occurred():
* if fdfnum <= 1:
* raise ValueError("dfnum <= 1") # <<<<<<<<<<<<<<
* if fdfden <= 0:
* raise ValueError("dfden <= 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_66), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_71), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1978; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1978; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":1949
+ /* "mtrand.pyx":1979
* if fdfnum <= 1:
* raise ValueError("dfnum <= 1")
* if fdfden <= 0: # <<<<<<<<<<<<<<
@@ -10737,23 +11462,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
__pyx_t_1 = (__pyx_v_fdfden <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1950
+ /* "mtrand.pyx":1980
* raise ValueError("dfnum <= 1")
* if fdfden <= 0:
* raise ValueError("dfden <= 0") # <<<<<<<<<<<<<<
* if fnonc < 0:
* raise ValueError("nonc < 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_67), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_72), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1980; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1980; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":1951
+ /* "mtrand.pyx":1981
* if fdfden <= 0:
* raise ValueError("dfden <= 0")
* if fnonc < 0: # <<<<<<<<<<<<<<
@@ -10763,23 +11488,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
__pyx_t_1 = (__pyx_v_fnonc < 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":1952
+ /* "mtrand.pyx":1982
* raise ValueError("dfden <= 0")
* if fnonc < 0:
* raise ValueError("nonc < 0") # <<<<<<<<<<<<<<
* return cont3_array_sc(self.internal_state, rk_noncentral_f, size,
* fdfnum, fdfden, fnonc)
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_69), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_74), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1982; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1982; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":1953
+ /* "mtrand.pyx":1983
* if fnonc < 0:
* raise ValueError("nonc < 0")
* return cont3_array_sc(self.internal_state, rk_noncentral_f, size, # <<<<<<<<<<<<<<
@@ -10788,14 +11513,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
*/
__Pyx_XDECREF(__pyx_r);
- /* "mtrand.pyx":1954
+ /* "mtrand.pyx":1984
* raise ValueError("nonc < 0")
* return cont3_array_sc(self.internal_state, rk_noncentral_f, size,
* fdfnum, fdfden, fnonc) # <<<<<<<<<<<<<<
*
* PyErr_Clear()
*/
- __pyx_t_2 = __pyx_f_6mtrand_cont3_array_sc(__pyx_v_self->internal_state, rk_noncentral_f, __pyx_v_size, __pyx_v_fdfnum, __pyx_v_fdfden, __pyx_v_fnonc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1953; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont3_array_sc(__pyx_v_self->internal_state, rk_noncentral_f, __pyx_v_size, __pyx_v_fdfnum, __pyx_v_fdfden, __pyx_v_fnonc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1983; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -10804,7 +11529,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
}
__pyx_L3:;
- /* "mtrand.pyx":1956
+ /* "mtrand.pyx":1986
* fdfnum, fdfden, fnonc)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -10813,226 +11538,232 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
*/
PyErr_Clear();
- /* "mtrand.pyx":1958
+ /* "mtrand.pyx":1988
* PyErr_Clear()
*
* odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1958; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1988; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_odfnum = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_odfnum = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":1959
+ /* "mtrand.pyx":1989
*
* odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
*
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1959; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1989; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_odfden = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":1960
+ /* "mtrand.pyx":1990
* odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
*
* if np.any(np.less_equal(odfnum, 1.0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1960; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1990; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_ononc = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_ononc = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":1962
+ /* "mtrand.pyx":1992
* ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
*
* if np.any(np.less_equal(odfnum, 1.0)): # <<<<<<<<<<<<<<
* raise ValueError("dfnum <= 1")
* if np.any(np.less_equal(odfden, 0.0)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1992; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1992; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1992; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1992; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1992; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1992; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_odfnum));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_odfnum));
__Pyx_GIVEREF(((PyObject *)__pyx_v_odfnum));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1992; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1992; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1962; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1992; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1992; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1963
+ /* "mtrand.pyx":1993
*
* if np.any(np.less_equal(odfnum, 1.0)):
* raise ValueError("dfnum <= 1") # <<<<<<<<<<<<<<
* if np.any(np.less_equal(odfden, 0.0)):
* raise ValueError("dfden <= 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_70), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1963; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1963; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_75), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1993; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1993; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":1964
+ /* "mtrand.pyx":1994
* if np.any(np.less_equal(odfnum, 1.0)):
* raise ValueError("dfnum <= 1")
* if np.any(np.less_equal(odfden, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("dfden <= 0")
* if np.any(np.less(ononc, 0.0)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1994; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1994; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1994; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1994; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1994; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1994; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_odfden));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_odfden));
__Pyx_GIVEREF(((PyObject *)__pyx_v_odfden));
- PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1994; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1994; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1994; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1964; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1994; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1965
+ /* "mtrand.pyx":1995
* raise ValueError("dfnum <= 1")
* if np.any(np.less_equal(odfden, 0.0)):
* raise ValueError("dfden <= 0") # <<<<<<<<<<<<<<
* if np.any(np.less(ononc, 0.0)):
* raise ValueError("nonc < 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_71), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1965; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1965; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_76), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1995; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1995; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L8;
}
__pyx_L8:;
- /* "mtrand.pyx":1966
+ /* "mtrand.pyx":1996
* if np.any(np.less_equal(odfden, 0.0)):
* raise ValueError("dfden <= 0")
* if np.any(np.less(ononc, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("nonc < 0")
* return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum,
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1966; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1966; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1966; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1966; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1966; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1966; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_ononc));
- PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_ononc));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_ononc));
__Pyx_GIVEREF(((PyObject *)__pyx_v_ononc));
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1966; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1966; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1966; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1966; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1996; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":1967
+ /* "mtrand.pyx":1997
* raise ValueError("dfden <= 0")
* if np.any(np.less(ononc, 0.0)):
* raise ValueError("nonc < 0") # <<<<<<<<<<<<<<
* return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum,
* odfden, ononc)
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_72), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1967; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1967; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_77), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1997; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1997; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L9;
}
__pyx_L9:;
- /* "mtrand.pyx":1968
+ /* "mtrand.pyx":1998
* if np.any(np.less(ononc, 0.0)):
* raise ValueError("nonc < 0")
* return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum, # <<<<<<<<<<<<<<
@@ -11041,17 +11772,17 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_
*/
__Pyx_XDECREF(__pyx_r);
- /* "mtrand.pyx":1969
+ /* "mtrand.pyx":1999
* raise ValueError("nonc < 0")
* return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum,
* odfden, ononc) # <<<<<<<<<<<<<<
*
* def chisquare(self, df, size=None):
*/
- __pyx_t_2 = __pyx_f_6mtrand_cont3_array(__pyx_v_self->internal_state, rk_noncentral_f, __pyx_v_size, __pyx_v_odfnum, __pyx_v_odfden, __pyx_v_ononc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1968; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_cont3_array(__pyx_v_self->internal_state, rk_noncentral_f, __pyx_v_size, __pyx_v_odfnum, __pyx_v_odfden, __pyx_v_ononc); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
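Above, the newer Cython routes each <ndarray> cast through an extra temporary (__pyx_t_3) instead of INCREF'ing the casted pointer directly; the refcount outcome is identical. At the NumPy level, the PyArray_FROM_OTF(..., NPY_DOUBLE, NPY_ARRAY_ALIGNED) calls amount to roughly the following (an illustrative equivalence with sample data, not the generated code):

    import numpy as np

    # Coerce input to an aligned float64 array, as PyArray_FROM_OTF does here.
    odfnum = np.require([1.5, 2.5], dtype=np.float64, requirements=["ALIGNED"])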
@@ -11078,6 +11809,9 @@ static char __pyx_doc_6mtrand_11RandomState_52chisquare[] = "\n chisquare
static PyObject *__pyx_pw_6mtrand_11RandomState_53chisquare(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_df = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("chisquare (wrapper)", 0);
@@ -11085,7 +11819,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_53chisquare(PyObject *__pyx_v_se
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__df,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- /* "mtrand.pyx":1971
+ /* "mtrand.pyx":2001
* odfden, ononc)
*
* def chisquare(self, df, size=None): # <<<<<<<<<<<<<<
@@ -11114,7 +11848,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_53chisquare(PyObject *__pyx_v_se
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "chisquare") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1971; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "chisquare") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2001; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -11129,7 +11863,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_53chisquare(PyObject *__pyx_v_se
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("chisquare", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1971; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("chisquare", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2001; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -11155,7 +11889,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("chisquare", 0);
- /* "mtrand.pyx":2036
+ /* "mtrand.pyx":2066
* cdef double fdf
*
* fdf = PyFloat_AsDouble(df) # <<<<<<<<<<<<<<
@@ -11164,7 +11898,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt
*/
__pyx_v_fdf = PyFloat_AsDouble(__pyx_v_df);
- /* "mtrand.pyx":2037
+ /* "mtrand.pyx":2067
*
* fdf = PyFloat_AsDouble(df)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -11174,7 +11908,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":2038
+ /* "mtrand.pyx":2068
* fdf = PyFloat_AsDouble(df)
* if not PyErr_Occurred():
* if fdf <= 0: # <<<<<<<<<<<<<<
@@ -11184,23 +11918,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt
__pyx_t_1 = (__pyx_v_fdf <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":2039
+ /* "mtrand.pyx":2069
* if not PyErr_Occurred():
* if fdf <= 0:
* raise ValueError("df <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_74), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_79), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2069; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2069; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":2040
+ /* "mtrand.pyx":2070
* if fdf <= 0:
* raise ValueError("df <= 0")
* return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf) # <<<<<<<<<<<<<<
@@ -11208,7 +11942,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_chisquare, __pyx_v_size, __pyx_v_fdf); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2040; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_chisquare, __pyx_v_size, __pyx_v_fdf); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2070; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -11217,7 +11951,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt
}
__pyx_L3:;
- /* "mtrand.pyx":2042
+ /* "mtrand.pyx":2072
* return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -11226,80 +11960,82 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt
*/
PyErr_Clear();
- /* "mtrand.pyx":2044
+ /* "mtrand.pyx":2074
* PyErr_Clear()
*
* odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2044; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2074; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_odf = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_odf = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":2045
+ /* "mtrand.pyx":2075
*
* odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odf, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("df <= 0")
* return cont1_array(self.internal_state, rk_chisquare, size, odf)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2075; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2075; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2075; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2075; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2075; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2075; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_odf));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_odf));
__Pyx_GIVEREF(((PyObject *)__pyx_v_odf));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2075; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2075; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2075; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2075; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":2046
+ /* "mtrand.pyx":2076
* odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_chisquare, size, odf)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_75), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_80), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2076; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2076; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":2047
+ /* "mtrand.pyx":2077
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 0")
* return cont1_array(self.internal_state, rk_chisquare, size, odf) # <<<<<<<<<<<<<<
@@ -11307,10 +12043,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt
* def noncentral_chisquare(self, df, nonc, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_chisquare, __pyx_v_size, __pyx_v_odf); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_chisquare, __pyx_v_size, __pyx_v_odf); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2077; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
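The other systematic change above replaces __Pyx_GetName(__pyx_m, ...) with __Pyx_GetModuleGlobalName(...) and PyObject_GetAttr with __Pyx_PyObject_GetAttrStr, the upgraded Cython's specialized lookups for module globals and interned attribute names. They compile the same pyx expression quoted in the comments; a runnable sketch of it (sample data assumed):

    import numpy as np

    odf = np.asarray([2.0, 3.0], dtype=np.float64)
    # The np.any / np.less_equal attribute chain built by the C above.
    if np.any(np.less_equal(odf, 0.0)):
        raise ValueError("df <= 0")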
@@ -11336,6 +12072,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_55noncentral_chisquare(PyObject
PyObject *__pyx_v_df = 0;
PyObject *__pyx_v_nonc = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("noncentral_chisquare (wrapper)", 0);
@@ -11343,7 +12082,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_55noncentral_chisquare(PyObject
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__df,&__pyx_n_s__nonc,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- /* "mtrand.pyx":2049
+ /* "mtrand.pyx":2079
* return cont1_array(self.internal_state, rk_chisquare, size, odf)
*
* def noncentral_chisquare(self, df, nonc, size=None): # <<<<<<<<<<<<<<
@@ -11369,7 +12108,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_55noncentral_chisquare(PyObject
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__nonc)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("noncentral_chisquare", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2049; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("noncentral_chisquare", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2079; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
@@ -11378,7 +12117,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_55noncentral_chisquare(PyObject
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "noncentral_chisquare") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2049; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "noncentral_chisquare") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2079; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -11395,7 +12134,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_55noncentral_chisquare(PyObject
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("noncentral_chisquare", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2049; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("noncentral_chisquare", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2079; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.noncentral_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -11423,7 +12162,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("noncentral_chisquare", 0);
- /* "mtrand.pyx":2120
+ /* "mtrand.pyx":2150
* cdef ndarray odf, ononc
* cdef double fdf, fnonc
* fdf = PyFloat_AsDouble(df) # <<<<<<<<<<<<<<
@@ -11432,7 +12171,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
*/
__pyx_v_fdf = PyFloat_AsDouble(__pyx_v_df);
- /* "mtrand.pyx":2121
+ /* "mtrand.pyx":2151
* cdef double fdf, fnonc
* fdf = PyFloat_AsDouble(df)
* fnonc = PyFloat_AsDouble(nonc) # <<<<<<<<<<<<<<
@@ -11441,7 +12180,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
*/
__pyx_v_fnonc = PyFloat_AsDouble(__pyx_v_nonc);
- /* "mtrand.pyx":2122
+ /* "mtrand.pyx":2152
* fdf = PyFloat_AsDouble(df)
* fnonc = PyFloat_AsDouble(nonc)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -11451,7 +12190,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":2123
+ /* "mtrand.pyx":2153
* fnonc = PyFloat_AsDouble(nonc)
* if not PyErr_Occurred():
* if fdf <= 1: # <<<<<<<<<<<<<<
@@ -11461,23 +12200,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
__pyx_t_1 = (__pyx_v_fdf <= 1.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":2124
+ /* "mtrand.pyx":2154
* if not PyErr_Occurred():
* if fdf <= 1:
* raise ValueError("df <= 0") # <<<<<<<<<<<<<<
* if fnonc <= 0:
* raise ValueError("nonc <= 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_76), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_81), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":2125
+ /* "mtrand.pyx":2155
* if fdf <= 1:
* raise ValueError("df <= 0")
* if fnonc <= 0: # <<<<<<<<<<<<<<
@@ -11487,23 +12226,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
__pyx_t_1 = (__pyx_v_fnonc <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":2126
+ /* "mtrand.pyx":2156
* raise ValueError("df <= 0")
* if fnonc <= 0:
* raise ValueError("nonc <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_noncentral_chisquare,
* size, fdf, fnonc)
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_78), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_83), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":2127
+ /* "mtrand.pyx":2157
* if fnonc <= 0:
* raise ValueError("nonc <= 0")
* return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, # <<<<<<<<<<<<<<
@@ -11512,14 +12251,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
*/
__Pyx_XDECREF(__pyx_r);
- /* "mtrand.pyx":2128
+ /* "mtrand.pyx":2158
* raise ValueError("nonc <= 0")
* return cont2_array_sc(self.internal_state, rk_noncentral_chisquare,
* size, fdf, fnonc) # <<<<<<<<<<<<<<
*
* PyErr_Clear()
*/
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_noncentral_chisquare, __pyx_v_size, __pyx_v_fdf, __pyx_v_fnonc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_noncentral_chisquare, __pyx_v_size, __pyx_v_fdf, __pyx_v_fnonc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -11528,7 +12267,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
}
__pyx_L3:;
- /* "mtrand.pyx":2130
+ /* "mtrand.pyx":2160
* size, fdf, fnonc)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -11537,52 +12276,56 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
*/
PyErr_Clear();
- /* "mtrand.pyx":2132
+ /* "mtrand.pyx":2162
* PyErr_Clear()
*
* odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odf, 0.0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_odf = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_odf = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":2133
+ /* "mtrand.pyx":2163
*
* odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 1")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_ononc = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":2134
+ /* "mtrand.pyx":2164
* odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odf, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("df <= 1")
* if np.any(np.less_equal(ononc, 0.0)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_odf));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_odf));
@@ -11590,59 +12333,59 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":2135
+ /* "mtrand.pyx":2165
* ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 1") # <<<<<<<<<<<<<<
* if np.any(np.less_equal(ononc, 0.0)):
* raise ValueError("nonc < 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_80), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_85), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":2136
+ /* "mtrand.pyx":2166
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 1")
* if np.any(np.less_equal(ononc, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("nonc < 0")
* return cont2_array(self.internal_state, rk_noncentral_chisquare, size,
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_ononc));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_ononc));
@@ -11650,40 +12393,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":2137
+ /* "mtrand.pyx":2167
* raise ValueError("df <= 1")
* if np.any(np.less_equal(ononc, 0.0)):
* raise ValueError("nonc < 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_noncentral_chisquare, size,
* odf, ononc)
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_81), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_86), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2167; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2167; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":2138
+ /* "mtrand.pyx":2168
* if np.any(np.less_equal(ononc, 0.0)):
* raise ValueError("nonc < 0")
* return cont2_array(self.internal_state, rk_noncentral_chisquare, size, # <<<<<<<<<<<<<<
@@ -11692,14 +12435,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __
*/
__Pyx_XDECREF(__pyx_r);
- /* "mtrand.pyx":2139
+ /* "mtrand.pyx":2169
* raise ValueError("nonc < 0")
* return cont2_array(self.internal_state, rk_noncentral_chisquare, size,
* odf, ononc) # <<<<<<<<<<<<<<
*
* def standard_cauchy(self, size=None):
*/
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_noncentral_chisquare, __pyx_v_size, __pyx_v_odf, __pyx_v_ononc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2138; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_noncentral_chisquare, __pyx_v_size, __pyx_v_odf, __pyx_v_ononc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
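
The hunks above are mechanical fallout from regenerating mtrand.c with a newer Cython: the mtrand.pyx line references shift by 30 (2134 -> 2164 and so on, tracking insertions earlier in the .pyx), the interned tuple constants are renumbered (__pyx_k_tuple_80/81 -> 85/86), and module lookups switch from __Pyx_GetName/PyObject_GetAttr to the __Pyx_GetModuleGlobalName/__Pyx_PyObject_GetAttrStr helpers. The Python-level logic being compiled is unchanged; per the .pyx excerpts quoted in the comments, the array path amounts to the minimal sketch below (the helper name is hypothetical, and np.asarray stands in only loosely for PyArray_FROM_OTF with NPY_ARRAY_ALIGNED):

import numpy as np

def _noncentral_chisquare_array_checks(df, nonc):
    # Roughly PyArray_FROM_OTF(..., NPY_DOUBLE, NPY_ARRAY_ALIGNED).
    odf = np.asarray(df, dtype=np.double)
    ononc = np.asarray(nonc, dtype=np.double)
    if np.any(np.less_equal(odf, 0.0)):
        raise ValueError("df <= 1")   # message exactly as in the quoted source
    if np.any(np.less_equal(ononc, 0.0)):
        raise ValueError("nonc < 0")
    # mtrand.pyx then returns cont2_array(self.internal_state,
    # rk_noncentral_chisquare, size, odf, ononc).
    return odf, ononc
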
@@ -11727,6 +12470,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_57standard_cauchy(PyObject *__py
static char __pyx_doc_6mtrand_11RandomState_56standard_cauchy[] = "\n standard_cauchy(size=None)\n\n Standard Cauchy distribution with mode = 0.\n\n Also known as the Lorentz distribution.\n\n Parameters\n ----------\n size : int or tuple of ints\n Shape of the output.\n\n Returns\n -------\n samples : ndarray or scalar\n The drawn samples.\n\n Notes\n -----\n The probability density function for the full Cauchy distribution is\n\n .. math:: P(x; x_0, \\gamma) = \\frac{1}{\\pi \\gamma \\bigl[ 1+\n (\\frac{x-x_0}{\\gamma})^2 \\bigr] }\n\n and the Standard Cauchy distribution just sets :math:`x_0=0` and\n :math:`\\gamma=1`\n\n The Cauchy distribution arises in the solution to the driven harmonic\n oscillator problem, and also describes spectral line broadening. It\n also describes the distribution of values at which a line tilted at\n a random angle will cut the x axis.\n\n When studying hypothesis tests that assume normality, seeing how the\n tests perform on data from a Cauchy distribution is a good indicator of\n their sensitivity to a heavy-tailed distribution, since the Cauchy looks\n very much like a Gaussian distribution, but with heavier tails.\n\n References\n ----------\n .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, \"Cauchy\n Distribution\",\n http://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm\n .. [2] Weisstein, Eric W. \"Cauchy Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/CauchyDistribution.html\n .. [3] Wikipedia, \"Cauchy distribution\"\n http://en.wikipedia.org/wiki/Cauchy_distribution\n\n Examples\n --------\n Draw samples and plot the distribution:\n\n >>> s = np.random.standard_cauchy(1000000)\n >>> s = s[(s>-25) & (s<""25)] # truncate distribution so it plots well\n >>> plt.hist(s, bins=100)\n >>> plt.show()\n\n ";
static PyObject *__pyx_pw_6mtrand_11RandomState_57standard_cauchy(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("standard_cauchy (wrapper)", 0);
@@ -11734,7 +12480,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_57standard_cauchy(PyObject *__py
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__size,0};
PyObject* values[1] = {0};
- /* "mtrand.pyx":2141
+ /* "mtrand.pyx":2171
* odf, ononc)
*
* def standard_cauchy(self, size=None): # <<<<<<<<<<<<<<
@@ -11759,7 +12505,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_57standard_cauchy(PyObject *__py
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_cauchy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2141; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_cauchy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2171; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -11772,7 +12518,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_57standard_cauchy(PyObject *__py
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("standard_cauchy", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2141; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("standard_cauchy", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2171; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.standard_cauchy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -11792,7 +12538,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_56standard_cauchy(struct __pyx_o
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("standard_cauchy", 0);
- /* "mtrand.pyx":2200
+ /* "mtrand.pyx":2230
*
* """
* return cont0_array(self.internal_state, rk_standard_cauchy, size) # <<<<<<<<<<<<<<
@@ -11800,7 +12546,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_56standard_cauchy(struct __pyx_o
* def standard_t(self, df, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_standard_cauchy, __pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_standard_cauchy, __pyx_v_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
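
standard_cauchy itself needs no validation (it is a zero-parameter cont0_array call); the substantive change in these hunks is the hoisting of __pyx_lineno/__pyx_filename/__pyx_clineno into the wrapper as locals. The long C string above is the docstring carried over verbatim; the stray "" inside s<""25 is only Cython chunking the long literal for C compilers, not part of the example. Unescaped and given the imports it leaves implicit, the docstring's example reads:

import numpy as np
import matplotlib.pyplot as plt

s = np.random.standard_cauchy(1000000)
s = s[(s > -25) & (s < 25)]  # truncate distribution so it plots well
plt.hist(s, bins=100)
plt.show()
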
@@ -11824,6 +12570,9 @@ static char __pyx_doc_6mtrand_11RandomState_58standard_t[] = "\n standard
static PyObject *__pyx_pw_6mtrand_11RandomState_59standard_t(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_df = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("standard_t (wrapper)", 0);
@@ -11831,7 +12580,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_59standard_t(PyObject *__pyx_v_s
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__df,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- /* "mtrand.pyx":2202
+ /* "mtrand.pyx":2232
* return cont0_array(self.internal_state, rk_standard_cauchy, size)
*
* def standard_t(self, df, size=None): # <<<<<<<<<<<<<<
@@ -11860,7 +12609,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_59standard_t(PyObject *__pyx_v_s
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_t") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2202; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_t") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2232; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -11875,7 +12624,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_59standard_t(PyObject *__pyx_v_s
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("standard_t", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2202; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("standard_t", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2232; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.standard_t", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -11901,7 +12650,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("standard_t", 0);
- /* "mtrand.pyx":2290
+ /* "mtrand.pyx":2320
* cdef double fdf
*
* fdf = PyFloat_AsDouble(df) # <<<<<<<<<<<<<<
@@ -11910,7 +12659,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m
*/
__pyx_v_fdf = PyFloat_AsDouble(__pyx_v_df);
- /* "mtrand.pyx":2291
+ /* "mtrand.pyx":2321
*
* fdf = PyFloat_AsDouble(df)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -11920,7 +12669,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":2292
+ /* "mtrand.pyx":2322
* fdf = PyFloat_AsDouble(df)
* if not PyErr_Occurred():
* if fdf <= 0: # <<<<<<<<<<<<<<
@@ -11930,23 +12679,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m
__pyx_t_1 = (__pyx_v_fdf <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":2293
+ /* "mtrand.pyx":2323
* if not PyErr_Occurred():
* if fdf <= 0:
* raise ValueError("df <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_standard_t, size, fdf)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_82), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2293; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_87), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2293; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":2294
+ /* "mtrand.pyx":2324
* if fdf <= 0:
* raise ValueError("df <= 0")
* return cont1_array_sc(self.internal_state, rk_standard_t, size, fdf) # <<<<<<<<<<<<<<
@@ -11954,7 +12703,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_standard_t, __pyx_v_size, __pyx_v_fdf); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2294; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_standard_t, __pyx_v_size, __pyx_v_fdf); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2324; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -11963,7 +12712,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m
}
__pyx_L3:;
- /* "mtrand.pyx":2296
+ /* "mtrand.pyx":2326
* return cont1_array_sc(self.internal_state, rk_standard_t, size, fdf)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -11972,80 +12721,82 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m
*/
PyErr_Clear();
- /* "mtrand.pyx":2298
+ /* "mtrand.pyx":2328
* PyErr_Clear()
*
* odf = <ndarray> PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2298; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2328; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_odf = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_odf = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":2299
+ /* "mtrand.pyx":2329
*
* odf = <ndarray> PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odf, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("df <= 0")
* return cont1_array(self.internal_state, rk_standard_t, size, odf)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2299; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2299; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2299; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2299; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2299; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2299; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_odf));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_odf));
__Pyx_GIVEREF(((PyObject *)__pyx_v_odf));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2299; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2299; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2299; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2299; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2329; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":2300
+ /* "mtrand.pyx":2330
* odf = <ndarray> PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_standard_t, size, odf)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_83), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2300; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2300; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_88), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2330; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2330; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":2301
+ /* "mtrand.pyx":2331
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 0")
* return cont1_array(self.internal_state, rk_standard_t, size, odf) # <<<<<<<<<<<<<<
@@ -12053,10 +12804,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m
* def vonmises(self, mu, kappa, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_standard_t, __pyx_v_size, __pyx_v_odf); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2301; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_standard_t, __pyx_v_size, __pyx_v_odf); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2331; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
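
Besides the renumbering, the standard_t hunks show the regenerated <ndarray> cast: instead of INCREF'ing through an immediate (PyArrayObject *) cast, the new code copies the reference into a temporary (__pyx_t_3), INCREFs it, releases the original, and only then assigns the cast pointer. The control flow being compiled is the usual scalar-then-array dispatch from mtrand.pyx; a minimal Python sketch follows (hypothetical helper name; float() stands in loosely for PyFloat_AsDouble plus the PyErr_Occurred check):

import numpy as np

def _standard_t_dispatch(df):
    try:
        fdf = float(df)                # scalar fast path
    except (TypeError, ValueError):
        fdf = None                     # fall through to the array path
    if fdf is not None:
        if fdf <= 0:
            raise ValueError("df <= 0")
        return "scalar", fdf           # -> cont1_array_sc(..., rk_standard_t, size, fdf)
    odf = np.asarray(df, dtype=np.double)
    if np.any(np.less_equal(odf, 0.0)):
        raise ValueError("df <= 0")
    return "array", odf                # -> cont1_array(..., rk_standard_t, size, odf)
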
@@ -12082,6 +12833,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_61vonmises(PyObject *__pyx_v_sel
PyObject *__pyx_v_mu = 0;
PyObject *__pyx_v_kappa = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("vonmises (wrapper)", 0);
@@ -12089,7 +12843,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_61vonmises(PyObject *__pyx_v_sel
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__mu,&__pyx_n_s__kappa,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- /* "mtrand.pyx":2303
+ /* "mtrand.pyx":2333
* return cont1_array(self.internal_state, rk_standard_t, size, odf)
*
* def vonmises(self, mu, kappa, size=None): # <<<<<<<<<<<<<<
@@ -12115,7 +12869,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_61vonmises(PyObject *__pyx_v_sel
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__kappa)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("vonmises", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2303; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("vonmises", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2333; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
@@ -12124,7 +12878,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_61vonmises(PyObject *__pyx_v_sel
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vonmises") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2303; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vonmises") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2333; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -12141,7 +12895,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_61vonmises(PyObject *__pyx_v_sel
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("vonmises", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2303; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("vonmises", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2333; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.vonmises", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -12169,7 +12923,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("vonmises", 0);
- /* "mtrand.pyx":2382
+ /* "mtrand.pyx":2412
* cdef double fmu, fkappa
*
* fmu = PyFloat_AsDouble(mu) # <<<<<<<<<<<<<<
@@ -12178,7 +12932,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr
*/
__pyx_v_fmu = PyFloat_AsDouble(__pyx_v_mu);
- /* "mtrand.pyx":2383
+ /* "mtrand.pyx":2413
*
* fmu = PyFloat_AsDouble(mu)
* fkappa = PyFloat_AsDouble(kappa) # <<<<<<<<<<<<<<
@@ -12187,7 +12941,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr
*/
__pyx_v_fkappa = PyFloat_AsDouble(__pyx_v_kappa);
- /* "mtrand.pyx":2384
+ /* "mtrand.pyx":2414
* fmu = PyFloat_AsDouble(mu)
* fkappa = PyFloat_AsDouble(kappa)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -12197,7 +12951,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":2385
+ /* "mtrand.pyx":2415
* fkappa = PyFloat_AsDouble(kappa)
* if not PyErr_Occurred():
* if fkappa < 0: # <<<<<<<<<<<<<<
@@ -12207,23 +12961,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr
__pyx_t_1 = (__pyx_v_fkappa < 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":2386
+ /* "mtrand.pyx":2416
* if not PyErr_Occurred():
* if fkappa < 0:
* raise ValueError("kappa < 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, fkappa)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_85), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_90), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":2387
+ /* "mtrand.pyx":2417
* if fkappa < 0:
* raise ValueError("kappa < 0")
* return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, fkappa) # <<<<<<<<<<<<<<
@@ -12231,7 +12985,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_vonmises, __pyx_v_size, __pyx_v_fmu, __pyx_v_fkappa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_vonmises, __pyx_v_size, __pyx_v_fmu, __pyx_v_fkappa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2417; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -12240,7 +12994,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr
}
__pyx_L3:;
- /* "mtrand.pyx":2389
+ /* "mtrand.pyx":2419
* return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, fkappa)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -12249,52 +13003,56 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr
*/
PyErr_Clear();
- /* "mtrand.pyx":2391
+ /* "mtrand.pyx":2421
* PyErr_Clear()
*
* omu = <ndarray> PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* okappa = <ndarray> PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less(okappa, 0.0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2391; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2421; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_omu = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_omu = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":2392
+ /* "mtrand.pyx":2422
*
* omu = <ndarray> PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* okappa = <ndarray> PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less(okappa, 0.0)):
* raise ValueError("kappa < 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2422; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_okappa = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":2393
+ /* "mtrand.pyx":2423
* omu = <ndarray> PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* okappa = <ndarray> PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less(okappa, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("kappa < 0")
* return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_okappa));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_okappa));
@@ -12302,40 +13060,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2393; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2423; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":2394
+ /* "mtrand.pyx":2424
* okappa = <ndarray> PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less(okappa, 0.0)):
* raise ValueError("kappa < 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_86), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_91), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2424; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2424; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":2395
+ /* "mtrand.pyx":2425
* if np.any(np.less(okappa, 0.0)):
* raise ValueError("kappa < 0")
* return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa) # <<<<<<<<<<<<<<
@@ -12343,7 +13101,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr
* def pareto(self, a, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_vonmises, __pyx_v_size, __pyx_v_omu, __pyx_v_okappa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_vonmises, __pyx_v_size, __pyx_v_omu, __pyx_v_okappa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2425; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
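
vonmises repeats the same regeneration pattern for a two-parameter distribution; note that, per the quoted .pyx, only kappa is range-checked, and with strict np.less rather than the np.less_equal used elsewhere, so kappa == 0 is accepted. A sketch of the array-path checks (hypothetical helper name):

import numpy as np

def _vonmises_array_checks(mu, kappa):
    omu = np.asarray(mu, dtype=np.double)
    okappa = np.asarray(kappa, dtype=np.double)
    if np.any(np.less(okappa, 0.0)):   # strict comparison: kappa == 0 passes
        raise ValueError("kappa < 0")
    # mtrand.pyx then returns cont2_array(self.internal_state,
    # rk_vonmises, size, omu, okappa).
    return omu, okappa
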
@@ -12372,6 +13130,9 @@ static char __pyx_doc_6mtrand_11RandomState_62pareto[] = "\n pareto(a, si
static PyObject *__pyx_pw_6mtrand_11RandomState_63pareto(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_a = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("pareto (wrapper)", 0);
@@ -12379,7 +13140,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_63pareto(PyObject *__pyx_v_self,
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__a,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- /* "mtrand.pyx":2397
+ /* "mtrand.pyx":2427
* return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa)
*
* def pareto(self, a, size=None): # <<<<<<<<<<<<<<
@@ -12408,7 +13169,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_63pareto(PyObject *__pyx_v_self,
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "pareto") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2397; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "pareto") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2427; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -12423,7 +13184,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_63pareto(PyObject *__pyx_v_self,
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("pareto", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2397; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("pareto", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2427; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.pareto", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -12449,7 +13210,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pareto", 0);
- /* "mtrand.pyx":2480
+ /* "mtrand.pyx":2510
* cdef double fa
*
* fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<<
@@ -12458,7 +13219,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran
*/
__pyx_v_fa = PyFloat_AsDouble(__pyx_v_a);
- /* "mtrand.pyx":2481
+ /* "mtrand.pyx":2511
*
* fa = PyFloat_AsDouble(a)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -12468,7 +13229,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":2482
+ /* "mtrand.pyx":2512
* fa = PyFloat_AsDouble(a)
* if not PyErr_Occurred():
* if fa <= 0: # <<<<<<<<<<<<<<
@@ -12478,23 +13239,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran
__pyx_t_1 = (__pyx_v_fa <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":2483
+ /* "mtrand.pyx":2513
* if not PyErr_Occurred():
* if fa <= 0:
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_pareto, size, fa)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_87), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2483; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_92), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2483; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":2484
+ /* "mtrand.pyx":2514
* if fa <= 0:
* raise ValueError("a <= 0")
* return cont1_array_sc(self.internal_state, rk_pareto, size, fa) # <<<<<<<<<<<<<<
@@ -12502,7 +13263,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_pareto, __pyx_v_size, __pyx_v_fa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2484; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_pareto, __pyx_v_size, __pyx_v_fa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -12511,7 +13272,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran
}
__pyx_L3:;
- /* "mtrand.pyx":2486
+ /* "mtrand.pyx":2516
* return cont1_array_sc(self.internal_state, rk_pareto, size, fa)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -12520,80 +13281,82 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran
*/
PyErr_Clear();
- /* "mtrand.pyx":2488
+ /* "mtrand.pyx":2518
* PyErr_Clear()
*
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2488; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2518; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oa = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oa = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":2489
+ /* "mtrand.pyx":2519
*
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("a <= 0")
* return cont1_array(self.internal_state, rk_pareto, size, oa)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_oa));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_oa));
__Pyx_GIVEREF(((PyObject *)__pyx_v_oa));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":2490
+ /* "mtrand.pyx":2520
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_pareto, size, oa)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_88), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2490; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2490; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_93), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2520; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2520; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":2491
+ /* "mtrand.pyx":2521
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0")
* return cont1_array(self.internal_state, rk_pareto, size, oa) # <<<<<<<<<<<<<<
@@ -12601,10 +13364,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran
* def weibull(self, a, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_pareto, __pyx_v_size, __pyx_v_oa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2491; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_pareto, __pyx_v_size, __pyx_v_oa); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
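
pareto above and weibull below compile the identical one-parameter validation — only the rk_ sampler and the interned tuple constants (__pyx_k_tuple_92/93 versus 94) differ — so a single sketch covers both (hypothetical helper; float() again approximates the PyFloat_AsDouble scalar path):

import numpy as np

def _check_positive_a(a):
    try:
        fa = float(a)
    except (TypeError, ValueError):
        fa = None
    if fa is not None:
        if fa <= 0:
            raise ValueError("a <= 0")
        return fa                      # scalar path -> cont1_array_sc
    oa = np.asarray(a, dtype=np.double)
    if np.any(np.less_equal(oa, 0.0)):
        raise ValueError("a <= 0")
    return oa                          # array path -> cont1_array
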
@@ -12629,6 +13392,9 @@ static char __pyx_doc_6mtrand_11RandomState_64weibull[] = "\n weibull(a,
static PyObject *__pyx_pw_6mtrand_11RandomState_65weibull(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_a = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("weibull (wrapper)", 0);
@@ -12636,7 +13402,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_65weibull(PyObject *__pyx_v_self
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__a,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- /* "mtrand.pyx":2493
+ /* "mtrand.pyx":2523
* return cont1_array(self.internal_state, rk_pareto, size, oa)
*
* def weibull(self, a, size=None): # <<<<<<<<<<<<<<
@@ -12665,7 +13431,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_65weibull(PyObject *__pyx_v_self
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "weibull") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2493; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "weibull") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2523; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -12680,7 +13446,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_65weibull(PyObject *__pyx_v_self
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("weibull", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2493; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("weibull", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2523; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.weibull", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -12706,7 +13472,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("weibull", 0);
- /* "mtrand.pyx":2580
+ /* "mtrand.pyx":2610
* cdef double fa
*
* fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<<
@@ -12715,7 +13481,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra
*/
__pyx_v_fa = PyFloat_AsDouble(__pyx_v_a);
- /* "mtrand.pyx":2581
+ /* "mtrand.pyx":2611
*
* fa = PyFloat_AsDouble(a)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -12725,7 +13491,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":2582
+ /* "mtrand.pyx":2612
* fa = PyFloat_AsDouble(a)
* if not PyErr_Occurred():
* if fa <= 0: # <<<<<<<<<<<<<<
@@ -12735,23 +13501,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra
__pyx_t_1 = (__pyx_v_fa <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":2583
+ /* "mtrand.pyx":2613
* if not PyErr_Occurred():
* if fa <= 0:
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_weibull, size, fa)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_89), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2583; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_94), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2613; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2583; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2613; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":2584
+ /* "mtrand.pyx":2614
* if fa <= 0:
* raise ValueError("a <= 0")
* return cont1_array_sc(self.internal_state, rk_weibull, size, fa) # <<<<<<<<<<<<<<
@@ -12759,7 +13525,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_weibull, __pyx_v_size, __pyx_v_fa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2584; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_weibull, __pyx_v_size, __pyx_v_fa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2614; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -12768,7 +13534,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra
}
__pyx_L3:;
- /* "mtrand.pyx":2586
+ /* "mtrand.pyx":2616
* return cont1_array_sc(self.internal_state, rk_weibull, size, fa)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -12777,80 +13543,82 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra
*/
PyErr_Clear();
- /* "mtrand.pyx":2588
+ /* "mtrand.pyx":2618
* PyErr_Clear()
*
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2618; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oa = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oa = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":2589
+ /* "mtrand.pyx":2619
*
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("a <= 0")
* return cont1_array(self.internal_state, rk_weibull, size, oa)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_oa));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_oa));
__Pyx_GIVEREF(((PyObject *)__pyx_v_oa));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2619; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":2590
+ /* "mtrand.pyx":2620
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_weibull, size, oa)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_90), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_95), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2620; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2620; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":2591
+ /* "mtrand.pyx":2621
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0")
* return cont1_array(self.internal_state, rk_weibull, size, oa) # <<<<<<<<<<<<<<
@@ -12858,10 +13626,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra
* def power(self, a, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_weibull, __pyx_v_size, __pyx_v_oa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2591; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_weibull, __pyx_v_size, __pyx_v_oa); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2621; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
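
Both sides of this hunk implement the control flow spelled out in the quoted mtrand.pyx lines (2610-2621 on the new numbering): read the parameter as a scalar, validate it, and fall back to an elementwise array path when `PyFloat_AsDouble` failed. A hedged C sketch of that dispatch, with mtrand's internal `cont1_array`/`cont1_array_sc` helpers and randomkit's `rk_weibull` declared as assumed externs:

    #include <Python.h>
    #include <numpy/arrayobject.h>  /* assumes import_array() ran at init */

    /* Assumed declarations matching mtrand's internals (defined
       elsewhere in mtrand.c / randomkit, not provided here): */
    typedef struct rk_state_ rk_state;
    extern double rk_weibull(rk_state *state, double a);
    extern PyObject *cont1_array_sc(rk_state *state,
                                    double (*func)(rk_state *, double),
                                    PyObject *size, double a);
    extern PyObject *cont1_array(rk_state *state,
                                 double (*func)(rk_state *, double),
                                 PyObject *size, PyArrayObject *oa);

    static PyObject *weibull_like(rk_state *state, PyObject *a, PyObject *size)
    {
        /* scalar fast path */
        double fa = PyFloat_AsDouble(a);
        if (!PyErr_Occurred()) {
            if (fa <= 0.0) {
                PyErr_SetString(PyExc_ValueError, "a <= 0");
                return NULL;
            }
            return cont1_array_sc(state, rk_weibull, size, fa);
        }
        /* 'a' was not scalar-like: clear the error, take the array path */
        PyErr_Clear();
        PyArrayObject *oa = (PyArrayObject *)
            PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED);
        if (oa == NULL)
            return NULL;
        /* the pyx source also rejects np.any(np.less_equal(oa, 0.0)) here */
        PyObject *res = cont1_array(state, rk_weibull, size, oa);
        Py_DECREF(oa);
        return res;
    }
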
@@ -12886,6 +13654,9 @@ static char __pyx_doc_6mtrand_11RandomState_66power[] = "\n power(a, size
static PyObject *__pyx_pw_6mtrand_11RandomState_67power(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_a = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("power (wrapper)", 0);
@@ -12893,7 +13664,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_67power(PyObject *__pyx_v_self,
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__a,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- /* "mtrand.pyx":2593
+ /* "mtrand.pyx":2623
* return cont1_array(self.internal_state, rk_weibull, size, oa)
*
* def power(self, a, size=None): # <<<<<<<<<<<<<<
@@ -12922,7 +13693,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_67power(PyObject *__pyx_v_self,
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "power") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2593; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "power") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2623; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -12937,7 +13708,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_67power(PyObject *__pyx_v_self,
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("power", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2593; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("power", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2623; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.power", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -12963,7 +13734,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("power", 0);
- /* "mtrand.pyx":2689
+ /* "mtrand.pyx":2719
* cdef double fa
*
* fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<<
@@ -12972,7 +13743,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand
*/
__pyx_v_fa = PyFloat_AsDouble(__pyx_v_a);
- /* "mtrand.pyx":2690
+ /* "mtrand.pyx":2720
*
* fa = PyFloat_AsDouble(a)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -12982,7 +13753,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":2691
+ /* "mtrand.pyx":2721
* fa = PyFloat_AsDouble(a)
* if not PyErr_Occurred():
* if fa <= 0: # <<<<<<<<<<<<<<
@@ -12992,23 +13763,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand
__pyx_t_1 = (__pyx_v_fa <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":2692
+ /* "mtrand.pyx":2722
* if not PyErr_Occurred():
* if fa <= 0:
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_power, size, fa)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_91), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_96), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2722; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2722; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":2693
+ /* "mtrand.pyx":2723
* if fa <= 0:
* raise ValueError("a <= 0")
* return cont1_array_sc(self.internal_state, rk_power, size, fa) # <<<<<<<<<<<<<<
@@ -13016,7 +13787,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_power, __pyx_v_size, __pyx_v_fa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_power, __pyx_v_size, __pyx_v_fa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2723; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -13025,7 +13796,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand
}
__pyx_L3:;
- /* "mtrand.pyx":2695
+ /* "mtrand.pyx":2725
* return cont1_array_sc(self.internal_state, rk_power, size, fa)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -13034,80 +13805,82 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand
*/
PyErr_Clear();
- /* "mtrand.pyx":2697
+ /* "mtrand.pyx":2727
* PyErr_Clear()
*
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2697; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2727; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oa = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oa = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":2698
+ /* "mtrand.pyx":2728
*
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("a <= 0")
* return cont1_array(self.internal_state, rk_power, size, oa)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2728; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2728; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2728; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2728; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2728; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2728; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_oa));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_oa));
__Pyx_GIVEREF(((PyObject *)__pyx_v_oa));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2728; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2728; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2728; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2728; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":2699
+ /* "mtrand.pyx":2729
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_power, size, oa)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_92), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2699; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2699; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_97), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2729; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2729; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":2700
+ /* "mtrand.pyx":2730
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0")
* return cont1_array(self.internal_state, rk_power, size, oa) # <<<<<<<<<<<<<<
@@ -13115,10 +13888,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand
* def laplace(self, loc=0.0, scale=1.0, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_power, __pyx_v_size, __pyx_v_oa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2700; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_power, __pyx_v_size, __pyx_v_oa); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2730; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
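
Throughout these hunks the regenerated code also swaps generic lookups for specialized helpers: `__Pyx_GetName(__pyx_m, ...)` becomes `__Pyx_GetModuleGlobalName(...)` and `PyObject_GetAttr` becomes `__Pyx_PyObject_GetAttrStr`. The helpers' bodies are not part of this diff; the following is only a plausible sketch of what a module-global lookup of this kind does (module dict first, then builtins), with `module_dict` standing in for the generated module's cached dict:

    #include <Python.h>

    /* Plausible shape of a __Pyx_GetModuleGlobalName-style helper.
       Exact behaviour differs across Cython versions; illustration only. */
    static PyObject *get_module_global(PyObject *module_dict, PyObject *name)
    {
        PyObject *value = PyDict_GetItem(module_dict, name); /* borrowed */
        if (value != NULL) {
            Py_INCREF(value);
            return value;
        }
        /* fall back to builtins, mirroring global-name resolution */
        value = PyObject_GetItem(PyEval_GetBuiltins(), name);
        if (value == NULL && PyErr_ExceptionMatches(PyExc_KeyError))
            PyErr_Format(PyExc_NameError, "name '%U' is not defined", name);
        return value;
    }
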
@@ -13144,16 +13917,19 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_69laplace(PyObject *__pyx_v_self
PyObject *__pyx_v_loc = 0;
PyObject *__pyx_v_scale = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("laplace (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__loc,&__pyx_n_s__scale,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- values[0] = __pyx_k_93;
- values[1] = __pyx_k_94;
+ values[0] = __pyx_k_98;
+ values[1] = __pyx_k_99;
- /* "mtrand.pyx":2702
+ /* "mtrand.pyx":2732
* return cont1_array(self.internal_state, rk_power, size, oa)
*
* def laplace(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<<
@@ -13190,7 +13966,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_69laplace(PyObject *__pyx_v_self
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "laplace") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2702; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "laplace") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2732; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -13207,7 +13983,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_69laplace(PyObject *__pyx_v_self
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("laplace", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2702; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("laplace", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2732; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.laplace", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -13235,7 +14011,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("laplace", 0);
- /* "mtrand.pyx":2778
+ /* "mtrand.pyx":2808
* cdef double floc, fscale
*
* floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<<
@@ -13244,7 +14020,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra
*/
__pyx_v_floc = PyFloat_AsDouble(__pyx_v_loc);
- /* "mtrand.pyx":2779
+ /* "mtrand.pyx":2809
*
* floc = PyFloat_AsDouble(loc)
* fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<<
@@ -13253,7 +14029,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra
*/
__pyx_v_fscale = PyFloat_AsDouble(__pyx_v_scale);
- /* "mtrand.pyx":2780
+ /* "mtrand.pyx":2810
* floc = PyFloat_AsDouble(loc)
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -13263,7 +14039,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":2781
+ /* "mtrand.pyx":2811
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred():
* if fscale <= 0: # <<<<<<<<<<<<<<
@@ -13273,23 +14049,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra
__pyx_t_1 = (__pyx_v_fscale <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":2782
+ /* "mtrand.pyx":2812
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_laplace, size, floc, fscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_95), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2782; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_100), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2812; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2782; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2812; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":2783
+ /* "mtrand.pyx":2813
* if fscale <= 0:
* raise ValueError("scale <= 0")
* return cont2_array_sc(self.internal_state, rk_laplace, size, floc, fscale) # <<<<<<<<<<<<<<
@@ -13297,7 +14073,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_laplace, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2783; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_laplace, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -13306,7 +14082,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra
}
__pyx_L3:;
- /* "mtrand.pyx":2785
+ /* "mtrand.pyx":2815
* return cont2_array_sc(self.internal_state, rk_laplace, size, floc, fscale)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -13315,52 +14091,52 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra
*/
PyErr_Clear();
- /* "mtrand.pyx":2786
+ /* "mtrand.pyx":2816
*
* PyErr_Clear()
* oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2786; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2786; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_oloc = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
- /* "mtrand.pyx":2787
+ /* "mtrand.pyx":2817
* PyErr_Clear()
* oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2787; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2817; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2787; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2817; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_oscale = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
- /* "mtrand.pyx":2788
+ /* "mtrand.pyx":2818
* oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("scale <= 0")
* return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2818; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2818; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2818; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2818; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2818; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2818; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_oscale));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_oscale));
@@ -13368,40 +14144,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2818; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2818; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2818; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2818; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":2789
+ /* "mtrand.pyx":2819
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_96), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2789; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_101), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2819; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2789; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2819; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":2790
+ /* "mtrand.pyx":2820
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0")
* return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale) # <<<<<<<<<<<<<<
@@ -13409,7 +14185,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra
* def gumbel(self, loc=0.0, scale=1.0, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_laplace, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_laplace, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2820; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
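
The two-parameter distributions (laplace above, gumbel and logistic below) follow the same scalar-then-array dispatch via `cont2_array_sc`/`cont2_array`, but their array path assigns `PyArray_FROM_OTF(...)` to typed `ndarray` variables, so the generated code keeps a checked downcast (the `__Pyx_TypeTest` lines at mtrand.pyx:2816-2817). A hedged sketch of that check, with `ndarray_type` standing in for the module's cached `__pyx_ptype_6mtrand_ndarray`:

    #include <Python.h>

    /* Sketch of a __Pyx_TypeTest-style check guarding the oloc/oscale
       assignments: returns 1 if obj may be stored in the typed variable,
       0 with a TypeError set otherwise.  None is accepted, as in the
       generated code. */
    static int check_is_ndarray(PyObject *obj, PyTypeObject *ndarray_type)
    {
        if (obj == Py_None || PyObject_TypeCheck(obj, ndarray_type))
            return 1;
        PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                     Py_TYPE(obj)->tp_name, ndarray_type->tp_name);
        return 0;
    }
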
@@ -13439,16 +14215,19 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_71gumbel(PyObject *__pyx_v_self,
PyObject *__pyx_v_loc = 0;
PyObject *__pyx_v_scale = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("gumbel (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__loc,&__pyx_n_s__scale,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- values[0] = __pyx_k_97;
- values[1] = __pyx_k_98;
+ values[0] = __pyx_k_102;
+ values[1] = __pyx_k_103;
- /* "mtrand.pyx":2792
+ /* "mtrand.pyx":2822
* return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale)
*
* def gumbel(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<<
@@ -13485,7 +14264,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_71gumbel(PyObject *__pyx_v_self,
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gumbel") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2792; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gumbel") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2822; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -13502,7 +14281,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_71gumbel(PyObject *__pyx_v_self,
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("gumbel", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2792; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("gumbel", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2822; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.gumbel", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -13530,7 +14309,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("gumbel", 0);
- /* "mtrand.pyx":2909
+ /* "mtrand.pyx":2939
* cdef double floc, fscale
*
* floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<<
@@ -13539,7 +14318,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran
*/
__pyx_v_floc = PyFloat_AsDouble(__pyx_v_loc);
- /* "mtrand.pyx":2910
+ /* "mtrand.pyx":2940
*
* floc = PyFloat_AsDouble(loc)
* fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<<
@@ -13548,7 +14327,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran
*/
__pyx_v_fscale = PyFloat_AsDouble(__pyx_v_scale);
- /* "mtrand.pyx":2911
+ /* "mtrand.pyx":2941
* floc = PyFloat_AsDouble(loc)
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -13558,7 +14337,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":2912
+ /* "mtrand.pyx":2942
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred():
* if fscale <= 0: # <<<<<<<<<<<<<<
@@ -13568,23 +14347,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran
__pyx_t_1 = (__pyx_v_fscale <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":2913
+ /* "mtrand.pyx":2943
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, fscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_99), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2913; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_104), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2913; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":2914
+ /* "mtrand.pyx":2944
* if fscale <= 0:
* raise ValueError("scale <= 0")
* return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, fscale) # <<<<<<<<<<<<<<
@@ -13592,7 +14371,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_gumbel, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_gumbel, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -13601,7 +14380,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran
}
__pyx_L3:;
- /* "mtrand.pyx":2916
+ /* "mtrand.pyx":2946
* return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, fscale)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -13610,52 +14389,52 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran
*/
PyErr_Clear();
- /* "mtrand.pyx":2917
+ /* "mtrand.pyx":2947
*
* PyErr_Clear()
* oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2917; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2947; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2917; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2947; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_oloc = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
- /* "mtrand.pyx":2918
+ /* "mtrand.pyx":2948
* PyErr_Clear()
* oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_oscale = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
- /* "mtrand.pyx":2919
+ /* "mtrand.pyx":2949
* oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("scale <= 0")
* return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_oscale));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_oscale));
@@ -13663,40 +14442,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":2920
+ /* "mtrand.pyx":2950
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_100), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_105), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":2921
+ /* "mtrand.pyx":2951
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0")
* return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale) # <<<<<<<<<<<<<<
@@ -13704,7 +14483,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran
* def logistic(self, loc=0.0, scale=1.0, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_gumbel, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2921; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_gumbel, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2951; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -13734,16 +14513,19 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_73logistic(PyObject *__pyx_v_sel
PyObject *__pyx_v_loc = 0;
PyObject *__pyx_v_scale = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("logistic (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__loc,&__pyx_n_s__scale,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- values[0] = __pyx_k_101;
- values[1] = __pyx_k_102;
+ values[0] = __pyx_k_106;
+ values[1] = __pyx_k_107;
- /* "mtrand.pyx":2923
+ /* "mtrand.pyx":2953
* return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale)
*
* def logistic(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<<
@@ -13780,7 +14562,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_73logistic(PyObject *__pyx_v_sel
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "logistic") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2923; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "logistic") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2953; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -13797,7 +14579,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_73logistic(PyObject *__pyx_v_sel
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("logistic", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2923; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("logistic", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2953; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.logistic", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -13825,7 +14607,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("logistic", 0);
- /* "mtrand.pyx":2997
+ /* "mtrand.pyx":3027
* cdef double floc, fscale
*
* floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<<
@@ -13834,7 +14616,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr
*/
__pyx_v_floc = PyFloat_AsDouble(__pyx_v_loc);
- /* "mtrand.pyx":2998
+ /* "mtrand.pyx":3028
*
* floc = PyFloat_AsDouble(loc)
* fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<<
@@ -13843,7 +14625,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr
*/
__pyx_v_fscale = PyFloat_AsDouble(__pyx_v_scale);
- /* "mtrand.pyx":2999
+ /* "mtrand.pyx":3029
* floc = PyFloat_AsDouble(loc)
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -13853,7 +14635,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":3000
+ /* "mtrand.pyx":3030
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred():
* if fscale <= 0: # <<<<<<<<<<<<<<
@@ -13863,23 +14645,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr
__pyx_t_1 = (__pyx_v_fscale <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3001
+ /* "mtrand.pyx":3031
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_logistic, size, floc, fscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_103), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3001; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_108), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3031; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3001; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3031; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":3002
+ /* "mtrand.pyx":3032
* if fscale <= 0:
* raise ValueError("scale <= 0")
* return cont2_array_sc(self.internal_state, rk_logistic, size, floc, fscale) # <<<<<<<<<<<<<<
@@ -13887,7 +14669,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_logistic, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3002; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_logistic, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3032; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -13896,7 +14678,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr
}
__pyx_L3:;
- /* "mtrand.pyx":3004
+ /* "mtrand.pyx":3034
* return cont2_array_sc(self.internal_state, rk_logistic, size, floc, fscale)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -13905,52 +14687,52 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr
*/
PyErr_Clear();
- /* "mtrand.pyx":3005
+ /* "mtrand.pyx":3035
*
* PyErr_Clear()
* oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3005; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3005; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_oloc = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
- /* "mtrand.pyx":3006
+ /* "mtrand.pyx":3036
* PyErr_Clear()
* oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3006; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3036; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3006; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3036; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_oscale = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
- /* "mtrand.pyx":3007
+ /* "mtrand.pyx":3037
* oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("scale <= 0")
* return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_oscale));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_oscale));
@@ -13958,40 +14740,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3008
+ /* "mtrand.pyx":3038
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_104), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3008; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_109), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3008; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":3009
+ /* "mtrand.pyx":3039
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0")
* return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale) # <<<<<<<<<<<<<<
@@ -13999,7 +14781,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr
* def lognormal(self, mean=0.0, sigma=1.0, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_logistic, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3009; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_logistic, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
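/*
 * A minimal sketch, assuming only CPython calls already used in this
 * file (PyFloat_AsDouble, PyErr_Occurred, PyErr_Clear, PyErr_SetString),
 * of the scalar fast path that the logistic/lognormal/rayleigh/wald
 * hunks above all share, as the embedded mtrand.pyx excerpts show:
 * convert once, use PyErr_Occurred() as the "was it really a scalar?"
 * gate, validate, then dispatch to the *_sc variant. The helper name
 * is hypothetical, not part of the generated file.
 */
#include <Python.h>

/* Returns 1 if `scale` converted to a valid positive double (stored in
 * *out), 0 if it is not scalar-convertible (caller falls back to the
 * array path), -1 with ValueError set if it converted but failed. */
static int
scalar_scale_fast_path(PyObject *scale, double *out)
{
    double fscale = PyFloat_AsDouble(scale);
    if (PyErr_Occurred()) {
        PyErr_Clear();          /* not a scalar: take the array path */
        return 0;
    }
    if (fscale <= 0.0) {
        PyErr_SetString(PyExc_ValueError, "scale <= 0");
        return -1;
    }
    *out = fscale;
    return 1;
}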
@@ -14029,16 +14811,19 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_75lognormal(PyObject *__pyx_v_se
PyObject *__pyx_v_mean = 0;
PyObject *__pyx_v_sigma = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("lognormal (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__mean,&__pyx_n_s__sigma,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- values[0] = __pyx_k_105;
- values[1] = __pyx_k_106;
+ values[0] = __pyx_k_110;
+ values[1] = __pyx_k_111;
- /* "mtrand.pyx":3011
+ /* "mtrand.pyx":3041
* return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale)
*
* def lognormal(self, mean=0.0, sigma=1.0, size=None): # <<<<<<<<<<<<<<
@@ -14075,7 +14860,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_75lognormal(PyObject *__pyx_v_se
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lognormal") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3011; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lognormal") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3041; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -14092,7 +14877,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_75lognormal(PyObject *__pyx_v_se
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("lognormal", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3011; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("lognormal", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3041; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.lognormal", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -14120,7 +14905,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("lognormal", 0);
- /* "mtrand.pyx":3116
+ /* "mtrand.pyx":3146
* cdef double fmean, fsigma
*
* fmean = PyFloat_AsDouble(mean) # <<<<<<<<<<<<<<
@@ -14129,7 +14914,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt
*/
__pyx_v_fmean = PyFloat_AsDouble(__pyx_v_mean);
- /* "mtrand.pyx":3117
+ /* "mtrand.pyx":3147
*
* fmean = PyFloat_AsDouble(mean)
* fsigma = PyFloat_AsDouble(sigma) # <<<<<<<<<<<<<<
@@ -14138,7 +14923,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt
*/
__pyx_v_fsigma = PyFloat_AsDouble(__pyx_v_sigma);
- /* "mtrand.pyx":3119
+ /* "mtrand.pyx":3149
* fsigma = PyFloat_AsDouble(sigma)
*
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -14148,7 +14933,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":3120
+ /* "mtrand.pyx":3150
*
* if not PyErr_Occurred():
* if fsigma <= 0: # <<<<<<<<<<<<<<
@@ -14158,23 +14943,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt
__pyx_t_1 = (__pyx_v_fsigma <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3121
+ /* "mtrand.pyx":3151
* if not PyErr_Occurred():
* if fsigma <= 0:
* raise ValueError("sigma <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_lognormal, size, fmean, fsigma)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_108), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_113), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":3122
+ /* "mtrand.pyx":3152
* if fsigma <= 0:
* raise ValueError("sigma <= 0")
* return cont2_array_sc(self.internal_state, rk_lognormal, size, fmean, fsigma) # <<<<<<<<<<<<<<
@@ -14182,7 +14967,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_lognormal, __pyx_v_size, __pyx_v_fmean, __pyx_v_fsigma); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_lognormal, __pyx_v_size, __pyx_v_fmean, __pyx_v_fsigma); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -14191,7 +14976,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt
}
__pyx_L3:;
- /* "mtrand.pyx":3124
+ /* "mtrand.pyx":3154
* return cont2_array_sc(self.internal_state, rk_lognormal, size, fmean, fsigma)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -14200,52 +14985,52 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt
*/
PyErr_Clear();
- /* "mtrand.pyx":3126
+ /* "mtrand.pyx":3156
* PyErr_Clear()
*
* omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(osigma, 0.0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_omean = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
- /* "mtrand.pyx":3127
+ /* "mtrand.pyx":3157
*
* omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(osigma, 0.0)):
* raise ValueError("sigma <= 0.0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_osigma = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
- /* "mtrand.pyx":3128
+ /* "mtrand.pyx":3158
* omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(osigma, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("sigma <= 0.0")
* return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_osigma));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_osigma));
@@ -14253,40 +15038,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3129
+ /* "mtrand.pyx":3159
* osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(osigma, 0.0)):
* raise ValueError("sigma <= 0.0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_110), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_115), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3159; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3159; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":3130
+ /* "mtrand.pyx":3160
* if np.any(np.less_equal(osigma, 0.0)):
* raise ValueError("sigma <= 0.0")
* return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma) # <<<<<<<<<<<<<<
@@ -14294,7 +15079,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt
* def rayleigh(self, scale=1.0, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_lognormal, __pyx_v_size, __pyx_v_omean, __pyx_v_osigma); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_lognormal, __pyx_v_size, __pyx_v_omean, __pyx_v_osigma); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3160; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
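/*
 * A minimal sketch, under the same assumptions plus an initialized
 * numpy C API (import_array() already called), of the array fall-back
 * seen above: coerce with PyArray_FROM_OTF and reject non-positive
 * entries. The generated code only requires NPY_ARRAY_ALIGNED and
 * delegates the check to np.any(np.less_equal(...)); this hypothetical
 * helper requests a contiguous array so it can scan the buffer itself.
 */
#include <Python.h>
#include <numpy/arrayobject.h>

/* Returns 1 if any element of `obj` is <= 0, 0 if none, -1 on error. */
static int
any_nonpositive(PyObject *obj)
{
    PyArrayObject *arr = (PyArrayObject *)PyArray_FROM_OTF(
            obj, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY);
    if (arr == NULL) {
        return -1;
    }
    const double *data = (const double *)PyArray_DATA(arr);
    npy_intp i, n = PyArray_SIZE(arr);
    int found = 0;
    for (i = 0; i < n; i++) {
        if (data[i] <= 0.0) {
            found = 1;
            break;
        }
    }
    Py_DECREF(arr);
    return found;
}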
@@ -14323,15 +15108,18 @@ static char __pyx_doc_6mtrand_11RandomState_76rayleigh[] = "\n rayleigh(s
static PyObject *__pyx_pw_6mtrand_11RandomState_77rayleigh(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_scale = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("rayleigh (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__scale,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- values[0] = __pyx_k_111;
+ values[0] = __pyx_k_116;
- /* "mtrand.pyx":3132
+ /* "mtrand.pyx":3162
* return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma)
*
* def rayleigh(self, scale=1.0, size=None): # <<<<<<<<<<<<<<
@@ -14362,7 +15150,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_77rayleigh(PyObject *__pyx_v_sel
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "rayleigh") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3132; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "rayleigh") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3162; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -14377,7 +15165,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_77rayleigh(PyObject *__pyx_v_sel
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("rayleigh", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3132; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("rayleigh", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3162; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.rayleigh", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -14403,7 +15191,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("rayleigh", 0);
- /* "mtrand.pyx":3190
+ /* "mtrand.pyx":3220
* cdef double fscale
*
* fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<<
@@ -14412,7 +15200,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr
*/
__pyx_v_fscale = PyFloat_AsDouble(__pyx_v_scale);
- /* "mtrand.pyx":3192
+ /* "mtrand.pyx":3222
* fscale = PyFloat_AsDouble(scale)
*
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -14422,7 +15210,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":3193
+ /* "mtrand.pyx":3223
*
* if not PyErr_Occurred():
* if fscale <= 0: # <<<<<<<<<<<<<<
@@ -14432,23 +15220,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr
__pyx_t_1 = (__pyx_v_fscale <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3194
+ /* "mtrand.pyx":3224
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_rayleigh, size, fscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_112), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_117), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":3195
+ /* "mtrand.pyx":3225
* if fscale <= 0:
* raise ValueError("scale <= 0")
* return cont1_array_sc(self.internal_state, rk_rayleigh, size, fscale) # <<<<<<<<<<<<<<
@@ -14456,7 +15244,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_rayleigh, __pyx_v_size, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3195; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_rayleigh, __pyx_v_size, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -14465,7 +15253,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr
}
__pyx_L3:;
- /* "mtrand.pyx":3197
+ /* "mtrand.pyx":3227
* return cont1_array_sc(self.internal_state, rk_rayleigh, size, fscale)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -14474,80 +15262,82 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr
*/
PyErr_Clear();
- /* "mtrand.pyx":3199
+ /* "mtrand.pyx":3229
* PyErr_Clear()
*
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0.0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3199; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3229; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oscale = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oscale = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":3200
+ /* "mtrand.pyx":3230
*
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("scale <= 0.0")
* return cont1_array(self.internal_state, rk_rayleigh, size, oscale)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_oscale));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_oscale));
__Pyx_GIVEREF(((PyObject *)__pyx_v_oscale));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3200; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3201
+ /* "mtrand.pyx":3231
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0.0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_rayleigh, size, oscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_114), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_119), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3231; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3231; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":3202
+ /* "mtrand.pyx":3232
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0.0")
* return cont1_array(self.internal_state, rk_rayleigh, size, oscale) # <<<<<<<<<<<<<<
@@ -14555,10 +15345,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr
* def wald(self, mean, scale, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_rayleigh, __pyx_v_size, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3202; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_rayleigh, __pyx_v_size, __pyx_v_oscale); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3232; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
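/*
 * A minimal sketch of what the repeated inline expansion above
 * computes: the truth value of np.any(np.less_equal(x, 0.0)). It uses
 * plain PyObject_GetAttrString / PyObject_CallFunctionObjArgs where
 * the regenerated file swaps __Pyx_GetName/PyObject_GetAttr for
 * Cython's __Pyx_GetModuleGlobalName and __Pyx_PyObject_GetAttrStr
 * helpers; the function name here is hypothetical.
 */
#include <Python.h>

/* Returns 1 if np.any(np.less_equal(x, 0.0)) is true, 0 if false,
 * -1 with an exception set on failure. */
static int
any_le_zero(PyObject *np_module, PyObject *x)
{
    int truth = -1;
    PyObject *less_equal = NULL, *any = NULL;
    PyObject *zero = NULL, *mask = NULL, *result = NULL;

    less_equal = PyObject_GetAttrString(np_module, "less_equal");
    if (less_equal == NULL) goto done;
    any = PyObject_GetAttrString(np_module, "any");
    if (any == NULL) goto done;
    zero = PyFloat_FromDouble(0.0);
    if (zero == NULL) goto done;
    mask = PyObject_CallFunctionObjArgs(less_equal, x, zero, NULL);
    if (mask == NULL) goto done;
    result = PyObject_CallFunctionObjArgs(any, mask, NULL);
    if (result == NULL) goto done;
    truth = PyObject_IsTrue(result);
done:
    Py_XDECREF(less_equal);
    Py_XDECREF(any);
    Py_XDECREF(zero);
    Py_XDECREF(mask);
    Py_XDECREF(result);
    return truth;
}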
@@ -14584,6 +15374,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_79wald(PyObject *__pyx_v_self, P
PyObject *__pyx_v_mean = 0;
PyObject *__pyx_v_scale = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("wald (wrapper)", 0);
@@ -14591,7 +15384,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_79wald(PyObject *__pyx_v_self, P
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__mean,&__pyx_n_s__scale,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- /* "mtrand.pyx":3204
+ /* "mtrand.pyx":3234
* return cont1_array(self.internal_state, rk_rayleigh, size, oscale)
*
* def wald(self, mean, scale, size=None): # <<<<<<<<<<<<<<
@@ -14617,7 +15410,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_79wald(PyObject *__pyx_v_self, P
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__scale)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("wald", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3204; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("wald", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3234; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
@@ -14626,7 +15419,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_79wald(PyObject *__pyx_v_self, P
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wald") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3204; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wald") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3234; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -14643,7 +15436,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_79wald(PyObject *__pyx_v_self, P
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("wald", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3204; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("wald", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3234; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.wald", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -14671,7 +15464,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("wald", 0);
- /* "mtrand.pyx":3270
+ /* "mtrand.pyx":3300
* cdef double fmean, fscale
*
* fmean = PyFloat_AsDouble(mean) # <<<<<<<<<<<<<<
@@ -14680,7 +15473,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
*/
__pyx_v_fmean = PyFloat_AsDouble(__pyx_v_mean);
- /* "mtrand.pyx":3271
+ /* "mtrand.pyx":3301
*
* fmean = PyFloat_AsDouble(mean)
* fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<<
@@ -14689,7 +15482,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
*/
__pyx_v_fscale = PyFloat_AsDouble(__pyx_v_scale);
- /* "mtrand.pyx":3272
+ /* "mtrand.pyx":3302
* fmean = PyFloat_AsDouble(mean)
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -14699,7 +15492,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":3273
+ /* "mtrand.pyx":3303
* fscale = PyFloat_AsDouble(scale)
* if not PyErr_Occurred():
* if fmean <= 0: # <<<<<<<<<<<<<<
@@ -14709,23 +15502,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
__pyx_t_1 = (__pyx_v_fmean <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3274
+ /* "mtrand.pyx":3304
* if not PyErr_Occurred():
* if fmean <= 0:
* raise ValueError("mean <= 0") # <<<<<<<<<<<<<<
* if fscale <= 0:
* raise ValueError("scale <= 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_116), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3274; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_121), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3304; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3274; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3304; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":3275
+ /* "mtrand.pyx":3305
* if fmean <= 0:
* raise ValueError("mean <= 0")
* if fscale <= 0: # <<<<<<<<<<<<<<
@@ -14735,23 +15528,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
__pyx_t_1 = (__pyx_v_fscale <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3276
+ /* "mtrand.pyx":3306
* raise ValueError("mean <= 0")
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_wald, size, fmean, fscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_117), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_122), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3306; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3306; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":3277
+ /* "mtrand.pyx":3307
* if fscale <= 0:
* raise ValueError("scale <= 0")
* return cont2_array_sc(self.internal_state, rk_wald, size, fmean, fscale) # <<<<<<<<<<<<<<
@@ -14759,7 +15552,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_wald, __pyx_v_size, __pyx_v_fmean, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3277; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_wald, __pyx_v_size, __pyx_v_fmean, __pyx_v_fscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3307; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -14768,7 +15561,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
}
__pyx_L3:;
- /* "mtrand.pyx":3279
+ /* "mtrand.pyx":3309
* return cont2_array_sc(self.internal_state, rk_wald, size, fmean, fscale)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -14777,52 +15570,52 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
*/
PyErr_Clear();
- /* "mtrand.pyx":3280
+ /* "mtrand.pyx":3310
*
* PyErr_Clear()
* omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(omean,0.0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3280; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3310; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3280; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3310; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_omean = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
- /* "mtrand.pyx":3281
+ /* "mtrand.pyx":3311
* PyErr_Clear()
* omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(omean,0.0)):
* raise ValueError("mean <= 0.0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3311; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_6mtrand_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3311; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_oscale = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
- /* "mtrand.pyx":3282
+ /* "mtrand.pyx":3312
* omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(omean,0.0)): # <<<<<<<<<<<<<<
* raise ValueError("mean <= 0.0")
* elif np.any(np.less_equal(oscale,0.0)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_omean));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_omean));
@@ -14830,58 +15623,58 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3312; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3283
+ /* "mtrand.pyx":3313
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(omean,0.0)):
* raise ValueError("mean <= 0.0") # <<<<<<<<<<<<<<
* elif np.any(np.less_equal(oscale,0.0)):
* raise ValueError("scale <= 0.0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_119), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_124), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
- /* "mtrand.pyx":3284
+ /* "mtrand.pyx":3314
* if np.any(np.less_equal(omean,0.0)):
* raise ValueError("mean <= 0.0")
* elif np.any(np.less_equal(oscale,0.0)): # <<<<<<<<<<<<<<
* raise ValueError("scale <= 0.0")
* return cont2_array(self.internal_state, rk_wald, size, omean, oscale)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_oscale));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_oscale));
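Besides renumbering the recorded .pyx lines (3282 to 3312 and so on), the regeneration swaps every __Pyx_GetName(__pyx_m, ...) lookup for __Pyx_GetModuleGlobalName(...) and every PyObject_GetAttr on an interned name for __Pyx_PyObject_GetAttrStr. The newer helper resolves a module global the way Python does: module dict first, then builtins. A simplified sketch of that lookup follows; module_dict is a hypothetical stand-in for the cached __pyx_d, and the real Cython utility code adds interning and richer error reporting:

#include <Python.h>

/* Simplified sketch of a module-global lookup in the spirit of
 * __Pyx_GetModuleGlobalName: try the module dict, then builtins. */
static PyObject *
lookup_module_global(PyObject *module_dict, PyObject *name)
{
    PyObject *value = PyDict_GetItem(module_dict, name);    /* borrowed */
    if (value == NULL)
        value = PyDict_GetItem(PyEval_GetBuiltins(), name); /* borrowed */
    if (value != NULL) {
        Py_INCREF(value);
        return value;
    }
    PyErr_SetString(PyExc_NameError, "name is not defined");
    return NULL;
}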
@@ -14889,40 +15682,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3314; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3285
+ /* "mtrand.pyx":3315
* raise ValueError("mean <= 0.0")
* elif np.any(np.less_equal(oscale,0.0)):
* raise ValueError("scale <= 0.0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_wald, size, omean, oscale)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_120), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_125), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3315; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3315; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":3286
+ /* "mtrand.pyx":3316
* elif np.any(np.less_equal(oscale,0.0)):
* raise ValueError("scale <= 0.0")
* return cont2_array(self.internal_state, rk_wald, size, omean, oscale) # <<<<<<<<<<<<<<
@@ -14930,7 +15723,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_
*
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_wald, __pyx_v_size, __pyx_v_omean, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_wald, __pyx_v_size, __pyx_v_omean, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3316; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
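This is the end of the wald fast path. Like the other continuous distributions here, wald first tries to treat both parameters as C doubles: if PyFloat_AsDouble raised nothing, it validates the scalars and calls the scalar kernel; otherwise it clears the error and redoes the validation elementwise on ndarrays, ending in the cont2_array call above. A condensed sketch of that two-stage dispatch, with hypothetical kernel names standing in for rk_wald via cont2_array_sc / cont2_array:

#include <Python.h>

/* Hypothetical stand-ins for the scalar and array kernels. */
static PyObject *scalar_kernel(double mean, double scale);
static PyObject *array_kernel(PyObject *mean, PyObject *scale);

/* Condensed sketch of mtrand's dispatch: scalar fast path if both
 * arguments coerce to C doubles, ndarray path otherwise. */
static PyObject *
wald_like(PyObject *mean, PyObject *scale)
{
    double fmean = PyFloat_AsDouble(mean);
    double fscale = PyFloat_AsDouble(scale);
    if (!PyErr_Occurred()) {
        if (fmean <= 0.0) {
            PyErr_SetString(PyExc_ValueError, "mean <= 0.0");
            return NULL;
        }
        if (fscale <= 0.0) {
            PyErr_SetString(PyExc_ValueError, "scale <= 0.0");
            return NULL;
        }
        return scalar_kernel(fmean, fscale);
    }
    PyErr_Clear();              /* not scalars: fall back to arrays */
    return array_kernel(mean, scale);
}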
@@ -14961,6 +15754,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_81triangular(PyObject *__pyx_v_s
PyObject *__pyx_v_mode = 0;
PyObject *__pyx_v_right = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("triangular (wrapper)", 0);
@@ -14968,7 +15764,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_81triangular(PyObject *__pyx_v_s
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__left,&__pyx_n_s__mode,&__pyx_n_s__right,&__pyx_n_s__size,0};
PyObject* values[4] = {0,0,0,0};
- /* "mtrand.pyx":3290
+ /* "mtrand.pyx":3320
*
*
* def triangular(self, left, mode, right, size=None): # <<<<<<<<<<<<<<
@@ -14995,12 +15791,12 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_81triangular(PyObject *__pyx_v_s
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__mode)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3290; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3320; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__right)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3290; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3320; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
if (kw_args > 0) {
@@ -15009,7 +15805,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_81triangular(PyObject *__pyx_v_s
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "triangular") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3290; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "triangular") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3320; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -15028,7 +15824,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_81triangular(PyObject *__pyx_v_s
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3290; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3320; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.triangular", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
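Note the three locals added to the wrapper above: with this regeneration, each function that can jump to an error label carries its own __pyx_lineno/__pyx_filename/__pyx_clineno. Every failure site records the .pyx line and the C line (__LINE__) before the goto, and __Pyx_AddTraceback then splices a synthetic frame into the Python traceback. A stripped-down sketch of the idiom; add_traceback is a stub standing in for the real helper:

#include <Python.h>

/* Stub for __Pyx_AddTraceback, which builds a fake code object and
 * frame so the .pyx line shows up in Python tracebacks. */
static void
add_traceback(const char *funcname, int clineno, int lineno,
              const char *filename)
{
    (void)funcname; (void)clineno; (void)lineno; (void)filename;
}

static PyObject *
wrapper_like(PyObject *arg)
{
    int lineno = 0, clineno = 0;
    const char *filename = NULL;
    PyObject *result;

    result = PyNumber_Float(arg);
    if (result == NULL) {
        /* record where (in mtrand.pyx and in this C file) we failed */
        filename = "mtrand.pyx"; lineno = 3320; clineno = __LINE__;
        goto error;
    }
    return result;

error:
    add_traceback("mtrand.RandomState.triangular", clineno, lineno, filename);
    return NULL;
}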
@@ -15058,7 +15854,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("triangular", 0);
- /* "mtrand.pyx":3350
+ /* "mtrand.pyx":3380
* cdef double fleft, fmode, fright
*
* fleft = PyFloat_AsDouble(left) # <<<<<<<<<<<<<<
@@ -15067,7 +15863,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
*/
__pyx_v_fleft = PyFloat_AsDouble(__pyx_v_left);
- /* "mtrand.pyx":3351
+ /* "mtrand.pyx":3381
*
* fleft = PyFloat_AsDouble(left)
* fright = PyFloat_AsDouble(right) # <<<<<<<<<<<<<<
@@ -15076,7 +15872,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
*/
__pyx_v_fright = PyFloat_AsDouble(__pyx_v_right);
- /* "mtrand.pyx":3352
+ /* "mtrand.pyx":3382
* fleft = PyFloat_AsDouble(left)
* fright = PyFloat_AsDouble(right)
* fmode = PyFloat_AsDouble(mode) # <<<<<<<<<<<<<<
@@ -15085,7 +15881,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
*/
__pyx_v_fmode = PyFloat_AsDouble(__pyx_v_mode);
- /* "mtrand.pyx":3353
+ /* "mtrand.pyx":3383
* fright = PyFloat_AsDouble(right)
* fmode = PyFloat_AsDouble(mode)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -15095,7 +15891,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":3354
+ /* "mtrand.pyx":3384
* fmode = PyFloat_AsDouble(mode)
* if not PyErr_Occurred():
* if fleft > fmode: # <<<<<<<<<<<<<<
@@ -15105,23 +15901,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
__pyx_t_1 = (__pyx_v_fleft > __pyx_v_fmode);
if (__pyx_t_1) {
- /* "mtrand.pyx":3355
+ /* "mtrand.pyx":3385
* if not PyErr_Occurred():
* if fleft > fmode:
* raise ValueError("left > mode") # <<<<<<<<<<<<<<
* if fmode > fright:
* raise ValueError("mode > right")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_122), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_127), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":3356
+ /* "mtrand.pyx":3386
* if fleft > fmode:
* raise ValueError("left > mode")
* if fmode > fright: # <<<<<<<<<<<<<<
@@ -15131,23 +15927,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
__pyx_t_1 = (__pyx_v_fmode > __pyx_v_fright);
if (__pyx_t_1) {
- /* "mtrand.pyx":3357
+ /* "mtrand.pyx":3387
* raise ValueError("left > mode")
* if fmode > fright:
* raise ValueError("mode > right") # <<<<<<<<<<<<<<
* if fleft == fright:
* raise ValueError("left == right")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_124), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_129), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":3358
+ /* "mtrand.pyx":3388
* if fmode > fright:
* raise ValueError("mode > right")
* if fleft == fright: # <<<<<<<<<<<<<<
@@ -15157,23 +15953,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
__pyx_t_1 = (__pyx_v_fleft == __pyx_v_fright);
if (__pyx_t_1) {
- /* "mtrand.pyx":3359
+ /* "mtrand.pyx":3389
* raise ValueError("mode > right")
* if fleft == fright:
* raise ValueError("left == right") # <<<<<<<<<<<<<<
* return cont3_array_sc(self.internal_state, rk_triangular, size, fleft,
* fmode, fright)
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_126), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3359; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_131), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3389; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3359; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3389; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":3360
+ /* "mtrand.pyx":3390
* if fleft == fright:
* raise ValueError("left == right")
* return cont3_array_sc(self.internal_state, rk_triangular, size, fleft, # <<<<<<<<<<<<<<
@@ -15182,14 +15978,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
*/
__Pyx_XDECREF(__pyx_r);
- /* "mtrand.pyx":3361
+ /* "mtrand.pyx":3391
* raise ValueError("left == right")
* return cont3_array_sc(self.internal_state, rk_triangular, size, fleft,
* fmode, fright) # <<<<<<<<<<<<<<
*
* PyErr_Clear()
*/
- __pyx_t_2 = __pyx_f_6mtrand_cont3_array_sc(__pyx_v_self->internal_state, rk_triangular, __pyx_v_size, __pyx_v_fleft, __pyx_v_fmode, __pyx_v_fright); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_cont3_array_sc(__pyx_v_self->internal_state, rk_triangular, __pyx_v_size, __pyx_v_fleft, __pyx_v_fmode, __pyx_v_fright); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -15198,7 +15994,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
}
__pyx_L3:;
- /* "mtrand.pyx":3363
+ /* "mtrand.pyx":3393
* fmode, fright)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -15207,121 +16003,127 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
*/
PyErr_Clear();
- /* "mtrand.pyx":3364
+ /* "mtrand.pyx":3394
*
* PyErr_Clear()
* oleft = <ndarray>PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* omode = <ndarray>PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oright = <ndarray>PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_left, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3364; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_left, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oleft = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oleft = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
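The cast sequence for <ndarray>PyArray_FROM_OTF(...) changes shape here: instead of INCREF'ing a doubly-cast expression and assigning from the same temp it is about to DECREF, the regenerated code copies the pointer into a second temporary, takes its reference, releases the first, and only then stores the typed pointer. Refcount-wise the two are equivalent; the newer form keeps every owned reference in a named temporary, which is what the RefNanny bookkeeping tracks. Distilled, under an assumed helper name:

#include <Python.h>
#include <numpy/arrayobject.h>

/* Distilled form of the regenerated cast sequence (hypothetical
 * helper; assumes import_array() was called at module init). The
 * point is the ordering: own the result under the new name before
 * dropping the old one. */
static PyArrayObject *
as_aligned_double_array(PyObject *obj)
{
    PyObject *t2, *t3;

    t2 = PyArray_FROM_OTF(obj, NPY_DOUBLE, NPY_ARRAY_ALIGNED);
    if (t2 == NULL)
        return NULL;
    t3 = t2;           /* the <ndarray> cast, via a fresh temporary */
    Py_INCREF(t3);     /* take the new owned reference first...     */
    Py_DECREF(t2);     /* ...then release the old one               */
    return (PyArrayObject *)t3;
}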
- /* "mtrand.pyx":3365
+ /* "mtrand.pyx":3395
* PyErr_Clear()
* oleft = <ndarray>PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* omode = <ndarray>PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* oright = <ndarray>PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
*
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3365; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_omode = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":3366
+ /* "mtrand.pyx":3396
* oleft = <ndarray>PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* omode = <ndarray>PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* oright = <ndarray>PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
*
* if np.any(np.greater(oleft, omode)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_right, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_right, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oright = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oright = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":3368
+ /* "mtrand.pyx":3398
* oright = <ndarray>PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
*
* if np.any(np.greater(oleft, omode)): # <<<<<<<<<<<<<<
* raise ValueError("left > mode")
* if np.any(np.greater(omode, oright)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__greater); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__greater); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_oleft));
- PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_oleft));
+ PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_oleft));
__Pyx_GIVEREF(((PyObject *)__pyx_v_oleft));
__Pyx_INCREF(((PyObject *)__pyx_v_omode));
- PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_omode));
+ PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_omode));
__Pyx_GIVEREF(((PyObject *)__pyx_v_omode));
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3398; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3369
+ /* "mtrand.pyx":3399
*
* if np.any(np.greater(oleft, omode)):
* raise ValueError("left > mode") # <<<<<<<<<<<<<<
* if np.any(np.greater(omode, oright)):
* raise ValueError("mode > right")
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_127), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3369; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_132), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3369; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":3370
+ /* "mtrand.pyx":3400
* if np.any(np.greater(oleft, omode)):
* raise ValueError("left > mode")
* if np.any(np.greater(omode, oright)): # <<<<<<<<<<<<<<
* raise ValueError("mode > right")
* if np.any(np.equal(oleft, oright)):
*/
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__greater); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__greater); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_omode));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_omode));
@@ -15329,57 +16131,57 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
__Pyx_INCREF(((PyObject *)__pyx_v_oright));
PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_oright));
__Pyx_GIVEREF(((PyObject *)__pyx_v_oright));
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3400; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3371
+ /* "mtrand.pyx":3401
* raise ValueError("left > mode")
* if np.any(np.greater(omode, oright)):
* raise ValueError("mode > right") # <<<<<<<<<<<<<<
* if np.any(np.equal(oleft, oright)):
* raise ValueError("left == right")
*/
- __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_128), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_133), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L8;
}
__pyx_L8:;
- /* "mtrand.pyx":3372
+ /* "mtrand.pyx":3402
* if np.any(np.greater(omode, oright)):
* raise ValueError("mode > right")
* if np.any(np.equal(oleft, oright)): # <<<<<<<<<<<<<<
* raise ValueError("left == right")
* return cont3_array(self.internal_state, rk_triangular, size, oleft,
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__equal); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_oleft));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_oleft));
@@ -15387,40 +16189,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
__Pyx_INCREF(((PyObject *)__pyx_v_oright));
PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_oright));
__Pyx_GIVEREF(((PyObject *)__pyx_v_oright));
- __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_3);
- __pyx_t_3 = 0;
- __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+ __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3373
+ /* "mtrand.pyx":3403
* raise ValueError("mode > right")
* if np.any(np.equal(oleft, oright)):
* raise ValueError("left == right") # <<<<<<<<<<<<<<
* return cont3_array(self.internal_state, rk_triangular, size, oleft,
* omode, oright)
*/
- __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_129), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_Raise(__pyx_t_3, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_134), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L9;
}
__pyx_L9:;
- /* "mtrand.pyx":3374
+ /* "mtrand.pyx":3404
* if np.any(np.equal(oleft, oright)):
* raise ValueError("left == right")
* return cont3_array(self.internal_state, rk_triangular, size, oleft, # <<<<<<<<<<<<<<
@@ -15429,17 +16231,17 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
*/
__Pyx_XDECREF(__pyx_r);
- /* "mtrand.pyx":3375
+ /* "mtrand.pyx":3405
* raise ValueError("left == right")
* return cont3_array(self.internal_state, rk_triangular, size, oleft,
* omode, oright) # <<<<<<<<<<<<<<
*
* # Complicated, discrete distributions:
*/
- __pyx_t_3 = __pyx_f_6mtrand_cont3_array(__pyx_v_self->internal_state, rk_triangular, __pyx_v_size, __pyx_v_oleft, __pyx_v_omode, __pyx_v_oright); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_r = __pyx_t_3;
- __pyx_t_3 = 0;
+ __pyx_t_2 = __pyx_f_6mtrand_cont3_array(__pyx_v_self->internal_state, rk_triangular, __pyx_v_size, __pyx_v_oleft, __pyx_v_omode, __pyx_v_oright); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3404; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
@@ -15462,11 +16264,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m
/* Python wrapper */
static PyObject *__pyx_pw_6mtrand_11RandomState_83binomial(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6mtrand_11RandomState_82binomial[] = "\n binomial(n, p, size=None)\n\n Draw samples from a binomial distribution.\n\n Samples are drawn from a Binomial distribution with specified\n parameters, n trials and p probability of success where\n n an integer > 0 and p is in the interval [0,1]. (n may be\n input as a float, but it is truncated to an integer in use)\n\n Parameters\n ----------\n n : float (but truncated to an integer)\n parameter, > 0.\n p : float\n parameter, >= 0 and <=1.\n size : {tuple, int}\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : {ndarray, scalar}\n where the values are all integers in [0, n].\n\n See Also\n --------\n scipy.stats.distributions.binom : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Binomial distribution is\n\n .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N},\n\n where :math:`n` is the number of trials, :math:`p` is the probability\n of success, and :math:`N` is the number of successes.\n\n When estimating the standard error of a proportion in a population by\n using a random sample, the normal distribution works well unless the\n product p*n <=5, where p = population proportion estimate, and n =\n number of samples, in which case the binomial distribution is used\n instead. For example, a sample of 15 people shows 4 who are left\n handed, and 11 who are right handed. Then p = 4/15 = 27%. 0.27*15 = 4,\n so the binomial distribution should be used in this case.\n\n References\n ----------\n .. [1] Dalgaard, Peter, \"Introductory Statistics with R\",\n Springer-Verlag, 2002.\n "" .. [2] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n Fifth Edition, 2002.\n .. [3] Lentner, Marvin, \"Elementary Applied Statistics\", Bogden\n and Quigley, 1972.\n .. [4] Weisstein, Eric W. \"Binomial Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/BinomialDistribution.html\n .. [5] Wikipedia, \"Binomial-distribution\",\n http://en.wikipedia.org/wiki/Binomial_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> n, p = 10, .5 # number of trials, probability of each trial\n >>> s = np.random.binomial(n, p, 1000)\n # result of flipping a coin 10 times, tested 1000 times.\n\n A real world example. A company drills 9 wild-cat oil exploration\n wells, each with an estimated probability of success of 0.1. All nine\n wells fail. What is the probability of that happening?\n\n Let's do 20,000 trials of the model, and count the number that\n generate zero positive results.\n\n >>> sum(np.random.binomial(9,0.1,20000)==0)/20000.\n answer = 0.38885, or 38%.\n\n ";
+static char __pyx_doc_6mtrand_11RandomState_82binomial[] = "\n        binomial(n, p, size=None)\n\n        Draw samples from a binomial distribution.\n\n        Samples are drawn from a Binomial distribution with specified\n        parameters, n trials and p probability of success where\n        n is an integer >= 0 and p is in the interval [0,1]. (n may be\n        input as a float, but it is truncated to an integer in use)\n\n        Parameters\n        ----------\n        n : float (but truncated to an integer)\n                parameter, >= 0.\n        p : float\n                parameter, >= 0 and <=1.\n        size : {tuple, int}\n            Output shape.  If the given shape is, e.g., ``(m, n, k)``, then\n            ``m * n * k`` samples are drawn.\n\n        Returns\n        -------\n        samples : {ndarray, scalar}\n                  where the values are all integers in  [0, n].\n\n        See Also\n        --------\n        scipy.stats.distributions.binom : probability density function,\n            distribution or cumulative distribution function, etc.\n\n        Notes\n        -----\n        The probability density for the Binomial distribution is\n\n        .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N},\n\n        where :math:`n` is the number of trials, :math:`p` is the probability\n        of success, and :math:`N` is the number of successes.\n\n        When estimating the standard error of a proportion in a population by\n        using a random sample, the normal distribution works well unless the\n        product p*n <=5, where p = population proportion estimate, and n =\n        number of samples, in which case the binomial distribution is used\n        instead. For example, a sample of 15 people shows 4 who are left\n        handed, and 11 who are right handed. Then p = 4/15 = 27%. 0.27*15 = 4,\n        so the binomial distribution should be used in this case.\n\n        References\n        ----------\n        .. [1] Dalgaard, Peter, \"Introductory Statistics with R\",\n               Springer-Verlag, 2002.""\n        .. [2] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n               Fifth Edition, 2002.\n        .. [3] Lentner, Marvin, \"Elementary Applied Statistics\", Bogden\n               and Quigley, 1972.\n        .. [4] Weisstein, Eric W. \"Binomial Distribution.\" From MathWorld--A\n               Wolfram Web Resource.\n               http://mathworld.wolfram.com/BinomialDistribution.html\n        .. [5] Wikipedia, \"Binomial-distribution\",\n               http://en.wikipedia.org/wiki/Binomial_distribution\n\n        Examples\n        --------\n        Draw samples from the distribution:\n\n        >>> n, p = 10, .5 # number of trials, probability of each trial\n        >>> s = np.random.binomial(n, p, 1000)\n        # result of flipping a coin 10 times, tested 1000 times.\n\n        A real world example. A company drills 9 wild-cat oil exploration\n        wells, each with an estimated probability of success of 0.1. All nine\n        wells fail. What is the probability of that happening?\n\n        Let's do 20,000 trials of the model, and count the number that\n        generate zero positive results.\n\n        >>> sum(np.random.binomial(9,0.1,20000)==0)/20000.\n        answer = 0.38885, or 38.9%.\n\n        ";
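Amid the renumbering, this hunk carries an actual behavioural change: binomial now accepts n == 0 (the checks below turn "n <= 0" into "n < 0", and the docstring follows suit). A zero-trial binomial is well defined and always yields 0 successes. A minimal embedding sketch of the relaxed contract, using only standard CPython calls:

#include <Python.h>

/* After this change, RandomState.binomial(0, p, size) succeeds and
 * returns zeros instead of raising ValueError("n <= 0"). */
int
main(void)
{
    Py_Initialize();
    PyObject *rnd = PyImport_ImportModule("numpy.random");
    if (rnd != NULL) {
        PyObject *s = PyObject_CallMethod(rnd, "binomial", "(idi)",
                                          0, 0.5, 5);
        if (s != NULL) {
            PyObject_Print(s, stdout, 0);  /* e.g. array([0, 0, 0, 0, 0]) */
            Py_DECREF(s);
        } else {
            PyErr_Print();
        }
        Py_DECREF(rnd);
    } else {
        PyErr_Print();
    }
    Py_Finalize();
    return 0;
}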
static PyObject *__pyx_pw_6mtrand_11RandomState_83binomial(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_n = 0;
PyObject *__pyx_v_p = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("binomial (wrapper)", 0);
@@ -15474,7 +16279,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_83binomial(PyObject *__pyx_v_sel
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__p,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- /* "mtrand.pyx":3378
+ /* "mtrand.pyx":3408
*
* # Complicated, discrete distributions:
* def binomial(self, n, p, size=None): # <<<<<<<<<<<<<<
@@ -15500,7 +16305,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_83binomial(PyObject *__pyx_v_sel
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__p)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("binomial", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3378; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("binomial", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3408; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
@@ -15509,7 +16314,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_83binomial(PyObject *__pyx_v_sel
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "binomial") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3378; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "binomial") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3408; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -15526,7 +16331,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_83binomial(PyObject *__pyx_v_sel
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("binomial", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3378; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("binomial", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3408; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.binomial", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -15554,7 +16359,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("binomial", 0);
- /* "mtrand.pyx":3463
+ /* "mtrand.pyx":3493
* cdef double fp
*
* fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<<
@@ -15563,54 +16368,54 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr
*/
__pyx_v_fp = PyFloat_AsDouble(__pyx_v_p);
- /* "mtrand.pyx":3464
+ /* "mtrand.pyx":3494
*
* fp = PyFloat_AsDouble(p)
* ln = PyInt_AsLong(n) # <<<<<<<<<<<<<<
* if not PyErr_Occurred():
- * if ln <= 0:
+ * if ln < 0:
*/
__pyx_v_ln = PyInt_AsLong(__pyx_v_n);
- /* "mtrand.pyx":3465
+ /* "mtrand.pyx":3495
* fp = PyFloat_AsDouble(p)
* ln = PyInt_AsLong(n)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
- * if ln <= 0:
- * raise ValueError("n <= 0")
+ * if ln < 0:
+ * raise ValueError("n < 0")
*/
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":3466
+ /* "mtrand.pyx":3496
* ln = PyInt_AsLong(n)
* if not PyErr_Occurred():
- * if ln <= 0: # <<<<<<<<<<<<<<
- * raise ValueError("n <= 0")
+ * if ln < 0: # <<<<<<<<<<<<<<
+ * raise ValueError("n < 0")
* if fp < 0:
*/
- __pyx_t_1 = (__pyx_v_ln <= 0);
+ __pyx_t_1 = (__pyx_v_ln < 0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3467
+ /* "mtrand.pyx":3497
* if not PyErr_Occurred():
- * if ln <= 0:
- * raise ValueError("n <= 0") # <<<<<<<<<<<<<<
+ * if ln < 0:
+ * raise ValueError("n < 0") # <<<<<<<<<<<<<<
* if fp < 0:
* raise ValueError("p < 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_131), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3467; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_136), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3467; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":3468
- * if ln <= 0:
- * raise ValueError("n <= 0")
+ /* "mtrand.pyx":3498
+ * if ln < 0:
+ * raise ValueError("n < 0")
* if fp < 0: # <<<<<<<<<<<<<<
* raise ValueError("p < 0")
* elif fp > 1:
@@ -15618,22 +16423,22 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr
__pyx_t_1 = (__pyx_v_fp < 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3469
- * raise ValueError("n <= 0")
+ /* "mtrand.pyx":3499
+ * raise ValueError("n < 0")
* if fp < 0:
* raise ValueError("p < 0") # <<<<<<<<<<<<<<
* elif fp > 1:
* raise ValueError("p > 1")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_133), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3469; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_138), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3499; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3469; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3499; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
- /* "mtrand.pyx":3470
+ /* "mtrand.pyx":3500
* if fp < 0:
* raise ValueError("p < 0")
* elif fp > 1: # <<<<<<<<<<<<<<
@@ -15643,23 +16448,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr
__pyx_t_1 = (__pyx_v_fp > 1.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3471
+ /* "mtrand.pyx":3501
* raise ValueError("p < 0")
* elif fp > 1:
* raise ValueError("p > 1") # <<<<<<<<<<<<<<
* return discnp_array_sc(self.internal_state, rk_binomial, size, ln, fp)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_135), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3471; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_140), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3471; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":3472
+ /* "mtrand.pyx":3502
* elif fp > 1:
* raise ValueError("p > 1")
* return discnp_array_sc(self.internal_state, rk_binomial, size, ln, fp) # <<<<<<<<<<<<<<
@@ -15667,7 +16472,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_discnp_array_sc(__pyx_v_self->internal_state, rk_binomial, __pyx_v_size, __pyx_v_ln, __pyx_v_fp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3472; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_discnp_array_sc(__pyx_v_self->internal_state, rk_binomial, __pyx_v_size, __pyx_v_ln, __pyx_v_fp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -15676,7 +16481,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr
}
__pyx_L3:;
- /* "mtrand.pyx":3474
+ /* "mtrand.pyx":3504
* return discnp_array_sc(self.internal_state, rk_binomial, size, ln, fp)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -15685,50 +16490,54 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr
*/
PyErr_Clear();
- /* "mtrand.pyx":3476
+ /* "mtrand.pyx":3506
* PyErr_Clear()
*
* on = <ndarray>PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- * if np.any(np.less_equal(n, 0)):
+ * if np.any(np.less(n, 0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_n, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3476; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_n, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_on = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_on = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":3477
+ /* "mtrand.pyx":3507
*
* on = <ndarray>PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED)
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
- * if np.any(np.less_equal(n, 0)):
- * raise ValueError("n <= 0")
+ * if np.any(np.less(n, 0)):
+ * raise ValueError("n < 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3477; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3507; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_op = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":3478
+ /* "mtrand.pyx":3508
* on = <ndarray>PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED)
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- * if np.any(np.less_equal(n, 0)): # <<<<<<<<<<<<<<
- * raise ValueError("n <= 0")
+ * if np.any(np.less(n, 0)): # <<<<<<<<<<<<<<
+ * raise ValueError("n < 0")
* if np.any(np.less(p, 0)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_n);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_n);
@@ -15736,57 +16545,57 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr
__Pyx_INCREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3479
+ /* "mtrand.pyx":3509
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- * if np.any(np.less_equal(n, 0)):
- * raise ValueError("n <= 0") # <<<<<<<<<<<<<<
+ * if np.any(np.less(n, 0)):
+ * raise ValueError("n < 0") # <<<<<<<<<<<<<<
* if np.any(np.less(p, 0)):
* raise ValueError("p < 0")
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_136), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3479; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_141), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3479; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":3480
- * if np.any(np.less_equal(n, 0)):
- * raise ValueError("n <= 0")
+ /* "mtrand.pyx":3510
+ * if np.any(np.less(n, 0)):
+ * raise ValueError("n < 0")
* if np.any(np.less(p, 0)): # <<<<<<<<<<<<<<
* raise ValueError("p < 0")
* if np.any(np.greater(p, 1)):
*/
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__less); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__less); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_p);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_p);
@@ -15794,57 +16603,57 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr
__Pyx_INCREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3481
- * raise ValueError("n <= 0")
+ /* "mtrand.pyx":3511
+ * raise ValueError("n < 0")
* if np.any(np.less(p, 0)):
* raise ValueError("p < 0") # <<<<<<<<<<<<<<
* if np.any(np.greater(p, 1)):
* raise ValueError("p > 1")
*/
- __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_137), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3481; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_142), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3481; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":3482
+ /* "mtrand.pyx":3512
* if np.any(np.less(p, 0)):
* raise ValueError("p < 0")
* if np.any(np.greater(p, 1)): # <<<<<<<<<<<<<<
* raise ValueError("p > 1")
* return discnp_array(self.internal_state, rk_binomial, size, on, op)
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__greater); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__greater); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_v_p);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_p);
@@ -15852,40 +16661,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr
__Pyx_INCREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
- __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
- __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3483
+ /* "mtrand.pyx":3513
* raise ValueError("p < 0")
* if np.any(np.greater(p, 1)):
* raise ValueError("p > 1") # <<<<<<<<<<<<<<
* return discnp_array(self.internal_state, rk_binomial, size, on, op)
*
*/
- __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_138), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3483; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_143), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3483; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L8;
}
__pyx_L8:;
- /* "mtrand.pyx":3484
+ /* "mtrand.pyx":3514
* if np.any(np.greater(p, 1)):
* raise ValueError("p > 1")
* return discnp_array(self.internal_state, rk_binomial, size, on, op) # <<<<<<<<<<<<<<
@@ -15893,7 +16702,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr
* def negative_binomial(self, n, p, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_3 = __pyx_f_6mtrand_discnp_array(__pyx_v_self->internal_state, rk_binomial, __pyx_v_size, __pyx_v_on, __pyx_v_op); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3484; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __pyx_f_6mtrand_discnp_array(__pyx_v_self->internal_state, rk_binomial, __pyx_v_size, __pyx_v_on, __pyx_v_op); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
@@ -15923,6 +16732,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_85negative_binomial(PyObject *__
PyObject *__pyx_v_n = 0;
PyObject *__pyx_v_p = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("negative_binomial (wrapper)", 0);
@@ -15930,7 +16742,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_85negative_binomial(PyObject *__
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__p,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- /* "mtrand.pyx":3486
+ /* "mtrand.pyx":3516
* return discnp_array(self.internal_state, rk_binomial, size, on, op)
*
* def negative_binomial(self, n, p, size=None): # <<<<<<<<<<<<<<
@@ -15956,7 +16768,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_85negative_binomial(PyObject *__
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__p)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("negative_binomial", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3486; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("negative_binomial", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3516; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
@@ -15965,7 +16777,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_85negative_binomial(PyObject *__
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "negative_binomial") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3486; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "negative_binomial") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3516; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -15982,7 +16794,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_85negative_binomial(PyObject *__
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("negative_binomial", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3486; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("negative_binomial", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3516; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.negative_binomial", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -16010,7 +16822,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("negative_binomial", 0);
- /* "mtrand.pyx":3556
+ /* "mtrand.pyx":3586
* cdef double fp
*
* fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<<
@@ -16019,7 +16831,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
*/
__pyx_v_fp = PyFloat_AsDouble(__pyx_v_p);
- /* "mtrand.pyx":3557
+ /* "mtrand.pyx":3587
*
* fp = PyFloat_AsDouble(p)
* fn = PyFloat_AsDouble(n) # <<<<<<<<<<<<<<
@@ -16028,7 +16840,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
*/
__pyx_v_fn = PyFloat_AsDouble(__pyx_v_n);
- /* "mtrand.pyx":3558
+ /* "mtrand.pyx":3588
* fp = PyFloat_AsDouble(p)
* fn = PyFloat_AsDouble(n)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -16038,7 +16850,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":3559
+ /* "mtrand.pyx":3589
* fn = PyFloat_AsDouble(n)
* if not PyErr_Occurred():
* if fn <= 0: # <<<<<<<<<<<<<<
@@ -16048,23 +16860,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
__pyx_t_1 = (__pyx_v_fn <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3560
+ /* "mtrand.pyx":3590
* if not PyErr_Occurred():
* if fn <= 0:
* raise ValueError("n <= 0") # <<<<<<<<<<<<<<
* if fp < 0:
* raise ValueError("p < 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_139), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3560; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_145), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3560; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":3561
+ /* "mtrand.pyx":3591
* if fn <= 0:
* raise ValueError("n <= 0")
* if fp < 0: # <<<<<<<<<<<<<<
@@ -16074,22 +16886,22 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
__pyx_t_1 = (__pyx_v_fp < 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3562
+ /* "mtrand.pyx":3592
* raise ValueError("n <= 0")
* if fp < 0:
* raise ValueError("p < 0") # <<<<<<<<<<<<<<
* elif fp > 1:
* raise ValueError("p > 1")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_140), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_146), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
- /* "mtrand.pyx":3563
+ /* "mtrand.pyx":3593
* if fp < 0:
* raise ValueError("p < 0")
* elif fp > 1: # <<<<<<<<<<<<<<
@@ -16099,23 +16911,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
__pyx_t_1 = (__pyx_v_fp > 1.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3564
+ /* "mtrand.pyx":3594
* raise ValueError("p < 0")
* elif fp > 1:
* raise ValueError("p > 1") # <<<<<<<<<<<<<<
* return discdd_array_sc(self.internal_state, rk_negative_binomial,
* size, fn, fp)
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_141), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_147), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":3565
+ /* "mtrand.pyx":3595
* elif fp > 1:
* raise ValueError("p > 1")
* return discdd_array_sc(self.internal_state, rk_negative_binomial, # <<<<<<<<<<<<<<
@@ -16124,14 +16936,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
*/
__Pyx_XDECREF(__pyx_r);
- /* "mtrand.pyx":3566
+ /* "mtrand.pyx":3596
* raise ValueError("p > 1")
* return discdd_array_sc(self.internal_state, rk_negative_binomial,
* size, fn, fp) # <<<<<<<<<<<<<<
*
* PyErr_Clear()
*/
- __pyx_t_2 = __pyx_f_6mtrand_discdd_array_sc(__pyx_v_self->internal_state, rk_negative_binomial, __pyx_v_size, __pyx_v_fn, __pyx_v_fp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3565; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_discdd_array_sc(__pyx_v_self->internal_state, rk_negative_binomial, __pyx_v_size, __pyx_v_fn, __pyx_v_fp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -16140,7 +16952,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
}
__pyx_L3:;
- /* "mtrand.pyx":3568
+ /* "mtrand.pyx":3598
* size, fn, fp)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -16149,50 +16961,54 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
*/
PyErr_Clear();
- /* "mtrand.pyx":3570
+ /* "mtrand.pyx":3600
* PyErr_Clear()
*
* on = <ndarray>PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(n, 0)):
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_n, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3570; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_n, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3600; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_on = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_on = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":3571
+ /* "mtrand.pyx":3601
*
* on = <ndarray>PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(n, 0)):
* raise ValueError("n <= 0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3571; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3601; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_op = ((PyArrayObject *)__pyx_t_2);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":3572
+ /* "mtrand.pyx":3602
* on = <ndarray>PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(n, 0)): # <<<<<<<<<<<<<<
* raise ValueError("n <= 0")
* if np.any(np.less(p, 0)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3602; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3602; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3602; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3602; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3602; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_n);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_n);
@@ -16200,57 +17016,57 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
__Pyx_INCREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3602; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3602; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3602; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3602; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3573
+ /* "mtrand.pyx":3603
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(n, 0)):
* raise ValueError("n <= 0") # <<<<<<<<<<<<<<
* if np.any(np.less(p, 0)):
* raise ValueError("p < 0")
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_142), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3573; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_148), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3603; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3573; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3603; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":3574
+ /* "mtrand.pyx":3604
* if np.any(np.less_equal(n, 0)):
* raise ValueError("n <= 0")
* if np.any(np.less(p, 0)): # <<<<<<<<<<<<<<
* raise ValueError("p < 0")
* if np.any(np.greater(p, 1)):
*/
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3574; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3574; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3574; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__less); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3574; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__less); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3574; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_p);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_p);
@@ -16258,57 +17074,57 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
__Pyx_INCREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3574; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3574; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3574; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3574; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3575
+ /* "mtrand.pyx":3605
* raise ValueError("n <= 0")
* if np.any(np.less(p, 0)):
* raise ValueError("p < 0") # <<<<<<<<<<<<<<
* if np.any(np.greater(p, 1)):
* raise ValueError("p > 1")
*/
- __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_143), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3575; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_149), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3605; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3575; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3605; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":3576
+ /* "mtrand.pyx":3606
* if np.any(np.less(p, 0)):
* raise ValueError("p < 0")
* if np.any(np.greater(p, 1)): # <<<<<<<<<<<<<<
* raise ValueError("p > 1")
* return discdd_array(self.internal_state, rk_negative_binomial, size,
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3606; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3606; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3606; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__greater); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__greater); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3606; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3606; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_v_p);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_p);
@@ -16316,40 +17132,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
__Pyx_INCREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
- __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3606; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3606; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
- __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3606; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3606; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3577
+ /* "mtrand.pyx":3607
* raise ValueError("p < 0")
* if np.any(np.greater(p, 1)):
* raise ValueError("p > 1") # <<<<<<<<<<<<<<
* return discdd_array(self.internal_state, rk_negative_binomial, size,
* on, op)
*/
- __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_144), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_150), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3607; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3607; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L8;
}
__pyx_L8:;
- /* "mtrand.pyx":3578
+ /* "mtrand.pyx":3608
* if np.any(np.greater(p, 1)):
* raise ValueError("p > 1")
* return discdd_array(self.internal_state, rk_negative_binomial, size, # <<<<<<<<<<<<<<
@@ -16358,14 +17174,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx
*/
__Pyx_XDECREF(__pyx_r);
- /* "mtrand.pyx":3579
+ /* "mtrand.pyx":3609
* raise ValueError("p > 1")
* return discdd_array(self.internal_state, rk_negative_binomial, size,
* on, op) # <<<<<<<<<<<<<<
*
* def poisson(self, lam=1.0, size=None):
*/
- __pyx_t_3 = __pyx_f_6mtrand_discdd_array(__pyx_v_self->internal_state, rk_negative_binomial, __pyx_v_size, __pyx_v_on, __pyx_v_op); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3578; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __pyx_f_6mtrand_discdd_array(__pyx_v_self->internal_state, rk_negative_binomial, __pyx_v_size, __pyx_v_on, __pyx_v_op); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3608; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
@@ -16394,15 +17210,18 @@ static char __pyx_doc_6mtrand_11RandomState_86poisson[] = "\n poisson(lam
static PyObject *__pyx_pw_6mtrand_11RandomState_87poisson(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_lam = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("poisson (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__lam,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- values[0] = __pyx_k_145;
+ values[0] = __pyx_k_151;
- /* "mtrand.pyx":3581
+ /* "mtrand.pyx":3611
* on, op)
*
* def poisson(self, lam=1.0, size=None): # <<<<<<<<<<<<<<
@@ -16433,7 +17252,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_87poisson(PyObject *__pyx_v_self
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "poisson") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3581; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "poisson") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3611; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -16448,7 +17267,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_87poisson(PyObject *__pyx_v_self
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("poisson", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3581; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("poisson", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3611; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.poisson", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -16474,7 +17293,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("poisson", 0);
- /* "mtrand.pyx":3635
+ /* "mtrand.pyx":3665
* cdef ndarray olam
* cdef double flam
* flam = PyFloat_AsDouble(lam) # <<<<<<<<<<<<<<
@@ -16483,7 +17302,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra
*/
__pyx_v_flam = PyFloat_AsDouble(__pyx_v_lam);
- /* "mtrand.pyx":3636
+ /* "mtrand.pyx":3666
* cdef double flam
* flam = PyFloat_AsDouble(lam)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -16493,66 +17312,66 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":3637
+ /* "mtrand.pyx":3667
* flam = PyFloat_AsDouble(lam)
* if not PyErr_Occurred():
* if lam < 0: # <<<<<<<<<<<<<<
* raise ValueError("lam < 0")
* if lam > self.poisson_lam_max:
*/
- __pyx_t_2 = PyObject_RichCompare(__pyx_v_lam, __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3637; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_RichCompare(__pyx_v_lam, __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3667; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3667; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3638
+ /* "mtrand.pyx":3668
* if not PyErr_Occurred():
* if lam < 0:
* raise ValueError("lam < 0") # <<<<<<<<<<<<<<
* if lam > self.poisson_lam_max:
* raise ValueError("lam value too large")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_147), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_153), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3668; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3668; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":3639
+ /* "mtrand.pyx":3669
* if lam < 0:
* raise ValueError("lam < 0")
* if lam > self.poisson_lam_max: # <<<<<<<<<<<<<<
* raise ValueError("lam value too large")
* return discd_array_sc(self.internal_state, rk_poisson, size, flam)
*/
- __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__poisson_lam_max); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3639; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__poisson_lam_max); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3669; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_RichCompare(__pyx_v_lam, __pyx_t_2, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3639; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_lam, __pyx_t_2, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3669; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3639; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3669; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3640
+ /* "mtrand.pyx":3670
* raise ValueError("lam < 0")
* if lam > self.poisson_lam_max:
* raise ValueError("lam value too large") # <<<<<<<<<<<<<<
* return discd_array_sc(self.internal_state, rk_poisson, size, flam)
*
*/
- __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_149), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3640; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_155), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3670; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3640; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3670; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":3641
+ /* "mtrand.pyx":3671
* if lam > self.poisson_lam_max:
* raise ValueError("lam value too large")
* return discd_array_sc(self.internal_state, rk_poisson, size, flam) # <<<<<<<<<<<<<<
@@ -16560,7 +17379,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_3 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_poisson, __pyx_v_size, __pyx_v_flam); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3641; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_poisson, __pyx_v_size, __pyx_v_flam); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3671; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
@@ -16569,7 +17388,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra
}
__pyx_L3:;
- /* "mtrand.pyx":3643
+ /* "mtrand.pyx":3673
* return discd_array_sc(self.internal_state, rk_poisson, size, flam)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -16578,97 +17397,99 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra
*/
PyErr_Clear();
- /* "mtrand.pyx":3645
+ /* "mtrand.pyx":3675
* PyErr_Clear()
*
* olam = <ndarray>PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less(olam, 0)):
* raise ValueError("lam < 0")
*/
- __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3675; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_3)));
- __pyx_v_olam = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_olam = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":3646
+ /* "mtrand.pyx":3676
*
* olam = <ndarray>PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less(olam, 0)): # <<<<<<<<<<<<<<
* raise ValueError("lam < 0")
* if np.any(np.greater(olam, self.poisson_lam_max)):
*/
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__less); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_olam));
- PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_olam));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_olam));
__Pyx_GIVEREF(((PyObject *)__pyx_v_olam));
__Pyx_INCREF(__pyx_int_0);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5);
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3676; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3647
+ /* "mtrand.pyx":3677
* olam = <ndarray>PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less(olam, 0)):
* raise ValueError("lam < 0") # <<<<<<<<<<<<<<
* if np.any(np.greater(olam, self.poisson_lam_max)):
* raise ValueError("lam value too large.")
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_150), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3647; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_156), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3647; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
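
Two mechanical regeneration changes, presumably from a newer Cython, recur through these hunks: module-level lookups move from __Pyx_GetName(__pyx_m, name) to __Pyx_GetModuleGlobalName(name), attribute access moves from PyObject_GetAttr to __Pyx_PyObject_GetAttrStr, and the recorded mtrand.pyx line numbers shift (3646 -> 3676) because the .pyx source gained about thirty lines above this point. A minimal Python sketch of the new global-lookup helper, assuming Cython's usual module-dict-then-builtins fallback (the helper name aside, nothing here is taken from the generated C):

    import builtins

    def get_module_global(name, module_globals):
        # Assumed semantics of __Pyx_GetModuleGlobalName: try the module's
        # own globals first, then fall back to the builtins module.
        try:
            return module_globals[name]
        except KeyError:
            return getattr(builtins, name)

    get_module_global("np", {"np": "the numpy module"})  # module-dict hit
    get_module_global("any", {})                         # builtins fallback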
- /* "mtrand.pyx":3648
+ /* "mtrand.pyx":3678
* if np.any(np.less(olam, 0)):
* raise ValueError("lam < 0")
* if np.any(np.greater(olam, self.poisson_lam_max)): # <<<<<<<<<<<<<<
* raise ValueError("lam value too large.")
* return discd_array(self.internal_state, rk_poisson, size, olam)
*/
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__greater); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__greater); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__poisson_lam_max); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__poisson_lam_max); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_olam));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_olam));
@@ -16676,40 +17497,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3649
+ /* "mtrand.pyx":3679
* raise ValueError("lam < 0")
* if np.any(np.greater(olam, self.poisson_lam_max)):
* raise ValueError("lam value too large.") # <<<<<<<<<<<<<<
* return discd_array(self.internal_state, rk_poisson, size, olam)
*
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_152), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3649; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_158), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3649; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":3650
+ /* "mtrand.pyx":3680
* if np.any(np.greater(olam, self.poisson_lam_max)):
* raise ValueError("lam value too large.")
* return discd_array(self.internal_state, rk_poisson, size, olam) # <<<<<<<<<<<<<<
@@ -16717,7 +17538,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra
* def zipf(self, a, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_5 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_poisson, __pyx_v_size, __pyx_v_olam); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3650; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_poisson, __pyx_v_size, __pyx_v_olam); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3680; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
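
Taken together, the poisson hunks above compile exactly the validation quoted in the pyx comments: coerce lam to a double array, reject negatives, reject values above poisson_lam_max, then hand off to discd_array. A runnable sketch of that logic (the helper name is mine; the sampling itself is not shown):

    import numpy as np

    def check_poisson_lam(lam, poisson_lam_max):
        # Mirrors the quoted mtrand.pyx checks on the array path.
        olam = np.asarray(lam, dtype=np.double)
        if np.any(np.less(olam, 0)):
            raise ValueError("lam < 0")
        if np.any(np.greater(olam, poisson_lam_max)):
            raise ValueError("lam value too large.")
        return olam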
@@ -16745,6 +17566,9 @@ static char __pyx_doc_6mtrand_11RandomState_88zipf[] = "\n zipf(a, size=N
static PyObject *__pyx_pw_6mtrand_11RandomState_89zipf(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_a = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("zipf (wrapper)", 0);
@@ -16752,7 +17576,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_89zipf(PyObject *__pyx_v_self, P
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__a,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- /* "mtrand.pyx":3652
+ /* "mtrand.pyx":3682
* return discd_array(self.internal_state, rk_poisson, size, olam)
*
* def zipf(self, a, size=None): # <<<<<<<<<<<<<<
@@ -16781,7 +17605,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_89zipf(PyObject *__pyx_v_self, P
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "zipf") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3652; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "zipf") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3682; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -16796,7 +17620,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_89zipf(PyObject *__pyx_v_self, P
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("zipf", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3652; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("zipf", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3682; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.zipf", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -16822,7 +17646,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("zipf", 0);
- /* "mtrand.pyx":3727
+ /* "mtrand.pyx":3757
* cdef double fa
*
* fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<<
@@ -16831,7 +17655,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_
*/
__pyx_v_fa = PyFloat_AsDouble(__pyx_v_a);
- /* "mtrand.pyx":3728
+ /* "mtrand.pyx":3758
*
* fa = PyFloat_AsDouble(a)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -16841,7 +17665,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":3729
+ /* "mtrand.pyx":3759
* fa = PyFloat_AsDouble(a)
* if not PyErr_Occurred():
* if fa <= 1.0: # <<<<<<<<<<<<<<
@@ -16851,23 +17675,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_
__pyx_t_1 = (__pyx_v_fa <= 1.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3730
+ /* "mtrand.pyx":3760
* if not PyErr_Occurred():
* if fa <= 1.0:
* raise ValueError("a <= 1.0") # <<<<<<<<<<<<<<
* return discd_array_sc(self.internal_state, rk_zipf, size, fa)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_154), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3730; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_160), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3760; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3730; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3760; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":3731
+ /* "mtrand.pyx":3761
* if fa <= 1.0:
* raise ValueError("a <= 1.0")
* return discd_array_sc(self.internal_state, rk_zipf, size, fa) # <<<<<<<<<<<<<<
@@ -16875,7 +17699,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_zipf, __pyx_v_size, __pyx_v_fa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_zipf, __pyx_v_size, __pyx_v_fa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3761; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -16884,7 +17708,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_
}
__pyx_L3:;
- /* "mtrand.pyx":3733
+ /* "mtrand.pyx":3763
* return discd_array_sc(self.internal_state, rk_zipf, size, fa)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -16893,80 +17717,82 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_
*/
PyErr_Clear();
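
zipf, like the other discrete samplers in this file, tries a scalar fast path first: PyFloat_AsDouble either succeeds (validate fa, call discd_array_sc) or leaves a pending error that the PyErr_Clear() above discards before falling through to the array path. A hedged Python rendering of that dispatch (the string returns stand in for the two C sampling calls):

    def zipf_dispatch(a):
        # Scalar fast path; a failed conversion selects the array path,
        # mirroring the PyErr_Occurred()/PyErr_Clear() pair above.
        try:
            fa = float(a)
        except (TypeError, ValueError):
            return "array path"        # discd_array(...)
        if fa <= 1.0:
            raise ValueError("a <= 1.0")
        return "scalar path"           # discd_array_sc(...)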
- /* "mtrand.pyx":3735
+ /* "mtrand.pyx":3765
* PyErr_Clear()
*
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oa, 1.0)):
* raise ValueError("a <= 1.0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3735; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3765; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_oa = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_oa = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":3736
+ /* "mtrand.pyx":3766
*
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 1.0)): # <<<<<<<<<<<<<<
* raise ValueError("a <= 1.0")
* return discd_array(self.internal_state, rk_zipf, size, oa)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3736; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3736; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3736; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3736; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3736; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3736; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_oa));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_oa));
__Pyx_GIVEREF(((PyObject *)__pyx_v_oa));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3736; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3736; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3736; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3736; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3766; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3737
+ /* "mtrand.pyx":3767
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 1.0)):
* raise ValueError("a <= 1.0") # <<<<<<<<<<<<<<
* return discd_array(self.internal_state, rk_zipf, size, oa)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_155), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3737; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3737; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_161), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":3738
+ /* "mtrand.pyx":3768
* if np.any(np.less_equal(oa, 1.0)):
* raise ValueError("a <= 1.0")
* return discd_array(self.internal_state, rk_zipf, size, oa) # <<<<<<<<<<<<<<
@@ -16974,10 +17800,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_
* def geometric(self, p, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_zipf, __pyx_v_size, __pyx_v_oa); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3738; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_zipf, __pyx_v_size, __pyx_v_oa); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3768; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
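
On the array path, zipf coerces a with PyArray_FROM_OTF and rejects any element <= 1; note also that the regenerated <ndarray> cast now routes through an extra temporary (__pyx_t_3), taking its own reference before the old one is released and only then storing into oa. A sketch of the validation, plus one line showing the user-facing call (helper name mine):

    import numpy as np

    def check_zipf_a(a):
        oa = np.asarray(a, dtype=np.double)  # PyArray_FROM_OTF(..., NPY_DOUBLE, ...)
        if np.any(np.less_equal(oa, 1.0)):
            raise ValueError("a <= 1.0")
        return oa

    check_zipf_a([2.0, 3.5])       # passes
    np.random.zipf(2.0, size=5)    # scalar fast path in the real module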
@@ -17002,6 +17828,9 @@ static char __pyx_doc_6mtrand_11RandomState_90geometric[] = "\n geometric
static PyObject *__pyx_pw_6mtrand_11RandomState_91geometric(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_p = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("geometric (wrapper)", 0);
@@ -17009,7 +17838,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_91geometric(PyObject *__pyx_v_se
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__p,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- /* "mtrand.pyx":3740
+ /* "mtrand.pyx":3770
* return discd_array(self.internal_state, rk_zipf, size, oa)
*
* def geometric(self, p, size=None): # <<<<<<<<<<<<<<
@@ -17038,7 +17867,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_91geometric(PyObject *__pyx_v_se
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "geometric") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3740; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "geometric") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3770; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -17053,7 +17882,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_91geometric(PyObject *__pyx_v_se
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("geometric", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3740; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("geometric", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3770; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.geometric", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -17079,7 +17908,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("geometric", 0);
- /* "mtrand.pyx":3788
+ /* "mtrand.pyx":3818
* cdef double fp
*
* fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<<
@@ -17088,7 +17917,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt
*/
__pyx_v_fp = PyFloat_AsDouble(__pyx_v_p);
- /* "mtrand.pyx":3789
+ /* "mtrand.pyx":3819
*
* fp = PyFloat_AsDouble(p)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -17098,7 +17927,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":3790
+ /* "mtrand.pyx":3820
* fp = PyFloat_AsDouble(p)
* if not PyErr_Occurred():
* if fp < 0.0: # <<<<<<<<<<<<<<
@@ -17108,23 +17937,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt
__pyx_t_1 = (__pyx_v_fp < 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3791
+ /* "mtrand.pyx":3821
* if not PyErr_Occurred():
* if fp < 0.0:
* raise ValueError("p < 0.0") # <<<<<<<<<<<<<<
* if fp > 1.0:
* raise ValueError("p > 1.0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_157), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_163), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":3792
+ /* "mtrand.pyx":3822
* if fp < 0.0:
* raise ValueError("p < 0.0")
* if fp > 1.0: # <<<<<<<<<<<<<<
@@ -17134,23 +17963,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt
__pyx_t_1 = (__pyx_v_fp > 1.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3793
+ /* "mtrand.pyx":3823
* raise ValueError("p < 0.0")
* if fp > 1.0:
* raise ValueError("p > 1.0") # <<<<<<<<<<<<<<
* return discd_array_sc(self.internal_state, rk_geometric, size, fp)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_159), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3793; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_165), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3793; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":3794
+ /* "mtrand.pyx":3824
* if fp > 1.0:
* raise ValueError("p > 1.0")
* return discd_array_sc(self.internal_state, rk_geometric, size, fp) # <<<<<<<<<<<<<<
@@ -17158,7 +17987,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_geometric, __pyx_v_size, __pyx_v_fp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_geometric, __pyx_v_size, __pyx_v_fp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -17167,7 +17996,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt
}
__pyx_L3:;
- /* "mtrand.pyx":3796
+ /* "mtrand.pyx":3826
* return discd_array_sc(self.internal_state, rk_geometric, size, fp)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -17176,140 +18005,142 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt
*/
PyErr_Clear();
- /* "mtrand.pyx":3799
+ /* "mtrand.pyx":3829
*
*
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less(op, 0.0)):
* raise ValueError("p < 0.0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_op = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_op = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":3800
+ /* "mtrand.pyx":3830
*
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less(op, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("p < 0.0")
* if np.any(np.greater(op, 1.0)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_op));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_op));
__Pyx_GIVEREF(((PyObject *)__pyx_v_op));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3800; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3801
+ /* "mtrand.pyx":3831
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less(op, 0.0)):
* raise ValueError("p < 0.0") # <<<<<<<<<<<<<<
* if np.any(np.greater(op, 1.0)):
* raise ValueError("p > 1.0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_160), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_166), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":3802
+ /* "mtrand.pyx":3832
* if np.any(np.less(op, 0.0)):
* raise ValueError("p < 0.0")
* if np.any(np.greater(op, 1.0)): # <<<<<<<<<<<<<<
* raise ValueError("p > 1.0")
* return discd_array(self.internal_state, rk_geometric, size, op)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__greater); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__greater); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_op));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_op));
__Pyx_GIVEREF(((PyObject *)__pyx_v_op));
- PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3803
+ /* "mtrand.pyx":3833
* raise ValueError("p < 0.0")
* if np.any(np.greater(op, 1.0)):
* raise ValueError("p > 1.0") # <<<<<<<<<<<<<<
* return discd_array(self.internal_state, rk_geometric, size, op)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_161), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_167), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":3804
+ /* "mtrand.pyx":3834
* if np.any(np.greater(op, 1.0)):
* raise ValueError("p > 1.0")
* return discd_array(self.internal_state, rk_geometric, size, op) # <<<<<<<<<<<<<<
@@ -17317,10 +18148,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt
* def hypergeometric(self, ngood, nbad, nsample, size=None):
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_geometric, __pyx_v_size, __pyx_v_op); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3804; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_geometric, __pyx_v_size, __pyx_v_op); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
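
geometric repeats the same shape as zipf: a scalar path enforcing 0.0 <= fp <= 1.0, then an array path applying np.less/np.greater elementwise, with the same helper swaps and temporary renumbering as above. The array-path checks, sketched (helper name mine):

    import numpy as np

    def check_geometric_p(p):
        op = np.asarray(p, dtype=np.double)
        if np.any(np.less(op, 0.0)):
            raise ValueError("p < 0.0")
        if np.any(np.greater(op, 1.0)):
            raise ValueError("p > 1.0")
        return op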
@@ -17341,12 +18172,15 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt
/* Python wrapper */
static PyObject *__pyx_pw_6mtrand_11RandomState_93hypergeometric(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6mtrand_11RandomState_92hypergeometric[] = "\n hypergeometric(ngood, nbad, nsample, size=None)\n\n Draw samples from a Hypergeometric distribution.\n\n Samples are drawn from a Hypergeometric distribution with specified\n parameters, ngood (ways to make a good selection), nbad (ways to make\n a bad selection), and nsample = number of items sampled, which is less\n than or equal to the sum ngood + nbad.\n\n Parameters\n ----------\n ngood : float (but truncated to an integer)\n parameter, > 0.\n nbad : float\n parameter, >= 0.\n nsample : float\n parameter, > 0 and <= ngood+nbad\n size : {tuple, int}\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : {ndarray, scalar}\n where the values are all integers in [0, n].\n\n See Also\n --------\n scipy.stats.distributions.hypergeom : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Hypergeometric distribution is\n\n .. math:: P(x) = \\frac{\\binom{m}{n}\\binom{N-m}{n-x}}{\\binom{N}{n}},\n\n where :math:`0 \\le x \\le m` and :math:`n+m-N \\le x \\le n`\n\n for P(x) the probability of x successes, n = ngood, m = nbad, and\n N = number of samples.\n\n Consider an urn with black and white marbles in it, ngood of them\n black and nbad are white. If you draw nsample balls without\n replacement, then the Hypergeometric distribution describes the\n distribution of black balls in the drawn sample.\n\n Note that this distribution is very similar to the Binomial\n distribution, except that in this case, samples are drawn without\n replacement, whereas in the Binomial case samples are drawn wit""h\n replacement (or the sample space is infinite). As the sample space\n becomes large, this distribution approaches the Binomial.\n\n References\n ----------\n .. [1] Lentner, Marvin, \"Elementary Applied Statistics\", Bogden\n and Quigley, 1972.\n .. [2] Weisstein, Eric W. \"Hypergeometric Distribution.\" From\n MathWorld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/HypergeometricDistribution.html\n .. [3] Wikipedia, \"Hypergeometric-distribution\",\n http://en.wikipedia.org/wiki/Hypergeometric-distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> ngood, nbad, nsamp = 100, 2, 10\n # number of good, number of bad, and number of samples\n >>> s = np.random.hypergeometric(ngood, nbad, nsamp, 1000)\n >>> hist(s)\n # note that it is very unlikely to grab both bad items\n\n Suppose you have an urn with 15 white and 15 black marbles.\n If you pull 15 marbles at random, how likely is it that\n 12 or more of them are one color?\n\n >>> s = np.random.hypergeometric(15, 15, 15, 100000)\n >>> sum(s>=12)/100000. + sum(s<=3)/100000.\n # answer = 0.003 ... pretty unlikely!\n\n ";
+static char __pyx_doc_6mtrand_11RandomState_92hypergeometric[] = "\n hypergeometric(ngood, nbad, nsample, size=None)\n\n Draw samples from a Hypergeometric distribution.\n\n Samples are drawn from a Hypergeometric distribution with specified\n parameters, ngood (ways to make a good selection), nbad (ways to make\n a bad selection), and nsample = number of items sampled, which is less\n than or equal to the sum ngood + nbad.\n\n Parameters\n ----------\n ngood : int or array_like\n Number of ways to make a good selection. Must be nonnegative.\n nbad : int or array_like\n Number of ways to make a bad selection. Must be nonnegative.\n nsample : int or array_like\n Number of items sampled. Must be at least 1 and at most\n ``ngood + nbad``.\n size : int or tuple of int\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn.\n\n Returns\n -------\n samples : ndarray or scalar\n The values are all integers in [0, n].\n\n See Also\n --------\n scipy.stats.distributions.hypergeom : probability density function,\n distribution or cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Hypergeometric distribution is\n\n .. math:: P(x) = \\frac{\\binom{m}{n}\\binom{N-m}{n-x}}{\\binom{N}{n}},\n\n where :math:`0 \\le x \\le m` and :math:`n+m-N \\le x \\le n`\n\n for P(x) the probability of x successes, n = ngood, m = nbad, and\n N = number of samples.\n\n Consider an urn with black and white marbles in it, ngood of them\n black and nbad are white. If you draw nsample balls without\n replacement, then the Hypergeometric distribution describes the\n distribution of black balls in the drawn sample.\n\n Note that this distribution is very similar to the Binomial\n distrib""ution, except that in this case, samples are drawn without\n replacement, whereas in the Binomial case samples are drawn with\n replacement (or the sample space is infinite). As the sample space\n becomes large, this distribution approaches the Binomial.\n\n References\n ----------\n .. [1] Lentner, Marvin, \"Elementary Applied Statistics\", Bogden\n and Quigley, 1972.\n .. [2] Weisstein, Eric W. \"Hypergeometric Distribution.\" From\n MathWorld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/HypergeometricDistribution.html\n .. [3] Wikipedia, \"Hypergeometric-distribution\",\n http://en.wikipedia.org/wiki/Hypergeometric-distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> ngood, nbad, nsamp = 100, 2, 10\n # number of good, number of bad, and number of samples\n >>> s = np.random.hypergeometric(ngood, nbad, nsamp, 1000)\n >>> hist(s)\n # note that it is very unlikely to grab both bad items\n\n Suppose you have an urn with 15 white and 15 black marbles.\n If you pull 15 marbles at random, how likely is it that\n 12 or more of them are one color?\n\n >>> s = np.random.hypergeometric(15, 15, 15, 100000)\n >>> sum(s>=12)/100000. + sum(s<=3)/100000.\n # answer = 0.003 ... pretty unlikely!\n\n ";
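
The docstring rewrite here is substantive, not just a reflow: ngood and nbad are now documented as nonnegative ints (or array_like) rather than positive floats, matching the relaxed runtime checks in the hunks below. The documented example runs as-is:

    import numpy as np

    ngood, nbad, nsamp = 100, 2, 10
    s = np.random.hypergeometric(ngood, nbad, nsamp, 1000)
    # ngood + nbad >= nsamp holds, so this samples without error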
static PyObject *__pyx_pw_6mtrand_11RandomState_93hypergeometric(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_ngood = 0;
PyObject *__pyx_v_nbad = 0;
PyObject *__pyx_v_nsample = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("hypergeometric (wrapper)", 0);
@@ -17354,7 +18188,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_93hypergeometric(PyObject *__pyx
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__ngood,&__pyx_n_s__nbad,&__pyx_n_s__nsample,&__pyx_n_s__size,0};
PyObject* values[4] = {0,0,0,0};
- /* "mtrand.pyx":3806
+ /* "mtrand.pyx":3836
* return discd_array(self.internal_state, rk_geometric, size, op)
*
* def hypergeometric(self, ngood, nbad, nsample, size=None): # <<<<<<<<<<<<<<
@@ -17381,12 +18215,12 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_93hypergeometric(PyObject *__pyx
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__nbad)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3806; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3836; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__nsample)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3806; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3836; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
if (kw_args > 0) {
@@ -17395,7 +18229,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_93hypergeometric(PyObject *__pyx
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "hypergeometric") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3806; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "hypergeometric") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3836; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -17414,7 +18248,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_93hypergeometric(PyObject *__pyx
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3806; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3836; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.hypergeometric", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -17445,7 +18279,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("hypergeometric", 0);
- /* "mtrand.pyx":3893
+ /* "mtrand.pyx":3924
* cdef long lngood, lnbad, lnsample
*
* lngood = PyInt_AsLong(ngood) # <<<<<<<<<<<<<<
@@ -17454,7 +18288,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob
*/
__pyx_v_lngood = PyInt_AsLong(__pyx_v_ngood);
- /* "mtrand.pyx":3894
+ /* "mtrand.pyx":3925
*
* lngood = PyInt_AsLong(ngood)
* lnbad = PyInt_AsLong(nbad) # <<<<<<<<<<<<<<
@@ -17463,142 +18297,131 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob
*/
__pyx_v_lnbad = PyInt_AsLong(__pyx_v_nbad);
- /* "mtrand.pyx":3895
+ /* "mtrand.pyx":3926
* lngood = PyInt_AsLong(ngood)
* lnbad = PyInt_AsLong(nbad)
* lnsample = PyInt_AsLong(nsample) # <<<<<<<<<<<<<<
* if not PyErr_Occurred():
- * if ngood < 1:
+ * if lngood < 0:
*/
__pyx_v_lnsample = PyInt_AsLong(__pyx_v_nsample);
- /* "mtrand.pyx":3896
+ /* "mtrand.pyx":3927
* lnbad = PyInt_AsLong(nbad)
* lnsample = PyInt_AsLong(nsample)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
- * if ngood < 1:
- * raise ValueError("ngood < 1")
+ * if lngood < 0:
+ * raise ValueError("ngood < 0")
*/
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":3897
+ /* "mtrand.pyx":3928
* lnsample = PyInt_AsLong(nsample)
* if not PyErr_Occurred():
- * if ngood < 1: # <<<<<<<<<<<<<<
- * raise ValueError("ngood < 1")
- * if nbad < 1:
+ * if lngood < 0: # <<<<<<<<<<<<<<
+ * raise ValueError("ngood < 0")
+ * if lnbad < 0:
*/
- __pyx_t_2 = PyObject_RichCompare(__pyx_v_ngood, __pyx_int_1, Py_LT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3897; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3897; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_1 = (__pyx_v_lngood < 0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3898
+ /* "mtrand.pyx":3929
* if not PyErr_Occurred():
- * if ngood < 1:
- * raise ValueError("ngood < 1") # <<<<<<<<<<<<<<
- * if nbad < 1:
- * raise ValueError("nbad < 1")
+ * if lngood < 0:
+ * raise ValueError("ngood < 0") # <<<<<<<<<<<<<<
+ * if lnbad < 0:
+ * raise ValueError("nbad < 0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_163), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3898; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_169), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3929; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3898; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3929; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":3899
- * if ngood < 1:
- * raise ValueError("ngood < 1")
- * if nbad < 1: # <<<<<<<<<<<<<<
- * raise ValueError("nbad < 1")
- * if nsample < 1:
+ /* "mtrand.pyx":3930
+ * if lngood < 0:
+ * raise ValueError("ngood < 0")
+ * if lnbad < 0: # <<<<<<<<<<<<<<
+ * raise ValueError("nbad < 0")
+ * if lnsample < 1:
*/
- __pyx_t_2 = PyObject_RichCompare(__pyx_v_nbad, __pyx_int_1, Py_LT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3899; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3899; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_1 = (__pyx_v_lnbad < 0);
if (__pyx_t_1) {
- /* "mtrand.pyx":3900
- * raise ValueError("ngood < 1")
- * if nbad < 1:
- * raise ValueError("nbad < 1") # <<<<<<<<<<<<<<
- * if nsample < 1:
+ /* "mtrand.pyx":3931
+ * raise ValueError("ngood < 0")
+ * if lnbad < 0:
+ * raise ValueError("nbad < 0") # <<<<<<<<<<<<<<
+ * if lnsample < 1:
* raise ValueError("nsample < 1")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_165), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_171), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3931; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3931; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":3901
- * if nbad < 1:
- * raise ValueError("nbad < 1")
- * if nsample < 1: # <<<<<<<<<<<<<<
+ /* "mtrand.pyx":3932
+ * if lnbad < 0:
+ * raise ValueError("nbad < 0")
+ * if lnsample < 1: # <<<<<<<<<<<<<<
* raise ValueError("nsample < 1")
- * if ngood + nbad < nsample:
+ * if lngood + lnbad < lnsample:
*/
- __pyx_t_2 = PyObject_RichCompare(__pyx_v_nsample, __pyx_int_1, Py_LT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3901; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3901; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_1 = (__pyx_v_lnsample < 1);
if (__pyx_t_1) {
- /* "mtrand.pyx":3902
- * raise ValueError("nbad < 1")
- * if nsample < 1:
+ /* "mtrand.pyx":3933
+ * raise ValueError("nbad < 0")
+ * if lnsample < 1:
* raise ValueError("nsample < 1") # <<<<<<<<<<<<<<
- * if ngood + nbad < nsample:
+ * if lngood + lnbad < lnsample:
* raise ValueError("ngood + nbad < nsample")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_167), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_173), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3933; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3933; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":3903
- * if nsample < 1:
+ /* "mtrand.pyx":3934
+ * if lnsample < 1:
* raise ValueError("nsample < 1")
- * if ngood + nbad < nsample: # <<<<<<<<<<<<<<
+ * if lngood + lnbad < lnsample: # <<<<<<<<<<<<<<
* raise ValueError("ngood + nbad < nsample")
* return discnmN_array_sc(self.internal_state, rk_hypergeometric, size,
*/
- __pyx_t_2 = PyNumber_Add(__pyx_v_ngood, __pyx_v_nbad); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3903; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_v_nsample, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3903; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3903; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_1 = ((__pyx_v_lngood + __pyx_v_lnbad) < __pyx_v_lnsample);
if (__pyx_t_1) {
- /* "mtrand.pyx":3904
+ /* "mtrand.pyx":3935
* raise ValueError("nsample < 1")
- * if ngood + nbad < nsample:
+ * if lngood + lnbad < lnsample:
* raise ValueError("ngood + nbad < nsample") # <<<<<<<<<<<<<<
* return discnmN_array_sc(self.internal_state, rk_hypergeometric, size,
* lngood, lnbad, lnsample)
*/
- __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_169), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3904; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_Raise(__pyx_t_3, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3904; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_175), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3935; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3935; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":3905
- * if ngood + nbad < nsample:
+ /* "mtrand.pyx":3936
+ * if lngood + lnbad < lnsample:
* raise ValueError("ngood + nbad < nsample")
* return discnmN_array_sc(self.internal_state, rk_hypergeometric, size, # <<<<<<<<<<<<<<
* lngood, lnbad, lnsample)
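
The hunk above swaps the Python-level PyObject_RichCompare(..., __pyx_int_1, Py_LT) / __Pyx_PyObject_IsTrue pairs for direct C comparisons on the converted long values, and relaxes the scalar bounds: ngood and nbad may now be zero (only negative values raise), while nsample must still be at least 1. A minimal Python sketch of the checks, with a hypothetical helper name but bodies taken verbatim from the quoted mtrand.pyx lines:

    def _check_scalar_args(lngood, lnbad, lnsample):
        # Hypothetical helper; lngood/lnbad/lnsample stand for the C long
        # conversions of the Python arguments in the generated code.
        if lngood < 0:
            raise ValueError("ngood < 0")
        if lnbad < 0:
            raise ValueError("nbad < 0")
        if lnsample < 1:
            raise ValueError("nsample < 1")
        if lngood + lnbad < lnsample:
            raise ValueError("ngood + nbad < nsample")

    _check_scalar_args(0, 5, 3)   # accepted after this change; raised before

Under the relaxed bounds a call such as hypergeometric(0, 5, 3) should now pass validation and, with an empty "good" pool, draw only zeros; that consequence is inferred from the checks, not shown by the diff itself.
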
@@ -17606,24 +18429,24 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob
*/
__Pyx_XDECREF(__pyx_r);
- /* "mtrand.pyx":3906
+ /* "mtrand.pyx":3937
* raise ValueError("ngood + nbad < nsample")
* return discnmN_array_sc(self.internal_state, rk_hypergeometric, size,
* lngood, lnbad, lnsample) # <<<<<<<<<<<<<<
*
- *
+ * PyErr_Clear()
*/
- __pyx_t_3 = __pyx_f_6mtrand_discnmN_array_sc(__pyx_v_self->internal_state, rk_hypergeometric, __pyx_v_size, __pyx_v_lngood, __pyx_v_lnbad, __pyx_v_lnsample); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3905; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_r = __pyx_t_3;
- __pyx_t_3 = 0;
+ __pyx_t_2 = __pyx_f_6mtrand_discnmN_array_sc(__pyx_v_self->internal_state, rk_hypergeometric, __pyx_v_size, __pyx_v_lngood, __pyx_v_lnbad, __pyx_v_lnsample); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3936; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
goto __pyx_L0;
goto __pyx_L3;
}
__pyx_L3:;
- /* "mtrand.pyx":3909
- *
+ /* "mtrand.pyx":3939
+ * lngood, lnbad, lnsample)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
*
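
The PyErr_Clear() here is the pivot of mtrand's two-path dispatch: the scalar fast path first attempts a cheap C conversion (PyInt_AsLong for hypergeometric), and if that conversion set a Python error, the error is cleared and control falls through to the elementwise array path below. A minimal runnable Python sketch of that pattern; the function name and return strings are illustrative only:

    def _dispatch(ngood, nbad, nsample):
        try:
            # Mirrors PyInt_AsLong + "if not PyErr_Occurred()".
            lngood, lnbad, lnsample = int(ngood), int(nbad), int(nsample)
        except (TypeError, ValueError):
            # Mirrors PyErr_Clear(): discard the conversion failure and
            # treat the inputs as arrays (the discnmN_array path).
            return "array path"
        return "scalar path"        # the discnmN_array_sc path

    _dispatch(3, 4, 2)        # -> "scalar path"
    _dispatch([3, 6], 4, 2)   # -> "array path"
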
@@ -17631,179 +18454,185 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob
*/
PyErr_Clear();
- /* "mtrand.pyx":3911
+ /* "mtrand.pyx":3941
* PyErr_Clear()
*
* ongood = <ndarray>PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* onbad = <ndarray>PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED)
* onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED)
*/
- __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_ngood, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3911; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_3)));
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_ngood, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3941; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_ongood = ((PyArrayObject *)__pyx_t_3);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":3912
+ /* "mtrand.pyx":3942
*
* ongood = <ndarray>PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED)
* onbad = <ndarray>PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED)
- * if np.any(np.less(ongood, 1)):
+ * if np.any(np.less(ongood, 0)):
*/
- __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_nbad, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3912; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_nbad, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3942; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_3)));
- __pyx_v_onbad = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_2 = __pyx_t_3;
+ __Pyx_INCREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_onbad = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_2 = 0;
- /* "mtrand.pyx":3913
+ /* "mtrand.pyx":3943
* ongood = <ndarray>PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED)
* onbad = <ndarray>PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED)
* onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
- * if np.any(np.less(ongood, 1)):
- * raise ValueError("ngood < 1")
+ * if np.any(np.less(ongood, 0)):
+ * raise ValueError("ngood < 0")
*/
- __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_nsample, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3913; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_3)));
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_nsample, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_onsample = ((PyArrayObject *)__pyx_t_3);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":3914
+ /* "mtrand.pyx":3944
* onbad = <ndarray>PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED)
* onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED)
- * if np.any(np.less(ongood, 1)): # <<<<<<<<<<<<<<
- * raise ValueError("ngood < 1")
- * if np.any(np.less(onbad, 1)):
+ * if np.any(np.less(ongood, 0)): # <<<<<<<<<<<<<<
+ * raise ValueError("ngood < 0")
+ * if np.any(np.less(onbad, 0)):
*/
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__less); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_ongood));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_ongood));
__Pyx_GIVEREF(((PyObject *)__pyx_v_ongood));
- __Pyx_INCREF(__pyx_int_1);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1);
- __Pyx_GIVEREF(__pyx_int_1);
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_INCREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3914; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3944; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3915
+ /* "mtrand.pyx":3945
* onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED)
- * if np.any(np.less(ongood, 1)):
- * raise ValueError("ngood < 1") # <<<<<<<<<<<<<<
- * if np.any(np.less(onbad, 1)):
- * raise ValueError("nbad < 1")
+ * if np.any(np.less(ongood, 0)):
+ * raise ValueError("ngood < 0") # <<<<<<<<<<<<<<
+ * if np.any(np.less(onbad, 0)):
+ * raise ValueError("nbad < 0")
*/
- __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_170), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3915; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_176), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3945; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3915; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3945; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L8;
}
__pyx_L8:;
- /* "mtrand.pyx":3916
- * if np.any(np.less(ongood, 1)):
- * raise ValueError("ngood < 1")
- * if np.any(np.less(onbad, 1)): # <<<<<<<<<<<<<<
- * raise ValueError("nbad < 1")
+ /* "mtrand.pyx":3946
+ * if np.any(np.less(ongood, 0)):
+ * raise ValueError("ngood < 0")
+ * if np.any(np.less(onbad, 0)): # <<<<<<<<<<<<<<
+ * raise ValueError("nbad < 0")
* if np.any(np.less(onsample, 1)):
*/
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_5, __pyx_n_s__less); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s__less); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_onbad));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_onbad));
__Pyx_GIVEREF(((PyObject *)__pyx_v_onbad));
- __Pyx_INCREF(__pyx_int_1);
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_1);
- __Pyx_GIVEREF(__pyx_int_1);
- __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_INCREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3916; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3917
- * raise ValueError("ngood < 1")
- * if np.any(np.less(onbad, 1)):
- * raise ValueError("nbad < 1") # <<<<<<<<<<<<<<
+ /* "mtrand.pyx":3947
+ * raise ValueError("ngood < 0")
+ * if np.any(np.less(onbad, 0)):
+ * raise ValueError("nbad < 0") # <<<<<<<<<<<<<<
* if np.any(np.less(onsample, 1)):
* raise ValueError("nsample < 1")
*/
- __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_171), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3917; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_177), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3947; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3917; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3947; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L9;
}
__pyx_L9:;
- /* "mtrand.pyx":3918
- * if np.any(np.less(onbad, 1)):
- * raise ValueError("nbad < 1")
+ /* "mtrand.pyx":3948
+ * if np.any(np.less(onbad, 0)):
+ * raise ValueError("nbad < 0")
* if np.any(np.less(onsample, 1)): # <<<<<<<<<<<<<<
* raise ValueError("nsample < 1")
* if np.any(np.less(np.add(ongood, onbad),onsample)):
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__less); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__less); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_onsample));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_onsample));
@@ -17811,62 +18640,62 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob
__Pyx_INCREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3918; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3919
- * raise ValueError("nbad < 1")
+ /* "mtrand.pyx":3949
+ * raise ValueError("nbad < 0")
* if np.any(np.less(onsample, 1)):
* raise ValueError("nsample < 1") # <<<<<<<<<<<<<<
* if np.any(np.less(np.add(ongood, onbad),onsample)):
* raise ValueError("ngood + nbad < nsample")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_172), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_178), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L10;
}
__pyx_L10:;
- /* "mtrand.pyx":3920
+ /* "mtrand.pyx":3950
* if np.any(np.less(onsample, 1)):
* raise ValueError("nsample < 1")
* if np.any(np.less(np.add(ongood, onbad),onsample)): # <<<<<<<<<<<<<<
* raise ValueError("ngood + nbad < nsample")
* return discnmN_array(self.internal_state, rk_hypergeometric, size,
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__less); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__add); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__add); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_ongood));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_ongood));
@@ -17874,11 +18703,11 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob
__Pyx_INCREF(((PyObject *)__pyx_v_onbad));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_onbad));
__Pyx_GIVEREF(((PyObject *)__pyx_v_onbad));
- __pyx_t_6 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_6 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
@@ -17886,40 +18715,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_onsample));
__Pyx_GIVEREF(((PyObject *)__pyx_v_onsample));
__pyx_t_6 = 0;
- __pyx_t_6 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_6 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
__pyx_t_6 = 0;
- __pyx_t_6 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_6 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":3921
+ /* "mtrand.pyx":3951
* raise ValueError("nsample < 1")
* if np.any(np.less(np.add(ongood, onbad),onsample)):
* raise ValueError("ngood + nbad < nsample") # <<<<<<<<<<<<<<
* return discnmN_array(self.internal_state, rk_hypergeometric, size,
* ongood, onbad, onsample)
*/
- __pyx_t_6 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_173), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3921; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_6 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_179), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3951; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3921; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3951; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L11;
}
__pyx_L11:;
- /* "mtrand.pyx":3922
+ /* "mtrand.pyx":3952
* if np.any(np.less(np.add(ongood, onbad),onsample)):
* raise ValueError("ngood + nbad < nsample")
* return discnmN_array(self.internal_state, rk_hypergeometric, size, # <<<<<<<<<<<<<<
@@ -17928,14 +18757,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob
*/
__Pyx_XDECREF(__pyx_r);
- /* "mtrand.pyx":3923
+ /* "mtrand.pyx":3953
* raise ValueError("ngood + nbad < nsample")
* return discnmN_array(self.internal_state, rk_hypergeometric, size,
* ongood, onbad, onsample) # <<<<<<<<<<<<<<
*
* def logseries(self, p, size=None):
*/
- __pyx_t_6 = __pyx_f_6mtrand_discnmN_array(__pyx_v_self->internal_state, rk_hypergeometric, __pyx_v_size, __pyx_v_ongood, __pyx_v_onbad, __pyx_v_onsample); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3922; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_6 = __pyx_f_6mtrand_discnmN_array(__pyx_v_self->internal_state, rk_hypergeometric, __pyx_v_size, __pyx_v_ongood, __pyx_v_onbad, __pyx_v_onsample); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
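
For the array path ending above, the three inputs are first converted with PyArray_FROM_OTF(..., NPY_LONG, NPY_ARRAY_ALIGNED) and then bounds-checked elementwise. A self-contained sketch of those checks, read back out of the quoted mtrand.pyx lines; the helper name and the np.int_ stand-in for NPY_LONG are assumptions:

    import numpy as np

    def _check_array_args(ngood, nbad, nsample):
        # np.int_ approximates NPY_LONG here; the four checks below are
        # verbatim from the mtrand.pyx source quoted in the hunks above.
        ongood = np.asarray(ngood, dtype=np.int_)
        onbad = np.asarray(nbad, dtype=np.int_)
        onsample = np.asarray(nsample, dtype=np.int_)
        if np.any(np.less(ongood, 0)):
            raise ValueError("ngood < 0")
        if np.any(np.less(onbad, 0)):
            raise ValueError("nbad < 0")
        if np.any(np.less(onsample, 1)):
            raise ValueError("nsample < 1")
        if np.any(np.less(np.add(ongood, onbad), onsample)):
            raise ValueError("ngood + nbad < nsample")

    _check_array_args([0, 10], [10, 0], 5)   # zeros accepted after this change
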
@@ -17966,6 +18795,9 @@ static char __pyx_doc_6mtrand_11RandomState_94logseries[] = "\n logseries
static PyObject *__pyx_pw_6mtrand_11RandomState_95logseries(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_p = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("logseries (wrapper)", 0);
@@ -17973,7 +18805,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_95logseries(PyObject *__pyx_v_se
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__p,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- /* "mtrand.pyx":3925
+ /* "mtrand.pyx":3955
* ongood, onbad, onsample)
*
* def logseries(self, p, size=None): # <<<<<<<<<<<<<<
@@ -18002,7 +18834,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_95logseries(PyObject *__pyx_v_se
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "logseries") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3925; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "logseries") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3955; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -18017,7 +18849,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_95logseries(PyObject *__pyx_v_se
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("logseries", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3925; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("logseries", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3955; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.logseries", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -18043,7 +18875,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("logseries", 0);
- /* "mtrand.pyx":4002
+ /* "mtrand.pyx":4032
* cdef double fp
*
* fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<<
@@ -18052,7 +18884,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt
*/
__pyx_v_fp = PyFloat_AsDouble(__pyx_v_p);
- /* "mtrand.pyx":4003
+ /* "mtrand.pyx":4033
*
* fp = PyFloat_AsDouble(p)
* if not PyErr_Occurred(): # <<<<<<<<<<<<<<
@@ -18062,7 +18894,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt
__pyx_t_1 = (!PyErr_Occurred());
if (__pyx_t_1) {
- /* "mtrand.pyx":4004
+ /* "mtrand.pyx":4034
* fp = PyFloat_AsDouble(p)
* if not PyErr_Occurred():
* if fp <= 0.0: # <<<<<<<<<<<<<<
@@ -18072,23 +18904,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt
__pyx_t_1 = (__pyx_v_fp <= 0.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":4005
+ /* "mtrand.pyx":4035
* if not PyErr_Occurred():
* if fp <= 0.0:
* raise ValueError("p <= 0.0") # <<<<<<<<<<<<<<
* if fp >= 1.0:
* raise ValueError("p >= 1.0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_175), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4005; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_181), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4005; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":4006
+ /* "mtrand.pyx":4036
* if fp <= 0.0:
* raise ValueError("p <= 0.0")
* if fp >= 1.0: # <<<<<<<<<<<<<<
@@ -18098,23 +18930,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt
__pyx_t_1 = (__pyx_v_fp >= 1.0);
if (__pyx_t_1) {
- /* "mtrand.pyx":4007
+ /* "mtrand.pyx":4037
* raise ValueError("p <= 0.0")
* if fp >= 1.0:
* raise ValueError("p >= 1.0") # <<<<<<<<<<<<<<
* return discd_array_sc(self.internal_state, rk_logseries, size, fp)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_177), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_183), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":4008
+ /* "mtrand.pyx":4038
* if fp >= 1.0:
* raise ValueError("p >= 1.0")
* return discd_array_sc(self.internal_state, rk_logseries, size, fp) # <<<<<<<<<<<<<<
@@ -18122,7 +18954,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt
* PyErr_Clear()
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_logseries, __pyx_v_size, __pyx_v_fp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4008; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_logseries, __pyx_v_size, __pyx_v_fp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
@@ -18131,7 +18963,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt
}
__pyx_L3:;
- /* "mtrand.pyx":4010
+ /* "mtrand.pyx":4040
* return discd_array_sc(self.internal_state, rk_logseries, size, fp)
*
* PyErr_Clear() # <<<<<<<<<<<<<<
@@ -18140,140 +18972,142 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt
*/
PyErr_Clear();
- /* "mtrand.pyx":4012
+ /* "mtrand.pyx":4042
* PyErr_Clear()
*
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<<
* if np.any(np.less_equal(op, 0.0)):
* raise ValueError("p <= 0.0")
*/
- __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4012; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4042; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_op = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_op = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":4013
+ /* "mtrand.pyx":4043
*
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(op, 0.0)): # <<<<<<<<<<<<<<
* raise ValueError("p <= 0.0")
* if np.any(np.greater_equal(op, 1.0)):
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__less_equal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_op));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_op));
__Pyx_GIVEREF(((PyObject *)__pyx_v_op));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4013; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":4014
+ /* "mtrand.pyx":4044
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(op, 0.0)):
* raise ValueError("p <= 0.0") # <<<<<<<<<<<<<<
* if np.any(np.greater_equal(op, 1.0)):
* raise ValueError("p >= 1.0")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_178), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4014; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4014; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_184), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4044; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4044; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":4015
+ /* "mtrand.pyx":4045
* if np.any(np.less_equal(op, 0.0)):
* raise ValueError("p <= 0.0")
* if np.any(np.greater_equal(op, 1.0)): # <<<<<<<<<<<<<<
* raise ValueError("p >= 1.0")
* return discd_array(self.internal_state, rk_logseries, size, op)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__any); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__greater_equal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__greater_equal); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_op));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_op));
__Pyx_GIVEREF(((PyObject *)__pyx_v_op));
- PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4015; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_1) {
- /* "mtrand.pyx":4016
+ /* "mtrand.pyx":4046
* raise ValueError("p <= 0.0")
* if np.any(np.greater_equal(op, 1.0)):
* raise ValueError("p >= 1.0") # <<<<<<<<<<<<<<
* return discd_array(self.internal_state, rk_logseries, size, op)
*
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_179), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4016; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4016; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_185), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
- /* "mtrand.pyx":4017
+ /* "mtrand.pyx":4047
* if np.any(np.greater_equal(op, 1.0)):
* raise ValueError("p >= 1.0")
* return discd_array(self.internal_state, rk_logseries, size, op) # <<<<<<<<<<<<<<
@@ -18281,10 +19115,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt
* # Multivariate distributions:
*/
__Pyx_XDECREF(__pyx_r);
- __pyx_t_2 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_logseries, __pyx_v_size, __pyx_v_op); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4017; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_r = __pyx_t_2;
- __pyx_t_2 = 0;
+ __pyx_t_3 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_logseries, __pyx_v_size, __pyx_v_op); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4047; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
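
The logseries hunks above are the same mechanical update (constant tuples 175->181 and 177->183 and so on, pyx line numbers shifted by 30, temporaries reshuffled between __pyx_t_2 and __pyx_t_3); the parameter contract is unchanged: p must lie strictly inside (0, 1), enforced on the scalar fast path and, elementwise, on the array path. A minimal combined sketch, with a hypothetical helper name:

    import numpy as np

    def _check_logseries_p(p):
        # Mirrors both quoted checks; np.asarray covers scalars and arrays.
        op = np.asarray(p, dtype=np.double)
        if np.any(np.less_equal(op, 0.0)):
            raise ValueError("p <= 0.0")
        if np.any(np.greater_equal(op, 1.0)):
            raise ValueError("p >= 1.0")

    _check_logseries_p(0.5)           # ok
    # _check_logseries_p([0.2, 1.0])  # would raise ValueError("p >= 1.0")
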
@@ -18305,11 +19139,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt
/* Python wrapper */
static PyObject *__pyx_pw_6mtrand_11RandomState_97multivariate_normal(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6mtrand_11RandomState_96multivariate_normal[] = "\n multivariate_normal(mean, cov[, size])\n\n Draw random samples from a multivariate normal distribution.\n\n The multivariate normal, multinormal or Gaussian distribution is a\n generalization of the one-dimensional normal distribution to higher\n dimensions. Such a distribution is specified by its mean and\n covariance matrix. These parameters are analogous to the mean\n (average or \"center\") and variance (standard deviation, or \"width,\"\n squared) of the one-dimensional normal distribution.\n\n Parameters\n ----------\n mean : 1-D array_like, of length N\n Mean of the N-dimensional distribution.\n cov : 2-D array_like, of shape (N, N)\n Covariance matrix of the distribution. Must be symmetric and\n positive semi-definite for \"physically meaningful\" results.\n size : tuple of ints, optional\n Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are\n generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because\n each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.\n If no shape is specified, a single (`N`-D) sample is returned.\n\n Returns\n -------\n out : ndarray\n The drawn samples, of shape *size*, if that was provided. If not,\n the shape is ``(N,)``.\n\n In other words, each entry ``out[i,j,...,:]`` is an N-dimensional\n value drawn from the distribution.\n\n Notes\n -----\n The mean is a coordinate in N-dimensional space, which represents the\n location where samples are most likely to be generated. This is\n analogous to the peak of the bell curve for the one-dimensional or\n univariate normal distribution.\n\n Covariance indicates the level to which two variables vary together.\n From the multivariate normal distribution, we draw ""N-dimensional\n samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix\n element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`.\n The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its\n \"spread\").\n\n Instead of specifying the full covariance matrix, popular\n approximations include:\n\n - Spherical covariance (*cov* is a multiple of the identity matrix)\n - Diagonal covariance (*cov* has non-negative elements, and only on\n the diagonal)\n\n This geometrical property can be seen in two dimensions by plotting\n generated data-points:\n\n >>> mean = [0,0]\n >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis\n\n >>> import matplotlib.pyplot as plt\n >>> x,y = np.random.multivariate_normal(mean,cov,5000).T\n >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()\n\n Note that the covariance matrix must be non-negative definite.\n\n References\n ----------\n Papoulis, A., *Probability, Random Variables, and Stochastic Processes*,\n 3rd ed., New York: McGraw-Hill, 1991.\n\n Duda, R. O., Hart, P. E., and Stork, D. G., *Pattern Classification*,\n 2nd ed., New York: Wiley, 2001.\n\n Examples\n --------\n >>> mean = (1,2)\n >>> cov = [[1,0],[1,0]]\n >>> x = np.random.multivariate_normal(mean,cov,(3,3))\n >>> x.shape\n (3, 3, 2)\n\n The following is probably true, given that 0.6 is roughly twice the\n standard deviation:\n\n >>> print list( (x[0,0,:] - mean) < 0.6 )\n [True, True]\n\n ";
+static char __pyx_doc_6mtrand_11RandomState_96multivariate_normal[] = "\n multivariate_normal(mean, cov[, size])\n\n Draw random samples from a multivariate normal distribution.\n\n The multivariate normal, multinormal or Gaussian distribution is a\n generalization of the one-dimensional normal distribution to higher\n dimensions. Such a distribution is specified by its mean and\n covariance matrix. These parameters are analogous to the mean\n (average or \"center\") and variance (standard deviation, or \"width,\"\n squared) of the one-dimensional normal distribution.\n\n Parameters\n ----------\n mean : 1-D array_like, of length N\n Mean of the N-dimensional distribution.\n cov : 2-D array_like, of shape (N, N)\n Covariance matrix of the distribution. Must be symmetric and\n positive semi-definite for \"physically meaningful\" results.\n size : int or tuple of ints, optional\n Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are\n generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because\n each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.\n If no shape is specified, a single (`N`-D) sample is returned.\n\n Returns\n -------\n out : ndarray\n The drawn samples, of shape *size*, if that was provided. If not,\n the shape is ``(N,)``.\n\n In other words, each entry ``out[i,j,...,:]`` is an N-dimensional\n value drawn from the distribution.\n\n Notes\n -----\n The mean is a coordinate in N-dimensional space, which represents the\n location where samples are most likely to be generated. This is\n analogous to the peak of the bell curve for the one-dimensional or\n univariate normal distribution.\n\n Covariance indicates the level to which two variables vary together.\n From the multivariate normal distribution, w""e draw N-dimensional\n samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix\n element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`.\n The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its\n \"spread\").\n\n Instead of specifying the full covariance matrix, popular\n approximations include:\n\n - Spherical covariance (*cov* is a multiple of the identity matrix)\n - Diagonal covariance (*cov* has non-negative elements, and only on\n the diagonal)\n\n This geometrical property can be seen in two dimensions by plotting\n generated data-points:\n\n >>> mean = [0,0]\n >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis\n\n >>> import matplotlib.pyplot as plt\n >>> x,y = np.random.multivariate_normal(mean,cov,5000).T\n >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()\n\n Note that the covariance matrix must be non-negative definite.\n\n References\n ----------\n Papoulis, A., *Probability, Random Variables, and Stochastic Processes*,\n 3rd ed., New York: McGraw-Hill, 1991.\n\n Duda, R. O., Hart, P. E., and Stork, D. G., *Pattern Classification*,\n 2nd ed., New York: Wiley, 2001.\n\n Examples\n --------\n >>> mean = (1,2)\n >>> cov = [[1,0],[1,0]]\n >>> x = np.random.multivariate_normal(mean,cov,(3,3))\n >>> x.shape\n (3, 3, 2)\n\n The following is probably true, given that 0.6 is roughly twice the\n standard deviation:\n\n >>> print list( (x[0,0,:] - mean) < 0.6 )\n [True, True]\n\n ";
static PyObject *__pyx_pw_6mtrand_11RandomState_97multivariate_normal(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_mean = 0;
PyObject *__pyx_v_cov = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("multivariate_normal (wrapper)", 0);
@@ -18317,7 +19154,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_97multivariate_normal(PyObject *
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__mean,&__pyx_n_s__cov,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- /* "mtrand.pyx":4020
+ /* "mtrand.pyx":4050
*
* # Multivariate distributions:
* def multivariate_normal(self, mean, cov, size=None): # <<<<<<<<<<<<<<
@@ -18343,7 +19180,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_97multivariate_normal(PyObject *
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__cov)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("multivariate_normal", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4020; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("multivariate_normal", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4050; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
@@ -18352,7 +19189,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_97multivariate_normal(PyObject *
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multivariate_normal") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4020; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multivariate_normal") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4050; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -18369,7 +19206,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_97multivariate_normal(PyObject *
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("multivariate_normal", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4020; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("multivariate_normal", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4050; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.multivariate_normal", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -18408,24 +19245,24 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
__Pyx_INCREF(__pyx_v_mean);
__Pyx_INCREF(__pyx_v_cov);
- /* "mtrand.pyx":4112
+ /* "mtrand.pyx":4142
* """
* # Check preconditions on arguments
* mean = np.array(mean) # <<<<<<<<<<<<<<
* cov = np.array(cov)
* if size is None:
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4112; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4112; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__array); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4112; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_mean);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_mean);
__Pyx_GIVEREF(__pyx_v_mean);
- __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4112; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
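
The substitutions in this hunk (`__Pyx_GetName` to `__Pyx_GetModuleGlobalName`, `PyObject_GetAttr` to `__Pyx_PyObject_GetAttrStr`) come from regenerating mtrand.c with a newer Cython; the semantics of the quoted pyx line are unchanged. Roughly, in Python (an analogy, not the generated code):

    # what the C above does for "mean = np.array(mean)":
    np_mod = globals()['np']             # module-global lookup, falling back to builtins
    array_fn = getattr(np_mod, 'array')  # attribute fetch with an interned name
    mean = array_fn(mean)
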
@@ -18433,24 +19270,24 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
__pyx_v_mean = __pyx_t_3;
__pyx_t_3 = 0;
- /* "mtrand.pyx":4113
+ /* "mtrand.pyx":4143
* # Check preconditions on arguments
* mean = np.array(mean)
* cov = np.array(cov) # <<<<<<<<<<<<<<
* if size is None:
* shape = []
*/
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_cov);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_cov);
__Pyx_GIVEREF(__pyx_v_cov);
- __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4143; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
@@ -18458,7 +19295,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
__pyx_v_cov = __pyx_t_2;
__pyx_t_2 = 0;
- /* "mtrand.pyx":4114
+ /* "mtrand.pyx":4144
* mean = np.array(mean)
* cov = np.array(cov)
* if size is None: # <<<<<<<<<<<<<<
@@ -18468,14 +19305,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
__pyx_t_4 = (__pyx_v_size == Py_None);
if (__pyx_t_4) {
- /* "mtrand.pyx":4115
+ /* "mtrand.pyx":4145
* cov = np.array(cov)
* if size is None:
* shape = [] # <<<<<<<<<<<<<<
* else:
* shape = size
*/
- __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4115; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_shape = ((PyObject *)__pyx_t_2);
__pyx_t_2 = 0;
@@ -18483,7 +19320,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
}
/*else*/ {
- /* "mtrand.pyx":4117
+ /* "mtrand.pyx":4147
* shape = []
* else:
* shape = size # <<<<<<<<<<<<<<
@@ -18495,63 +19332,63 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
}
__pyx_L3:;
- /* "mtrand.pyx":4118
+ /* "mtrand.pyx":4148
* else:
* shape = size
* if len(mean.shape) != 1: # <<<<<<<<<<<<<<
* raise ValueError("mean must be 1 dimensional")
* if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
*/
- __pyx_t_2 = PyObject_GetAttr(__pyx_v_mean, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4148; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_Length(__pyx_t_2); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4118; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Length(__pyx_t_2); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4148; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_4 = (__pyx_t_5 != 1);
if (__pyx_t_4) {
- /* "mtrand.pyx":4119
+ /* "mtrand.pyx":4149
* shape = size
* if len(mean.shape) != 1:
* raise ValueError("mean must be 1 dimensional") # <<<<<<<<<<<<<<
* if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
* raise ValueError("cov must be 2 dimensional and square")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_181), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_187), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L4;
}
__pyx_L4:;
- /* "mtrand.pyx":4120
+ /* "mtrand.pyx":4150
* if len(mean.shape) != 1:
* raise ValueError("mean must be 1 dimensional")
* if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): # <<<<<<<<<<<<<<
* raise ValueError("cov must be 2 dimensional and square")
* if mean.shape[0] != cov.shape[0]:
*/
- __pyx_t_2 = PyObject_GetAttr(__pyx_v_cov, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_Length(__pyx_t_2); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Length(__pyx_t_2); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_4 = (__pyx_t_5 != 2);
if (!__pyx_t_4) {
- __pyx_t_2 = PyObject_GetAttr(__pyx_v_cov, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong, 0, 0, 1); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_GetAttr(__pyx_v_cov, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 1, sizeof(long), PyInt_FromLong, 0, 0, 1); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_7 = __pyx_t_6;
} else {
@@ -18559,83 +19396,97 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
}
if (__pyx_t_7) {
- /* "mtrand.pyx":4121
+ /* "mtrand.pyx":4151
* raise ValueError("mean must be 1 dimensional")
* if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
* raise ValueError("cov must be 2 dimensional and square") # <<<<<<<<<<<<<<
* if mean.shape[0] != cov.shape[0]:
* raise ValueError("mean and cov must have same length")
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_183), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_189), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
- /* "mtrand.pyx":4122
+ /* "mtrand.pyx":4152
* if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
* raise ValueError("cov must be 2 dimensional and square")
* if mean.shape[0] != cov.shape[0]: # <<<<<<<<<<<<<<
* raise ValueError("mean and cov must have same length")
* # Compute shape of output
*/
- __pyx_t_2 = PyObject_GetAttr(__pyx_v_mean, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong, 0, 0, 1); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_GetAttr(__pyx_v_cov, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong, 0, 0, 1); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_t_3, Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_7 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_7) {
- /* "mtrand.pyx":4123
+ /* "mtrand.pyx":4153
* raise ValueError("cov must be 2 dimensional and square")
* if mean.shape[0] != cov.shape[0]:
* raise ValueError("mean and cov must have same length") # <<<<<<<<<<<<<<
* # Compute shape of output
- * if isinstance(shape, int):
+ * if isinstance(shape, (int, long, np.integer)):
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_185), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_191), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
- /* "mtrand.pyx":4125
+ /* "mtrand.pyx":4155
* raise ValueError("mean and cov must have same length")
* # Compute shape of output
- * if isinstance(shape, int): # <<<<<<<<<<<<<<
+ * if isinstance(shape, (int, long, np.integer)): # <<<<<<<<<<<<<<
* shape = [shape]
* final_shape = list(shape[:])
*/
- __pyx_t_2 = ((PyObject *)((PyObject*)(&PyInt_Type)));
- __Pyx_INCREF(__pyx_t_2);
- __pyx_t_7 = __Pyx_TypeCheck(__pyx_v_shape, __pyx_t_2);
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__integer); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)((PyObject*)(&PyInt_Type))));
+ __Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
+ __Pyx_INCREF(((PyObject *)((PyObject*)(&PyLong_Type))));
+ PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)((PyObject*)(&PyLong_Type))));
+ __Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyLong_Type))));
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_7 = PyObject_IsInstance(__pyx_v_shape, ((PyObject *)__pyx_t_2)); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
if (__pyx_t_7) {
- /* "mtrand.pyx":4126
+ /* "mtrand.pyx":4156
* # Compute shape of output
- * if isinstance(shape, int):
+ * if isinstance(shape, (int, long, np.integer)):
* shape = [shape] # <<<<<<<<<<<<<<
* final_shape = list(shape[:])
* final_shape.append(mean.shape[0])
*/
- __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
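
This hunk is the behavioral change the docstring advertised: the old code special-cased only the C-level PyInt_Type, while the new code builds the tuple `(int, long, np.integer)` and calls PyObject_IsInstance, so NumPy integer scalars are accepted as `size` too. In the pyx source's own Python 2 spelling (`long` does not exist on Python 3):

    # old: if isinstance(shape, int): shape = [shape]
    # new:
    if isinstance(shape, (int, long, np.integer)):
        shape = [shape]
    final_shape = list(shape[:])
    final_shape.append(mean.shape[0])
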
@@ -18647,120 +19498,120 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
}
__pyx_L7:;
- /* "mtrand.pyx":4127
- * if isinstance(shape, int):
+ /* "mtrand.pyx":4157
+ * if isinstance(shape, (int, long, np.integer)):
* shape = [shape]
* final_shape = list(shape[:]) # <<<<<<<<<<<<<<
* final_shape.append(mean.shape[0])
* # Create a matrix of independent standard normally distributed random
*/
- __pyx_t_2 = __Pyx_PySequence_GetSlice(__pyx_v_shape, 0, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_shape, 0, 0, NULL, NULL, &__pyx_k_slice_192, 0, 0, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
__pyx_v_final_shape = ((PyObject*)__pyx_t_2);
__pyx_t_2 = 0;
- /* "mtrand.pyx":4128
+ /* "mtrand.pyx":4158
* shape = [shape]
* final_shape = list(shape[:])
* final_shape.append(mean.shape[0]) # <<<<<<<<<<<<<<
* # Create a matrix of independent standard normally distributed random
* # numbers. The matrix has rows with the same length as mean and as
*/
- __pyx_t_2 = PyObject_GetAttr(__pyx_v_mean, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_2, 0, sizeof(long), PyInt_FromLong, 0, 0, 1); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_8 = PyList_Append(__pyx_v_final_shape, __pyx_t_3); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4128; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_8 = __Pyx_PyList_Append(__pyx_v_final_shape, __pyx_t_3); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- /* "mtrand.pyx":4132
+ /* "mtrand.pyx":4162
* # numbers. The matrix has rows with the same length as mean and as
* # many rows are necessary to form a matrix of shape final_shape.
* x = self.standard_normal(np.multiply.reduce(final_shape)) # <<<<<<<<<<<<<<
* x.shape = (np.multiply.reduce(final_shape[0:len(final_shape)-1]),
* mean.shape[0])
*/
- __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__standard_normal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__standard_normal); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__multiply); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__multiply); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__reduce); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__reduce); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)__pyx_v_final_shape));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_final_shape));
__Pyx_GIVEREF(((PyObject *)__pyx_v_final_shape));
- __pyx_t_9 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_9 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_9);
__pyx_t_9 = 0;
- __pyx_t_9 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_9 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
__pyx_v_x = __pyx_t_9;
__pyx_t_9 = 0;
- /* "mtrand.pyx":4133
+ /* "mtrand.pyx":4163
* # many rows are necessary to form a matrix of shape final_shape.
* x = self.standard_normal(np.multiply.reduce(final_shape))
* x.shape = (np.multiply.reduce(final_shape[0:len(final_shape)-1]), # <<<<<<<<<<<<<<
* mean.shape[0])
* # Transform matrix of standard normals into matrix where each row
*/
- __pyx_t_9 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__multiply); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s__multiply); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
- __pyx_t_9 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__reduce); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__reduce); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_5 = PyList_GET_SIZE(((PyObject *)__pyx_v_final_shape)); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_1 = __Pyx_PySequence_GetSlice(((PyObject *)__pyx_v_final_shape), 0, (__pyx_t_5 - 1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyList_GET_SIZE(((PyObject *)__pyx_v_final_shape)); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyList_GetSlice(((PyObject *)__pyx_v_final_shape), 0, (__pyx_t_5 - 1)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_1));
- __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_1));
__Pyx_GIVEREF(((PyObject *)__pyx_t_1));
__pyx_t_1 = 0;
- __pyx_t_1 = PyObject_Call(__pyx_t_9, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_Call(__pyx_t_9, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- /* "mtrand.pyx":4134
+ /* "mtrand.pyx":4164
* x = self.standard_normal(np.multiply.reduce(final_shape))
* x.shape = (np.multiply.reduce(final_shape[0:len(final_shape)-1]),
* mean.shape[0]) # <<<<<<<<<<<<<<
* # Transform matrix of standard normals into matrix where each row
* # contains multivariate normals with the desired covariance.
*/
- __pyx_t_3 = PyObject_GetAttr(__pyx_v_mean, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s__shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_9 = __Pyx_GetItemInt(__pyx_t_3, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_9 = __Pyx_GetItemInt(__pyx_t_3, 0, sizeof(long), PyInt_FromLong, 0, 0, 1); if (!__pyx_t_9) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
@@ -18769,55 +19620,51 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
__pyx_t_1 = 0;
__pyx_t_9 = 0;
- /* "mtrand.pyx":4133
+ /* "mtrand.pyx":4163
* # many rows are necessary to form a matrix of shape final_shape.
* x = self.standard_normal(np.multiply.reduce(final_shape))
* x.shape = (np.multiply.reduce(final_shape[0:len(final_shape)-1]), # <<<<<<<<<<<<<<
* mean.shape[0])
* # Transform matrix of standard normals into matrix where each row
*/
- if (PyObject_SetAttr(__pyx_v_x, __pyx_n_s__shape, ((PyObject *)__pyx_t_3)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_PyObject_SetAttrStr(__pyx_v_x, __pyx_n_s__shape, ((PyObject *)__pyx_t_3)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- /* "mtrand.pyx":4142
+ /* "mtrand.pyx":4172
* # decomposition of cov is such an A.
*
* from numpy.dual import svd # <<<<<<<<<<<<<<
* # XXX: we really should be doing this by Cholesky decomposition
* (u,s,v) = svd(cov)
*/
- __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_n_s__svd));
PyList_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_n_s__svd));
__Pyx_GIVEREF(((PyObject *)__pyx_n_s__svd));
- __pyx_t_9 = __Pyx_Import(((PyObject *)__pyx_n_s_186), ((PyObject *)__pyx_t_3), -1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_9 = __Pyx_Import(((PyObject *)__pyx_n_s_193), ((PyObject *)__pyx_t_3), -1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_9, __pyx_n_s__svd);
- if (__pyx_t_3 == NULL) {
- if (PyErr_ExceptionMatches(PyExc_AttributeError)) __Pyx_RaiseImportError(__pyx_n_s__svd);
- if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- }
+ __pyx_t_3 = __Pyx_ImportFrom(__pyx_t_9, __pyx_n_s__svd); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_svd = __pyx_t_3;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
- /* "mtrand.pyx":4144
+ /* "mtrand.pyx":4174
* from numpy.dual import svd
* # XXX: we really should be doing this by Cholesky decomposition
* (u,s,v) = svd(cov) # <<<<<<<<<<<<<<
* x = np.dot(x*np.sqrt(s),v)
* # The rows of x now have the correct covariance but mean 0. Add
*/
- __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_INCREF(__pyx_v_cov);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_v_cov);
__Pyx_GIVEREF(__pyx_v_cov);
- __pyx_t_3 = PyObject_Call(__pyx_v_svd, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_v_svd, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0;
if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) {
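
Here the older, manually inlined import fixup (PyObject_GetAttr plus an AttributeError-to-ImportError translation) is replaced by the newer Cython's `__Pyx_ImportFrom` helper, which wraps the same logic. A rough Python equivalent of what the helper does, for illustration only:

    import importlib

    module = importlib.import_module('numpy.dual')
    try:
        svd = getattr(module, 'svd')
    except AttributeError:
        raise ImportError("cannot import name svd")
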
@@ -18830,7 +19677,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
if (unlikely(size != 3)) {
if (size > 3) __Pyx_RaiseTooManyValuesError(3);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
if (likely(PyTuple_CheckExact(sequence))) {
@@ -18846,15 +19693,18 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(__pyx_t_2);
#else
- __pyx_t_9 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_1 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_2 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_9 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_1 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else
{
Py_ssize_t index = -1;
- __pyx_t_10 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_10 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_11 = Py_TYPE(__pyx_t_10)->tp_iternext;
@@ -18864,7 +19714,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
__Pyx_GOTREF(__pyx_t_1);
index = 2; __pyx_t_2 = __pyx_t_11(__pyx_t_10); if (unlikely(!__pyx_t_2)) goto __pyx_L8_unpacking_failed;
__Pyx_GOTREF(__pyx_t_2);
- if (__Pyx_IternextUnpackEndCheck(__pyx_t_11(__pyx_t_10), 3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_IternextUnpackEndCheck(__pyx_t_11(__pyx_t_10), 3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_11 = NULL;
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
goto __pyx_L9_unpacking_done;
@@ -18872,7 +19722,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_11 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4144; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_L9_unpacking_done:;
}
__pyx_v_u = __pyx_t_9;
@@ -18882,36 +19732,36 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
__pyx_v_v = __pyx_t_2;
__pyx_t_2 = 0;
- /* "mtrand.pyx":4145
+ /* "mtrand.pyx":4175
* # XXX: we really should be doing this by Cholesky decomposition
* (u,s,v) = svd(cov)
* x = np.dot(x*np.sqrt(s),v) # <<<<<<<<<<<<<<
* # The rows of x now have the correct covariance but mean 0. Add
* # mean to each row. Then each row will have mean mean.
*/
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__dot); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__dot); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__sqrt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__sqrt); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_s);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_s);
__Pyx_GIVEREF(__pyx_v_s);
- __pyx_t_9 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_9 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __pyx_t_3 = PyNumber_Multiply(__pyx_v_x, __pyx_t_9); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyNumber_Multiply(__pyx_v_x, __pyx_t_9); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
- __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
@@ -18919,7 +19769,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_v_v);
__Pyx_GIVEREF(__pyx_v_v);
__pyx_t_3 = 0;
- __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_9), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4175; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_9)); __pyx_t_9 = 0;
@@ -18927,19 +19777,19 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
__pyx_v_x = __pyx_t_3;
__pyx_t_3 = 0;
- /* "mtrand.pyx":4148
+ /* "mtrand.pyx":4178
* # The rows of x now have the correct covariance but mean 0. Add
* # mean to each row. Then each row will have mean mean.
* np.add(mean,x,x) # <<<<<<<<<<<<<<
* x.shape = tuple(final_shape)
* return x
*/
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4148; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_9 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__add); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4148; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__add); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4148; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_mean);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_mean);
@@ -18950,25 +19800,25 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p
__Pyx_INCREF(__pyx_v_x);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_x);
__Pyx_GIVEREF(__pyx_v_x);
- __pyx_t_2 = PyObject_Call(__pyx_t_9, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4148; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_9, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- /* "mtrand.pyx":4149
+ /* "mtrand.pyx":4179
* # mean to each row. Then each row will have mean mean.
* np.add(mean,x,x)
* x.shape = tuple(final_shape) # <<<<<<<<<<<<<<
* return x
*
*/
- __pyx_t_2 = ((PyObject *)PyList_AsTuple(__pyx_v_final_shape)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = ((PyObject *)PyList_AsTuple(__pyx_v_final_shape)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_2));
- if (PyObject_SetAttr(__pyx_v_x, __pyx_n_s__shape, ((PyObject *)__pyx_t_2)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_PyObject_SetAttrStr(__pyx_v_x, __pyx_n_s__shape, ((PyObject *)__pyx_t_2)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- /* "mtrand.pyx":4150
+ /* "mtrand.pyx":4180
* np.add(mean,x,x)
* x.shape = tuple(final_shape)
* return x # <<<<<<<<<<<<<<
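
Taken together, the pyx fragments quoted across the hunks above spell out the whole sampling core (lines 4162-4180 in the new numbering). Consolidated as a sketch, not the generated C:

    from numpy.dual import svd

    # A matrix of independent standard normals, one row per output sample:
    x = self.standard_normal(np.multiply.reduce(final_shape))
    x.shape = (np.multiply.reduce(final_shape[0:len(final_shape)-1]),
               mean.shape[0])
    # XXX: we really should be doing this by Cholesky decomposition
    (u, s, v) = svd(cov)
    x = np.dot(x * np.sqrt(s), v)   # rows now have covariance cov and mean 0
    np.add(mean, x, x)              # add the mean to each row, in place
    x.shape = tuple(final_shape)
    return x
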
@@ -19012,6 +19862,9 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_99multinomial(PyObject *__pyx_v_
npy_intp __pyx_v_n;
PyObject *__pyx_v_pvals = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("multinomial (wrapper)", 0);
@@ -19019,7 +19872,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_99multinomial(PyObject *__pyx_v_
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__n,&__pyx_n_s__pvals,&__pyx_n_s__size,0};
PyObject* values[3] = {0,0,0};
- /* "mtrand.pyx":4152
+ /* "mtrand.pyx":4182
* return x
*
* def multinomial(self, npy_intp n, object pvals, size=None): # <<<<<<<<<<<<<<
@@ -19045,7 +19898,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_99multinomial(PyObject *__pyx_v_
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__pvals)) != 0)) kw_args--;
else {
- __Pyx_RaiseArgtupleInvalid("multinomial", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4152; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("multinomial", 0, 2, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4182; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
@@ -19054,7 +19907,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_99multinomial(PyObject *__pyx_v_
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multinomial") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4152; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multinomial") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4182; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -19065,13 +19918,13 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_99multinomial(PyObject *__pyx_v_
default: goto __pyx_L5_argtuple_error;
}
}
- __pyx_v_n = __Pyx_PyInt_from_py_npy_intp(values[0]); if (unlikely((__pyx_v_n == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4152; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_v_n = __Pyx_PyInt_from_py_npy_intp(values[0]); if (unlikely((__pyx_v_n == (npy_intp)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4182; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_pvals = values[1];
__pyx_v_size = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("multinomial", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4152; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("multinomial", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4182; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.multinomial", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
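
The multinomial hunks that follow are the same kind of change: pyx line renumbering (4152 to 4182) plus temporary-variable reshuffling. One detail worth noting from the signature quoted above: `n` is declared `npy_intp`, so `__Pyx_PyInt_from_py_npy_intp` coerces it to a C integer during argument unpacking and a non-integer `n` fails before any sampling. Illustrative calls:

    np.random.multinomial(10, [0.2, 0.3, 0.5])    # OK: three counts summing to 10
    np.random.multinomial("10", [0.2, 0.3, 0.5])  # expected to raise TypeError in the coercion
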
@@ -19098,8 +19951,8 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
- int __pyx_t_3;
- PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
long __pyx_t_6;
int __pyx_lineno = 0;
@@ -19107,30 +19960,32 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("multinomial", 0);
- /* "mtrand.pyx":4211
+ /* "mtrand.pyx":4241
* cdef double Sum
*
* d = len(pvals) # <<<<<<<<<<<<<<
* parr = <ndarray>PyArray_ContiguousFromObject(pvals, NPY_DOUBLE, 1, 1)
* pix = <double*>PyArray_DATA(parr)
*/
- __pyx_t_1 = PyObject_Length(__pyx_v_pvals); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_Length(__pyx_v_pvals); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_d = __pyx_t_1;
- /* "mtrand.pyx":4212
+ /* "mtrand.pyx":4242
*
* d = len(pvals)
* parr = <ndarray>PyArray_ContiguousFromObject(pvals, NPY_DOUBLE, 1, 1) # <<<<<<<<<<<<<<
* pix = <double*>PyArray_DATA(parr)
*
*/
- __pyx_t_2 = PyArray_ContiguousFromObject(__pyx_v_pvals, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4212; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_ContiguousFromObject(__pyx_v_pvals, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4242; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- arrayObject_parr = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ arrayObject_parr = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":4213
+ /* "mtrand.pyx":4243
* d = len(pvals)
* parr = <ndarray>PyArray_ContiguousFromObject(pvals, NPY_DOUBLE, 1, 1)
* pix = <double*>PyArray_DATA(parr) # <<<<<<<<<<<<<<
@@ -19139,144 +19994,144 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
*/
__pyx_v_pix = ((double *)PyArray_DATA(arrayObject_parr));
- /* "mtrand.pyx":4215
+ /* "mtrand.pyx":4245
* pix = <double*>PyArray_DATA(parr)
*
* if kahan_sum(pix, d-1) > (1.0 + 1e-12): # <<<<<<<<<<<<<<
* raise ValueError("sum(pvals[:-1]) > 1.0")
*
*/
- __pyx_t_3 = (__pyx_f_6mtrand_kahan_sum(__pyx_v_pix, (__pyx_v_d - 1)) > (1.0 + 1e-12));
- if (__pyx_t_3) {
+ __pyx_t_4 = (__pyx_f_6mtrand_kahan_sum(__pyx_v_pix, (__pyx_v_d - 1)) > (1.0 + 1e-12));
+ if (__pyx_t_4) {
- /* "mtrand.pyx":4216
+ /* "mtrand.pyx":4246
*
* if kahan_sum(pix, d-1) > (1.0 + 1e-12):
* raise ValueError("sum(pvals[:-1]) > 1.0") # <<<<<<<<<<<<<<
*
* if size is None:
*/
- __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_188), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_Raise(__pyx_t_2, 0, 0, 0);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_195), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4246; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4246; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L3;
}
__pyx_L3:;
- /* "mtrand.pyx":4218
+ /* "mtrand.pyx":4248
* raise ValueError("sum(pvals[:-1]) > 1.0")
*
* if size is None: # <<<<<<<<<<<<<<
* shape = (d,)
* elif type(size) is int:
*/
- __pyx_t_3 = (__pyx_v_size == Py_None);
- if (__pyx_t_3) {
+ __pyx_t_4 = (__pyx_v_size == Py_None);
+ if (__pyx_t_4) {
- /* "mtrand.pyx":4219
+ /* "mtrand.pyx":4249
*
* if size is None:
* shape = (d,) # <<<<<<<<<<<<<<
* elif type(size) is int:
* shape = (size, d)
*/
- __pyx_t_2 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_d); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_d); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4249; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4249; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_v_shape = ((PyObject *)__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_v_shape = ((PyObject *)__pyx_t_4);
- __pyx_t_4 = 0;
goto __pyx_L4;
}
- /* "mtrand.pyx":4220
+ /* "mtrand.pyx":4250
* if size is None:
* shape = (d,)
* elif type(size) is int: # <<<<<<<<<<<<<<
* shape = (size, d)
* else:
*/
- __pyx_t_3 = (((PyObject *)Py_TYPE(__pyx_v_size)) == ((PyObject *)((PyObject*)(&PyInt_Type))));
- if (__pyx_t_3) {
+ __pyx_t_4 = (((PyObject *)Py_TYPE(__pyx_v_size)) == ((PyObject *)((PyObject*)(&PyInt_Type))));
+ if (__pyx_t_4) {
- /* "mtrand.pyx":4221
+ /* "mtrand.pyx":4251
* shape = (d,)
* elif type(size) is int:
* shape = (size, d) # <<<<<<<<<<<<<<
* else:
* shape = size + (d,)
*/
- __pyx_t_4 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_d); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4221; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4221; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_d); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4251; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4251; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_size);
- PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_size);
__Pyx_GIVEREF(__pyx_v_size);
- PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
- __Pyx_GIVEREF(__pyx_t_4);
- __pyx_t_4 = 0;
- __pyx_v_shape = ((PyObject *)__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
+ __pyx_v_shape = ((PyObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
goto __pyx_L4;
}
/*else*/ {
- /* "mtrand.pyx":4223
+ /* "mtrand.pyx":4253
* shape = (size, d)
* else:
* shape = size + (d,) # <<<<<<<<<<<<<<
*
* multin = np.zeros(shape, int)
*/
- __pyx_t_2 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_d); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyNumber_Add(__pyx_v_size, ((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4223; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_d); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_v_shape = __pyx_t_2;
- __pyx_t_2 = 0;
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyNumber_Add(__pyx_v_size, ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_v_shape = __pyx_t_3;
+ __pyx_t_3 = 0;
}
__pyx_L4:;
- /* "mtrand.pyx":4225
+ /* "mtrand.pyx":4255
* shape = size + (d,)
*
* multin = np.zeros(shape, int) # <<<<<<<<<<<<<<
* mnarr = <ndarray>multin
* mnix = <long*>PyArray_DATA(mnarr)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_shape);
- PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
+ PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)((PyObject*)(&PyInt_Type))));
__Pyx_GIVEREF(((PyObject *)((PyObject*)(&PyInt_Type))));
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
__pyx_v_multin = __pyx_t_5;
__pyx_t_5 = 0;
- /* "mtrand.pyx":4226
+ /* "mtrand.pyx":4256
*
* multin = np.zeros(shape, int)
* mnarr = <ndarray>multin # <<<<<<<<<<<<<<
@@ -19286,7 +20141,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
__Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_multin)));
arrayObject_mnarr = ((PyArrayObject *)__pyx_v_multin);
- /* "mtrand.pyx":4227
+ /* "mtrand.pyx":4257
* multin = np.zeros(shape, int)
* mnarr = <ndarray>multin
* mnix = <long*>PyArray_DATA(mnarr) # <<<<<<<<<<<<<<
@@ -19295,7 +20150,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
*/
__pyx_v_mnix = ((long *)PyArray_DATA(arrayObject_mnarr));
- /* "mtrand.pyx":4228
+ /* "mtrand.pyx":4258
* mnarr = <ndarray>multin
* mnix = <long*>PyArray_DATA(mnarr)
* i = 0 # <<<<<<<<<<<<<<
@@ -19304,7 +20159,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
*/
__pyx_v_i = 0;
- /* "mtrand.pyx":4229
+ /* "mtrand.pyx":4259
* mnix = <long*>PyArray_DATA(mnarr)
* i = 0
* while i < PyArray_SIZE(mnarr): # <<<<<<<<<<<<<<
@@ -19312,10 +20167,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
* dn = n
*/
while (1) {
- __pyx_t_3 = (__pyx_v_i < PyArray_SIZE(arrayObject_mnarr));
- if (!__pyx_t_3) break;
+ __pyx_t_4 = (__pyx_v_i < PyArray_SIZE(arrayObject_mnarr));
+ if (!__pyx_t_4) break;
- /* "mtrand.pyx":4230
+ /* "mtrand.pyx":4260
* i = 0
* while i < PyArray_SIZE(mnarr):
* Sum = 1.0 # <<<<<<<<<<<<<<
@@ -19324,7 +20179,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
*/
__pyx_v_Sum = 1.0;
- /* "mtrand.pyx":4231
+ /* "mtrand.pyx":4261
* while i < PyArray_SIZE(mnarr):
* Sum = 1.0
* dn = n # <<<<<<<<<<<<<<
@@ -19333,7 +20188,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
*/
__pyx_v_dn = __pyx_v_n;
- /* "mtrand.pyx":4232
+ /* "mtrand.pyx":4262
* Sum = 1.0
* dn = n
* for j from 0 <= j < d-1: # <<<<<<<<<<<<<<
@@ -19343,7 +20198,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
__pyx_t_6 = (__pyx_v_d - 1);
for (__pyx_v_j = 0; __pyx_v_j < __pyx_t_6; __pyx_v_j++) {
- /* "mtrand.pyx":4233
+ /* "mtrand.pyx":4263
* dn = n
* for j from 0 <= j < d-1:
* mnix[i+j] = rk_binomial(self.internal_state, dn, pix[j]/Sum) # <<<<<<<<<<<<<<
@@ -19351,12 +20206,18 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
* if dn <= 0:
*/
if (unlikely(__pyx_v_Sum == 0)) {
+ #ifdef WITH_THREAD
+ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
+ #endif
PyErr_Format(PyExc_ZeroDivisionError, "float division");
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #ifdef WITH_THREAD
+ PyGILState_Release(__pyx_gilstate_save);
+ #endif
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4263; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
(__pyx_v_mnix[(__pyx_v_i + __pyx_v_j)]) = rk_binomial(__pyx_v_self->internal_state, __pyx_v_dn, ((__pyx_v_pix[__pyx_v_j]) / __pyx_v_Sum));
- /* "mtrand.pyx":4234
+ /* "mtrand.pyx":4264
* for j from 0 <= j < d-1:
* mnix[i+j] = rk_binomial(self.internal_state, dn, pix[j]/Sum)
* dn = dn - mnix[i+j] # <<<<<<<<<<<<<<
@@ -19365,17 +20226,17 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
*/
__pyx_v_dn = (__pyx_v_dn - (__pyx_v_mnix[(__pyx_v_i + __pyx_v_j)]));
- /* "mtrand.pyx":4235
+ /* "mtrand.pyx":4265
* mnix[i+j] = rk_binomial(self.internal_state, dn, pix[j]/Sum)
* dn = dn - mnix[i+j]
* if dn <= 0: # <<<<<<<<<<<<<<
* break
* Sum = Sum - pix[j]
*/
- __pyx_t_3 = (__pyx_v_dn <= 0);
- if (__pyx_t_3) {
+ __pyx_t_4 = (__pyx_v_dn <= 0);
+ if (__pyx_t_4) {
- /* "mtrand.pyx":4236
+ /* "mtrand.pyx":4266
* dn = dn - mnix[i+j]
* if dn <= 0:
* break # <<<<<<<<<<<<<<
@@ -19387,7 +20248,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
}
__pyx_L9:;
- /* "mtrand.pyx":4237
+ /* "mtrand.pyx":4267
* if dn <= 0:
* break
* Sum = Sum - pix[j] # <<<<<<<<<<<<<<
@@ -19398,17 +20259,17 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
}
__pyx_L8_break:;
- /* "mtrand.pyx":4238
+ /* "mtrand.pyx":4268
* break
* Sum = Sum - pix[j]
* if dn > 0: # <<<<<<<<<<<<<<
* mnix[i+d-1] = dn
*
*/
- __pyx_t_3 = (__pyx_v_dn > 0);
- if (__pyx_t_3) {
+ __pyx_t_4 = (__pyx_v_dn > 0);
+ if (__pyx_t_4) {
- /* "mtrand.pyx":4239
+ /* "mtrand.pyx":4269
* Sum = Sum - pix[j]
* if dn > 0:
* mnix[i+d-1] = dn # <<<<<<<<<<<<<<
@@ -19420,7 +20281,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
}
__pyx_L10:;
- /* "mtrand.pyx":4241
+ /* "mtrand.pyx":4271
* mnix[i+d-1] = dn
*
* i = i + d # <<<<<<<<<<<<<<
@@ -19430,7 +20291,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
__pyx_v_i = (__pyx_v_i + __pyx_v_d);
}
- /* "mtrand.pyx":4243
+ /* "mtrand.pyx":4273
* i = i + d
*
* return multin # <<<<<<<<<<<<<<
@@ -19446,7 +20307,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("mtrand.RandomState.multinomial", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
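Taken together, the regenerated multinomial body above implements the same algorithm as the mtrand.pyx excerpts quoted in the comments: each variate is decomposed into sequential binomial draws, conditioning each one on the count and probability mass still remaining. The behavioural deltas in these hunks are mechanical — temporaries __pyx_t_3/__pyx_t_4 swap roles, module globals go through __Pyx_GetModuleGlobalName, and the ZeroDivisionError path now re-acquires the GIL (PyGILState_Ensure/Release under WITH_THREAD) before calling PyErr_Format. A minimal Python sketch of the loop, assuming a NumPy Generator-style rng.binomial in place of rk_binomial (the name multinomial_sketch and the plain sum standing in for kahan_sum are illustrative only):

    import numpy as np

    def multinomial_sketch(rng, n, pvals, size=None):
        pvals = np.ascontiguousarray(pvals, dtype=np.float64)
        d = len(pvals)
        if pvals[:-1].sum() > 1.0 + 1e-12:   # mtrand uses kahan_sum here
            raise ValueError("sum(pvals[:-1]) > 1.0")
        if size is None:
            shape = (d,)
        elif isinstance(size, int):          # 'type(size) is int' in the pyx
            shape = (size, d)
        else:
            shape = tuple(size) + (d,)
        out = np.zeros(shape, dtype=int)
        for row in out.reshape(-1, d):       # the 'i = i + d' outer loop, flattened
            dn, total = n, 1.0
            for j in range(d - 1):
                row[j] = rng.binomial(dn, pvals[j] / total)
                dn -= row[j]
                if dn <= 0:
                    break
                total -= pvals[j]
            if dn > 0:
                row[d - 1] = dn              # remainder goes in the last bin
        return out

    rng = np.random.default_rng(0)
    print(multinomial_sketch(rng, 20, [0.2, 0.3, 0.5], size=3))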
@@ -19466,6 +20327,9 @@ static char __pyx_doc_6mtrand_11RandomState_100dirichlet[] = "\n dirichle
static PyObject *__pyx_pw_6mtrand_11RandomState_101dirichlet(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_alpha = 0;
PyObject *__pyx_v_size = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("dirichlet (wrapper)", 0);
@@ -19473,7 +20337,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_101dirichlet(PyObject *__pyx_v_s
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__alpha,&__pyx_n_s__size,0};
PyObject* values[2] = {0,0};
- /* "mtrand.pyx":4245
+ /* "mtrand.pyx":4275
* return multin
*
* def dirichlet(self, object alpha, size=None): # <<<<<<<<<<<<<<
@@ -19502,7 +20366,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_101dirichlet(PyObject *__pyx_v_s
}
}
if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "dirichlet") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4245; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "dirichlet") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4275; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
@@ -19517,7 +20381,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_101dirichlet(PyObject *__pyx_v_s
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("dirichlet", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4245; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __Pyx_RaiseArgtupleInvalid("dirichlet", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4275; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("mtrand.RandomState.dirichlet", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
@@ -19545,8 +20409,8 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
- int __pyx_t_3;
- PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
npy_intp __pyx_t_6;
int __pyx_lineno = 0;
@@ -19554,30 +20418,32 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("dirichlet", 0);
- /* "mtrand.pyx":4331
+ /* "mtrand.pyx":4361
* cdef double acc, invacc
*
* k = len(alpha) # <<<<<<<<<<<<<<
* alpha_arr = <ndarray>PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1)
* alpha_data = <double*>PyArray_DATA(alpha_arr)
*/
- __pyx_t_1 = PyObject_Length(__pyx_v_alpha); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4331; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_Length(__pyx_v_alpha); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4361; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_k = __pyx_t_1;
- /* "mtrand.pyx":4332
+ /* "mtrand.pyx":4362
*
* k = len(alpha)
* alpha_arr = <ndarray>PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) # <<<<<<<<<<<<<<
* alpha_data = <double*>PyArray_DATA(alpha_arr)
*
*/
- __pyx_t_2 = PyArray_ContiguousFromObject(__pyx_v_alpha, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4332; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyArray_ContiguousFromObject(__pyx_v_alpha, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4362; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_t_2)));
- __pyx_v_alpha_arr = ((PyArrayObject *)__pyx_t_2);
+ __pyx_t_3 = __pyx_t_2;
+ __Pyx_INCREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_alpha_arr = ((PyArrayObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
- /* "mtrand.pyx":4333
+ /* "mtrand.pyx":4363
* k = len(alpha)
* alpha_arr = <ndarray>PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1)
* alpha_data = <double*>PyArray_DATA(alpha_arr) # <<<<<<<<<<<<<<
@@ -19586,123 +20452,123 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
*/
__pyx_v_alpha_data = ((double *)PyArray_DATA(__pyx_v_alpha_arr));
- /* "mtrand.pyx":4335
+ /* "mtrand.pyx":4365
* alpha_data = <double*>PyArray_DATA(alpha_arr)
*
* if size is None: # <<<<<<<<<<<<<<
* shape = (k,)
* elif type(size) is int:
*/
- __pyx_t_3 = (__pyx_v_size == Py_None);
- if (__pyx_t_3) {
+ __pyx_t_4 = (__pyx_v_size == Py_None);
+ if (__pyx_t_4) {
- /* "mtrand.pyx":4336
+ /* "mtrand.pyx":4366
*
* if size is None:
* shape = (k,) # <<<<<<<<<<<<<<
* elif type(size) is int:
* shape = (size, k)
*/
- __pyx_t_2 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_k); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4336; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_k); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4336; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_v_shape = ((PyObject *)__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_v_shape = ((PyObject *)__pyx_t_4);
- __pyx_t_4 = 0;
goto __pyx_L3;
}
- /* "mtrand.pyx":4337
+ /* "mtrand.pyx":4367
* if size is None:
* shape = (k,)
* elif type(size) is int: # <<<<<<<<<<<<<<
* shape = (size, k)
* else:
*/
- __pyx_t_3 = (((PyObject *)Py_TYPE(__pyx_v_size)) == ((PyObject *)((PyObject*)(&PyInt_Type))));
- if (__pyx_t_3) {
+ __pyx_t_4 = (((PyObject *)Py_TYPE(__pyx_v_size)) == ((PyObject *)((PyObject*)(&PyInt_Type))));
+ if (__pyx_t_4) {
- /* "mtrand.pyx":4338
+ /* "mtrand.pyx":4368
* shape = (k,)
* elif type(size) is int:
* shape = (size, k) # <<<<<<<<<<<<<<
* else:
* shape = size + (k,)
*/
- __pyx_t_4 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_k); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4338; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4338; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_k); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4368; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_size);
- PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_size);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_size);
__Pyx_GIVEREF(__pyx_v_size);
- PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
- __Pyx_GIVEREF(__pyx_t_4);
- __pyx_t_4 = 0;
- __pyx_v_shape = ((PyObject *)__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
+ __pyx_v_shape = ((PyObject *)__pyx_t_3);
+ __pyx_t_3 = 0;
goto __pyx_L3;
}
/*else*/ {
- /* "mtrand.pyx":4340
+ /* "mtrand.pyx":4370
* shape = (size, k)
* else:
* shape = size + (k,) # <<<<<<<<<<<<<<
*
* diric = np.zeros(shape, np.float64)
*/
- __pyx_t_2 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_k); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyNumber_Add(__pyx_v_size, ((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4340; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_k); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
- __pyx_v_shape = __pyx_t_2;
- __pyx_t_2 = 0;
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = PyNumber_Add(__pyx_v_size, ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __pyx_v_shape = __pyx_t_3;
+ __pyx_t_3 = 0;
}
__pyx_L3:;
- /* "mtrand.pyx":4342
+ /* "mtrand.pyx":4372
* shape = size + (k,)
*
* diric = np.zeros(shape, np.float64) # <<<<<<<<<<<<<<
* val_arr = <ndarray>diric
* val_data= <double*>PyArray_DATA(val_arr)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__float64); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s__float64); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_shape);
- PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
- PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
- __pyx_t_5 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4342; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
__pyx_v_diric = __pyx_t_5;
__pyx_t_5 = 0;
- /* "mtrand.pyx":4343
+ /* "mtrand.pyx":4373
*
* diric = np.zeros(shape, np.float64)
* val_arr = <ndarray>diric # <<<<<<<<<<<<<<
@@ -19712,7 +20578,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
__Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_diric)));
__pyx_v_val_arr = ((PyArrayObject *)__pyx_v_diric);
- /* "mtrand.pyx":4344
+ /* "mtrand.pyx":4374
* diric = np.zeros(shape, np.float64)
* val_arr = <ndarray>diric
* val_data= <double*>PyArray_DATA(val_arr) # <<<<<<<<<<<<<<
@@ -19721,7 +20587,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
*/
__pyx_v_val_data = ((double *)PyArray_DATA(__pyx_v_val_arr));
- /* "mtrand.pyx":4346
+ /* "mtrand.pyx":4376
* val_data= <double*>PyArray_DATA(val_arr)
*
* i = 0 # <<<<<<<<<<<<<<
@@ -19730,7 +20596,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
*/
__pyx_v_i = 0;
- /* "mtrand.pyx":4347
+ /* "mtrand.pyx":4377
*
* i = 0
* totsize = PyArray_SIZE(val_arr) # <<<<<<<<<<<<<<
@@ -19739,7 +20605,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
*/
__pyx_v_totsize = PyArray_SIZE(__pyx_v_val_arr);
- /* "mtrand.pyx":4348
+ /* "mtrand.pyx":4378
* i = 0
* totsize = PyArray_SIZE(val_arr)
* while i < totsize: # <<<<<<<<<<<<<<
@@ -19747,10 +20613,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
* for j from 0 <= j < k:
*/
while (1) {
- __pyx_t_3 = (__pyx_v_i < __pyx_v_totsize);
- if (!__pyx_t_3) break;
+ __pyx_t_4 = (__pyx_v_i < __pyx_v_totsize);
+ if (!__pyx_t_4) break;
- /* "mtrand.pyx":4349
+ /* "mtrand.pyx":4379
* totsize = PyArray_SIZE(val_arr)
* while i < totsize:
* acc = 0.0 # <<<<<<<<<<<<<<
@@ -19759,7 +20625,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
*/
__pyx_v_acc = 0.0;
- /* "mtrand.pyx":4350
+ /* "mtrand.pyx":4380
* while i < totsize:
* acc = 0.0
* for j from 0 <= j < k: # <<<<<<<<<<<<<<
@@ -19769,7 +20635,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
__pyx_t_6 = __pyx_v_k;
for (__pyx_v_j = 0; __pyx_v_j < __pyx_t_6; __pyx_v_j++) {
- /* "mtrand.pyx":4351
+ /* "mtrand.pyx":4381
* acc = 0.0
* for j from 0 <= j < k:
* val_data[i+j] = rk_standard_gamma(self.internal_state, alpha_data[j]) # <<<<<<<<<<<<<<
@@ -19778,7 +20644,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
*/
(__pyx_v_val_data[(__pyx_v_i + __pyx_v_j)]) = rk_standard_gamma(__pyx_v_self->internal_state, (__pyx_v_alpha_data[__pyx_v_j]));
- /* "mtrand.pyx":4352
+ /* "mtrand.pyx":4382
* for j from 0 <= j < k:
* val_data[i+j] = rk_standard_gamma(self.internal_state, alpha_data[j])
* acc = acc + val_data[i+j] # <<<<<<<<<<<<<<
@@ -19788,7 +20654,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
__pyx_v_acc = (__pyx_v_acc + (__pyx_v_val_data[(__pyx_v_i + __pyx_v_j)]));
}
- /* "mtrand.pyx":4353
+ /* "mtrand.pyx":4383
* val_data[i+j] = rk_standard_gamma(self.internal_state, alpha_data[j])
* acc = acc + val_data[i+j]
* invacc = 1/acc # <<<<<<<<<<<<<<
@@ -19796,12 +20662,18 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
* val_data[i+j] = val_data[i+j] * invacc
*/
if (unlikely(__pyx_v_acc == 0)) {
+ #ifdef WITH_THREAD
+ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
+ #endif
PyErr_Format(PyExc_ZeroDivisionError, "float division");
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #ifdef WITH_THREAD
+ PyGILState_Release(__pyx_gilstate_save);
+ #endif
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4383; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_invacc = (1.0 / __pyx_v_acc);
- /* "mtrand.pyx":4354
+ /* "mtrand.pyx":4384
* acc = acc + val_data[i+j]
* invacc = 1/acc
* for j from 0 <= j < k: # <<<<<<<<<<<<<<
@@ -19811,7 +20683,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
__pyx_t_6 = __pyx_v_k;
for (__pyx_v_j = 0; __pyx_v_j < __pyx_t_6; __pyx_v_j++) {
- /* "mtrand.pyx":4355
+ /* "mtrand.pyx":4385
* invacc = 1/acc
* for j from 0 <= j < k:
* val_data[i+j] = val_data[i+j] * invacc # <<<<<<<<<<<<<<
@@ -19821,7 +20693,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
(__pyx_v_val_data[(__pyx_v_i + __pyx_v_j)]) = ((__pyx_v_val_data[(__pyx_v_i + __pyx_v_j)]) * __pyx_v_invacc);
}
- /* "mtrand.pyx":4356
+ /* "mtrand.pyx":4386
* for j from 0 <= j < k:
* val_data[i+j] = val_data[i+j] * invacc
* i = i + k # <<<<<<<<<<<<<<
@@ -19831,7 +20703,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
__pyx_v_i = (__pyx_v_i + __pyx_v_k);
}
- /* "mtrand.pyx":4358
+ /* "mtrand.pyx":4388
* i = i + k
*
* return diric # <<<<<<<<<<<<<<
@@ -19847,7 +20719,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("mtrand.RandomState.dirichlet", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
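The dirichlet hunks above follow the same renumbering pattern, and the underlying pyx algorithm is unchanged: draw a standard gamma per alpha component, then normalize each row by its sum (invacc = 1/acc), with an explicit ZeroDivisionError when the row sums to zero. A hedged Python sketch, again substituting a Generator-style rng.standard_gamma for rk_standard_gamma (names are illustrative, not mtrand's API):

    import numpy as np

    def dirichlet_sketch(rng, alpha, size=None):
        alpha = np.ascontiguousarray(alpha, dtype=np.float64)
        k = len(alpha)
        if size is None:
            shape = (k,)
        elif isinstance(size, int):
            shape = (size, k)
        else:
            shape = tuple(size) + (k,)
        diric = np.zeros(shape, dtype=np.float64)
        for row in diric.reshape(-1, k):         # the 'i = i + k' outer loop
            row[:] = rng.standard_gamma(alpha)   # one gamma draw per component
            acc = row.sum()
            if acc == 0.0:                       # generated code raises here too
                raise ZeroDivisionError("float division")
            row *= 1.0 / acc                     # invacc = 1/acc
        return diric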
@@ -19873,7 +20745,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_103shuffle(PyObject *__pyx_v_sel
return __pyx_r;
}
-/* "mtrand.pyx":4361
+/* "mtrand.pyx":4391
*
* # Shuffling and permutations:
* def shuffle(self, object x): # <<<<<<<<<<<<<<
@@ -19900,36 +20772,36 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtr
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("shuffle", 0);
- /* "mtrand.pyx":4396
+ /* "mtrand.pyx":4426
* cdef npy_intp i, j
*
* i = len(x) - 1 # <<<<<<<<<<<<<<
*
* # Logic adapted from random.shuffle()
*/
- __pyx_t_1 = PyObject_Length(__pyx_v_x); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_Length(__pyx_v_x); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4426; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_i = (__pyx_t_1 - 1);
- /* "mtrand.pyx":4399
+ /* "mtrand.pyx":4429
*
* # Logic adapted from random.shuffle()
* if isinstance(x, np.ndarray) and x.ndim > 1: # <<<<<<<<<<<<<<
* # For a multi-dimensional ndarray, indexing returns a view onto
* # each row. So we can't just use ordinary assignment to swap the
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4429; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__ndarray); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__ndarray); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4429; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_4 = PyObject_IsInstance(__pyx_v_x, __pyx_t_3); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_IsInstance(__pyx_v_x, __pyx_t_3); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4429; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_4) {
- __pyx_t_3 = PyObject_GetAttr(__pyx_v_x, __pyx_n_s__ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s__ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4429; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
- __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, __pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, __pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4429; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4429; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __pyx_t_5;
} else {
@@ -19937,35 +20809,35 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtr
}
if (__pyx_t_6) {
- /* "mtrand.pyx":4403
+ /* "mtrand.pyx":4433
* # each row. So we can't just use ordinary assignment to swap the
* # rows; we need a bounce buffer.
* buf = np.empty(x.shape[1:], dtype=x.dtype) # <<<<<<<<<<<<<<
* while i > 0:
* j = rk_interval(i, self.internal_state)
*/
- __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4433; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s__empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4433; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_GetAttr(__pyx_v_x, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s__shape); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4433; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_7 = __Pyx_PySequence_GetSlice(__pyx_t_2, 1, PY_SSIZE_T_MAX); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_7 = __Pyx_PyObject_GetSlice(__pyx_t_2, 1, 0, NULL, NULL, &__pyx_k_slice_196, 1, 0, 1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4433; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4433; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_7);
__pyx_t_7 = 0;
- __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4433; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_7));
- __pyx_t_8 = PyObject_GetAttr(__pyx_v_x, __pyx_n_s__dtype); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s__dtype); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4433; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
- if (PyDict_SetItem(__pyx_t_7, ((PyObject *)__pyx_n_s__dtype), __pyx_t_8) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_7, ((PyObject *)__pyx_n_s__dtype), __pyx_t_8) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4433; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- __pyx_t_8 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_7)); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_8 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), ((PyObject *)__pyx_t_7)); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4433; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
@@ -19973,7 +20845,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtr
__pyx_v_buf = __pyx_t_8;
__pyx_t_8 = 0;
- /* "mtrand.pyx":4404
+ /* "mtrand.pyx":4434
* # rows; we need a bounce buffer.
* buf = np.empty(x.shape[1:], dtype=x.dtype)
* while i > 0: # <<<<<<<<<<<<<<
@@ -19984,7 +20856,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtr
__pyx_t_6 = (__pyx_v_i > 0);
if (!__pyx_t_6) break;
- /* "mtrand.pyx":4405
+ /* "mtrand.pyx":4435
* buf = np.empty(x.shape[1:], dtype=x.dtype)
* while i > 0:
* j = rk_interval(i, self.internal_state) # <<<<<<<<<<<<<<
@@ -19993,40 +20865,40 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtr
*/
__pyx_v_j = rk_interval(__pyx_v_i, __pyx_v_self->internal_state);
- /* "mtrand.pyx":4406
+ /* "mtrand.pyx":4436
* while i > 0:
* j = rk_interval(i, self.internal_state)
* buf[...] = x[j] # <<<<<<<<<<<<<<
* x[j] = x[i]
* x[i] = buf
*/
- __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_j, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4406; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_j, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp, 0, 1, 1); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4436; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
- if (PyObject_SetItem(__pyx_v_buf, Py_Ellipsis, __pyx_t_8) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4406; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyObject_SetItem(__pyx_v_buf, Py_Ellipsis, __pyx_t_8) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4436; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- /* "mtrand.pyx":4407
+ /* "mtrand.pyx":4437
* j = rk_interval(i, self.internal_state)
* buf[...] = x[j]
* x[j] = x[i] # <<<<<<<<<<<<<<
* x[i] = buf
* i = i - 1
*/
- __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_i, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4407; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_i, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp, 0, 1, 1); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4437; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
- if (__Pyx_SetItemInt(__pyx_v_x, __pyx_v_j, __pyx_t_8, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4407; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetItemInt(__pyx_v_x, __pyx_v_j, __pyx_t_8, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp, 0, 1, 1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4437; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- /* "mtrand.pyx":4408
+ /* "mtrand.pyx":4438
* buf[...] = x[j]
* x[j] = x[i]
* x[i] = buf # <<<<<<<<<<<<<<
* i = i - 1
* else:
*/
- if (__Pyx_SetItemInt(__pyx_v_x, __pyx_v_i, __pyx_v_buf, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4408; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetItemInt(__pyx_v_x, __pyx_v_i, __pyx_v_buf, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp, 0, 1, 1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4438; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- /* "mtrand.pyx":4409
+ /* "mtrand.pyx":4439
* x[j] = x[i]
* x[i] = buf
* i = i - 1 # <<<<<<<<<<<<<<
@@ -20039,7 +20911,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtr
}
/*else*/ {
- /* "mtrand.pyx":4414
+ /* "mtrand.pyx":4444
* # sequence types, indexing returns a real object that's
* # independent of the array contents, so we can just swap directly.
* while i > 0: # <<<<<<<<<<<<<<
@@ -20050,7 +20922,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtr
__pyx_t_6 = (__pyx_v_i > 0);
if (!__pyx_t_6) break;
- /* "mtrand.pyx":4415
+ /* "mtrand.pyx":4445
* # independent of the array contents, so we can just swap directly.
* while i > 0:
* j = rk_interval(i, self.internal_state) # <<<<<<<<<<<<<<
@@ -20059,23 +20931,23 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtr
*/
__pyx_v_j = rk_interval(__pyx_v_i, __pyx_v_self->internal_state);
- /* "mtrand.pyx":4416
+ /* "mtrand.pyx":4446
* while i > 0:
* j = rk_interval(i, self.internal_state)
* x[i], x[j] = x[j], x[i] # <<<<<<<<<<<<<<
* i = i - 1
*
*/
- __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_j, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_j, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp, 0, 1, 1); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
- __pyx_t_7 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_i, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_7 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_i, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp, 0, 1, 1); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
- if (__Pyx_SetItemInt(__pyx_v_x, __pyx_v_i, __pyx_t_8, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetItemInt(__pyx_v_x, __pyx_v_i, __pyx_t_8, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp, 0, 1, 1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
- if (__Pyx_SetItemInt(__pyx_v_x, __pyx_v_j, __pyx_t_7, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetItemInt(__pyx_v_x, __pyx_v_j, __pyx_t_7, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp, 0, 1, 1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
- /* "mtrand.pyx":4417
+ /* "mtrand.pyx":4447
* j = rk_interval(i, self.internal_state)
* x[i], x[j] = x[j], x[i]
* i = i - 1 # <<<<<<<<<<<<<<
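The shuffle hunks above are a Fisher-Yates walk from the top index down, with the twist the pyx comments call out: rows of a multi-dimensional ndarray are views, so a plain tuple swap would clobber data and the loop bounces each row through a scratch buffer instead. A Python sketch under the assumption that rng.integers(0, i + 1) matches rk_interval(i, state) (a uniform draw on 0 <= j <= i); shuffle_sketch is an illustrative name:

    import numpy as np

    def shuffle_sketch(rng, x):
        i = len(x) - 1
        if isinstance(x, np.ndarray) and x.ndim > 1:
            # Indexing a multi-dimensional array returns a view onto each
            # row, so swap through a bounce buffer to avoid aliasing.
            buf = np.empty(x.shape[1:], dtype=x.dtype)
            while i > 0:
                j = rng.integers(0, i + 1)   # rk_interval(i, state)
                buf[...] = x[j]
                x[j] = x[i]
                x[i] = buf
                i -= 1
        else:
            # 1-D arrays and plain sequences index to independent objects,
            # so the classic direct swap is safe.
            while i > 0:
                j = rng.integers(0, i + 1)
                x[i], x[j] = x[j], x[i]
                i -= 1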
@@ -20115,7 +20987,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_105permutation(PyObject *__pyx_v
return __pyx_r;
}
-/* "mtrand.pyx":4419
+/* "mtrand.pyx":4449
* i = i - 1
*
* def permutation(self, object x): # <<<<<<<<<<<<<<
@@ -20136,19 +21008,19 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_104permutation(struct __pyx_obj_
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("permutation", 0);
- /* "mtrand.pyx":4455
+ /* "mtrand.pyx":4485
*
* """
* if isinstance(x, (int, long, np.integer)): # <<<<<<<<<<<<<<
* arr = np.arange(x)
* else:
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4485; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__integer); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__integer); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4485; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4485; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)((PyObject*)(&PyInt_Type))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)((PyObject*)(&PyInt_Type))));
@@ -20159,28 +21031,28 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_104permutation(struct __pyx_obj_
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
- __pyx_t_3 = PyObject_IsInstance(__pyx_v_x, ((PyObject *)__pyx_t_1)); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4455; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = PyObject_IsInstance(__pyx_v_x, ((PyObject *)__pyx_t_1)); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4485; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
if (__pyx_t_3) {
- /* "mtrand.pyx":4456
+ /* "mtrand.pyx":4486
* """
* if isinstance(x, (int, long, np.integer)):
* arr = np.arange(x) # <<<<<<<<<<<<<<
* else:
* arr = np.array(x)
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4456; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__arange); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4456; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__arange); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4456; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_x);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_x);
__Pyx_GIVEREF(__pyx_v_x);
- __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4456; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
@@ -20190,24 +21062,24 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_104permutation(struct __pyx_obj_
}
/*else*/ {
- /* "mtrand.pyx":4458
+ /* "mtrand.pyx":4488
* arr = np.arange(x)
* else:
* arr = np.array(x) # <<<<<<<<<<<<<<
* self.shuffle(arr)
* return arr
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4458; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4488; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4458; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__array); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4488; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4458; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4488; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_v_x);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_x);
__Pyx_GIVEREF(__pyx_v_x);
- __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4458; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4488; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
@@ -20216,27 +21088,27 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_104permutation(struct __pyx_obj_
}
__pyx_L3:;
- /* "mtrand.pyx":4459
+ /* "mtrand.pyx":4489
* else:
* arr = np.array(x)
* self.shuffle(arr) # <<<<<<<<<<<<<<
* return arr
*
*/
- __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__shuffle); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4459; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__shuffle); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4459; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_v_arr);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_arr);
__Pyx_GIVEREF(__pyx_v_arr);
- __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4459; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4460
+ /* "mtrand.pyx":4490
* arr = np.array(x)
* self.shuffle(arr)
* return arr # <<<<<<<<<<<<<<
@@ -20264,8 +21136,9 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_104permutation(struct __pyx_obj_
}
static PyObject *__pyx_tp_new_6mtrand_RandomState(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
- PyObject *o = (*t->tp_alloc)(t, 0);
- if (!o) return 0;
+ PyObject *o;
+ o = (*t->tp_alloc)(t, 0);
+ if (unlikely(!o)) return 0;
return o;
}
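`tp_new` now routes the allocation-failure check through `unlikely()`, letting the compiler lay out the error return as the cold branch. A minimal sketch of how such hints are conventionally defined (the macro spelling is an assumption; Cython's actual definition is equivalent on GCC-compatible compilers):

    #include <Python.h>

    /* Branch hints: expand to __builtin_expect where available,
     * otherwise to a no-op. */
    #if defined(__GNUC__)
    #  define likely(x)   __builtin_expect(!!(x), 1)
    #  define unlikely(x) __builtin_expect(!!(x), 0)
    #else
    #  define likely(x)   (x)
    #  define unlikely(x) (x)
    #endif

    static PyObject *
    tp_new_checked(PyTypeObject *t)
    {
        PyObject *o = t->tp_alloc(t, 0);
        if (unlikely(!o))
            return NULL;   /* allocation failure is the cold path */
        return o;
    }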
@@ -20459,7 +21332,7 @@ static PyTypeObject __pyx_type_6mtrand_RandomState = {
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_RandomState, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
__Pyx_DOCSTR("\n RandomState(seed=None)\n\n Container for the Mersenne Twister pseudo-random number generator.\n\n `RandomState` exposes a number of methods for generating random numbers\n drawn from a variety of probability distributions. In addition to the\n distribution-specific arguments, each method takes a keyword argument\n `size` that defaults to ``None``. If `size` is ``None``, then a single\n value is generated and returned. If `size` is an integer, then a 1-D\n array filled with generated values is returned. If `size` is a tuple,\n then an array with that shape is filled and returned.\n\n Parameters\n ----------\n seed : int or array_like, optional\n Random seed initializing the pseudo-random number generator.\n Can be an integer, an array (or other sequence) of integers of\n any length, or ``None`` (the default).\n If `seed` is ``None``, then `RandomState` will try to read data from\n ``/dev/urandom`` (or the Windows analogue) if available or seed from\n the clock otherwise.\n\n Notes\n -----\n The Python stdlib module \"random\" also contains a Mersenne Twister\n pseudo-random number generator with a number of methods that are similar\n to the ones available in `RandomState`. `RandomState`, besides being\n NumPy-aware, has the advantage that it provides a much larger number\n of probability distributions to choose from.\n\n "), /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
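Adding `Py_TPFLAGS_HAVE_VERSION_TAG` to `tp_flags` opts `RandomState` into CPython's per-type method cache, so repeated lookups of methods such as `shuffle` skip the full MRO walk once the type's version tag is valid. A hedged sketch of the flag on a hypothetical static extension type:

    #include <Python.h>

    typedef struct { PyObject_HEAD } DemoObject;   /* illustrative type */

    static PyTypeObject Demo_Type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        "demo.Demo",                /* tp_name */
        sizeof(DemoObject),         /* tp_basicsize */
        /* remaining slots default to zero */
    };

    static int
    demo_type_init(void)
    {
        Demo_Type.tp_flags = Py_TPFLAGS_DEFAULT
                           | Py_TPFLAGS_BASETYPE
                           | Py_TPFLAGS_HAVE_VERSION_TAG;  /* method cache */
        return PyType_Ready(&Demo_Type);
    }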
@@ -20497,7 +21370,11 @@ static PyMethodDef __pyx_methods[] = {
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
+ #if PY_VERSION_HEX < 0x03020000
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
+ #else
PyModuleDef_HEAD_INIT,
+ #endif
__Pyx_NAMESTR("mtrand"),
0, /* m_doc */
-1, /* m_size */
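The new conditional spells out the `PyModuleDef_Base` fields (`ob_base`, `m_init`, `m_index`, `m_copy`) by hand on CPython older than 3.2, where the stock `PyModuleDef_HEAD_INIT` initializer was problematic. A hedged sketch of a module definition using the same guard (module name and layout are illustrative):

    #include <Python.h>

    #if PY_MAJOR_VERSION >= 3
    static struct PyModuleDef demo_moduledef = {
      #if PY_VERSION_HEX < 0x03020000
        /* PyModuleDef_Base by hand: ob_base, m_init, m_index, m_copy */
        { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
      #else
        PyModuleDef_HEAD_INIT,
      #endif
        "demo",     /* m_name */
        NULL,       /* m_doc */
        -1,         /* m_size */
        NULL, NULL, NULL, NULL, NULL
    };
    #endif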
@@ -20511,49 +21388,41 @@ static struct PyModuleDef __pyx_moduledef = {
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0},
- {&__pyx_kp_s_107, __pyx_k_107, sizeof(__pyx_k_107), 0, 0, 1, 0},
- {&__pyx_kp_s_109, __pyx_k_109, sizeof(__pyx_k_109), 0, 0, 1, 0},
- {&__pyx_kp_s_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 0, 1, 0},
- {&__pyx_kp_s_113, __pyx_k_113, sizeof(__pyx_k_113), 0, 0, 1, 0},
- {&__pyx_kp_s_115, __pyx_k_115, sizeof(__pyx_k_115), 0, 0, 1, 0},
+ {&__pyx_kp_s_112, __pyx_k_112, sizeof(__pyx_k_112), 0, 0, 1, 0},
+ {&__pyx_kp_s_114, __pyx_k_114, sizeof(__pyx_k_114), 0, 0, 1, 0},
{&__pyx_kp_s_118, __pyx_k_118, sizeof(__pyx_k_118), 0, 0, 1, 0},
- {&__pyx_kp_s_121, __pyx_k_121, sizeof(__pyx_k_121), 0, 0, 1, 0},
+ {&__pyx_kp_s_120, __pyx_k_120, sizeof(__pyx_k_120), 0, 0, 1, 0},
{&__pyx_kp_s_123, __pyx_k_123, sizeof(__pyx_k_123), 0, 0, 1, 0},
- {&__pyx_kp_s_125, __pyx_k_125, sizeof(__pyx_k_125), 0, 0, 1, 0},
+ {&__pyx_kp_s_126, __pyx_k_126, sizeof(__pyx_k_126), 0, 0, 1, 0},
+ {&__pyx_kp_s_128, __pyx_k_128, sizeof(__pyx_k_128), 0, 0, 1, 0},
{&__pyx_kp_s_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 0, 1, 0},
{&__pyx_kp_s_130, __pyx_k_130, sizeof(__pyx_k_130), 0, 0, 1, 0},
- {&__pyx_kp_s_132, __pyx_k_132, sizeof(__pyx_k_132), 0, 0, 1, 0},
- {&__pyx_kp_s_134, __pyx_k_134, sizeof(__pyx_k_134), 0, 0, 1, 0},
- {&__pyx_kp_s_146, __pyx_k_146, sizeof(__pyx_k_146), 0, 0, 1, 0},
- {&__pyx_kp_s_148, __pyx_k_148, sizeof(__pyx_k_148), 0, 0, 1, 0},
- {&__pyx_kp_s_151, __pyx_k_151, sizeof(__pyx_k_151), 0, 0, 1, 0},
- {&__pyx_kp_s_153, __pyx_k_153, sizeof(__pyx_k_153), 0, 0, 1, 0},
- {&__pyx_kp_s_156, __pyx_k_156, sizeof(__pyx_k_156), 0, 0, 1, 0},
- {&__pyx_kp_s_158, __pyx_k_158, sizeof(__pyx_k_158), 0, 0, 1, 0},
- {&__pyx_kp_s_16, __pyx_k_16, sizeof(__pyx_k_16), 0, 0, 1, 0},
+ {&__pyx_kp_s_135, __pyx_k_135, sizeof(__pyx_k_135), 0, 0, 1, 0},
+ {&__pyx_kp_s_137, __pyx_k_137, sizeof(__pyx_k_137), 0, 0, 1, 0},
+ {&__pyx_kp_s_139, __pyx_k_139, sizeof(__pyx_k_139), 0, 0, 1, 0},
+ {&__pyx_kp_s_144, __pyx_k_144, sizeof(__pyx_k_144), 0, 0, 1, 0},
+ {&__pyx_kp_s_15, __pyx_k_15, sizeof(__pyx_k_15), 0, 0, 1, 0},
+ {&__pyx_kp_s_152, __pyx_k_152, sizeof(__pyx_k_152), 0, 0, 1, 0},
+ {&__pyx_kp_s_154, __pyx_k_154, sizeof(__pyx_k_154), 0, 0, 1, 0},
+ {&__pyx_kp_s_157, __pyx_k_157, sizeof(__pyx_k_157), 0, 0, 1, 0},
+ {&__pyx_kp_s_159, __pyx_k_159, sizeof(__pyx_k_159), 0, 0, 1, 0},
{&__pyx_kp_s_162, __pyx_k_162, sizeof(__pyx_k_162), 0, 0, 1, 0},
{&__pyx_kp_s_164, __pyx_k_164, sizeof(__pyx_k_164), 0, 0, 1, 0},
- {&__pyx_kp_s_166, __pyx_k_166, sizeof(__pyx_k_166), 0, 0, 1, 0},
{&__pyx_kp_s_168, __pyx_k_168, sizeof(__pyx_k_168), 0, 0, 1, 0},
+ {&__pyx_kp_s_170, __pyx_k_170, sizeof(__pyx_k_170), 0, 0, 1, 0},
+ {&__pyx_kp_s_172, __pyx_k_172, sizeof(__pyx_k_172), 0, 0, 1, 0},
{&__pyx_kp_s_174, __pyx_k_174, sizeof(__pyx_k_174), 0, 0, 1, 0},
- {&__pyx_kp_s_176, __pyx_k_176, sizeof(__pyx_k_176), 0, 0, 1, 0},
{&__pyx_kp_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 0},
{&__pyx_kp_s_180, __pyx_k_180, sizeof(__pyx_k_180), 0, 0, 1, 0},
{&__pyx_kp_s_182, __pyx_k_182, sizeof(__pyx_k_182), 0, 0, 1, 0},
- {&__pyx_kp_s_184, __pyx_k_184, sizeof(__pyx_k_184), 0, 0, 1, 0},
- {&__pyx_n_s_186, __pyx_k_186, sizeof(__pyx_k_186), 0, 0, 1, 1},
- {&__pyx_kp_s_187, __pyx_k_187, sizeof(__pyx_k_187), 0, 0, 1, 0},
- {&__pyx_n_s_191, __pyx_k_191, sizeof(__pyx_k_191), 0, 0, 1, 1},
- {&__pyx_n_s_192, __pyx_k_192, sizeof(__pyx_k_192), 0, 0, 1, 1},
- {&__pyx_kp_u_193, __pyx_k_193, sizeof(__pyx_k_193), 0, 1, 0, 0},
- {&__pyx_kp_u_194, __pyx_k_194, sizeof(__pyx_k_194), 0, 1, 0, 0},
- {&__pyx_kp_u_195, __pyx_k_195, sizeof(__pyx_k_195), 0, 1, 0, 0},
- {&__pyx_kp_u_196, __pyx_k_196, sizeof(__pyx_k_196), 0, 1, 0, 0},
- {&__pyx_kp_u_197, __pyx_k_197, sizeof(__pyx_k_197), 0, 1, 0, 0},
- {&__pyx_kp_u_198, __pyx_k_198, sizeof(__pyx_k_198), 0, 1, 0, 0},
- {&__pyx_kp_u_199, __pyx_k_199, sizeof(__pyx_k_199), 0, 1, 0, 0},
+ {&__pyx_kp_s_186, __pyx_k_186, sizeof(__pyx_k_186), 0, 0, 1, 0},
+ {&__pyx_kp_s_188, __pyx_k_188, sizeof(__pyx_k_188), 0, 0, 1, 0},
+ {&__pyx_kp_s_190, __pyx_k_190, sizeof(__pyx_k_190), 0, 0, 1, 0},
+ {&__pyx_n_s_193, __pyx_k_193, sizeof(__pyx_k_193), 0, 0, 1, 1},
+ {&__pyx_kp_s_194, __pyx_k_194, sizeof(__pyx_k_194), 0, 0, 1, 0},
+ {&__pyx_n_s_199, __pyx_k_199, sizeof(__pyx_k_199), 0, 0, 1, 1},
{&__pyx_kp_s_20, __pyx_k_20, sizeof(__pyx_k_20), 0, 0, 1, 0},
- {&__pyx_kp_u_200, __pyx_k_200, sizeof(__pyx_k_200), 0, 1, 0, 0},
+ {&__pyx_n_s_200, __pyx_k_200, sizeof(__pyx_k_200), 0, 0, 1, 1},
{&__pyx_kp_u_201, __pyx_k_201, sizeof(__pyx_k_201), 0, 1, 0, 0},
{&__pyx_kp_u_202, __pyx_k_202, sizeof(__pyx_k_202), 0, 1, 0, 0},
{&__pyx_kp_u_203, __pyx_k_203, sizeof(__pyx_k_203), 0, 1, 0, 0},
@@ -20635,28 +21504,37 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_u_276, __pyx_k_276, sizeof(__pyx_k_276), 0, 1, 0, 0},
{&__pyx_kp_u_277, __pyx_k_277, sizeof(__pyx_k_277), 0, 1, 0, 0},
{&__pyx_kp_u_278, __pyx_k_278, sizeof(__pyx_k_278), 0, 1, 0, 0},
+ {&__pyx_kp_u_279, __pyx_k_279, sizeof(__pyx_k_279), 0, 1, 0, 0},
{&__pyx_kp_s_28, __pyx_k_28, sizeof(__pyx_k_28), 0, 0, 1, 0},
+ {&__pyx_kp_u_280, __pyx_k_280, sizeof(__pyx_k_280), 0, 1, 0, 0},
+ {&__pyx_kp_u_281, __pyx_k_281, sizeof(__pyx_k_281), 0, 1, 0, 0},
+ {&__pyx_kp_u_282, __pyx_k_282, sizeof(__pyx_k_282), 0, 1, 0, 0},
+ {&__pyx_kp_u_283, __pyx_k_283, sizeof(__pyx_k_283), 0, 1, 0, 0},
+ {&__pyx_kp_u_284, __pyx_k_284, sizeof(__pyx_k_284), 0, 1, 0, 0},
+ {&__pyx_kp_u_285, __pyx_k_285, sizeof(__pyx_k_285), 0, 1, 0, 0},
+ {&__pyx_kp_u_286, __pyx_k_286, sizeof(__pyx_k_286), 0, 1, 0, 0},
{&__pyx_kp_s_30, __pyx_k_30, sizeof(__pyx_k_30), 0, 0, 1, 0},
- {&__pyx_kp_s_31, __pyx_k_31, sizeof(__pyx_k_31), 0, 0, 1, 0},
{&__pyx_kp_s_32, __pyx_k_32, sizeof(__pyx_k_32), 0, 0, 1, 0},
- {&__pyx_kp_s_33, __pyx_k_33, sizeof(__pyx_k_33), 0, 0, 1, 0},
- {&__pyx_kp_s_39, __pyx_k_39, sizeof(__pyx_k_39), 0, 0, 1, 0},
- {&__pyx_kp_s_42, __pyx_k_42, sizeof(__pyx_k_42), 0, 0, 1, 0},
+ {&__pyx_kp_s_34, __pyx_k_34, sizeof(__pyx_k_34), 0, 0, 1, 0},
+ {&__pyx_kp_s_36, __pyx_k_36, sizeof(__pyx_k_36), 0, 0, 1, 0},
{&__pyx_kp_s_44, __pyx_k_44, sizeof(__pyx_k_44), 0, 0, 1, 0},
- {&__pyx_kp_s_51, __pyx_k_51, sizeof(__pyx_k_51), 0, 0, 1, 0},
- {&__pyx_kp_s_61, __pyx_k_61, sizeof(__pyx_k_61), 0, 0, 1, 0},
- {&__pyx_kp_s_63, __pyx_k_63, sizeof(__pyx_k_63), 0, 0, 1, 0},
- {&__pyx_kp_s_65, __pyx_k_65, sizeof(__pyx_k_65), 0, 0, 1, 0},
+ {&__pyx_kp_s_47, __pyx_k_47, sizeof(__pyx_k_47), 0, 0, 1, 0},
+ {&__pyx_kp_s_49, __pyx_k_49, sizeof(__pyx_k_49), 0, 0, 1, 0},
+ {&__pyx_kp_s_56, __pyx_k_56, sizeof(__pyx_k_56), 0, 0, 1, 0},
+ {&__pyx_kp_s_66, __pyx_k_66, sizeof(__pyx_k_66), 0, 0, 1, 0},
{&__pyx_kp_s_68, __pyx_k_68, sizeof(__pyx_k_68), 0, 0, 1, 0},
+ {&__pyx_kp_s_70, __pyx_k_70, sizeof(__pyx_k_70), 0, 0, 1, 0},
{&__pyx_kp_s_73, __pyx_k_73, sizeof(__pyx_k_73), 0, 0, 1, 0},
- {&__pyx_kp_s_77, __pyx_k_77, sizeof(__pyx_k_77), 0, 0, 1, 0},
- {&__pyx_kp_s_79, __pyx_k_79, sizeof(__pyx_k_79), 0, 0, 1, 0},
+ {&__pyx_kp_s_78, __pyx_k_78, sizeof(__pyx_k_78), 0, 0, 1, 0},
+ {&__pyx_kp_s_82, __pyx_k_82, sizeof(__pyx_k_82), 0, 0, 1, 0},
{&__pyx_kp_s_84, __pyx_k_84, sizeof(__pyx_k_84), 0, 0, 1, 0},
+ {&__pyx_kp_s_89, __pyx_k_89, sizeof(__pyx_k_89), 0, 0, 1, 0},
{&__pyx_kp_s_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 0, 1, 0},
{&__pyx_n_s__MT19937, __pyx_k__MT19937, sizeof(__pyx_k__MT19937), 0, 0, 1, 1},
{&__pyx_n_s__TypeError, __pyx_k__TypeError, sizeof(__pyx_k__TypeError), 0, 0, 1, 1},
{&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1},
{&__pyx_n_s____RandomState_ctor, __pyx_k____RandomState_ctor, sizeof(__pyx_k____RandomState_ctor), 0, 0, 1, 1},
+ {&__pyx_n_s____import__, __pyx_k____import__, sizeof(__pyx_k____import__), 0, 0, 1, 1},
{&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1},
{&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1},
{&__pyx_n_s___rand, __pyx_k___rand, sizeof(__pyx_k___rand), 0, 0, 1, 1},
@@ -20698,9 +21576,11 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s__high, __pyx_k__high, sizeof(__pyx_k__high), 0, 0, 1, 1},
{&__pyx_n_s__hypergeometric, __pyx_k__hypergeometric, sizeof(__pyx_k__hypergeometric), 0, 0, 1, 1},
{&__pyx_n_s__iinfo, __pyx_k__iinfo, sizeof(__pyx_k__iinfo), 0, 0, 1, 1},
+ {&__pyx_n_s__index, __pyx_k__index, sizeof(__pyx_k__index), 0, 0, 1, 1},
{&__pyx_n_s__int, __pyx_k__int, sizeof(__pyx_k__int), 0, 0, 1, 1},
{&__pyx_n_s__integer, __pyx_k__integer, sizeof(__pyx_k__integer), 0, 0, 1, 1},
- {&__pyx_n_s__join, __pyx_k__join, sizeof(__pyx_k__join), 0, 0, 1, 1},
+ {&__pyx_n_s__intp, __pyx_k__intp, sizeof(__pyx_k__intp), 0, 0, 1, 1},
+ {&__pyx_n_s__item, __pyx_k__item, sizeof(__pyx_k__item), 0, 0, 1, 1},
{&__pyx_n_s__kappa, __pyx_k__kappa, sizeof(__pyx_k__kappa), 0, 0, 1, 1},
{&__pyx_n_s__l, __pyx_k__l, sizeof(__pyx_k__l), 0, 0, 1, 1},
{&__pyx_n_s__lam, __pyx_k__lam, sizeof(__pyx_k__lam), 0, 0, 1, 1},
@@ -20733,12 +21613,14 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1},
{&__pyx_n_s__nsample, __pyx_k__nsample, sizeof(__pyx_k__nsample), 0, 0, 1, 1},
{&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1},
+ {&__pyx_n_s__operator, __pyx_k__operator, sizeof(__pyx_k__operator), 0, 0, 1, 1},
{&__pyx_n_s__p, __pyx_k__p, sizeof(__pyx_k__p), 0, 0, 1, 1},
{&__pyx_n_s__pareto, __pyx_k__pareto, sizeof(__pyx_k__pareto), 0, 0, 1, 1},
{&__pyx_n_s__permutation, __pyx_k__permutation, sizeof(__pyx_k__permutation), 0, 0, 1, 1},
{&__pyx_n_s__poisson, __pyx_k__poisson, sizeof(__pyx_k__poisson), 0, 0, 1, 1},
{&__pyx_n_s__poisson_lam_max, __pyx_k__poisson_lam_max, sizeof(__pyx_k__poisson_lam_max), 0, 0, 1, 1},
{&__pyx_n_s__power, __pyx_k__power, sizeof(__pyx_k__power), 0, 0, 1, 1},
+ {&__pyx_n_s__prod, __pyx_k__prod, sizeof(__pyx_k__prod), 0, 0, 1, 1},
{&__pyx_n_s__pvals, __pyx_k__pvals, sizeof(__pyx_k__pvals), 0, 0, 1, 1},
{&__pyx_n_s__rand, __pyx_k__rand, sizeof(__pyx_k__rand), 0, 0, 1, 1},
{&__pyx_n_s__randint, __pyx_k__randint, sizeof(__pyx_k__randint), 0, 0, 1, 1},
@@ -20746,9 +21628,11 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s__random, __pyx_k__random, sizeof(__pyx_k__random), 0, 0, 1, 1},
{&__pyx_n_s__random_integers, __pyx_k__random_integers, sizeof(__pyx_k__random_integers), 0, 0, 1, 1},
{&__pyx_n_s__random_sample, __pyx_k__random_sample, sizeof(__pyx_k__random_sample), 0, 0, 1, 1},
+ {&__pyx_n_s__ravel, __pyx_k__ravel, sizeof(__pyx_k__ravel), 0, 0, 1, 1},
{&__pyx_n_s__rayleigh, __pyx_k__rayleigh, sizeof(__pyx_k__rayleigh), 0, 0, 1, 1},
{&__pyx_n_s__reduce, __pyx_k__reduce, sizeof(__pyx_k__reduce), 0, 0, 1, 1},
{&__pyx_n_s__replace, __pyx_k__replace, sizeof(__pyx_k__replace), 0, 0, 1, 1},
+ {&__pyx_n_s__return_index, __pyx_k__return_index, sizeof(__pyx_k__return_index), 0, 0, 1, 1},
{&__pyx_n_s__right, __pyx_k__right, sizeof(__pyx_k__right), 0, 0, 1, 1},
{&__pyx_n_s__scale, __pyx_k__scale, sizeof(__pyx_k__scale), 0, 0, 1, 1},
{&__pyx_n_s__searchsorted, __pyx_k__searchsorted, sizeof(__pyx_k__searchsorted), 0, 0, 1, 1},
@@ -20759,6 +21643,7 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s__side, __pyx_k__side, sizeof(__pyx_k__side), 0, 0, 1, 1},
{&__pyx_n_s__sigma, __pyx_k__sigma, sizeof(__pyx_k__sigma), 0, 0, 1, 1},
{&__pyx_n_s__size, __pyx_k__size, sizeof(__pyx_k__size), 0, 0, 1, 1},
+ {&__pyx_n_s__sort, __pyx_k__sort, sizeof(__pyx_k__sort), 0, 0, 1, 1},
{&__pyx_n_s__sqrt, __pyx_k__sqrt, sizeof(__pyx_k__sqrt), 0, 0, 1, 1},
{&__pyx_n_s__standard_cauchy, __pyx_k__standard_cauchy, sizeof(__pyx_k__standard_cauchy), 0, 0, 1, 1},
{&__pyx_n_s__standard_gamma, __pyx_k__standard_gamma, sizeof(__pyx_k__standard_gamma), 0, 0, 1, 1},
@@ -20781,8 +21666,8 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
{0, 0, 0, 0, 0, 0, 0}
};
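The table ends with the all-zero sentinel entry, so initialization is a single pass that stops at the first entry whose target slot is null. A sketch under assumed field meanings (the entry layout is modeled on the table above, not copied from Cython's `__Pyx_InitStrings`; Python 3 string APIs assumed):

    #include <Python.h>

    typedef struct {
        PyObject **p;        /* where the created object is stored */
        const char *s;       /* the C string literal */
        long n;              /* sizeof(literal), including the NUL */
        int intern;          /* intern identifier-like names */
    } StringTabEntry;

    static int
    init_strings(StringTabEntry *t)
    {
        for (; t->p; ++t) {  /* stop at the {0, 0, ...} sentinel */
            *t->p = t->intern
                ? PyUnicode_InternFromString(t->s)
                : PyUnicode_FromStringAndSize(t->s, t->n - 1);
            if (!*t->p)
                return -1;
        }
        return 0;
    }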
static int __Pyx_InitCachedBuiltins(void) {
- __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_builtin_TypeError = __Pyx_GetName(__pyx_b, __pyx_n_s__TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 700; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s__TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 701; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
return 0;
__pyx_L1_error:;
return -1;
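`__Pyx_GetBuiltinName` resolves `ValueError` and `TypeError` against the builtins module itself rather than through a name lookup on a module object. A minimal sketch of that kind of helper (the function name is illustrative):

    #include <Python.h>

    /* Fetch a builtin such as ValueError directly from the builtins
     * module; returns a new reference, or NULL with an error set. */
    static PyObject *
    get_builtin_name(const char *name)
    {
        PyObject *builtins = PyImport_ImportModule(
    #if PY_MAJOR_VERSION >= 3
            "builtins"
    #else
            "__builtin__"
    #endif
        );
        PyObject *obj;
        if (!builtins)
            return NULL;
        obj = PyObject_GetAttrString(builtins, name);
        Py_DECREF(builtins);
        return obj;
    }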
@@ -20792,1649 +21677,1383 @@ static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
- /* "mtrand.pyx":185
+ /* "mtrand.pyx":186
* <void *>oa)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_k_tuple_2 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_1)); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_2);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_1));
- PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_s_1));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2));
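Each cached argument tuple now comes from a single `PyTuple_Pack` call: unlike `PyTuple_SET_ITEM`, which steals a reference, `PyTuple_Pack` takes its own reference to every argument, so the old `Py_INCREF`/`SET_ITEM`/`GIVEREF` sequence collapses to one line. A sketch of the two equivalent constructions:

    #include <Python.h>

    /* The two functions build the same one-element tuple. */
    static PyObject *
    pack_one(PyObject *msg)
    {
        return PyTuple_Pack(1, msg);        /* increfs msg itself */
    }

    static PyObject *
    pack_one_by_hand(PyObject *msg)
    {
        PyObject *t = PyTuple_New(1);
        if (!t)
            return NULL;
        Py_INCREF(msg);                     /* SET_ITEM steals this ref */
        PyTuple_SET_ITEM(t, 0, msg);
        return t;
    }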
- /* "mtrand.pyx":234
+ /* "mtrand.pyx":235
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>oa, <void *>ob)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_k_tuple_3 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_k_tuple_3 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_1)); if (unlikely(!__pyx_k_tuple_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_3);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_1));
- PyTuple_SET_ITEM(__pyx_k_tuple_3, 0, ((PyObject *)__pyx_kp_s_1));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_3));
- /* "mtrand.pyx":289
+ /* "mtrand.pyx":290
* <void *>ob, <void *>oc)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 289; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_k_tuple_4 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_1)); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_4);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_1));
- PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_s_1));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4));
- /* "mtrand.pyx":353
+ /* "mtrand.pyx":354
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>on, <void *>op)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_k_tuple_5 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_k_tuple_5 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_1)); if (unlikely(!__pyx_k_tuple_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 354; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_5);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_1));
- PyTuple_SET_ITEM(__pyx_k_tuple_5, 0, ((PyObject *)__pyx_kp_s_1));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_5));
- /* "mtrand.pyx":402
+ /* "mtrand.pyx":403
* multi = <broadcast>PyArray_MultiIterNew(3, <void*>array, <void *>on, <void *>op)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* on_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_k_tuple_6 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_1)); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_6);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_1));
- PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_s_1));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6));
- /* "mtrand.pyx":456
+ /* "mtrand.pyx":457
* <void *>oN)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* on_data = <long *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_k_tuple_7 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_k_tuple_7 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_1)); if (unlikely(!__pyx_k_tuple_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 457; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_7);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_1));
- PyTuple_SET_ITEM(__pyx_k_tuple_7, 0, ((PyObject *)__pyx_kp_s_1));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_7));
- /* "mtrand.pyx":505
+ /* "mtrand.pyx":506
* multi = <broadcast>PyArray_MultiIterNew(2, <void *>array, <void *>oa)
* if (multi.size != PyArray_SIZE(array)):
* raise ValueError("size is not compatible with inputs") # <<<<<<<<<<<<<<
* for i from 0 <= i < multi.size:
* oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
*/
- __pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_k_tuple_8 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_1)); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_8);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_1));
- PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_s_1));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_1));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8));
- /* "mtrand.pyx":691
+ /* "mtrand.pyx":692
* algorithm_name = state[0]
* if algorithm_name != 'MT19937':
* raise ValueError("algorithm must be 'MT19937'") # <<<<<<<<<<<<<<
* key, pos = state[1:3]
* if len(state) == 3:
*/
- __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 691; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_k_tuple_10 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_9)); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_10);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_9));
- PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_s_9));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_9));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10));
- /* "mtrand.pyx":704
+ /* "mtrand.pyx":693
+ * if algorithm_name != 'MT19937':
+ * raise ValueError("algorithm must be 'MT19937'")
+ * key, pos = state[1:3] # <<<<<<<<<<<<<<
+ * if len(state) == 3:
+ * has_gauss = 0
+ */
+ __pyx_k_slice_11 = PySlice_New(__pyx_int_1, __pyx_int_3, Py_None); if (unlikely(!__pyx_k_slice_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 693; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_slice_11);
+ __Pyx_GIVEREF(__pyx_k_slice_11);
+
+ /* "mtrand.pyx":698
+ * cached_gaussian = 0.0
+ * else:
+ * has_gauss, cached_gaussian = state[3:5] # <<<<<<<<<<<<<<
+ * try:
+ * obj = <ndarray>PyArray_ContiguousFromObject(key, NPY_ULONG, 1, 1)
+ */
+ __pyx_k_slice_12 = PySlice_New(__pyx_int_3, __pyx_int_5, Py_None); if (unlikely(!__pyx_k_slice_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 698; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_slice_12);
+ __Pyx_GIVEREF(__pyx_k_slice_12);
+
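The constant slices backing `state[1:3]` and `state[3:5]` are built once with `PySlice_New` and reused; applying one is just `PyObject_GetItem`. A sketch of the same operation done inline (Python 3 `PyLong_FromLong` spelling assumed):

    #include <Python.h>

    /* Equivalent of state[1:3]: PySlice_New takes its own references
     * to start/stop/step, so the temporaries are released here. */
    static PyObject *
    take_1_to_3(PyObject *state)
    {
        PyObject *start = PyLong_FromLong(1);
        PyObject *stop  = PyLong_FromLong(3);
        PyObject *sl, *result;
        if (!start || !stop) {
            Py_XDECREF(start);
            Py_XDECREF(stop);
            return NULL;
        }
        sl = PySlice_New(start, stop, Py_None);
        Py_DECREF(start);
        Py_DECREF(stop);
        if (!sl)
            return NULL;
        result = PyObject_GetItem(state, sl);   /* state[1:3] */
        Py_DECREF(sl);
        return result;
    }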
+ /* "mtrand.pyx":705
* obj = <ndarray>PyArray_ContiguousFromObject(key, NPY_LONG, 1, 1)
* if PyArray_DIM(obj, 0) != 624:
* raise ValueError("state must be 624 longs") # <<<<<<<<<<<<<<
* memcpy(<void*>(self.internal_state.key), <void*>PyArray_DATA(obj), 624*sizeof(long))
* self.internal_state.pos = pos
*/
- __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_12);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_11));
- PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_s_11));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_11));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12));
+ __pyx_k_tuple_14 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_13)); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_14);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14));
- /* "mtrand.pyx":876
+ /* "mtrand.pyx":877
*
* if lo >= hi :
* raise ValueError("low >= high") # <<<<<<<<<<<<<<
*
* diff = <unsigned long>hi - <unsigned long>lo - 1UL
*/
- __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 876; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_14);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_13));
- PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_s_13));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_13));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14));
-
- /* "mtrand.pyx":1000
- * pop_size = a #population size
- * else:
- * raise ValueError("a must be greater than 0") # <<<<<<<<<<<<<<
- * else:
- * a = np.array(a, ndmin=1, copy=0)
- */
- __pyx_k_tuple_17 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1000; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_17);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_16));
- PyTuple_SET_ITEM(__pyx_k_tuple_17, 0, ((PyObject *)__pyx_kp_s_16));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_16));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_17));
+ __pyx_k_tuple_16 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_15)); if (unlikely(!__pyx_k_tuple_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_16);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_16));
/* "mtrand.pyx":1004
- * a = np.array(a, ndmin=1, copy=0)
- * if a.ndim != 1:
- * raise ValueError("a must be 1-dimensional") # <<<<<<<<<<<<<<
- * pop_size = a.size
- * if pop_size is 0:
+ * pop_size = operator.index(a.item())
+ * except TypeError:
+ * raise ValueError("a must be 1-dimensional or an integer") # <<<<<<<<<<<<<<
+ * if pop_size <= 0:
+ * raise ValueError("a must be greater than 0")
*/
- __pyx_k_tuple_19 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1004; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_k_tuple_19 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_18)); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1004; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_k_tuple_19);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_18));
- PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, ((PyObject *)__pyx_kp_s_18));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_18));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19));
- /* "mtrand.pyx":1007
- * pop_size = a.size
+ /* "mtrand.pyx":1006
+ * raise ValueError("a must be 1-dimensional or an integer")
+ * if pop_size <= 0:
+ * raise ValueError("a must be greater than 0") # <<<<<<<<<<<<<<
+ * elif a.ndim != 1:
+ * raise ValueError("a must be 1-dimensional")
+ */
+ __pyx_k_tuple_21 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_20)); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1006; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_21);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21));
+
+ /* "mtrand.pyx":1008
+ * raise ValueError("a must be greater than 0")
+ * elif a.ndim != 1:
+ * raise ValueError("a must be 1-dimensional") # <<<<<<<<<<<<<<
+ * else:
+ * pop_size = a.shape[0]
+ */
+ __pyx_k_tuple_23 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_22)); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1008; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_23);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23));
+
+ /* "mtrand.pyx":1012
+ * pop_size = a.shape[0]
* if pop_size is 0:
* raise ValueError("a must be non-empty") # <<<<<<<<<<<<<<
*
* if None != p:
*/
- __pyx_k_tuple_21 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_21);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_20));
- PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_kp_s_20));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_20));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21));
+ __pyx_k_tuple_25 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_24)); if (unlikely(!__pyx_k_tuple_25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1012; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_25);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_25));
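The reworked `choice` accepts an integer-like scalar by round-tripping it through `operator.index(a.item())`; the C-level equivalent of `operator.index` is `PyNumber_Index`, which raises `TypeError` for objects lacking `__index__`. A sketch of that validation step (helper name illustrative):

    #include <Python.h>

    /* Validate a population size the way operator.index does.  On
     * failure returns -1 with TypeError set; since -1 is also ruled
     * out by the pop_size <= 0 check above, callers can distinguish
     * the cases via PyErr_Occurred(). */
    static long
    as_pop_size(PyObject *a)
    {
        PyObject *idx = PyNumber_Index(a);  /* C-level operator.index */
        long n;
        if (!idx)
            return -1;
        n = PyLong_AsLong(idx);
        Py_DECREF(idx);
        return n;
    }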
- /* "mtrand.pyx":1012
- * p = np.array(p, dtype=np.double, ndmin=1, copy=0)
+ /* "mtrand.pyx":1017
+ * p = np.array(p, dtype=np.double, ndmin=1, copy=False)
* if p.ndim != 1:
* raise ValueError("p must be 1-dimensional") # <<<<<<<<<<<<<<
* if p.size != pop_size:
* raise ValueError("a and p must have same size")
*/
- __pyx_k_tuple_23 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1012; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_23);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_22));
- PyTuple_SET_ITEM(__pyx_k_tuple_23, 0, ((PyObject *)__pyx_kp_s_22));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_22));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_23));
+ __pyx_k_tuple_27 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_26)); if (unlikely(!__pyx_k_tuple_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1017; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_27);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27));
- /* "mtrand.pyx":1014
+ /* "mtrand.pyx":1019
* raise ValueError("p must be 1-dimensional")
* if p.size != pop_size:
* raise ValueError("a and p must have same size") # <<<<<<<<<<<<<<
* if np.any(p < 0):
* raise ValueError("probabilities are not non-negative")
*/
- __pyx_k_tuple_25 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1014; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_25);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_24));
- PyTuple_SET_ITEM(__pyx_k_tuple_25, 0, ((PyObject *)__pyx_kp_s_24));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_24));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_25));
+ __pyx_k_tuple_29 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_28)); if (unlikely(!__pyx_k_tuple_29)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1019; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_29);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_29));
- /* "mtrand.pyx":1016
+ /* "mtrand.pyx":1021
* raise ValueError("a and p must have same size")
* if np.any(p < 0):
* raise ValueError("probabilities are not non-negative") # <<<<<<<<<<<<<<
* if not np.allclose(p.sum(), 1):
* raise ValueError("probabilities do not sum to 1")
*/
- __pyx_k_tuple_27 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1016; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_27);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_26));
- PyTuple_SET_ITEM(__pyx_k_tuple_27, 0, ((PyObject *)__pyx_kp_s_26));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_26));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27));
+ __pyx_k_tuple_31 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_30)); if (unlikely(!__pyx_k_tuple_31)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1021; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_31);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_31));
- /* "mtrand.pyx":1018
+ /* "mtrand.pyx":1023
* raise ValueError("probabilities are not non-negative")
* if not np.allclose(p.sum(), 1):
* raise ValueError("probabilities do not sum to 1") # <<<<<<<<<<<<<<
*
- * # Actual sampling
+ * shape = size
*/
- __pyx_k_tuple_29 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_29)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1018; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_29);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_28));
- PyTuple_SET_ITEM(__pyx_k_tuple_29, 0, ((PyObject *)__pyx_kp_s_28));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_28));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_29));
+ __pyx_k_tuple_33 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_32)); if (unlikely(!__pyx_k_tuple_33)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1023; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_33);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_33));
- /* "mtrand.pyx":1036
+ /* "mtrand.pyx":1043
+ * else:
+ * if size > pop_size:
+ * raise ValueError("Cannot take a larger sample than " # <<<<<<<<<<<<<<
+ * "population when 'replace=False'")
+ *
+ */
+ __pyx_k_tuple_35 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_34)); if (unlikely(!__pyx_k_tuple_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_35);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_35));
+
+ /* "mtrand.pyx":1048
* if None != p:
* if np.sum(p > 0) < size:
* raise ValueError("Fewer non-zero entries in p than size") # <<<<<<<<<<<<<<
* n_uniq = 0
* p = p.copy()
*/
- __pyx_k_tuple_34 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_34)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1036; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_34);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_33));
- PyTuple_SET_ITEM(__pyx_k_tuple_34, 0, ((PyObject *)__pyx_kp_s_33));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_33));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_34));
+ __pyx_k_tuple_37 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_36)); if (unlikely(!__pyx_k_tuple_37)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1048; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_37);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_37));
- /* "mtrand.pyx":1448
+ /* "mtrand.pyx":1073
+ * if shape is None and isinstance(idx, np.ndarray):
+ * # In most cases a scalar will have been made an array
+ * idx = idx.item(0) # <<<<<<<<<<<<<<
+ *
+ * #Use samples as indices for a if a is array-like
+ */
+ __pyx_k_tuple_38 = PyTuple_Pack(1, __pyx_int_0); if (unlikely(!__pyx_k_tuple_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1073; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_38);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_38));
+
+ /* "mtrand.pyx":1085
+ * # array, taking into account that np.array(item) may not work
+ * # for object arrays.
+ * res = np.empty((), dtype=a.dtype) # <<<<<<<<<<<<<<
+ * res[()] = a[idx]
+ * return res
+ */
+ __pyx_k_tuple_39 = PyTuple_Pack(1, ((PyObject *)__pyx_empty_tuple)); if (unlikely(!__pyx_k_tuple_39)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1085; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_39);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_39));
+
+ /* "mtrand.pyx":1479
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_normal, size, floc, fscale)
*
*/
- __pyx_k_tuple_40 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_40)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1448; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_40);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_40, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_40));
+ __pyx_k_tuple_45 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_45)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1479; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_45);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_45));
- /* "mtrand.pyx":1456
+ /* "mtrand.pyx":1487
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_normal, size, oloc, oscale)
*
*/
- __pyx_k_tuple_41 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_41)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1456; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_41);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_41, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_41));
+ __pyx_k_tuple_46 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_46)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1487; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_46);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_46));
- /* "mtrand.pyx":1503
+ /* "mtrand.pyx":1534
* if not PyErr_Occurred():
* if fa <= 0:
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* if fb <= 0:
* raise ValueError("b <= 0")
*/
- __pyx_k_tuple_43 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_43)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_43);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_42));
- PyTuple_SET_ITEM(__pyx_k_tuple_43, 0, ((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_43));
+ __pyx_k_tuple_48 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_48)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1534; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_48);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_48));
- /* "mtrand.pyx":1505
+ /* "mtrand.pyx":1536
* raise ValueError("a <= 0")
* if fb <= 0:
* raise ValueError("b <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb)
*
*/
- __pyx_k_tuple_45 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_45)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_45);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_44));
- PyTuple_SET_ITEM(__pyx_k_tuple_45, 0, ((PyObject *)__pyx_kp_s_44));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_44));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_45));
+ __pyx_k_tuple_50 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_49)); if (unlikely(!__pyx_k_tuple_50)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1536; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_50);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_50));
- /* "mtrand.pyx":1513
+ /* "mtrand.pyx":1544
* ob = <ndarray>PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0)):
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* if np.any(np.less_equal(ob, 0)):
* raise ValueError("b <= 0")
*/
- __pyx_k_tuple_46 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_46)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_46);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_42));
- PyTuple_SET_ITEM(__pyx_k_tuple_46, 0, ((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_46));
+ __pyx_k_tuple_51 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_51)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1544; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_51);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_51));
- /* "mtrand.pyx":1515
+ /* "mtrand.pyx":1546
* raise ValueError("a <= 0")
* if np.any(np.less_equal(ob, 0)):
* raise ValueError("b <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_beta, size, oa, ob)
*
*/
- __pyx_k_tuple_47 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_47)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1515; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_47);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_44));
- PyTuple_SET_ITEM(__pyx_k_tuple_47, 0, ((PyObject *)__pyx_kp_s_44));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_44));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_47));
+ __pyx_k_tuple_52 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_49)); if (unlikely(!__pyx_k_tuple_52)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1546; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_52);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_52));
- /* "mtrand.pyx":1562
+ /* "mtrand.pyx":1593
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_exponential, size, fscale)
*
*/
- __pyx_k_tuple_49 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_49)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_49);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_49, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_49));
+ __pyx_k_tuple_54 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_54)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1593; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_54);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_54));
- /* "mtrand.pyx":1569
+ /* "mtrand.pyx":1600
* oscale = <ndarray> PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_exponential, size, oscale)
*
*/
- __pyx_k_tuple_50 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_50)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_50);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_50, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_50));
+ __pyx_k_tuple_55 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_55)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1600; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_55);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_55));
- /* "mtrand.pyx":1673
+ /* "mtrand.pyx":1704
* if not PyErr_Occurred():
* if fshape <= 0:
* raise ValueError("shape <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_standard_gamma, size, fshape)
*
*/
- __pyx_k_tuple_52 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_52)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1673; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_52);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_51));
- PyTuple_SET_ITEM(__pyx_k_tuple_52, 0, ((PyObject *)__pyx_kp_s_51));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_51));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_52));
+ __pyx_k_tuple_57 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_56)); if (unlikely(!__pyx_k_tuple_57)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_57);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_57));
- /* "mtrand.pyx":1679
+ /* "mtrand.pyx":1710
* oshape = <ndarray> PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oshape, 0.0)):
* raise ValueError("shape <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_standard_gamma, size, oshape)
*
*/
- __pyx_k_tuple_53 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_53)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_53);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_51));
- PyTuple_SET_ITEM(__pyx_k_tuple_53, 0, ((PyObject *)__pyx_kp_s_51));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_51));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_53));
+ __pyx_k_tuple_58 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_56)); if (unlikely(!__pyx_k_tuple_58)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1710; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_58);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_58));
- /* "mtrand.pyx":1759
+ /* "mtrand.pyx":1790
* if not PyErr_Occurred():
* if fshape <= 0:
* raise ValueError("shape <= 0") # <<<<<<<<<<<<<<
* if fscale <= 0:
* raise ValueError("scale <= 0")
*/
- __pyx_k_tuple_55 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_55)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1759; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_55);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_51));
- PyTuple_SET_ITEM(__pyx_k_tuple_55, 0, ((PyObject *)__pyx_kp_s_51));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_51));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_55));
+ __pyx_k_tuple_60 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_56)); if (unlikely(!__pyx_k_tuple_60)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_60);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_60));
- /* "mtrand.pyx":1761
+ /* "mtrand.pyx":1792
* raise ValueError("shape <= 0")
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, fscale)
*
*/
- __pyx_k_tuple_56 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_56)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1761; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_56);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_56, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_56));
+ __pyx_k_tuple_61 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_61)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_61);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_61));
- /* "mtrand.pyx":1768
+ /* "mtrand.pyx":1799
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oshape, 0.0)):
* raise ValueError("shape <= 0") # <<<<<<<<<<<<<<
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0")
*/
- __pyx_k_tuple_57 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_57)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1768; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_57);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_51));
- PyTuple_SET_ITEM(__pyx_k_tuple_57, 0, ((PyObject *)__pyx_kp_s_51));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_51));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_57));
+ __pyx_k_tuple_62 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_56)); if (unlikely(!__pyx_k_tuple_62)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_62);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_62));
- /* "mtrand.pyx":1770
+ /* "mtrand.pyx":1801
* raise ValueError("shape <= 0")
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale)
*
*/
- __pyx_k_tuple_58 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_58)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1770; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_58);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_58, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_58));
+ __pyx_k_tuple_63 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_63)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_63);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_63));
- /* "mtrand.pyx":1861
+ /* "mtrand.pyx":1891
* if not PyErr_Occurred():
* if fdfnum <= 0:
* raise ValueError("shape <= 0") # <<<<<<<<<<<<<<
* if fdfden <= 0:
* raise ValueError("scale <= 0")
*/
- __pyx_k_tuple_59 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_59)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1861; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_59);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_51));
- PyTuple_SET_ITEM(__pyx_k_tuple_59, 0, ((PyObject *)__pyx_kp_s_51));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_51));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_59));
+ __pyx_k_tuple_64 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_56)); if (unlikely(!__pyx_k_tuple_64)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1891; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_64);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_64));
- /* "mtrand.pyx":1863
+ /* "mtrand.pyx":1893
* raise ValueError("shape <= 0")
* if fdfden <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, fdfden)
*
*/
- __pyx_k_tuple_60 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_60)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1863; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_60);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_60, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_60));
+ __pyx_k_tuple_65 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_65)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_65);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_65));
- /* "mtrand.pyx":1871
+ /* "mtrand.pyx":1901
* odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odfnum, 0.0)):
* raise ValueError("dfnum <= 0") # <<<<<<<<<<<<<<
* if np.any(np.less_equal(odfden, 0.0)):
* raise ValueError("dfden <= 0")
*/
- __pyx_k_tuple_62 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_62)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1871; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_62);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_61));
- PyTuple_SET_ITEM(__pyx_k_tuple_62, 0, ((PyObject *)__pyx_kp_s_61));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_61));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_62));
+ __pyx_k_tuple_67 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_66)); if (unlikely(!__pyx_k_tuple_67)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1901; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_67);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_67));
- /* "mtrand.pyx":1873
+ /* "mtrand.pyx":1903
* raise ValueError("dfnum <= 0")
* if np.any(np.less_equal(odfden, 0.0)):
* raise ValueError("dfden <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_f, size, odfnum, odfden)
*
*/
- __pyx_k_tuple_64 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_64)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_64);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_63));
- PyTuple_SET_ITEM(__pyx_k_tuple_64, 0, ((PyObject *)__pyx_kp_s_63));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_63));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_64));
+ __pyx_k_tuple_69 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_68)); if (unlikely(!__pyx_k_tuple_69)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1903; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_69);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_69));
- /* "mtrand.pyx":1948
+ /* "mtrand.pyx":1978
* if not PyErr_Occurred():
* if fdfnum <= 1:
* raise ValueError("dfnum <= 1") # <<<<<<<<<<<<<<
* if fdfden <= 0:
* raise ValueError("dfden <= 0")
*/
- __pyx_k_tuple_66 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_66)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_66);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_65));
- PyTuple_SET_ITEM(__pyx_k_tuple_66, 0, ((PyObject *)__pyx_kp_s_65));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_65));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_66));
+ __pyx_k_tuple_71 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_70)); if (unlikely(!__pyx_k_tuple_71)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1978; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_71);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_71));
- /* "mtrand.pyx":1950
+ /* "mtrand.pyx":1980
* raise ValueError("dfnum <= 1")
* if fdfden <= 0:
* raise ValueError("dfden <= 0") # <<<<<<<<<<<<<<
* if fnonc < 0:
* raise ValueError("nonc < 0")
*/
- __pyx_k_tuple_67 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_67)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_67);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_63));
- PyTuple_SET_ITEM(__pyx_k_tuple_67, 0, ((PyObject *)__pyx_kp_s_63));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_63));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_67));
+ __pyx_k_tuple_72 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_68)); if (unlikely(!__pyx_k_tuple_72)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1980; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_72);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_72));
- /* "mtrand.pyx":1952
+ /* "mtrand.pyx":1982
* raise ValueError("dfden <= 0")
* if fnonc < 0:
* raise ValueError("nonc < 0") # <<<<<<<<<<<<<<
* return cont3_array_sc(self.internal_state, rk_noncentral_f, size,
* fdfnum, fdfden, fnonc)
*/
- __pyx_k_tuple_69 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_69)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_69);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_68));
- PyTuple_SET_ITEM(__pyx_k_tuple_69, 0, ((PyObject *)__pyx_kp_s_68));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_68));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_69));
+ __pyx_k_tuple_74 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_73)); if (unlikely(!__pyx_k_tuple_74)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1982; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_74);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_74));
- /* "mtrand.pyx":1963
+ /* "mtrand.pyx":1993
*
* if np.any(np.less_equal(odfnum, 1.0)):
* raise ValueError("dfnum <= 1") # <<<<<<<<<<<<<<
* if np.any(np.less_equal(odfden, 0.0)):
* raise ValueError("dfden <= 0")
*/
- __pyx_k_tuple_70 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_70)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1963; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_70);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_65));
- PyTuple_SET_ITEM(__pyx_k_tuple_70, 0, ((PyObject *)__pyx_kp_s_65));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_65));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_70));
+ __pyx_k_tuple_75 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_70)); if (unlikely(!__pyx_k_tuple_75)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1993; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_75);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_75));
- /* "mtrand.pyx":1965
+ /* "mtrand.pyx":1995
* raise ValueError("dfnum <= 1")
* if np.any(np.less_equal(odfden, 0.0)):
* raise ValueError("dfden <= 0") # <<<<<<<<<<<<<<
* if np.any(np.less(ononc, 0.0)):
* raise ValueError("nonc < 0")
*/
- __pyx_k_tuple_71 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_71)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1965; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_71);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_63));
- PyTuple_SET_ITEM(__pyx_k_tuple_71, 0, ((PyObject *)__pyx_kp_s_63));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_63));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_71));
+ __pyx_k_tuple_76 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_68)); if (unlikely(!__pyx_k_tuple_76)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1995; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_76);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_76));
- /* "mtrand.pyx":1967
+ /* "mtrand.pyx":1997
* raise ValueError("dfden <= 0")
* if np.any(np.less(ononc, 0.0)):
* raise ValueError("nonc < 0") # <<<<<<<<<<<<<<
* return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum,
* odfden, ononc)
*/
- __pyx_k_tuple_72 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_72)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1967; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_72);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_68));
- PyTuple_SET_ITEM(__pyx_k_tuple_72, 0, ((PyObject *)__pyx_kp_s_68));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_68));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_72));
+ __pyx_k_tuple_77 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_73)); if (unlikely(!__pyx_k_tuple_77)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1997; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_77);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_77));
- /* "mtrand.pyx":2039
+ /* "mtrand.pyx":2069
* if not PyErr_Occurred():
* if fdf <= 0:
* raise ValueError("df <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf)
*
*/
- __pyx_k_tuple_74 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_74)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2039; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_74);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_73));
- PyTuple_SET_ITEM(__pyx_k_tuple_74, 0, ((PyObject *)__pyx_kp_s_73));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_73));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_74));
+ __pyx_k_tuple_79 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_78)); if (unlikely(!__pyx_k_tuple_79)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2069; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_79);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_79));
- /* "mtrand.pyx":2046
+ /* "mtrand.pyx":2076
* odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_chisquare, size, odf)
*
*/
- __pyx_k_tuple_75 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_75)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_75);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_73));
- PyTuple_SET_ITEM(__pyx_k_tuple_75, 0, ((PyObject *)__pyx_kp_s_73));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_73));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_75));
+ __pyx_k_tuple_80 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_78)); if (unlikely(!__pyx_k_tuple_80)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2076; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_80);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_80));
- /* "mtrand.pyx":2124
+ /* "mtrand.pyx":2154
* if not PyErr_Occurred():
* if fdf <= 1:
* raise ValueError("df <= 0") # <<<<<<<<<<<<<<
* if fnonc <= 0:
* raise ValueError("nonc <= 0")
*/
- __pyx_k_tuple_76 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_76)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_76);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_73));
- PyTuple_SET_ITEM(__pyx_k_tuple_76, 0, ((PyObject *)__pyx_kp_s_73));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_73));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_76));
+ __pyx_k_tuple_81 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_78)); if (unlikely(!__pyx_k_tuple_81)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_81);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_81));
- /* "mtrand.pyx":2126
+ /* "mtrand.pyx":2156
* raise ValueError("df <= 0")
* if fnonc <= 0:
* raise ValueError("nonc <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_noncentral_chisquare,
* size, fdf, fnonc)
*/
- __pyx_k_tuple_78 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_78)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_78);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_77));
- PyTuple_SET_ITEM(__pyx_k_tuple_78, 0, ((PyObject *)__pyx_kp_s_77));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_77));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_78));
+ __pyx_k_tuple_83 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_82)); if (unlikely(!__pyx_k_tuple_83)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_83);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_83));
- /* "mtrand.pyx":2135
+ /* "mtrand.pyx":2165
* ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 1") # <<<<<<<<<<<<<<
* if np.any(np.less_equal(ononc, 0.0)):
* raise ValueError("nonc < 0")
*/
- __pyx_k_tuple_80 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_80)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_80);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_79));
- PyTuple_SET_ITEM(__pyx_k_tuple_80, 0, ((PyObject *)__pyx_kp_s_79));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_79));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_80));
+ __pyx_k_tuple_85 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_84)); if (unlikely(!__pyx_k_tuple_85)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_85);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_85));
- /* "mtrand.pyx":2137
+ /* "mtrand.pyx":2167
* raise ValueError("df <= 1")
* if np.any(np.less_equal(ononc, 0.0)):
* raise ValueError("nonc < 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_noncentral_chisquare, size,
* odf, ononc)
*/
- __pyx_k_tuple_81 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_81)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_81);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_68));
- PyTuple_SET_ITEM(__pyx_k_tuple_81, 0, ((PyObject *)__pyx_kp_s_68));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_68));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_81));
+ __pyx_k_tuple_86 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_73)); if (unlikely(!__pyx_k_tuple_86)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2167; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_86);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_86));
- /* "mtrand.pyx":2293
+ /* "mtrand.pyx":2323
* if not PyErr_Occurred():
* if fdf <= 0:
* raise ValueError("df <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_standard_t, size, fdf)
*
*/
- __pyx_k_tuple_82 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_82)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2293; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_82);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_73));
- PyTuple_SET_ITEM(__pyx_k_tuple_82, 0, ((PyObject *)__pyx_kp_s_73));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_73));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_82));
+ __pyx_k_tuple_87 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_78)); if (unlikely(!__pyx_k_tuple_87)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2323; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_87);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_87));
- /* "mtrand.pyx":2300
+ /* "mtrand.pyx":2330
* odf = <ndarray> PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(odf, 0.0)):
* raise ValueError("df <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_standard_t, size, odf)
*
*/
- __pyx_k_tuple_83 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_83)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2300; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_83);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_73));
- PyTuple_SET_ITEM(__pyx_k_tuple_83, 0, ((PyObject *)__pyx_kp_s_73));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_73));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_83));
+ __pyx_k_tuple_88 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_78)); if (unlikely(!__pyx_k_tuple_88)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2330; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_88);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_88));
- /* "mtrand.pyx":2386
+ /* "mtrand.pyx":2416
* if not PyErr_Occurred():
* if fkappa < 0:
* raise ValueError("kappa < 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, fkappa)
*
*/
- __pyx_k_tuple_85 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_85)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_85);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_84));
- PyTuple_SET_ITEM(__pyx_k_tuple_85, 0, ((PyObject *)__pyx_kp_s_84));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_84));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_85));
+ __pyx_k_tuple_90 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_89)); if (unlikely(!__pyx_k_tuple_90)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2416; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_90);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_90));
- /* "mtrand.pyx":2394
+ /* "mtrand.pyx":2424
* okappa = <ndarray> PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less(okappa, 0.0)):
* raise ValueError("kappa < 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa)
*
*/
- __pyx_k_tuple_86 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_86)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2394; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_86);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_84));
- PyTuple_SET_ITEM(__pyx_k_tuple_86, 0, ((PyObject *)__pyx_kp_s_84));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_84));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_86));
+ __pyx_k_tuple_91 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_89)); if (unlikely(!__pyx_k_tuple_91)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2424; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_91);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_91));
- /* "mtrand.pyx":2483
+ /* "mtrand.pyx":2513
* if not PyErr_Occurred():
* if fa <= 0:
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_pareto, size, fa)
*
*/
- __pyx_k_tuple_87 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_87)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2483; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_87);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_42));
- PyTuple_SET_ITEM(__pyx_k_tuple_87, 0, ((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_87));
+ __pyx_k_tuple_92 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_92)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_92);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_92));
- /* "mtrand.pyx":2490
+ /* "mtrand.pyx":2520
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_pareto, size, oa)
*
*/
- __pyx_k_tuple_88 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_88)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2490; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_88);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_42));
- PyTuple_SET_ITEM(__pyx_k_tuple_88, 0, ((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_88));
+ __pyx_k_tuple_93 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_93)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2520; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_93);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_93));
- /* "mtrand.pyx":2583
+ /* "mtrand.pyx":2613
* if not PyErr_Occurred():
* if fa <= 0:
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_weibull, size, fa)
*
*/
- __pyx_k_tuple_89 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_89)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2583; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_89);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_42));
- PyTuple_SET_ITEM(__pyx_k_tuple_89, 0, ((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_89));
+ __pyx_k_tuple_94 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_94)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2613; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_94);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_94));
- /* "mtrand.pyx":2590
+ /* "mtrand.pyx":2620
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_weibull, size, oa)
*
*/
- __pyx_k_tuple_90 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_90)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_90);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_42));
- PyTuple_SET_ITEM(__pyx_k_tuple_90, 0, ((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_90));
+ __pyx_k_tuple_95 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_95)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2620; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_95);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_95));
- /* "mtrand.pyx":2692
+ /* "mtrand.pyx":2722
* if not PyErr_Occurred():
* if fa <= 0:
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_power, size, fa)
*
*/
- __pyx_k_tuple_91 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_91)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2692; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_91);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_42));
- PyTuple_SET_ITEM(__pyx_k_tuple_91, 0, ((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_91));
+ __pyx_k_tuple_96 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_96)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2722; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_96);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_96));
- /* "mtrand.pyx":2699
+ /* "mtrand.pyx":2729
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 0.0)):
* raise ValueError("a <= 0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_power, size, oa)
*
*/
- __pyx_k_tuple_92 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_92)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2699; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_92);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_42));
- PyTuple_SET_ITEM(__pyx_k_tuple_92, 0, ((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_42));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_92));
+ __pyx_k_tuple_97 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_97)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2729; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_97);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_97));
- /* "mtrand.pyx":2782
+ /* "mtrand.pyx":2812
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_laplace, size, floc, fscale)
*
*/
- __pyx_k_tuple_95 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_95)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2782; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_95);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_95, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_95));
+ __pyx_k_tuple_100 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_100)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2812; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_100);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_100));
- /* "mtrand.pyx":2789
+ /* "mtrand.pyx":2819
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale)
*
*/
- __pyx_k_tuple_96 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_96)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2789; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_96);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_96, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_96));
+ __pyx_k_tuple_101 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_101)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2819; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_101);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_101));
- /* "mtrand.pyx":2913
+ /* "mtrand.pyx":2943
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, fscale)
*
*/
- __pyx_k_tuple_99 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_99)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2913; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_99);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_99, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_99));
+ __pyx_k_tuple_104 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_104)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_104);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_104));
- /* "mtrand.pyx":2920
+ /* "mtrand.pyx":2950
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale)
*
*/
- __pyx_k_tuple_100 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_100)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_100);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_100, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_100));
+ __pyx_k_tuple_105 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_105)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2950; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_105);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_105));
- /* "mtrand.pyx":3001
+ /* "mtrand.pyx":3031
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_logistic, size, floc, fscale)
*
*/
- __pyx_k_tuple_103 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_103)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3001; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_103);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_103, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_103));
+ __pyx_k_tuple_108 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_108)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3031; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_108);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_108));
- /* "mtrand.pyx":3008
+ /* "mtrand.pyx":3038
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale)
*
*/
- __pyx_k_tuple_104 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_104)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3008; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_104);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_104, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_104));
+ __pyx_k_tuple_109 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_109)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_109);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_109));
- /* "mtrand.pyx":3121
+ /* "mtrand.pyx":3151
* if not PyErr_Occurred():
* if fsigma <= 0:
* raise ValueError("sigma <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_lognormal, size, fmean, fsigma)
*
*/
- __pyx_k_tuple_108 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_108)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_108);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_107));
- PyTuple_SET_ITEM(__pyx_k_tuple_108, 0, ((PyObject *)__pyx_kp_s_107));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_107));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_108));
+ __pyx_k_tuple_113 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_112)); if (unlikely(!__pyx_k_tuple_113)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_113);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_113));
- /* "mtrand.pyx":3129
+ /* "mtrand.pyx":3159
* osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(osigma, 0.0)):
* raise ValueError("sigma <= 0.0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma)
*
*/
- __pyx_k_tuple_110 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_110)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_110);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_109));
- PyTuple_SET_ITEM(__pyx_k_tuple_110, 0, ((PyObject *)__pyx_kp_s_109));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_109));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_110));
+ __pyx_k_tuple_115 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_114)); if (unlikely(!__pyx_k_tuple_115)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3159; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_115);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_115));
- /* "mtrand.pyx":3194
+ /* "mtrand.pyx":3224
* if not PyErr_Occurred():
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont1_array_sc(self.internal_state, rk_rayleigh, size, fscale)
*
*/
- __pyx_k_tuple_112 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_112)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3194; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_112);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_112, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_112));
+ __pyx_k_tuple_117 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_117)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_117);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_117));
- /* "mtrand.pyx":3201
+ /* "mtrand.pyx":3231
* oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oscale, 0.0)):
* raise ValueError("scale <= 0.0") # <<<<<<<<<<<<<<
* return cont1_array(self.internal_state, rk_rayleigh, size, oscale)
*
*/
- __pyx_k_tuple_114 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_114)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3201; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_114);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_113));
- PyTuple_SET_ITEM(__pyx_k_tuple_114, 0, ((PyObject *)__pyx_kp_s_113));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_113));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_114));
+ __pyx_k_tuple_119 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_118)); if (unlikely(!__pyx_k_tuple_119)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3231; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_119);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_119));
- /* "mtrand.pyx":3274
+ /* "mtrand.pyx":3304
* if not PyErr_Occurred():
* if fmean <= 0:
* raise ValueError("mean <= 0") # <<<<<<<<<<<<<<
* if fscale <= 0:
* raise ValueError("scale <= 0")
*/
- __pyx_k_tuple_116 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_116)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3274; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_116);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_115));
- PyTuple_SET_ITEM(__pyx_k_tuple_116, 0, ((PyObject *)__pyx_kp_s_115));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_115));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_116));
+ __pyx_k_tuple_121 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_120)); if (unlikely(!__pyx_k_tuple_121)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3304; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_121);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_121));
- /* "mtrand.pyx":3276
+ /* "mtrand.pyx":3306
* raise ValueError("mean <= 0")
* if fscale <= 0:
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<<
* return cont2_array_sc(self.internal_state, rk_wald, size, fmean, fscale)
*
*/
- __pyx_k_tuple_117 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_117)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_117);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_39));
- PyTuple_SET_ITEM(__pyx_k_tuple_117, 0, ((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_39));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_117));
+ __pyx_k_tuple_122 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_122)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3306; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_122);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_122));
- /* "mtrand.pyx":3283
+ /* "mtrand.pyx":3313
* oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(omean,0.0)):
* raise ValueError("mean <= 0.0") # <<<<<<<<<<<<<<
* elif np.any(np.less_equal(oscale,0.0)):
* raise ValueError("scale <= 0.0")
*/
- __pyx_k_tuple_119 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_119)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_119);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_118));
- PyTuple_SET_ITEM(__pyx_k_tuple_119, 0, ((PyObject *)__pyx_kp_s_118));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_118));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_119));
+ __pyx_k_tuple_124 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_123)); if (unlikely(!__pyx_k_tuple_124)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_124);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_124));
- /* "mtrand.pyx":3285
+ /* "mtrand.pyx":3315
* raise ValueError("mean <= 0.0")
* elif np.any(np.less_equal(oscale,0.0)):
* raise ValueError("scale <= 0.0") # <<<<<<<<<<<<<<
* return cont2_array(self.internal_state, rk_wald, size, omean, oscale)
*
*/
- __pyx_k_tuple_120 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_120)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_120);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_113));
- PyTuple_SET_ITEM(__pyx_k_tuple_120, 0, ((PyObject *)__pyx_kp_s_113));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_113));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_120));
+ __pyx_k_tuple_125 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_118)); if (unlikely(!__pyx_k_tuple_125)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3315; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_125);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_125));
- /* "mtrand.pyx":3355
+ /* "mtrand.pyx":3385
* if not PyErr_Occurred():
* if fleft > fmode:
* raise ValueError("left > mode") # <<<<<<<<<<<<<<
* if fmode > fright:
* raise ValueError("mode > right")
*/
- __pyx_k_tuple_122 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_122)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3355; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_122);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_121));
- PyTuple_SET_ITEM(__pyx_k_tuple_122, 0, ((PyObject *)__pyx_kp_s_121));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_121));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_122));
+ __pyx_k_tuple_127 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_126)); if (unlikely(!__pyx_k_tuple_127)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_127);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_127));
- /* "mtrand.pyx":3357
+ /* "mtrand.pyx":3387
* raise ValueError("left > mode")
* if fmode > fright:
* raise ValueError("mode > right") # <<<<<<<<<<<<<<
* if fleft == fright:
* raise ValueError("left == right")
*/
- __pyx_k_tuple_124 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_124)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_124);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_123));
- PyTuple_SET_ITEM(__pyx_k_tuple_124, 0, ((PyObject *)__pyx_kp_s_123));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_123));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_124));
+ __pyx_k_tuple_129 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_128)); if (unlikely(!__pyx_k_tuple_129)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3387; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_129);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_129));
- /* "mtrand.pyx":3359
+ /* "mtrand.pyx":3389
* raise ValueError("mode > right")
* if fleft == fright:
* raise ValueError("left == right") # <<<<<<<<<<<<<<
* return cont3_array_sc(self.internal_state, rk_triangular, size, fleft,
* fmode, fright)
*/
- __pyx_k_tuple_126 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_126)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3359; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_126);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_125));
- PyTuple_SET_ITEM(__pyx_k_tuple_126, 0, ((PyObject *)__pyx_kp_s_125));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_125));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_126));
+ __pyx_k_tuple_131 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_130)); if (unlikely(!__pyx_k_tuple_131)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3389; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_131);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_131));
- /* "mtrand.pyx":3369
+ /* "mtrand.pyx":3399
*
* if np.any(np.greater(oleft, omode)):
* raise ValueError("left > mode") # <<<<<<<<<<<<<<
* if np.any(np.greater(omode, oright)):
* raise ValueError("mode > right")
*/
- __pyx_k_tuple_127 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_127)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3369; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_127);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_121));
- PyTuple_SET_ITEM(__pyx_k_tuple_127, 0, ((PyObject *)__pyx_kp_s_121));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_121));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_127));
+ __pyx_k_tuple_132 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_126)); if (unlikely(!__pyx_k_tuple_132)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3399; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_132);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_132));
- /* "mtrand.pyx":3371
+ /* "mtrand.pyx":3401
* raise ValueError("left > mode")
* if np.any(np.greater(omode, oright)):
* raise ValueError("mode > right") # <<<<<<<<<<<<<<
* if np.any(np.equal(oleft, oright)):
* raise ValueError("left == right")
*/
- __pyx_k_tuple_128 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_128)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3371; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_128);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_123));
- PyTuple_SET_ITEM(__pyx_k_tuple_128, 0, ((PyObject *)__pyx_kp_s_123));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_123));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_128));
+ __pyx_k_tuple_133 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_128)); if (unlikely(!__pyx_k_tuple_133)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_133);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_133));
- /* "mtrand.pyx":3373
+ /* "mtrand.pyx":3403
* raise ValueError("mode > right")
* if np.any(np.equal(oleft, oright)):
* raise ValueError("left == right") # <<<<<<<<<<<<<<
* return cont3_array(self.internal_state, rk_triangular, size, oleft,
* omode, oright)
*/
- __pyx_k_tuple_129 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_129)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_129);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_125));
- PyTuple_SET_ITEM(__pyx_k_tuple_129, 0, ((PyObject *)__pyx_kp_s_125));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_125));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_129));
+ __pyx_k_tuple_134 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_130)); if (unlikely(!__pyx_k_tuple_134)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_134);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_134));
- /* "mtrand.pyx":3467
+ /* "mtrand.pyx":3497
* if not PyErr_Occurred():
- * if ln <= 0:
- * raise ValueError("n <= 0") # <<<<<<<<<<<<<<
+ * if ln < 0:
+ * raise ValueError("n < 0") # <<<<<<<<<<<<<<
* if fp < 0:
* raise ValueError("p < 0")
*/
- __pyx_k_tuple_131 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_131)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3467; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_131);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_130));
- PyTuple_SET_ITEM(__pyx_k_tuple_131, 0, ((PyObject *)__pyx_kp_s_130));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_130));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_131));
+ __pyx_k_tuple_136 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_135)); if (unlikely(!__pyx_k_tuple_136)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_136);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_136));
- /* "mtrand.pyx":3469
- * raise ValueError("n <= 0")
+ /* "mtrand.pyx":3499
+ * raise ValueError("n < 0")
* if fp < 0:
* raise ValueError("p < 0") # <<<<<<<<<<<<<<
* elif fp > 1:
* raise ValueError("p > 1")
*/
- __pyx_k_tuple_133 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_133)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3469; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_133);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_132));
- PyTuple_SET_ITEM(__pyx_k_tuple_133, 0, ((PyObject *)__pyx_kp_s_132));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_132));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_133));
+ __pyx_k_tuple_138 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_137)); if (unlikely(!__pyx_k_tuple_138)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3499; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_138);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_138));
- /* "mtrand.pyx":3471
+ /* "mtrand.pyx":3501
* raise ValueError("p < 0")
* elif fp > 1:
* raise ValueError("p > 1") # <<<<<<<<<<<<<<
* return discnp_array_sc(self.internal_state, rk_binomial, size, ln, fp)
*
*/
- __pyx_k_tuple_135 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_135)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3471; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_135);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_134));
- PyTuple_SET_ITEM(__pyx_k_tuple_135, 0, ((PyObject *)__pyx_kp_s_134));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_134));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_135));
+ __pyx_k_tuple_140 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_139)); if (unlikely(!__pyx_k_tuple_140)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_140);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_140));
- /* "mtrand.pyx":3479
+ /* "mtrand.pyx":3509
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- * if np.any(np.less_equal(n, 0)):
- * raise ValueError("n <= 0") # <<<<<<<<<<<<<<
+ * if np.any(np.less(n, 0)):
+ * raise ValueError("n < 0") # <<<<<<<<<<<<<<
* if np.any(np.less(p, 0)):
* raise ValueError("p < 0")
*/
- __pyx_k_tuple_136 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_136)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3479; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_136);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_130));
- PyTuple_SET_ITEM(__pyx_k_tuple_136, 0, ((PyObject *)__pyx_kp_s_130));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_130));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_136));
+ __pyx_k_tuple_141 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_135)); if (unlikely(!__pyx_k_tuple_141)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_141);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_141));
- /* "mtrand.pyx":3481
- * raise ValueError("n <= 0")
+ /* "mtrand.pyx":3511
+ * raise ValueError("n < 0")
* if np.any(np.less(p, 0)):
* raise ValueError("p < 0") # <<<<<<<<<<<<<<
* if np.any(np.greater(p, 1)):
* raise ValueError("p > 1")
*/
- __pyx_k_tuple_137 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_137)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3481; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_137);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_132));
- PyTuple_SET_ITEM(__pyx_k_tuple_137, 0, ((PyObject *)__pyx_kp_s_132));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_132));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_137));
+ __pyx_k_tuple_142 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_137)); if (unlikely(!__pyx_k_tuple_142)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_142);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_142));
- /* "mtrand.pyx":3483
+ /* "mtrand.pyx":3513
* raise ValueError("p < 0")
* if np.any(np.greater(p, 1)):
* raise ValueError("p > 1") # <<<<<<<<<<<<<<
* return discnp_array(self.internal_state, rk_binomial, size, on, op)
*
*/
- __pyx_k_tuple_138 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_138)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3483; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_138);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_134));
- PyTuple_SET_ITEM(__pyx_k_tuple_138, 0, ((PyObject *)__pyx_kp_s_134));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_134));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_138));
+ __pyx_k_tuple_143 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_139)); if (unlikely(!__pyx_k_tuple_143)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_143);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_143));
- /* "mtrand.pyx":3560
+ /* "mtrand.pyx":3590
* if not PyErr_Occurred():
* if fn <= 0:
* raise ValueError("n <= 0") # <<<<<<<<<<<<<<
* if fp < 0:
* raise ValueError("p < 0")
*/
- __pyx_k_tuple_139 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_139)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3560; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_139);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_130));
- PyTuple_SET_ITEM(__pyx_k_tuple_139, 0, ((PyObject *)__pyx_kp_s_130));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_130));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_139));
+ __pyx_k_tuple_145 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_144)); if (unlikely(!__pyx_k_tuple_145)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_145);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_145));
- /* "mtrand.pyx":3562
+ /* "mtrand.pyx":3592
* raise ValueError("n <= 0")
* if fp < 0:
* raise ValueError("p < 0") # <<<<<<<<<<<<<<
* elif fp > 1:
* raise ValueError("p > 1")
*/
- __pyx_k_tuple_140 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_140)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_140);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_132));
- PyTuple_SET_ITEM(__pyx_k_tuple_140, 0, ((PyObject *)__pyx_kp_s_132));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_132));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_140));
+ __pyx_k_tuple_146 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_137)); if (unlikely(!__pyx_k_tuple_146)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_146);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_146));
- /* "mtrand.pyx":3564
+ /* "mtrand.pyx":3594
* raise ValueError("p < 0")
* elif fp > 1:
* raise ValueError("p > 1") # <<<<<<<<<<<<<<
* return discdd_array_sc(self.internal_state, rk_negative_binomial,
* size, fn, fp)
*/
- __pyx_k_tuple_141 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_141)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3564; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_141);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_134));
- PyTuple_SET_ITEM(__pyx_k_tuple_141, 0, ((PyObject *)__pyx_kp_s_134));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_134));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_141));
+ __pyx_k_tuple_147 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_139)); if (unlikely(!__pyx_k_tuple_147)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3594; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_147);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_147));
- /* "mtrand.pyx":3573
+ /* "mtrand.pyx":3603
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(n, 0)):
* raise ValueError("n <= 0") # <<<<<<<<<<<<<<
* if np.any(np.less(p, 0)):
* raise ValueError("p < 0")
*/
- __pyx_k_tuple_142 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_142)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3573; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_142);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_130));
- PyTuple_SET_ITEM(__pyx_k_tuple_142, 0, ((PyObject *)__pyx_kp_s_130));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_130));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_142));
+ __pyx_k_tuple_148 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_144)); if (unlikely(!__pyx_k_tuple_148)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3603; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_148);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_148));
- /* "mtrand.pyx":3575
+ /* "mtrand.pyx":3605
* raise ValueError("n <= 0")
* if np.any(np.less(p, 0)):
* raise ValueError("p < 0") # <<<<<<<<<<<<<<
* if np.any(np.greater(p, 1)):
* raise ValueError("p > 1")
*/
- __pyx_k_tuple_143 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_143)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3575; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_143);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_132));
- PyTuple_SET_ITEM(__pyx_k_tuple_143, 0, ((PyObject *)__pyx_kp_s_132));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_132));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_143));
+ __pyx_k_tuple_149 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_137)); if (unlikely(!__pyx_k_tuple_149)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3605; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_149);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_149));
- /* "mtrand.pyx":3577
+ /* "mtrand.pyx":3607
* raise ValueError("p < 0")
* if np.any(np.greater(p, 1)):
* raise ValueError("p > 1") # <<<<<<<<<<<<<<
* return discdd_array(self.internal_state, rk_negative_binomial, size,
* on, op)
*/
- __pyx_k_tuple_144 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_144)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_144);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_134));
- PyTuple_SET_ITEM(__pyx_k_tuple_144, 0, ((PyObject *)__pyx_kp_s_134));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_134));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_144));
+ __pyx_k_tuple_150 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_139)); if (unlikely(!__pyx_k_tuple_150)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3607; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_150);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_150));
- /* "mtrand.pyx":3638
+ /* "mtrand.pyx":3668
* if not PyErr_Occurred():
* if lam < 0:
* raise ValueError("lam < 0") # <<<<<<<<<<<<<<
* if lam > self.poisson_lam_max:
* raise ValueError("lam value too large")
*/
- __pyx_k_tuple_147 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_147)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_147);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_146));
- PyTuple_SET_ITEM(__pyx_k_tuple_147, 0, ((PyObject *)__pyx_kp_s_146));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_146));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_147));
+ __pyx_k_tuple_153 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_152)); if (unlikely(!__pyx_k_tuple_153)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3668; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_153);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_153));
- /* "mtrand.pyx":3640
+ /* "mtrand.pyx":3670
* raise ValueError("lam < 0")
* if lam > self.poisson_lam_max:
* raise ValueError("lam value too large") # <<<<<<<<<<<<<<
* return discd_array_sc(self.internal_state, rk_poisson, size, flam)
*
*/
- __pyx_k_tuple_149 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_149)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3640; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_149);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_148));
- PyTuple_SET_ITEM(__pyx_k_tuple_149, 0, ((PyObject *)__pyx_kp_s_148));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_148));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_149));
+ __pyx_k_tuple_155 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_154)); if (unlikely(!__pyx_k_tuple_155)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3670; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_155);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_155));
- /* "mtrand.pyx":3647
+ /* "mtrand.pyx":3677
* olam = <ndarray>PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less(olam, 0)):
* raise ValueError("lam < 0") # <<<<<<<<<<<<<<
* if np.any(np.greater(olam, self.poisson_lam_max)):
* raise ValueError("lam value too large.")
*/
- __pyx_k_tuple_150 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_150)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3647; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_150);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_146));
- PyTuple_SET_ITEM(__pyx_k_tuple_150, 0, ((PyObject *)__pyx_kp_s_146));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_146));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_150));
+ __pyx_k_tuple_156 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_152)); if (unlikely(!__pyx_k_tuple_156)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3677; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_156);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_156));
- /* "mtrand.pyx":3649
+ /* "mtrand.pyx":3679
* raise ValueError("lam < 0")
* if np.any(np.greater(olam, self.poisson_lam_max)):
* raise ValueError("lam value too large.") # <<<<<<<<<<<<<<
* return discd_array(self.internal_state, rk_poisson, size, olam)
*
*/
- __pyx_k_tuple_152 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_152)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3649; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_152);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_151));
- PyTuple_SET_ITEM(__pyx_k_tuple_152, 0, ((PyObject *)__pyx_kp_s_151));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_151));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_152));
+ __pyx_k_tuple_158 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_157)); if (unlikely(!__pyx_k_tuple_158)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3679; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_158);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_158));
- /* "mtrand.pyx":3730
+ /* "mtrand.pyx":3760
* if not PyErr_Occurred():
* if fa <= 1.0:
* raise ValueError("a <= 1.0") # <<<<<<<<<<<<<<
* return discd_array_sc(self.internal_state, rk_zipf, size, fa)
*
*/
- __pyx_k_tuple_154 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_154)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3730; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_154);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_153));
- PyTuple_SET_ITEM(__pyx_k_tuple_154, 0, ((PyObject *)__pyx_kp_s_153));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_153));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_154));
+ __pyx_k_tuple_160 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_159)); if (unlikely(!__pyx_k_tuple_160)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3760; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_160);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_160));
- /* "mtrand.pyx":3737
+ /* "mtrand.pyx":3767
* oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(oa, 1.0)):
* raise ValueError("a <= 1.0") # <<<<<<<<<<<<<<
* return discd_array(self.internal_state, rk_zipf, size, oa)
*
*/
- __pyx_k_tuple_155 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_155)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3737; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_155);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_153));
- PyTuple_SET_ITEM(__pyx_k_tuple_155, 0, ((PyObject *)__pyx_kp_s_153));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_153));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_155));
+ __pyx_k_tuple_161 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_159)); if (unlikely(!__pyx_k_tuple_161)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3767; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_161);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_161));
- /* "mtrand.pyx":3791
+ /* "mtrand.pyx":3821
* if not PyErr_Occurred():
* if fp < 0.0:
* raise ValueError("p < 0.0") # <<<<<<<<<<<<<<
* if fp > 1.0:
* raise ValueError("p > 1.0")
*/
- __pyx_k_tuple_157 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_157)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_157);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_156));
- PyTuple_SET_ITEM(__pyx_k_tuple_157, 0, ((PyObject *)__pyx_kp_s_156));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_156));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_157));
+ __pyx_k_tuple_163 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_162)); if (unlikely(!__pyx_k_tuple_163)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_163);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_163));
- /* "mtrand.pyx":3793
+ /* "mtrand.pyx":3823
* raise ValueError("p < 0.0")
* if fp > 1.0:
* raise ValueError("p > 1.0") # <<<<<<<<<<<<<<
* return discd_array_sc(self.internal_state, rk_geometric, size, fp)
*
*/
- __pyx_k_tuple_159 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_159)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3793; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_159);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_158));
- PyTuple_SET_ITEM(__pyx_k_tuple_159, 0, ((PyObject *)__pyx_kp_s_158));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_158));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_159));
+ __pyx_k_tuple_165 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_164)); if (unlikely(!__pyx_k_tuple_165)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_165);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_165));
- /* "mtrand.pyx":3801
+ /* "mtrand.pyx":3831
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less(op, 0.0)):
* raise ValueError("p < 0.0") # <<<<<<<<<<<<<<
* if np.any(np.greater(op, 1.0)):
* raise ValueError("p > 1.0")
*/
- __pyx_k_tuple_160 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_160)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_160);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_156));
- PyTuple_SET_ITEM(__pyx_k_tuple_160, 0, ((PyObject *)__pyx_kp_s_156));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_156));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_160));
+ __pyx_k_tuple_166 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_162)); if (unlikely(!__pyx_k_tuple_166)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_166);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_166));
- /* "mtrand.pyx":3803
+ /* "mtrand.pyx":3833
* raise ValueError("p < 0.0")
* if np.any(np.greater(op, 1.0)):
* raise ValueError("p > 1.0") # <<<<<<<<<<<<<<
* return discd_array(self.internal_state, rk_geometric, size, op)
*
*/
- __pyx_k_tuple_161 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_161)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_161);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_158));
- PyTuple_SET_ITEM(__pyx_k_tuple_161, 0, ((PyObject *)__pyx_kp_s_158));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_158));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_161));
+ __pyx_k_tuple_167 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_164)); if (unlikely(!__pyx_k_tuple_167)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_167);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_167));
- /* "mtrand.pyx":3898
+ /* "mtrand.pyx":3929
* if not PyErr_Occurred():
- * if ngood < 1:
- * raise ValueError("ngood < 1") # <<<<<<<<<<<<<<
- * if nbad < 1:
- * raise ValueError("nbad < 1")
+ * if lngood < 0:
+ * raise ValueError("ngood < 0") # <<<<<<<<<<<<<<
+ * if lnbad < 0:
+ * raise ValueError("nbad < 0")
*/
- __pyx_k_tuple_163 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_163)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3898; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_163);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_162));
- PyTuple_SET_ITEM(__pyx_k_tuple_163, 0, ((PyObject *)__pyx_kp_s_162));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_162));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_163));
+ __pyx_k_tuple_169 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_168)); if (unlikely(!__pyx_k_tuple_169)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3929; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_169);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_169));
- /* "mtrand.pyx":3900
- * raise ValueError("ngood < 1")
- * if nbad < 1:
- * raise ValueError("nbad < 1") # <<<<<<<<<<<<<<
- * if nsample < 1:
+ /* "mtrand.pyx":3931
+ * raise ValueError("ngood < 0")
+ * if lnbad < 0:
+ * raise ValueError("nbad < 0") # <<<<<<<<<<<<<<
+ * if lnsample < 1:
* raise ValueError("nsample < 1")
*/
- __pyx_k_tuple_165 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_165)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3900; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_165);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_164));
- PyTuple_SET_ITEM(__pyx_k_tuple_165, 0, ((PyObject *)__pyx_kp_s_164));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_164));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_165));
+ __pyx_k_tuple_171 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_170)); if (unlikely(!__pyx_k_tuple_171)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3931; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_171);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_171));
- /* "mtrand.pyx":3902
- * raise ValueError("nbad < 1")
- * if nsample < 1:
+ /* "mtrand.pyx":3933
+ * raise ValueError("nbad < 0")
+ * if lnsample < 1:
* raise ValueError("nsample < 1") # <<<<<<<<<<<<<<
- * if ngood + nbad < nsample:
+ * if lngood + lnbad < lnsample:
* raise ValueError("ngood + nbad < nsample")
*/
- __pyx_k_tuple_167 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_167)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3902; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_167);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_166));
- PyTuple_SET_ITEM(__pyx_k_tuple_167, 0, ((PyObject *)__pyx_kp_s_166));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_166));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_167));
+ __pyx_k_tuple_173 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_172)); if (unlikely(!__pyx_k_tuple_173)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3933; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_173);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_173));
- /* "mtrand.pyx":3904
+ /* "mtrand.pyx":3935
* raise ValueError("nsample < 1")
- * if ngood + nbad < nsample:
+ * if lngood + lnbad < lnsample:
* raise ValueError("ngood + nbad < nsample") # <<<<<<<<<<<<<<
* return discnmN_array_sc(self.internal_state, rk_hypergeometric, size,
* lngood, lnbad, lnsample)
*/
- __pyx_k_tuple_169 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_169)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3904; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_169);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_168));
- PyTuple_SET_ITEM(__pyx_k_tuple_169, 0, ((PyObject *)__pyx_kp_s_168));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_168));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_169));
+ __pyx_k_tuple_175 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_174)); if (unlikely(!__pyx_k_tuple_175)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3935; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_175);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_175));
- /* "mtrand.pyx":3915
+ /* "mtrand.pyx":3945
* onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED)
- * if np.any(np.less(ongood, 1)):
- * raise ValueError("ngood < 1") # <<<<<<<<<<<<<<
- * if np.any(np.less(onbad, 1)):
- * raise ValueError("nbad < 1")
- */
- __pyx_k_tuple_170 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_170)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3915; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_170);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_162));
- PyTuple_SET_ITEM(__pyx_k_tuple_170, 0, ((PyObject *)__pyx_kp_s_162));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_162));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_170));
-
- /* "mtrand.pyx":3917
- * raise ValueError("ngood < 1")
- * if np.any(np.less(onbad, 1)):
- * raise ValueError("nbad < 1") # <<<<<<<<<<<<<<
+ * if np.any(np.less(ongood, 0)):
+ * raise ValueError("ngood < 0") # <<<<<<<<<<<<<<
+ * if np.any(np.less(onbad, 0)):
+ * raise ValueError("nbad < 0")
+ */
+ __pyx_k_tuple_176 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_168)); if (unlikely(!__pyx_k_tuple_176)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3945; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_176);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_176));
+
+ /* "mtrand.pyx":3947
+ * raise ValueError("ngood < 0")
+ * if np.any(np.less(onbad, 0)):
+ * raise ValueError("nbad < 0") # <<<<<<<<<<<<<<
* if np.any(np.less(onsample, 1)):
* raise ValueError("nsample < 1")
*/
- __pyx_k_tuple_171 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_171)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3917; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_171);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_164));
- PyTuple_SET_ITEM(__pyx_k_tuple_171, 0, ((PyObject *)__pyx_kp_s_164));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_164));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_171));
+ __pyx_k_tuple_177 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_170)); if (unlikely(!__pyx_k_tuple_177)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3947; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_177);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_177));
- /* "mtrand.pyx":3919
- * raise ValueError("nbad < 1")
+ /* "mtrand.pyx":3949
+ * raise ValueError("nbad < 0")
* if np.any(np.less(onsample, 1)):
* raise ValueError("nsample < 1") # <<<<<<<<<<<<<<
* if np.any(np.less(np.add(ongood, onbad),onsample)):
* raise ValueError("ngood + nbad < nsample")
*/
- __pyx_k_tuple_172 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_172)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_172);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_166));
- PyTuple_SET_ITEM(__pyx_k_tuple_172, 0, ((PyObject *)__pyx_kp_s_166));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_166));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_172));
+ __pyx_k_tuple_178 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_172)); if (unlikely(!__pyx_k_tuple_178)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3949; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_178);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_178));
- /* "mtrand.pyx":3921
+ /* "mtrand.pyx":3951
* raise ValueError("nsample < 1")
* if np.any(np.less(np.add(ongood, onbad),onsample)):
* raise ValueError("ngood + nbad < nsample") # <<<<<<<<<<<<<<
* return discnmN_array(self.internal_state, rk_hypergeometric, size,
* ongood, onbad, onsample)
*/
- __pyx_k_tuple_173 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_173)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3921; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_173);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_168));
- PyTuple_SET_ITEM(__pyx_k_tuple_173, 0, ((PyObject *)__pyx_kp_s_168));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_168));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_173));
+ __pyx_k_tuple_179 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_174)); if (unlikely(!__pyx_k_tuple_179)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3951; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_179);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_179));
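The hypergeometric hunks above are more than a constant renumbering: the quoted mtrand.pyx lines change the scalar checks from `ngood < 1`/`nbad < 1` to `lngood < 0`/`lnbad < 0`, and the array path from `np.less(ongood, 1)` to `np.less(ongood, 0)`, so urns with zero good or zero bad items become legal. A minimal pure-Python restatement of the relaxed scalar validation, assuming the pyx lines quoted in the comments are the complete set of checks:

    def check_hypergeometric(ngood, nbad, nsample):
        # Mirrors the updated pyx checks quoted above; zero counts now pass,
        # but the sample must still be at least 1 and fit in the population.
        if ngood < 0:
            raise ValueError("ngood < 0")
        if nbad < 0:
            raise ValueError("nbad < 0")
        if nsample < 1:
            raise ValueError("nsample < 1")
        if ngood + nbad < nsample:
            raise ValueError("ngood + nbad < nsample")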
- /* "mtrand.pyx":4005
+ /* "mtrand.pyx":4035
* if not PyErr_Occurred():
* if fp <= 0.0:
* raise ValueError("p <= 0.0") # <<<<<<<<<<<<<<
* if fp >= 1.0:
* raise ValueError("p >= 1.0")
*/
- __pyx_k_tuple_175 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_175)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4005; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_175);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_174));
- PyTuple_SET_ITEM(__pyx_k_tuple_175, 0, ((PyObject *)__pyx_kp_s_174));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_174));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_175));
+ __pyx_k_tuple_181 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_180)); if (unlikely(!__pyx_k_tuple_181)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4035; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_181);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_181));
- /* "mtrand.pyx":4007
+ /* "mtrand.pyx":4037
* raise ValueError("p <= 0.0")
* if fp >= 1.0:
* raise ValueError("p >= 1.0") # <<<<<<<<<<<<<<
* return discd_array_sc(self.internal_state, rk_logseries, size, fp)
*
*/
- __pyx_k_tuple_177 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_177)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4007; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_177);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_176));
- PyTuple_SET_ITEM(__pyx_k_tuple_177, 0, ((PyObject *)__pyx_kp_s_176));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_176));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_177));
+ __pyx_k_tuple_183 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_182)); if (unlikely(!__pyx_k_tuple_183)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4037; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_183);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_183));
- /* "mtrand.pyx":4014
+ /* "mtrand.pyx":4044
* op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
* if np.any(np.less_equal(op, 0.0)):
* raise ValueError("p <= 0.0") # <<<<<<<<<<<<<<
* if np.any(np.greater_equal(op, 1.0)):
* raise ValueError("p >= 1.0")
*/
- __pyx_k_tuple_178 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_178)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4014; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_178);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_174));
- PyTuple_SET_ITEM(__pyx_k_tuple_178, 0, ((PyObject *)__pyx_kp_s_174));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_174));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_178));
+ __pyx_k_tuple_184 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_180)); if (unlikely(!__pyx_k_tuple_184)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4044; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_184);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_184));
- /* "mtrand.pyx":4016
+ /* "mtrand.pyx":4046
* raise ValueError("p <= 0.0")
* if np.any(np.greater_equal(op, 1.0)):
* raise ValueError("p >= 1.0") # <<<<<<<<<<<<<<
* return discd_array(self.internal_state, rk_logseries, size, op)
*
*/
- __pyx_k_tuple_179 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_179)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4016; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_179);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_176));
- PyTuple_SET_ITEM(__pyx_k_tuple_179, 0, ((PyObject *)__pyx_kp_s_176));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_176));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_179));
+ __pyx_k_tuple_185 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_182)); if (unlikely(!__pyx_k_tuple_185)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4046; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_185);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_185));
- /* "mtrand.pyx":4119
+ /* "mtrand.pyx":4149
* shape = size
* if len(mean.shape) != 1:
* raise ValueError("mean must be 1 dimensional") # <<<<<<<<<<<<<<
* if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
* raise ValueError("cov must be 2 dimensional and square")
*/
- __pyx_k_tuple_181 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_181)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_181);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_180));
- PyTuple_SET_ITEM(__pyx_k_tuple_181, 0, ((PyObject *)__pyx_kp_s_180));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_180));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_181));
+ __pyx_k_tuple_187 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_186)); if (unlikely(!__pyx_k_tuple_187)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_187);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_187));
- /* "mtrand.pyx":4121
+ /* "mtrand.pyx":4151
* raise ValueError("mean must be 1 dimensional")
* if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
* raise ValueError("cov must be 2 dimensional and square") # <<<<<<<<<<<<<<
* if mean.shape[0] != cov.shape[0]:
* raise ValueError("mean and cov must have same length")
*/
- __pyx_k_tuple_183 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_183)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_183);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_182));
- PyTuple_SET_ITEM(__pyx_k_tuple_183, 0, ((PyObject *)__pyx_kp_s_182));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_182));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_183));
+ __pyx_k_tuple_189 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_188)); if (unlikely(!__pyx_k_tuple_189)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_189);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_189));
- /* "mtrand.pyx":4123
+ /* "mtrand.pyx":4153
* raise ValueError("cov must be 2 dimensional and square")
* if mean.shape[0] != cov.shape[0]:
* raise ValueError("mean and cov must have same length") # <<<<<<<<<<<<<<
* # Compute shape of output
- * if isinstance(shape, int):
+ * if isinstance(shape, (int, long, np.integer)):
*/
- __pyx_k_tuple_185 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_185)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_185);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_184));
- PyTuple_SET_ITEM(__pyx_k_tuple_185, 0, ((PyObject *)__pyx_kp_s_184));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_184));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_185));
+ __pyx_k_tuple_191 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_190)); if (unlikely(!__pyx_k_tuple_191)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_191);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_191));
- /* "mtrand.pyx":4216
+ /* "mtrand.pyx":4157
+ * if isinstance(shape, (int, long, np.integer)):
+ * shape = [shape]
+ * final_shape = list(shape[:]) # <<<<<<<<<<<<<<
+ * final_shape.append(mean.shape[0])
+ * # Create a matrix of independent standard normally distributed random
+ */
+ __pyx_k_slice_192 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_k_slice_192)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_slice_192);
+ __Pyx_GIVEREF(__pyx_k_slice_192);
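The new __pyx_k_slice_192 constant backs the `shape[:]` copy in multivariate_normal's updated size handling, which per the quoted pyx comment now also accepts `long` and `np.integer` scalars, not just `int`. A sketch of the normalization under those assumptions (the helper name is hypothetical; py2 `long` is folded into `int` here):

    import numpy as np

    def _mvn_output_shape(shape, mean):
        # Scalar sizes are wrapped in a one-element list, then the event
        # dimension mean.shape[0] is appended to form the output shape.
        if isinstance(shape, (int, np.integer)):
            shape = [shape]
        final_shape = list(shape[:])
        final_shape.append(mean.shape[0])
        return final_shape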
+
+ /* "mtrand.pyx":4246
*
* if kahan_sum(pix, d-1) > (1.0 + 1e-12):
* raise ValueError("sum(pvals[:-1]) > 1.0") # <<<<<<<<<<<<<<
*
* if size is None:
*/
- __pyx_k_tuple_188 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_188)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_188);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_187));
- PyTuple_SET_ITEM(__pyx_k_tuple_188, 0, ((PyObject *)__pyx_kp_s_187));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_187));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_188));
+ __pyx_k_tuple_195 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_194)); if (unlikely(!__pyx_k_tuple_195)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4246; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_195);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_195));
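The multinomial check above compares kahan_sum(pix, d-1) against 1.0 + 1e-12: compensated summation keeps the rounding error of the probability sum well below that tolerance, so nearly-exact pvals are not rejected spuriously. A sketch of the standard Kahan technique, which the C helper in mtrand.pyx is assumed to follow:

    def kahan_sum(darr, n):
        # Sum darr[0:n] with a running error-compensation term c.
        s, c = darr[0], 0.0
        for i in range(1, n):
            y = darr[i] - c      # next term, corrected by the carried error
            t = s + y            # low-order bits of y may be lost here
            c = (t - s) - y      # recover those lost bits for the next step
            s = t
        return s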
+
+ /* "mtrand.pyx":4433
+ * # each row. So we can't just use ordinary assignment to swap the
+ * # rows; we need a bounce buffer.
+ * buf = np.empty(x.shape[1:], dtype=x.dtype) # <<<<<<<<<<<<<<
+ * while i > 0:
+ * j = rk_interval(i, self.internal_state)
+ */
+ __pyx_k_slice_196 = PySlice_New(__pyx_int_1, Py_None, Py_None); if (unlikely(!__pyx_k_slice_196)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4433; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_slice_196);
+ __Pyx_GIVEREF(__pyx_k_slice_196);
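__pyx_k_slice_196 implements the `x.shape[1:]` slice in the new shuffle bounce buffer. As the quoted pyx comment notes, each row of a multidimensional array is a view into the same memory, so plain assignment cannot swap two rows; a scratch row breaks the aliasing. A sketch of the loop, with np.random.randint standing in for the C-level rk_interval (an assumption, since rk_interval is not shown in this hunk):

    import numpy as np

    def shuffle_rows(x):
        buf = np.empty(x.shape[1:], dtype=x.dtype)  # bounce buffer: one row
        i = x.shape[0] - 1
        while i > 0:
            j = np.random.randint(0, i + 1)  # stand-in for rk_interval(i, state)
            buf[...] = x[j]                  # copy, not a view
            x[j] = x[i]
            x[i] = buf
            i -= 1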
- /* "mtrand.pyx":558
+ /* "mtrand.pyx":559
* """
* cdef rk_state *internal_state
* poisson_lam_max = np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10 # <<<<<<<<<<<<<<
*
* def __init__(self, seed=None):
*/
- __pyx_k_tuple_189 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_189)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_189);
- __Pyx_INCREF(((PyObject *)__pyx_n_s__l));
- PyTuple_SET_ITEM(__pyx_k_tuple_189, 0, ((PyObject *)__pyx_n_s__l));
- __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_189));
- __pyx_k_tuple_190 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_190)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_k_tuple_190);
- __Pyx_INCREF(((PyObject *)__pyx_n_s__l));
- PyTuple_SET_ITEM(__pyx_k_tuple_190, 0, ((PyObject *)__pyx_n_s__l));
- __Pyx_GIVEREF(((PyObject *)__pyx_n_s__l));
- __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_190));
+ __pyx_k_tuple_197 = PyTuple_Pack(1, ((PyObject *)__pyx_n_s__l)); if (unlikely(!__pyx_k_tuple_197)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_197);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_197));
+ __pyx_k_tuple_198 = PyTuple_Pack(1, ((PyObject *)__pyx_n_s__l)); if (unlikely(!__pyx_k_tuple_198)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_k_tuple_198);
+ __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_198));
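The __pyx_k_tuple_197/198 pair are the two 'l' arguments to np.iinfo in the poisson_lam_max class attribute, renumbered to pyx line 559. Restated as the plain Python it compiles from ('l' is the platform C long; the ten-sigma headroom reading is an inference, since for Poisson std ≈ sqrt(lam)):

    import numpy as np

    # Largest lam accepted by poisson(): the C long maximum, backed off by
    # roughly ten standard deviations so large samples cannot overflow.
    poisson_lam_max = np.iinfo('l').max - np.sqrt(np.iinfo('l').max) * 10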
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
@@ -22446,6 +23065,8 @@ static int __Pyx_InitGlobals(void) {
if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ __pyx_int_5 = PyInt_FromLong(5); if (unlikely(!__pyx_int_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__pyx_int_10 = PyInt_FromLong(10); if (unlikely(!__pyx_int_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__pyx_int_624 = PyInt_FromLong(624); if (unlikely(!__pyx_int_624)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
return 0;
@@ -22465,6 +23086,9 @@ PyMODINIT_FUNC PyInit_mtrand(void)
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
@@ -22502,6 +23126,16 @@ PyMODINIT_FUNC PyInit_mtrand(void)
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ Py_INCREF(__pyx_d);
+ #if PY_MAJOR_VERSION >= 3
+ {
+ PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (!PyDict_GetItemString(modules, "mtrand")) {
+ if (unlikely(PyDict_SetItemString(modules, "mtrand", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ }
+ #endif
__pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
@@ -22509,6 +23143,9 @@ PyMODINIT_FUNC PyInit_mtrand(void)
if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
/*--- Initialize various global constants etc. ---*/
if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
+ if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #endif
if (__pyx_module_is_main_mtrand) {
if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
}
@@ -22524,8 +23161,8 @@ PyMODINIT_FUNC PyInit_mtrand(void)
__pyx_ptype_6mtrand_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_6mtrand_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_6mtrand_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_6mtrand_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_6mtrand_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_6mtrand_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (PyType_Ready(&__pyx_type_6mtrand_RandomState) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (__Pyx_SetAttrString(__pyx_m, "RandomState", (PyObject *)&__pyx_type_6mtrand_RandomState) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyType_Ready(&__pyx_type_6mtrand_RandomState) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (__Pyx_SetAttrString(__pyx_m, "RandomState", (PyObject *)&__pyx_type_6mtrand_RandomState) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_6mtrand_RandomState = &__pyx_type_6mtrand_RandomState;
/*--- Type import code ---*/
/*--- Variable import code ---*/
@@ -22545,953 +23182,965 @@ PyMODINIT_FUNC PyInit_mtrand(void)
* import_array()
*
* import numpy as np # <<<<<<<<<<<<<<
+ * import operator
*
- * cdef object cont0_array(rk_state *state, rk_cont0 func, object size):
*/
__pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "mtrand.pyx":127
+ *
+ * import numpy as np
+ * import operator # <<<<<<<<<<<<<<
+ *
+ * cdef object cont0_array(rk_state *state, rk_cont0 func, object size):
+ */
+ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__operator), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__operator, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
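`import operator` is new at module scope. A plausible consumer (an assumption; the call site is not visible in this hunk) is operator.index, the standard way to coerce size-like arguments to true integers while rejecting floats:

    import operator

    def as_index(n):
        # Accepts ints and numpy integer scalars, raises TypeError for
        # floats; consistent with the (int, long, np.integer) checks above.
        return operator.index(n)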
- /* "mtrand.pyx":558
+ /* "mtrand.pyx":559
* """
* cdef rk_state *internal_state
* poisson_lam_max = np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10 # <<<<<<<<<<<<<<
*
* def __init__(self, seed=None):
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__iinfo); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__iinfo); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_189), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_197), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__max); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__max); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_3 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__sqrt); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__sqrt); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__iinfo); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__iinfo); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_k_tuple_190), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_k_tuple_198), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__max); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__max); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
- __pyx_t_1 = PyNumber_Multiply(__pyx_t_4, __pyx_int_10); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = PyNumber_Multiply(__pyx_t_4, __pyx_int_10); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_4 = PyNumber_Subtract(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyNumber_Subtract(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyDict_SetItem((PyObject *)__pyx_ptype_6mtrand_RandomState->tp_dict, __pyx_n_s__poisson_lam_max, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 558; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem((PyObject *)__pyx_ptype_6mtrand_RandomState->tp_dict, __pyx_n_s__poisson_lam_max, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
PyType_Modified(__pyx_ptype_6mtrand_RandomState);
- /* "mtrand.pyx":919
+ /* "mtrand.pyx":920
*
*
- * def choice(self, a, size=1, replace=True, p=None): # <<<<<<<<<<<<<<
+ * def choice(self, a, size=None, replace=True, p=None): # <<<<<<<<<<<<<<
* """
- * choice(a, size=1, replace=True, p=None)
+ * choice(a, size=None, replace=True, p=None)
*/
- __pyx_t_4 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 919; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 920; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_15 = __pyx_t_4;
+ __pyx_k_17 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
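The constant re-initialization above tracks choice()'s new signature: the size default moves from 1 to None, matching the size=None convention used by uniform, normal, and the other methods in this file. Under the usual numpy reading of that default (an assumption from the signature alone, not confirmed by this hunk), a bare call returns a single scalar draw rather than a length-1 array:

    import numpy as np

    np.random.choice(5)             # scalar draw from arange(5)
    np.random.choice(5, size=1)     # length-1 array, the old default behavior
    np.random.choice(5, size=(2, 3), replace=True)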
- /* "mtrand.pyx":1061
+ /* "mtrand.pyx":1092
*
*
* def uniform(self, low=0.0, high=1.0, size=None): # <<<<<<<<<<<<<<
* """
* uniform(low=0.0, high=1.0, size=1)
*/
- __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1061; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1092; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_35 = __pyx_t_4;
+ __pyx_k_40 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1061; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1092; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_36 = __pyx_t_4;
+ __pyx_k_41 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- /* "mtrand.pyx":1359
+ /* "mtrand.pyx":1390
* return cont0_array(self.internal_state, rk_gauss, size)
*
* def normal(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<<
* """
* normal(loc=0.0, scale=1.0, size=None)
*/
- __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1359; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_37 = __pyx_t_4;
+ __pyx_k_42 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1359; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1390; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_38 = __pyx_t_4;
+ __pyx_k_43 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- /* "mtrand.pyx":1518
+ /* "mtrand.pyx":1549
* return cont2_array(self.internal_state, rk_beta, size, oa, ob)
*
* def exponential(self, scale=1.0, size=None): # <<<<<<<<<<<<<<
* """
* exponential(scale=1.0, size=None)
*/
- __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1518; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1549; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_48 = __pyx_t_4;
+ __pyx_k_53 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- /* "mtrand.pyx":1682
+ /* "mtrand.pyx":1713
* return cont1_array(self.internal_state, rk_standard_gamma, size, oshape)
*
* def gamma(self, shape, scale=1.0, size=None): # <<<<<<<<<<<<<<
* """
* gamma(shape, scale=1.0, size=None)
*/
- __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1682; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1713; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_54 = __pyx_t_4;
+ __pyx_k_59 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- /* "mtrand.pyx":2702
+ /* "mtrand.pyx":2732
* return cont1_array(self.internal_state, rk_power, size, oa)
*
* def laplace(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<<
* """
* laplace(loc=0.0, scale=1.0, size=None)
*/
- __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2732; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_93 = __pyx_t_4;
+ __pyx_k_98 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2732; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_94 = __pyx_t_4;
+ __pyx_k_99 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- /* "mtrand.pyx":2792
+ /* "mtrand.pyx":2822
* return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale)
*
* def gumbel(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<<
* """
* gumbel(loc=0.0, scale=1.0, size=None)
*/
- __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_97 = __pyx_t_4;
+ __pyx_k_102 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_98 = __pyx_t_4;
+ __pyx_k_103 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- /* "mtrand.pyx":2923
+ /* "mtrand.pyx":2953
* return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale)
*
* def logistic(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<<
* """
* logistic(loc=0.0, scale=1.0, size=None)
*/
- __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2923; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2953; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_101 = __pyx_t_4;
+ __pyx_k_106 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2923; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2953; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_102 = __pyx_t_4;
+ __pyx_k_107 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- /* "mtrand.pyx":3011
+ /* "mtrand.pyx":3041
* return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale)
*
* def lognormal(self, mean=0.0, sigma=1.0, size=None): # <<<<<<<<<<<<<<
* """
* lognormal(mean=0.0, sigma=1.0, size=None)
*/
- __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3011; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3041; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_105 = __pyx_t_4;
+ __pyx_k_110 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3011; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3041; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_106 = __pyx_t_4;
+ __pyx_k_111 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- /* "mtrand.pyx":3132
+ /* "mtrand.pyx":3162
* return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma)
*
* def rayleigh(self, scale=1.0, size=None): # <<<<<<<<<<<<<<
* """
* rayleigh(scale=1.0, size=None)
*/
- __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_111 = __pyx_t_4;
+ __pyx_k_116 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- /* "mtrand.pyx":3581
+ /* "mtrand.pyx":3611
* on, op)
*
* def poisson(self, lam=1.0, size=None): # <<<<<<<<<<<<<<
* """
* poisson(lam=1.0, size=None)
*/
- __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3581; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3611; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_k_145 = __pyx_t_4;
+ __pyx_k_151 = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
- /* "mtrand.pyx":4462
+ /* "mtrand.pyx":4492
* return arr
*
* _rand = RandomState() # <<<<<<<<<<<<<<
* seed = _rand.seed
* get_state = _rand.get_state
*/
- __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6mtrand_RandomState)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_6mtrand_RandomState)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4492; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s___rand, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s___rand, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4492; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4463
+ /* "mtrand.pyx":4493
*
* _rand = RandomState()
* seed = _rand.seed # <<<<<<<<<<<<<<
* get_state = _rand.get_state
* set_state = _rand.set_state
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4463; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4493; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__seed); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4463; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__seed); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4493; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__seed, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4463; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__seed, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4493; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4464
+ /* "mtrand.pyx":4494
* _rand = RandomState()
* seed = _rand.seed
* get_state = _rand.get_state # <<<<<<<<<<<<<<
* set_state = _rand.set_state
* random_sample = _rand.random_sample
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4464; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4494; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__get_state); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4464; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__get_state); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4494; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__get_state, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4464; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__get_state, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4494; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4465
+ /* "mtrand.pyx":4495
* seed = _rand.seed
* get_state = _rand.get_state
* set_state = _rand.set_state # <<<<<<<<<<<<<<
* random_sample = _rand.random_sample
* choice = _rand.choice
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4465; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4495; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__set_state); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4465; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__set_state); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4495; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__set_state, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4465; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__set_state, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4495; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4466
+ /* "mtrand.pyx":4496
* get_state = _rand.get_state
* set_state = _rand.set_state
* random_sample = _rand.random_sample # <<<<<<<<<<<<<<
* choice = _rand.choice
* randint = _rand.randint
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4466; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4496; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__random_sample); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4466; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__random_sample); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4496; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__random_sample, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4466; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__random_sample, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4496; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4467
+ /* "mtrand.pyx":4497
* set_state = _rand.set_state
* random_sample = _rand.random_sample
* choice = _rand.choice # <<<<<<<<<<<<<<
* randint = _rand.randint
* bytes = _rand.bytes
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4467; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__choice); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4467; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__choice); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__choice, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4467; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__choice, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4468
+ /* "mtrand.pyx":4498
* random_sample = _rand.random_sample
* choice = _rand.choice
* randint = _rand.randint # <<<<<<<<<<<<<<
* bytes = _rand.bytes
* uniform = _rand.uniform
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4498; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__randint); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__randint); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4498; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__randint, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__randint, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4498; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4469
+ /* "mtrand.pyx":4499
* choice = _rand.choice
* randint = _rand.randint
* bytes = _rand.bytes # <<<<<<<<<<<<<<
* uniform = _rand.uniform
* rand = _rand.rand
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4469; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4499; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__bytes); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4469; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__bytes); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4499; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__bytes, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4469; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__bytes, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4499; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4470
+ /* "mtrand.pyx":4500
* randint = _rand.randint
* bytes = _rand.bytes
* uniform = _rand.uniform # <<<<<<<<<<<<<<
* rand = _rand.rand
* randn = _rand.randn
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__uniform); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__uniform); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__uniform, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__uniform, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4471
+ /* "mtrand.pyx":4501
* bytes = _rand.bytes
* uniform = _rand.uniform
* rand = _rand.rand # <<<<<<<<<<<<<<
* randn = _rand.randn
* random_integers = _rand.random_integers
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4471; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4471; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__rand, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4471; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__rand, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4472
+ /* "mtrand.pyx":4502
* uniform = _rand.uniform
* rand = _rand.rand
* randn = _rand.randn # <<<<<<<<<<<<<<
* random_integers = _rand.random_integers
* standard_normal = _rand.standard_normal
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4472; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__randn); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4472; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__randn); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__randn, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4472; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__randn, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4473
+ /* "mtrand.pyx":4503
* rand = _rand.rand
* randn = _rand.randn
* random_integers = _rand.random_integers # <<<<<<<<<<<<<<
* standard_normal = _rand.standard_normal
* normal = _rand.normal
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4473; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__random_integers); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4473; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__random_integers); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__random_integers, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4473; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__random_integers, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4474
+ /* "mtrand.pyx":4504
* randn = _rand.randn
* random_integers = _rand.random_integers
* standard_normal = _rand.standard_normal # <<<<<<<<<<<<<<
* normal = _rand.normal
* beta = _rand.beta
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4474; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__standard_normal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4474; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__standard_normal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__standard_normal, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4474; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__standard_normal, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4475
+ /* "mtrand.pyx":4505
* random_integers = _rand.random_integers
* standard_normal = _rand.standard_normal
* normal = _rand.normal # <<<<<<<<<<<<<<
* beta = _rand.beta
* exponential = _rand.exponential
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4475; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__normal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4475; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__normal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__normal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4475; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__normal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4476
+ /* "mtrand.pyx":4506
* standard_normal = _rand.standard_normal
* normal = _rand.normal
* beta = _rand.beta # <<<<<<<<<<<<<<
* exponential = _rand.exponential
* standard_exponential = _rand.standard_exponential
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4476; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__beta); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4476; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__beta); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__beta, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4476; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__beta, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4506; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4477
+ /* "mtrand.pyx":4507
* normal = _rand.normal
* beta = _rand.beta
* exponential = _rand.exponential # <<<<<<<<<<<<<<
* standard_exponential = _rand.standard_exponential
* standard_gamma = _rand.standard_gamma
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4477; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4507; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__exponential); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4477; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__exponential); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4507; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__exponential, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4477; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__exponential, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4507; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4478
+ /* "mtrand.pyx":4508
* beta = _rand.beta
* exponential = _rand.exponential
* standard_exponential = _rand.standard_exponential # <<<<<<<<<<<<<<
* standard_gamma = _rand.standard_gamma
* gamma = _rand.gamma
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s_191); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_199); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s_191, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4478; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_199, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4479
+ /* "mtrand.pyx":4509
* exponential = _rand.exponential
* standard_exponential = _rand.standard_exponential
* standard_gamma = _rand.standard_gamma # <<<<<<<<<<<<<<
* gamma = _rand.gamma
* f = _rand.f
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4479; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__standard_gamma); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4479; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__standard_gamma); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__standard_gamma, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4479; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__standard_gamma, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4480
+ /* "mtrand.pyx":4510
* standard_exponential = _rand.standard_exponential
* standard_gamma = _rand.standard_gamma
* gamma = _rand.gamma # <<<<<<<<<<<<<<
* f = _rand.f
* noncentral_f = _rand.noncentral_f
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__gamma); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__gamma); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gamma, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4480; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__gamma, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4481
+ /* "mtrand.pyx":4511
* standard_gamma = _rand.standard_gamma
* gamma = _rand.gamma
* f = _rand.f # <<<<<<<<<<<<<<
* noncentral_f = _rand.noncentral_f
* chisquare = _rand.chisquare
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4481; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__f); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4481; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__f); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__f, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4481; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__f, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4482
+ /* "mtrand.pyx":4512
* gamma = _rand.gamma
* f = _rand.f
* noncentral_f = _rand.noncentral_f # <<<<<<<<<<<<<<
* chisquare = _rand.chisquare
* noncentral_chisquare = _rand.noncentral_chisquare
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__noncentral_f); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__noncentral_f); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__noncentral_f, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4482; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__noncentral_f, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4483
+ /* "mtrand.pyx":4513
* f = _rand.f
* noncentral_f = _rand.noncentral_f
* chisquare = _rand.chisquare # <<<<<<<<<<<<<<
* noncentral_chisquare = _rand.noncentral_chisquare
* standard_cauchy = _rand.standard_cauchy
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4483; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__chisquare); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4483; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__chisquare); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__chisquare, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4483; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__chisquare, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4513; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4484
+ /* "mtrand.pyx":4514
* noncentral_f = _rand.noncentral_f
* chisquare = _rand.chisquare
* noncentral_chisquare = _rand.noncentral_chisquare # <<<<<<<<<<<<<<
* standard_cauchy = _rand.standard_cauchy
* standard_t = _rand.standard_t
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4484; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s_192); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4484; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_200); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s_192, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4484; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_200, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4485
+ /* "mtrand.pyx":4515
* chisquare = _rand.chisquare
* noncentral_chisquare = _rand.noncentral_chisquare
* standard_cauchy = _rand.standard_cauchy # <<<<<<<<<<<<<<
* standard_t = _rand.standard_t
* vonmises = _rand.vonmises
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4485; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4515; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__standard_cauchy); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4485; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__standard_cauchy); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4515; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__standard_cauchy, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4485; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__standard_cauchy, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4515; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4486
+ /* "mtrand.pyx":4516
* noncentral_chisquare = _rand.noncentral_chisquare
* standard_cauchy = _rand.standard_cauchy
* standard_t = _rand.standard_t # <<<<<<<<<<<<<<
* vonmises = _rand.vonmises
* pareto = _rand.pareto
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4516; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__standard_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__standard_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4516; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__standard_t, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4486; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__standard_t, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4516; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4487
+ /* "mtrand.pyx":4517
* standard_cauchy = _rand.standard_cauchy
* standard_t = _rand.standard_t
* vonmises = _rand.vonmises # <<<<<<<<<<<<<<
* pareto = _rand.pareto
* weibull = _rand.weibull
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4487; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4517; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__vonmises); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4487; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__vonmises); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4517; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__vonmises, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4487; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__vonmises, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4517; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4488
+ /* "mtrand.pyx":4518
* standard_t = _rand.standard_t
* vonmises = _rand.vonmises
* pareto = _rand.pareto # <<<<<<<<<<<<<<
* weibull = _rand.weibull
* power = _rand.power
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4488; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4518; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__pareto); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4488; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__pareto); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4518; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__pareto, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4488; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__pareto, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4518; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4489
+ /* "mtrand.pyx":4519
* vonmises = _rand.vonmises
* pareto = _rand.pareto
* weibull = _rand.weibull # <<<<<<<<<<<<<<
* power = _rand.power
* laplace = _rand.laplace
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__weibull); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__weibull); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__weibull, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4489; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__weibull, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4490
+ /* "mtrand.pyx":4520
* pareto = _rand.pareto
* weibull = _rand.weibull
* power = _rand.power # <<<<<<<<<<<<<<
* laplace = _rand.laplace
* gumbel = _rand.gumbel
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4490; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4520; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__power); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4490; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__power); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4520; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__power, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4490; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__power, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4520; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4491
+ /* "mtrand.pyx":4521
* weibull = _rand.weibull
* power = _rand.power
* laplace = _rand.laplace # <<<<<<<<<<<<<<
* gumbel = _rand.gumbel
* logistic = _rand.logistic
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4491; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__laplace); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4491; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__laplace); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__laplace, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4491; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__laplace, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4492
+ /* "mtrand.pyx":4522
* power = _rand.power
* laplace = _rand.laplace
* gumbel = _rand.gumbel # <<<<<<<<<<<<<<
* logistic = _rand.logistic
* lognormal = _rand.lognormal
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4492; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__gumbel); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4492; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__gumbel); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gumbel, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4492; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__gumbel, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4493
+ /* "mtrand.pyx":4523
* laplace = _rand.laplace
* gumbel = _rand.gumbel
* logistic = _rand.logistic # <<<<<<<<<<<<<<
* lognormal = _rand.lognormal
* rayleigh = _rand.rayleigh
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4493; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__logistic); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4493; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__logistic); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__logistic, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4493; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__logistic, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4494
+ /* "mtrand.pyx":4524
* gumbel = _rand.gumbel
* logistic = _rand.logistic
* lognormal = _rand.lognormal # <<<<<<<<<<<<<<
* rayleigh = _rand.rayleigh
* wald = _rand.wald
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4494; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__lognormal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4494; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__lognormal); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__lognormal, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4494; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__lognormal, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4495
+ /* "mtrand.pyx":4525
* logistic = _rand.logistic
* lognormal = _rand.lognormal
* rayleigh = _rand.rayleigh # <<<<<<<<<<<<<<
* wald = _rand.wald
* triangular = _rand.triangular
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4495; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4525; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__rayleigh); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4495; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__rayleigh); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4525; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__rayleigh, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4495; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__rayleigh, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4525; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4496
+ /* "mtrand.pyx":4526
* lognormal = _rand.lognormal
* rayleigh = _rand.rayleigh
* wald = _rand.wald # <<<<<<<<<<<<<<
* triangular = _rand.triangular
*
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4496; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4526; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__wald); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4496; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__wald); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4526; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__wald, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4496; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__wald, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4526; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4497
+ /* "mtrand.pyx":4527
* rayleigh = _rand.rayleigh
* wald = _rand.wald
* triangular = _rand.triangular # <<<<<<<<<<<<<<
*
* binomial = _rand.binomial
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4527; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__triangular); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__triangular); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4527; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__triangular, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4497; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__triangular, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4527; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4499
+ /* "mtrand.pyx":4529
* triangular = _rand.triangular
*
* binomial = _rand.binomial # <<<<<<<<<<<<<<
* negative_binomial = _rand.negative_binomial
* poisson = _rand.poisson
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4499; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__binomial); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4499; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__binomial); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__binomial, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4499; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__binomial, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4500
+ /* "mtrand.pyx":4530
*
* binomial = _rand.binomial
* negative_binomial = _rand.negative_binomial # <<<<<<<<<<<<<<
* poisson = _rand.poisson
* zipf = _rand.zipf
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4530; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__negative_binomial); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__negative_binomial); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4530; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__negative_binomial, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4500; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__negative_binomial, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4530; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4501
+ /* "mtrand.pyx":4531
* binomial = _rand.binomial
* negative_binomial = _rand.negative_binomial
* poisson = _rand.poisson # <<<<<<<<<<<<<<
* zipf = _rand.zipf
* geometric = _rand.geometric
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__poisson); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__poisson); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__poisson, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4501; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__poisson, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4502
+ /* "mtrand.pyx":4532
* negative_binomial = _rand.negative_binomial
* poisson = _rand.poisson
* zipf = _rand.zipf # <<<<<<<<<<<<<<
* geometric = _rand.geometric
* hypergeometric = _rand.hypergeometric
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4532; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__zipf); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__zipf); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4532; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__zipf, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__zipf, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4532; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4503
+ /* "mtrand.pyx":4533
* poisson = _rand.poisson
* zipf = _rand.zipf
* geometric = _rand.geometric # <<<<<<<<<<<<<<
* hypergeometric = _rand.hypergeometric
* logseries = _rand.logseries
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4533; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__geometric); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__geometric); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4533; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__geometric, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__geometric, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4533; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4504
+ /* "mtrand.pyx":4534
* zipf = _rand.zipf
* geometric = _rand.geometric
* hypergeometric = _rand.hypergeometric # <<<<<<<<<<<<<<
* logseries = _rand.logseries
*
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4534; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__hypergeometric); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__hypergeometric); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4534; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__hypergeometric, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__hypergeometric, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4534; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4505
+ /* "mtrand.pyx":4535
* geometric = _rand.geometric
* hypergeometric = _rand.hypergeometric
* logseries = _rand.logseries # <<<<<<<<<<<<<<
*
* multivariate_normal = _rand.multivariate_normal
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4535; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__logseries); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__logseries); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4535; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__logseries, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4505; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__logseries, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4535; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4507
+ /* "mtrand.pyx":4537
* logseries = _rand.logseries
*
* multivariate_normal = _rand.multivariate_normal # <<<<<<<<<<<<<<
* multinomial = _rand.multinomial
* dirichlet = _rand.dirichlet
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4507; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4537; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__multivariate_normal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4507; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__multivariate_normal); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4537; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__multivariate_normal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4507; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__multivariate_normal, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4537; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4508
+ /* "mtrand.pyx":4538
*
* multivariate_normal = _rand.multivariate_normal
* multinomial = _rand.multinomial # <<<<<<<<<<<<<<
* dirichlet = _rand.dirichlet
*
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4538; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__multinomial); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__multinomial); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4538; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__multinomial, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4508; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__multinomial, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4538; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4509
+ /* "mtrand.pyx":4539
* multivariate_normal = _rand.multivariate_normal
* multinomial = _rand.multinomial
* dirichlet = _rand.dirichlet # <<<<<<<<<<<<<<
*
* shuffle = _rand.shuffle
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__dirichlet); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__dirichlet); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__dirichlet, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4509; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__dirichlet, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- /* "mtrand.pyx":4511
+ /* "mtrand.pyx":4541
* dirichlet = _rand.dirichlet
*
* shuffle = _rand.shuffle # <<<<<<<<<<<<<<
* permutation = _rand.permutation
*/
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4541; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__shuffle); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s__shuffle); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4541; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__shuffle, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__shuffle, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4541; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- /* "mtrand.pyx":4512
+ /* "mtrand.pyx":4542
*
* shuffle = _rand.shuffle
* permutation = _rand.permutation # <<<<<<<<<<<<<<
*/
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4542; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__permutation); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s__permutation); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4542; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__permutation, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s__permutation, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4542; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
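/* A minimal sketch, not part of the generated file, of the binding change
 * repeated in every hunk above: module-level names such as `rand` are now
 * written straight into the module dict instead of being set as attributes
 * on the module object. Assuming a module object `m`, an interned name
 * object `name`, and a value `v` (all illustrative), the old and new forms
 * are roughly:
 *
 *     PyObject *d = PyModule_GetDict(m);       // borrowed reference
 *     // old: PyObject_SetAttr(m, name, v);    // goes through tp_setattro
 *     PyDict_SetItem(d, name, v);              // new: direct dict store
 *
 * The dict store bypasses the attribute protocol, which is why the generated
 * code can reuse the cached `__pyx_d` module-dict handle throughout.
 */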
/* "mtrand.pyx":1
@@ -23501,10 +24150,6 @@ PyMODINIT_FUNC PyInit_mtrand(void)
*/
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_1));
- if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_193), ((PyObject *)__pyx_kp_u_194)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_195), ((PyObject *)__pyx_kp_u_196)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_197), ((PyObject *)__pyx_kp_u_198)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_199), ((PyObject *)__pyx_kp_u_200)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_201), ((PyObject *)__pyx_kp_u_202)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_203), ((PyObject *)__pyx_kp_u_204)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_205), ((PyObject *)__pyx_kp_u_206)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
@@ -23544,7 +24189,11 @@ PyMODINIT_FUNC PyInit_mtrand(void)
if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_273), ((PyObject *)__pyx_kp_u_274)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_275), ((PyObject *)__pyx_kp_u_276)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_277), ((PyObject *)__pyx_kp_u_278)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_279), ((PyObject *)__pyx_kp_u_280)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_281), ((PyObject *)__pyx_kp_u_282)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_283), ((PyObject *)__pyx_kp_u_284)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_t_1, ((PyObject *)__pyx_kp_u_285), ((PyObject *)__pyx_kp_u_286)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s____test__, ((PyObject *)__pyx_t_1)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
goto __pyx_L0;
__pyx_L1_error:;
@@ -23584,17 +24233,32 @@ end:
}
#endif /* CYTHON_REFNANNY */
-static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
+static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
+ PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
+ if (unlikely(!result)) {
+ PyErr_Format(PyExc_NameError,
+#if PY_MAJOR_VERSION >= 3
+ "name '%U' is not defined", name);
+#else
+ "name '%s' is not defined", PyString_AS_STRING(name));
+#endif
+ }
+ return result;
+}
+
+static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
PyObject *result;
- result = PyObject_GetAttr(dict, name);
+#if CYTHON_COMPILING_IN_CPYTHON
+ result = PyDict_GetItem(__pyx_d, name);
+ if (result) {
+ Py_INCREF(result);
+ } else {
+#else
+ result = PyObject_GetItem(__pyx_d, name);
if (!result) {
- if (dict != __pyx_b) {
- PyErr_Clear();
- result = PyObject_GetAttr(__pyx_b, name);
- }
- if (!result) {
- PyErr_SetObject(PyExc_NameError, name);
- }
+ PyErr_Clear();
+#endif
+ result = __Pyx_GetBuiltinName(name);
}
return result;
}
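/* Rough shape, for orientation only, of the lookup order the helper above
 * implements on CPython builds (`__pyx_d` is the module dict, `name` an
 * interned string object; both C-API calls shown exist with these
 * signatures):
 *
 *     PyObject *v = PyDict_GetItem(__pyx_d, name);  // borrowed ref, no error set
 *     if (v != NULL)
 *         Py_INCREF(v);                             // hand back a new reference
 *     else
 *         v = __Pyx_GetBuiltinName(name);           // builtins; NameError on miss
 *
 * i.e. module globals shadow builtins, and a total miss surfaces as a
 * NameError rather than the AttributeError a bare PyObject_GetAttr would set.
 */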
@@ -23668,24 +24332,23 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
}
value = type;
#if PY_VERSION_HEX < 0x02050000
- if (PyInstance_Check(type)) {
- type = (PyObject*) ((PyInstanceObject*)type)->in_class;
- Py_INCREF(type);
- }
- else {
- type = 0;
- PyErr_SetString(PyExc_TypeError,
- "raise: exception must be an old-style class or instance");
- goto raise_error;
- }
- #else
- type = (PyObject*) Py_TYPE(type);
+ if (PyInstance_Check(type)) {
+ type = (PyObject*) ((PyInstanceObject*)type)->in_class;
Py_INCREF(type);
- if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
- PyErr_SetString(PyExc_TypeError,
- "raise: exception class must be a subclass of BaseException");
- goto raise_error;
- }
+ } else {
+ type = 0;
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception must be an old-style class or instance");
+ goto raise_error;
+ }
+ #else
+ type = (PyObject*) Py_TYPE(type);
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
#endif
}
__Pyx_ErrRestore(type, value, tb);
@@ -23723,8 +24386,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
- }
- else
+ } else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
@@ -23745,18 +24407,22 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject
"raise: exception class must be a subclass of BaseException");
goto bad;
}
+#if PY_VERSION_HEX >= 0x03030000
+ if (cause) {
+#else
if (cause && cause != Py_None) {
+#endif
PyObject *fixed_cause;
- if (PyExceptionClass_Check(cause)) {
+ if (cause == Py_None) {
+ fixed_cause = NULL;
+ } else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
- }
- else if (PyExceptionInstance_Check(cause)) {
+ } else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
- }
- else {
+ } else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
@@ -23919,6 +24585,181 @@ static void __Pyx_RaiseArgtupleInvalid(
(num_expected == 1) ? "" : "s", num_found);
}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
+ PyObject *r;
+ if (!j) return NULL;
+ r = PyObject_GetItem(o, j);
+ Py_DECREF(j);
+ return r;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o);
+ if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
+ PyObject *r = PyList_GET_ITEM(o, i);
+ Py_INCREF(r);
+ return r;
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+ return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o);
+ if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, i);
+ Py_INCREF(r);
+ return r;
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+ return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
+ int is_list, int wraparound, int boundscheck) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (is_list || PyList_CheckExact(o)) {
+ Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
+ if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
+ PyObject *r = PyList_GET_ITEM(o, n);
+ Py_INCREF(r);
+ return r;
+ }
+ }
+ else if (PyTuple_CheckExact(o)) {
+ Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
+ if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, n);
+ Py_INCREF(r);
+ return r;
+ }
+ } else {
+ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
+ if (likely(m && m->sq_item)) {
+ if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
+ Py_ssize_t l = m->sq_length(o);
+ if (likely(l >= 0)) {
+ i += l;
+ } else {
+ if (PyErr_ExceptionMatches(PyExc_OverflowError))
+ PyErr_Clear();
+ else
+ return NULL;
+ }
+ }
+ return m->sq_item(o, i);
+ }
+ }
+#else
+ if (is_list || PySequence_Check(o)) {
+ return PySequence_GetItem(o, i);
+ }
+#endif
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
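
These helpers inline the common list/tuple cases of Python's indexing rules: optional wraparound for negative indices, an optional bounds check, and a fall-through to the generic item protocol. The observable semantics, sketched in Python:

    seq = [10, 20, 30]
    assert seq[-1] == 30        # wraparound: -1 becomes len(seq) - 1
    try:
        seq[3]                  # out of range: raised via the generic path
    except IndexError:
        pass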
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
+ PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
+ PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
+ int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ PyMappingMethods* mp;
+#if PY_MAJOR_VERSION < 3
+ PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
+ if (likely(ms && ms->sq_slice)) {
+ if (!has_cstart) {
+ if (_py_start && (*_py_start != Py_None)) {
+ cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
+ if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+ } else
+ cstart = 0;
+ }
+ if (!has_cstop) {
+ if (_py_stop && (*_py_stop != Py_None)) {
+ cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
+ if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+ } else
+ cstop = PY_SSIZE_T_MAX;
+ }
+ if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
+ Py_ssize_t l = ms->sq_length(obj);
+ if (likely(l >= 0)) {
+ if (cstop < 0) {
+ cstop += l;
+ if (cstop < 0) cstop = 0;
+ }
+ if (cstart < 0) {
+ cstart += l;
+ if (cstart < 0) cstart = 0;
+ }
+ } else {
+ if (PyErr_ExceptionMatches(PyExc_OverflowError))
+ PyErr_Clear();
+ else
+ goto bad;
+ }
+ }
+ return ms->sq_slice(obj, cstart, cstop);
+ }
+#endif
+ mp = Py_TYPE(obj)->tp_as_mapping;
+ if (likely(mp && mp->mp_subscript))
+#endif
+ {
+ PyObject* result;
+ PyObject *py_slice, *py_start, *py_stop;
+ if (_py_slice) {
+ py_slice = *_py_slice;
+ } else {
+ PyObject* owned_start = NULL;
+ PyObject* owned_stop = NULL;
+ if (_py_start) {
+ py_start = *_py_start;
+ } else {
+ if (has_cstart) {
+ owned_start = py_start = PyInt_FromSsize_t(cstart);
+ if (unlikely(!py_start)) goto bad;
+ } else
+ py_start = Py_None;
+ }
+ if (_py_stop) {
+ py_stop = *_py_stop;
+ } else {
+ if (has_cstop) {
+ owned_stop = py_stop = PyInt_FromSsize_t(cstop);
+ if (unlikely(!py_stop)) {
+ Py_XDECREF(owned_start);
+ goto bad;
+ }
+ } else
+ py_stop = Py_None;
+ }
+ py_slice = PySlice_New(py_start, py_stop, Py_None);
+ Py_XDECREF(owned_start);
+ Py_XDECREF(owned_stop);
+ if (unlikely(!py_slice)) goto bad;
+ }
+#if CYTHON_COMPILING_IN_CPYTHON
+ result = mp->mp_subscript(obj, py_slice);
+#else
+ result = PyObject_GetItem(obj, py_slice);
+#endif
+ if (!_py_slice) {
+ Py_DECREF(py_slice);
+ }
+ return result;
+ }
+ PyErr_Format(PyExc_TypeError,
+ "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
+bad:
+ return NULL;
+}
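
__Pyx_PyObject_GetSlice prefers the (Python 2 only) sq_slice slot and otherwise builds a slice object for mp_subscript, which is exactly the equivalence below:

    a = list(range(6))
    assert a[1:4] == a[slice(1, 4)] == [1, 2, 3]
    assert a[:3] == a[slice(None, 3)] == [0, 1, 2]   # missing bounds become None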
+
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
@@ -24032,6 +24873,104 @@ bad:
return -1;
}
+static CYTHON_INLINE int __Pyx_PyObject_SetSlice(
+ PyObject* obj, PyObject* value, Py_ssize_t cstart, Py_ssize_t cstop,
+ PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
+ int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ PyMappingMethods* mp;
+#if PY_MAJOR_VERSION < 3
+ PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
+ if (likely(ms && ms->sq_ass_slice)) {
+ if (!has_cstart) {
+ if (_py_start && (*_py_start != Py_None)) {
+ cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
+ if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+ } else
+ cstart = 0;
+ }
+ if (!has_cstop) {
+ if (_py_stop && (*_py_stop != Py_None)) {
+ cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
+ if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+ } else
+ cstop = PY_SSIZE_T_MAX;
+ }
+ if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
+ Py_ssize_t l = ms->sq_length(obj);
+ if (likely(l >= 0)) {
+ if (cstop < 0) {
+ cstop += l;
+ if (cstop < 0) cstop = 0;
+ }
+ if (cstart < 0) {
+ cstart += l;
+ if (cstart < 0) cstart = 0;
+ }
+ } else {
+ if (PyErr_ExceptionMatches(PyExc_OverflowError))
+ PyErr_Clear();
+ else
+ goto bad;
+ }
+ }
+ return ms->sq_ass_slice(obj, cstart, cstop, value);
+ }
+#endif
+ mp = Py_TYPE(obj)->tp_as_mapping;
+ if (likely(mp && mp->mp_ass_subscript))
+#endif
+ {
+ int result;
+ PyObject *py_slice, *py_start, *py_stop;
+ if (_py_slice) {
+ py_slice = *_py_slice;
+ } else {
+ PyObject* owned_start = NULL;
+ PyObject* owned_stop = NULL;
+ if (_py_start) {
+ py_start = *_py_start;
+ } else {
+ if (has_cstart) {
+ owned_start = py_start = PyInt_FromSsize_t(cstart);
+ if (unlikely(!py_start)) goto bad;
+ } else
+ py_start = Py_None;
+ }
+ if (_py_stop) {
+ py_stop = *_py_stop;
+ } else {
+ if (has_cstop) {
+ owned_stop = py_stop = PyInt_FromSsize_t(cstop);
+ if (unlikely(!py_stop)) {
+ Py_XDECREF(owned_start);
+ goto bad;
+ }
+ } else
+ py_stop = Py_None;
+ }
+ py_slice = PySlice_New(py_start, py_stop, Py_None);
+ Py_XDECREF(owned_start);
+ Py_XDECREF(owned_stop);
+ if (unlikely(!py_slice)) goto bad;
+ }
+#if CYTHON_COMPILING_IN_CPYTHON
+ result = mp->mp_ass_subscript(obj, py_slice, value);
+#else
+ result = value ? PyObject_SetItem(obj, py_slice, value) : PyObject_DelItem(obj, py_slice);
+#endif
+ if (!_py_slice) {
+ Py_DECREF(py_slice);
+ }
+ return result;
+ }
+ PyErr_Format(PyExc_TypeError,
+ "'%.200s' object does not support slice %s",
+ Py_TYPE(obj)->tp_name, value ? "assignment" : "deletion");
+bad:
+ return -1;
+}
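
The setter is symmetric with the getter above; at the C level a NULL value selects deletion through the same slot. In Python terms:

    a = [0, 1, 2, 3, 4]
    a[1:3] = ['x']     # value present: slice assignment
    del a[2:]          # value NULL at the C level: slice deletion
    assert a == [0, 'x']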
+
static CYTHON_INLINE int __Pyx_CheckKeywordStrings(
PyObject *kwdict,
const char* function_name,
@@ -24083,6 +25022,129 @@ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
return 0;
}
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE void __Pyx_crop_slice(Py_ssize_t* _start, Py_ssize_t* _stop, Py_ssize_t* _length) {
+ Py_ssize_t start = *_start, stop = *_stop, length = *_length;
+ if (start < 0) {
+ start += length;
+ if (start < 0)
+ start = 0;
+ }
+ if (stop < 0)
+ stop += length;
+ else if (stop > length)
+ stop = length;
+ *_length = stop - start;
+ *_start = start;
+ *_stop = stop;
+}
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+static CYTHON_INLINE void __Pyx_copy_object_array(PyObject** restrict src, PyObject** restrict dest, Py_ssize_t length) {
+#else
+static CYTHON_INLINE void __Pyx_copy_object_array(PyObject** src, PyObject** dest, Py_ssize_t length) {
+#endif
+ PyObject *v;
+ Py_ssize_t i;
+ for (i = 0; i < length; i++) {
+ v = dest[i] = src[i];
+ Py_INCREF(v);
+ }
+}
+static CYTHON_INLINE PyObject* __Pyx_PyList_GetSlice(
+ PyObject* src, Py_ssize_t start, Py_ssize_t stop) {
+ PyObject* dest;
+ Py_ssize_t length = PyList_GET_SIZE(src);
+ __Pyx_crop_slice(&start, &stop, &length);
+ if (unlikely(length <= 0))
+ return PyList_New(0);
+ dest = PyList_New(length);
+ if (unlikely(!dest))
+ return NULL;
+ __Pyx_copy_object_array(
+ ((PyListObject*)src)->ob_item + start,
+ ((PyListObject*)dest)->ob_item,
+ length);
+ return dest;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyTuple_GetSlice(
+ PyObject* src, Py_ssize_t start, Py_ssize_t stop) {
+ PyObject* dest;
+ Py_ssize_t length = PyTuple_GET_SIZE(src);
+ __Pyx_crop_slice(&start, &stop, &length);
+ if (unlikely(length <= 0))
+ return PyTuple_New(0);
+ dest = PyTuple_New(length);
+ if (unlikely(!dest))
+ return NULL;
+ __Pyx_copy_object_array(
+ ((PyTupleObject*)src)->ob_item + start,
+ ((PyTupleObject*)dest)->ob_item,
+ length);
+ return dest;
+}
+#endif
+
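__Pyx_crop_slice clamps the bounds the same way slice.indices() does, after which the list/tuple specializations reduce to a plain copy loop over ob_item. For example:

    assert slice(-2, 100).indices(5) == (3, 5, 1)   # clamp into [0, length]
    assert list(range(5))[-2:100] == [3, 4]
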
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
+ PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
+ if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Format(PyExc_ImportError,
+ #if PY_MAJOR_VERSION < 3
+ "cannot import name %.230s", PyString_AS_STRING(name));
+ #else
+ "cannot import name %S", name);
+ #endif
+ }
+ return value;
+}
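
__Pyx_ImportFrom implements `from module import name` as an attribute lookup and rewrites the failure into an ImportError; a Python-level sketch of the helper (the real code also formats the name into the message with %.230s / %S):

    def import_from(module, name):
        try:
            return getattr(module, name)
        except AttributeError:
            raise ImportError("cannot import name %s" % name)

    import math
    assert import_from(math, "pi") == math.pi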
+
+static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
+ int r;
+ if (!j) return -1;
+ r = PyObject_SetItem(o, j, v);
+ Py_DECREF(j);
+ return r;
+}
+static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
+ int is_list, int wraparound, int boundscheck) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (is_list || PyList_CheckExact(o)) {
+ Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o));
+ if ((!boundscheck) || likely((n >= 0) & (n < PyList_GET_SIZE(o)))) {
+ PyObject* old = PyList_GET_ITEM(o, n);
+ Py_INCREF(v);
+ PyList_SET_ITEM(o, n, v);
+ Py_DECREF(old);
+ return 1;
+ }
+ } else {
+ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
+ if (likely(m && m->sq_ass_item)) {
+ if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
+ Py_ssize_t l = m->sq_length(o);
+ if (likely(l >= 0)) {
+ i += l;
+ } else {
+ if (PyErr_ExceptionMatches(PyExc_OverflowError))
+ PyErr_Clear();
+ else
+ return -1;
+ }
+ }
+ return m->sq_ass_item(o, i, v);
+ }
+ }
+#else
+#if CYTHON_COMPILING_IN_PYPY
+ if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) {
+#else
+ if (is_list || PySequence_Check(o)) {
+#endif
+ return PySequence_SetItem(o, i, v);
+ }
+#endif
+ return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v);
+}
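
__Pyx_SetItemInt_Fast is the assignment counterpart of the getter helpers: exact lists are updated in place via PyList_SET_ITEM (releasing the old item), everything else goes through sq_ass_item or PySequence_SetItem. Observable behaviour:

    a = [1, 2, 3]
    a[-1] = 99          # wraparound happens before the bounds check
    assert a == [1, 2, 99]
    try:
        a[5] = 0        # out of range: raises through the generic path
    except IndexError:
        pass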
+
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_COMPILING_IN_CPYTHON
PyThreadState *tstate = PyThreadState_GET();
@@ -24114,16 +25176,18 @@ static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb)
#endif
}
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) {
- PyObject *py_import = 0;
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
- py_import = __Pyx_GetAttrString(__pyx_b, "__import__");
+ #if PY_VERSION_HEX < 0x03030000
+ PyObject *py_import;
+ py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s____import__);
if (!py_import)
goto bad;
+ #endif
if (from_list)
list = from_list;
else {
@@ -24143,13 +25207,17 @@ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) {
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
- /* try package relative import first */
+ #if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(1);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
+ #else
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, 1);
+ #endif
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
@@ -24160,12 +25228,17 @@ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) {
}
#endif
if (!module) {
+ #if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
+ #else
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, level);
+ #endif
}
}
#else
@@ -24177,8 +25250,10 @@ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) {
name, global_dict, empty_dict, list, NULL);
#endif
bad:
- Py_XDECREF(empty_list);
+ #if PY_VERSION_HEX < 0x03030000
Py_XDECREF(py_import);
+ #endif
+ Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
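
On Python 3.3+ the helper now calls PyImport_ImportModuleLevelObject directly instead of looking up builtins.__import__; both entry points take the same (name, globals, locals, fromlist, level) arguments. At the Python level:

    mod = __import__("os", globals(), {}, ["path"], 0)   # absolute import, level 0
    import os.path
    assert mod.path is os.path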
@@ -24212,9 +25287,13 @@ static CYTHON_INLINE npy_intp __Pyx_PyInt_from_py_npy_intp(PyObject* x) {
else
return (npy_intp)__Pyx_PyInt_AsSignedLongLong(x);
} else {
+ #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+ #else
npy_intp val;
PyObject *v = __Pyx_PyNumber_Int(x);
- #if PY_VERSION_HEX < 0x03000000
+ #if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
@@ -24231,19 +25310,11 @@ static CYTHON_INLINE npy_intp __Pyx_PyInt_from_py_npy_intp(PyObject* x) {
if (likely(!ret))
return val;
}
+ #endif
return (npy_intp)-1;
}
}
-static CYTHON_INLINE void __Pyx_RaiseImportError(PyObject *name) {
-#if PY_MAJOR_VERSION < 3
- PyErr_Format(PyExc_ImportError, "cannot import name %.230s",
- PyString_AsString(name));
-#else
- PyErr_Format(PyExc_ImportError, "cannot import name %S", name);
-#endif
-}
-
static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_npy_intp(npy_intp val) {
const npy_intp neg_one = (npy_intp)-1, const_zero = (npy_intp)0;
const int is_unsigned = const_zero < neg_one;
@@ -24459,10 +25530,15 @@ static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) {
return (int)__Pyx_PyInt_AsLong(x);
}
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+#include "longintrepr.h"
+#endif
+#endif
static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
const unsigned long neg_one = (unsigned long)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
+#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
@@ -24475,6 +25551,16 @@ static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(unsigned long)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return (unsigned long) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned long");
@@ -24482,6 +25568,17 @@ static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
}
return (unsigned long)PyLong_AsUnsignedLong(x);
} else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(unsigned long)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return +(unsigned long) ((PyLongObject*)x)->ob_digit[0];
+ case -1: return -(unsigned long) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
return (unsigned long)PyLong_AsLong(x);
}
} else {
@@ -24494,10 +25591,15 @@ static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
}
}
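
The CYTHON_USE_PYLONG_INTERNALS fast paths added to the conversions below read ob_digit[0] directly, so they only apply to ints whose magnitude fits in a single internal digit. In CPython terms (Python 3 spelling; 2.x exposes the same data as sys.long_info):

    import sys
    assert sys.int_info.bits_per_digit in (15, 30)
    small = 7          # one digit: eligible for the inline switch on Py_SIZE(x)
    big = 1 << 40      # several digits: falls back to PyLong_AsUnsignedLong & co.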
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+#include "longintrepr.h"
+#endif
+#endif
static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) {
const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
+#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
@@ -24510,6 +25612,16 @@ static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObje
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(unsigned PY_LONG_LONG)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return (unsigned PY_LONG_LONG) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned PY_LONG_LONG");
@@ -24517,6 +25629,17 @@ static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObje
}
return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
} else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(unsigned PY_LONG_LONG)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return +(unsigned PY_LONG_LONG) ((PyLongObject*)x)->ob_digit[0];
+ case -1: return -(unsigned PY_LONG_LONG) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x);
}
} else {
@@ -24529,10 +25652,15 @@ static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObje
}
}
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+#include "longintrepr.h"
+#endif
+#endif
static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
const long neg_one = (long)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
+#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
@@ -24545,6 +25673,16 @@ static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(long)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return (long) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
@@ -24552,6 +25690,17 @@ static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
}
return (long)PyLong_AsUnsignedLong(x);
} else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(long)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return +(long) ((PyLongObject*)x)->ob_digit[0];
+ case -1: return -(long) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
return (long)PyLong_AsLong(x);
}
} else {
@@ -24564,10 +25713,15 @@ static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
}
}
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+#include "longintrepr.h"
+#endif
+#endif
static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
+#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
@@ -24580,6 +25734,16 @@ static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(PY_LONG_LONG)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return (PY_LONG_LONG) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to PY_LONG_LONG");
@@ -24587,6 +25751,17 @@ static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
}
return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
} else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(PY_LONG_LONG)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return +(PY_LONG_LONG) ((PyLongObject*)x)->ob_digit[0];
+ case -1: return -(PY_LONG_LONG) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
return (PY_LONG_LONG)PyLong_AsLongLong(x);
}
} else {
@@ -24599,10 +25774,15 @@ static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
}
}
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+#include "longintrepr.h"
+#endif
+#endif
static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
const signed long neg_one = (signed long)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
+#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
@@ -24615,6 +25795,16 @@ static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(signed long)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return (signed long) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to signed long");
@@ -24622,6 +25812,17 @@ static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
}
return (signed long)PyLong_AsUnsignedLong(x);
} else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(signed long)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return +(signed long) ((PyLongObject*)x)->ob_digit[0];
+ case -1: return -(signed long) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
return (signed long)PyLong_AsLong(x);
}
} else {
@@ -24634,10 +25835,15 @@ static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
}
}
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+#include "longintrepr.h"
+#endif
+#endif
static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) {
const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
+#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
@@ -24650,6 +25856,16 @@ static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject*
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(signed PY_LONG_LONG)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return (signed PY_LONG_LONG) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to signed PY_LONG_LONG");
@@ -24657,6 +25873,17 @@ static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject*
}
return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
} else {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
+#if CYTHON_USE_PYLONG_INTERNALS
+ if (sizeof(digit) <= sizeof(signed PY_LONG_LONG)) {
+ switch (Py_SIZE(x)) {
+ case 0: return 0;
+ case 1: return +(signed PY_LONG_LONG) ((PyLongObject*)x)->ob_digit[0];
+ case -1: return -(signed PY_LONG_LONG) ((PyLongObject*)x)->ob_digit[0];
+ }
+ }
+#endif
+#endif
return (signed PY_LONG_LONG)PyLong_AsLongLong(x);
}
} else {
@@ -24714,6 +25941,10 @@ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class
PyObject *result = 0;
PyObject *py_name = 0;
char warning[200];
+ Py_ssize_t basicsize;
+#ifdef Py_LIMITED_API
+ PyObject *py_basicsize;
+#endif
py_module = __Pyx_ImportModule(module_name);
if (!py_module)
goto bad;
@@ -24733,7 +25964,19 @@ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class
module_name, class_name);
goto bad;
}
- if (!strict && (size_t)((PyTypeObject *)result)->tp_basicsize > size) {
+#ifndef Py_LIMITED_API
+ basicsize = ((PyTypeObject *)result)->tp_basicsize;
+#else
+ py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
+ if (!py_basicsize)
+ goto bad;
+ basicsize = PyLong_AsSsize_t(py_basicsize);
+ Py_DECREF(py_basicsize);
+ py_basicsize = 0;
+ if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
+ goto bad;
+#endif
+ if (!strict && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility",
module_name, class_name);
@@ -24743,7 +25986,7 @@ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
#endif
}
- else if ((size_t)((PyTypeObject *)result)->tp_basicsize != size) {
+ else if ((size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%s.%s has the wrong size, try recompiling",
module_name, class_name);
@@ -24949,27 +26192,82 @@ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
return 0;
}
-
-/* Type Conversion Functions */
-
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char* c_str) {
+ return __Pyx_PyUnicode_FromStringAndSize(c_str, strlen(c_str));
+}
+static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
+ Py_ssize_t ignore;
+ return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+ if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+ PyUnicode_Check(o)) {
+#if PY_VERSION_HEX < 0x03030000
+ char* defenc_c;
+ PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+ if (!defenc) return NULL;
+ defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ {
+ char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+ char* c;
+ for (c = defenc_c; c < end; c++) {
+ if ((unsigned char) (*c) >= 128) {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+ }
+ }
+#endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/
+ *length = PyBytes_GET_SIZE(defenc);
+ return defenc_c;
+#else /* PY_VERSION_HEX < 0x03030000 */
+ if (PyUnicode_READY(o) == -1) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ if (PyUnicode_IS_ASCII(o)) {
+ *length = PyUnicode_GET_DATA_SIZE(o);
+ return PyUnicode_AsUTF8(o);
+ } else {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+#else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
+ return PyUnicode_AsUTF8AndSize(o, length);
+#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
+#endif /* PY_VERSION_HEX < 0x03030000 */
+ } else
+#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */
+ {
+ char* result;
+ int r = PyBytes_AsStringAndSize(o, &result, length);
+ if (r < 0) {
+ return NULL;
+ } else {
+ return result;
+ }
+ }
+}
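
The new __Pyx_PyObject_AsStringAndSize returns the default-encoded buffer for unicode objects (optionally verifying pure ASCII) and the raw buffer for bytes. The unicode branch corresponds to:

    text = u"abc"
    data = text.encode("ascii")    # the default-encoding step taken for unicode input
    assert data == b"abc" and len(data) == 3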
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
-
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
PyNumberMethods *m;
const char *name = NULL;
PyObject *res = NULL;
-#if PY_VERSION_HEX < 0x03000000
+#if PY_MAJOR_VERSION < 3
if (PyInt_Check(x) || PyLong_Check(x))
#else
if (PyLong_Check(x))
#endif
return Py_INCREF(x), x;
m = Py_TYPE(x)->tp_as_number;
-#if PY_VERSION_HEX < 0x03000000
+#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = PyNumber_Int(x);
@@ -24985,7 +26283,7 @@ static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
}
#endif
if (res) {
-#if PY_VERSION_HEX < 0x03000000
+#if PY_MAJOR_VERSION < 3
if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
if (!PyLong_Check(res)) {
@@ -25003,7 +26301,6 @@ static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
}
return res;
}
-
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject* x = PyNumber_Index(b);
@@ -25012,7 +26309,6 @@ static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_DECREF(x);
return ival;
}
-
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
#if PY_VERSION_HEX < 0x02050000
if (ival <= LONG_MAX)
@@ -25026,14 +26322,12 @@ static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
#endif
}
-
static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) {
unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x);
- if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) {
- return (size_t)-1;
- } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) {
- PyErr_SetString(PyExc_OverflowError,
- "value too large to convert to size_t");
+ if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) {
+ if ((val != (unsigned PY_LONG_LONG)-1) || !PyErr_Occurred())
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to size_t");
return (size_t)-1;
}
return (size_t)val;
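
The reordered size_t conversion reports overflow for any value that fails the round-trip cast, unless a Python error is already pending. The round-trip test, written out for a hypothetical 64-bit size_t (the width is an assumption for illustration only):

    SIZE_T_BITS = 64
    mask = (1 << SIZE_T_BITS) - 1
    assert ((1 << 70) & mask) != (1 << 70)   # fails the round-trip -> OverflowError
    assert ((1 << 20) & mask) == (1 << 20)   # in-range values convert unchanged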
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 43eeae5a6..c55d178e6 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -124,6 +124,7 @@ cdef extern from "initarray.h":
import_array()
import numpy as np
+import operator
cdef object cont0_array(rk_state *state, rk_cont0 func, object size):
cdef double *array_data
@@ -537,7 +538,7 @@ cdef class RandomState:
Parameters
----------
- seed : int or array_like, optional
+ seed : {None, int, array_like}, optional
Random seed initializing the pseudo-random number generator.
Can be an integer, an array (or other sequence) of integers of
any length, or ``None`` (the default).
@@ -916,9 +917,9 @@ cdef class RandomState:
return bytestring
- def choice(self, a, size=1, replace=True, p=None):
+ def choice(self, a, size=None, replace=True, p=None):
"""
- choice(a, size=1, replace=True, p=None)
+ choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
@@ -929,8 +930,9 @@ cdef class RandomState:
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
- size : int
- Positive integer, the size of the sample.
+ size : int or tuple of ints, optional
+ Output shape. Default is None, in which case a single value is
+ returned.
replace : boolean, optional
Whether the sample is with or without replacement
p : 1-D array-like, optional
@@ -993,21 +995,24 @@ cdef class RandomState:
"""
# Format and Verify input
- if isinstance(a, int):
- if a > 0:
- pop_size = a #population size
- else:
+ a = np.array(a, copy=False)
+ if a.ndim == 0:
+ try:
+ # __index__ must return an integer by python rules.
+ pop_size = operator.index(a.item())
+ except TypeError:
+ raise ValueError("a must be 1-dimensional or an integer")
+ if pop_size <= 0:
raise ValueError("a must be greater than 0")
+ elif a.ndim != 1:
+ raise ValueError("a must be 1-dimensional")
else:
- a = np.array(a, ndmin=1, copy=0)
- if a.ndim != 1:
- raise ValueError("a must be 1-dimensional")
- pop_size = a.size
+ pop_size = a.shape[0]
if pop_size is 0:
raise ValueError("a must be non-empty")
if None != p:
- p = np.array(p, dtype=np.double, ndmin=1, copy=0)
+ p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
@@ -1017,45 +1022,71 @@ cdef class RandomState:
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
+ shape = size
+ if shape is not None:
+ size = np.prod(shape, dtype=np.intp)
+ else:
+ size = 1
+
# Actual sampling
if replace:
if None != p:
cdf = p.cumsum()
cdf /= cdf[-1]
- uniform_samples = np.random.random(size)
+ uniform_samples = self.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
+ idx = np.array(idx, copy=False) # searchsorted returns a scalar
else:
- idx = self.randint(0, pop_size, size=size)
+ idx = self.randint(0, pop_size, size=shape)
else:
if size > pop_size:
- raise ValueError(''.join(["Cannot take a larger sample than ",
- "population when 'replace=False'"]))
+ raise ValueError("Cannot take a larger sample than "
+ "population when 'replace=False'")
if None != p:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
- found = np.zeros(size, dtype=np.int)
+ found = np.zeros(shape, dtype=np.int)
+ flat_found = found.ravel()
while n_uniq < size:
x = self.rand(size - n_uniq)
if n_uniq > 0:
- p[found[0:n_uniq]] = 0
+ p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
- new = np.unique(new)
- found[n_uniq:n_uniq + new.size] = new
+ _, unique_indices = np.unique(new, return_index=True)
+ unique_indices.sort()
+ new = new.take(unique_indices)
+ flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = self.permutation(pop_size)[:size]
+ if shape is not None:
+ idx.shape = shape
+
+ if shape is None and isinstance(idx, np.ndarray):
+ # In most cases a scalar will have been made an array
+ idx = idx.item(0)
#Use samples as indices for a if a is array-like
- if isinstance(a, int):
+ if a.ndim == 0:
return idx
- else:
- return a.take(idx)
+
+ if shape is not None and idx.ndim == 0:
+ # If size == () then the user requested a 0-d array as opposed to
+ # a scalar object when size is None. However a[idx] is always a
+ # scalar and not an array. So this makes sure the result is an
+ # array, taking into account that np.array(item) may not work
+ # for object arrays.
+ res = np.empty((), dtype=a.dtype)
+ res[()] = a[idx]
+ return res
+
+ return a[idx]
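
The reworked choice() distinguishes size=None (a scalar) from size=() (a 0-d array), matching the convention of the other samplers; the tests added below in test_random.py exercise exactly this. For instance:

    import numpy as np
    assert np.isscalar(np.random.choice(5))              # size=None -> scalar
    assert np.random.choice(5, size=()).ndim == 0        # size=() -> 0-d array
    assert np.random.choice(5, size=(2, 3)).shape == (2, 3)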
def uniform(self, low=0.0, high=1.0, size=None):
@@ -1483,7 +1514,7 @@ cdef class RandomState:
b : float
Beta, non-negative.
size : tuple of ints, optional
- The number of samples to draw. The ouput is packed according to
+ The number of samples to draw. The output is packed according to
the size given.
Returns
@@ -1808,7 +1839,6 @@ cdef class RandomState:
Notes
-----
-
The F statistic is used to compare in-group variances to between-group
variances. Calculating the distribution depends on the sampling, and
so it is a function of the respective degrees of freedom in the
@@ -3383,13 +3413,13 @@ cdef class RandomState:
Samples are drawn from a Binomial distribution with specified
parameters, n trials and p probability of success where
- n an integer > 0 and p is in the interval [0,1]. (n may be
+ n is an integer >= 0 and p is in the interval [0,1]. (n may be
input as a float, but it is truncated to an integer in use)
Parameters
----------
n : float (but truncated to an integer)
- parameter, > 0.
+ parameter, >= 0.
p : float
parameter, >= 0 and <=1.
size : {tuple, int}
@@ -3463,8 +3493,8 @@ cdef class RandomState:
fp = PyFloat_AsDouble(p)
ln = PyInt_AsLong(n)
if not PyErr_Occurred():
- if ln <= 0:
- raise ValueError("n <= 0")
+ if ln < 0:
+ raise ValueError("n < 0")
if fp < 0:
raise ValueError("p < 0")
elif fp > 1:
@@ -3475,8 +3505,8 @@ cdef class RandomState:
on = <ndarray>PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED)
op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- if np.any(np.less_equal(n, 0)):
- raise ValueError("n <= 0")
+ if np.any(np.less(n, 0)):
+ raise ValueError("n < 0")
if np.any(np.less(p, 0)):
raise ValueError("p < 0")
if np.any(np.greater(p, 1)):
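
Relaxing the check from n <= 0 to n < 0 makes n == 0 a valid corner case that always yields zero successes (issue #3480, covered by the new TestBinomial below):

    import numpy as np
    assert np.random.binomial(0, 0.5) == 0
    assert (np.random.binomial(np.zeros(2, dtype=int), 0.5) == 0).all()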
@@ -3816,20 +3846,21 @@ cdef class RandomState:
Parameters
----------
- ngood : float (but truncated to an integer)
- parameter, > 0.
- nbad : float
- parameter, >= 0.
- nsample : float
- parameter, > 0 and <= ngood+nbad
- size : {tuple, int}
+ ngood : int or array_like
+ Number of ways to make a good selection. Must be nonnegative.
+ nbad : int or array_like
+ Number of ways to make a bad selection. Must be nonnegative.
+ nsample : int or array_like
+ Number of items sampled. Must be at least 1 and at most
+ ``ngood + nbad``.
+ size : int or tuple of int
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn.
Returns
-------
- samples : {ndarray, scalar}
- where the values are all integers in [0, n].
+ samples : ndarray or scalar
+ The values are all integers in [0, n].
See Also
--------
@@ -3894,27 +3925,26 @@ cdef class RandomState:
lnbad = PyInt_AsLong(nbad)
lnsample = PyInt_AsLong(nsample)
if not PyErr_Occurred():
- if ngood < 1:
- raise ValueError("ngood < 1")
- if nbad < 1:
- raise ValueError("nbad < 1")
- if nsample < 1:
+ if lngood < 0:
+ raise ValueError("ngood < 0")
+ if lnbad < 0:
+ raise ValueError("nbad < 0")
+ if lnsample < 1:
raise ValueError("nsample < 1")
- if ngood + nbad < nsample:
+ if lngood + lnbad < lnsample:
raise ValueError("ngood + nbad < nsample")
return discnmN_array_sc(self.internal_state, rk_hypergeometric, size,
lngood, lnbad, lnsample)
-
PyErr_Clear()
ongood = <ndarray>PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED)
onbad = <ndarray>PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED)
onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED)
- if np.any(np.less(ongood, 1)):
- raise ValueError("ngood < 1")
- if np.any(np.less(onbad, 1)):
- raise ValueError("nbad < 1")
+ if np.any(np.less(ongood, 0)):
+ raise ValueError("ngood < 0")
+ if np.any(np.less(onbad, 0)):
+ raise ValueError("nbad < 0")
if np.any(np.less(onsample, 1)):
raise ValueError("nsample < 1")
if np.any(np.less(np.add(ongood, onbad),onsample)):
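
With ngood or nbad now allowed to be zero, the hypergeometric draws become deterministic at those boundaries, as the new tests below verify:

    import numpy as np
    assert (np.random.hypergeometric(5, 0, 3, size=4) == 3).all()   # no bad items
    assert (np.random.hypergeometric(0, 5, 3, size=4) == 0).all()   # no good items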
@@ -4037,7 +4067,7 @@ cdef class RandomState:
cov : 2-D array_like, of shape (N, N)
Covariance matrix of the distribution. Must be symmetric and
positive semi-definite for "physically meaningful" results.
- size : tuple of ints, optional
+ size : int or tuple of ints, optional
Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are
generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because
each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.
@@ -4122,7 +4152,7 @@ cdef class RandomState:
if mean.shape[0] != cov.shape[0]:
raise ValueError("mean and cov must have same length")
# Compute shape of output
- if isinstance(shape, int):
+ if isinstance(shape, (int, long, np.integer)):
shape = [shape]
final_shape = list(shape[:])
final_shape.append(mean.shape[0])
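
Widening the isinstance check lets numpy integer scalars act as a size, where previously they fell through to the tuple branch and failed. A quick check under the patched behaviour:

    import numpy as np
    out = np.random.multivariate_normal([0.0], [[1.0]], size=np.int64(2))
    assert out.shape == (2, 1)       # 2 samples from a 1-dimensional distribution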
diff --git a/numpy/random/setup.py b/numpy/random/setup.py
index dde3119b7..917623bba 100644
--- a/numpy/random/setup.py
+++ b/numpy/random/setup.py
@@ -1,3 +1,5 @@
+from __future__ import division, print_function
+
from os.path import join, split, dirname
import os
import sys
diff --git a/numpy/random/setupscons.py b/numpy/random/setupscons.py
deleted file mode 100644
index f5342c39e..000000000
--- a/numpy/random/setupscons.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import glob
-from os.path import join, split
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration, get_mathlibs
- config = Configuration('random',parent_package,top_path)
-
- source_files = [join('mtrand', i) for i in ['mtrand.c',
- 'mtrand.pyx',
- 'numpy.pxi',
- 'randomkit.c',
- 'randomkit.h',
- 'Python.pxi',
- 'initarray.c',
- 'initarray.h',
- 'distributions.c',
- 'distributions.h',
- ]]
- config.add_sconscript('SConstruct', source_files = source_files)
- config.add_data_files(('.', join('mtrand', 'randomkit.h')))
- config.add_data_dir('tests')
-
- return config
-
-def testcode_wincrypt():
- return """\
-/* check to see if _WIN32 is defined */
-int main(int argc, char *argv[])
-{
-#ifdef _WIN32
- return 0;
-#else
- return 1;
-#endif
-}
-"""
-
-if __name__ == '__main__':
- from numpy.distutils.core import setup
- setup(configuration=configuration)
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index ee40cce69..6959c45dc 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from numpy.testing import TestCase, run_module_suite, assert_,\
assert_raises
from numpy import random
@@ -5,6 +7,17 @@ from numpy.compat import asbytes
import numpy as np
+class TestBinomial(TestCase):
+ def test_n_zero(self):
+ # Tests the corner case of n == 0 for the binomial distribution.
+ # binomial(0, p) should be zero for any p in [0, 1].
+ # This test addresses issue #3480.
+ zeros = np.zeros(2, dtype='int')
+ for p in [0, .5, 1]:
+ assert_(random.binomial(0, p) == 0)
+ np.testing.assert_array_equal(random.binomial(zeros, p), zeros)
+
+
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
@@ -138,7 +151,7 @@ class TestRandomDist(TestCase):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
- desired = np.array([2, 1, 3])
+ desired = np.array([2, 3, 1])
np.testing.assert_array_equal(actual, desired)
def test_choice_noninteger(self):
@@ -149,7 +162,8 @@ class TestRandomDist(TestCase):
def test_choice_exceptions(self):
sample = np.random.choice
- assert_raises(ValueError, sample, -1,3)
+ assert_raises(ValueError, sample, -1, 3)
+ assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1,2],[3,4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1,2,3,4], 3,
@@ -161,6 +175,42 @@ class TestRandomDist(TestCase):
assert_raises(ValueError, sample, [1,2,3], 2, replace=False,
p=[1,0,0])
+ def test_choice_return_shape(self):
+ p = [0.1,0.9]
+ # Check scalar
+ assert_(np.isscalar(np.random.choice(2, replace=True)))
+ assert_(np.isscalar(np.random.choice(2, replace=False)))
+ assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
+ assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
+ assert_(np.isscalar(np.random.choice([1,2], replace=True)))
+ assert_(np.random.choice([None], replace=True) is None)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(np.random.choice(arr, replace=True) is a)
+
+ # Check 0-d array
+ s = tuple()
+ assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
+ assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
+ assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
+ assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
+ assert_(not np.isscalar(np.random.choice([1,2], s, replace=True)))
+ assert_(np.random.choice([None], s, replace=True).ndim == 0)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(np.random.choice(arr, s, replace=True).item() is a)
+
+ # Check multi dimensional array
+ s = (2,3)
+ p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
+ assert_(np.random.choice(6, s, replace=True).shape == s)
+ assert_(np.random.choice(6, s, replace=False).shape == s)
+ assert_(np.random.choice(6, s, replace=True, p=p).shape == s)
+ assert_(np.random.choice(6, s, replace=False, p=p).shape == s)
+ assert_(np.random.choice(np.arange(6), s, replace=True).shape == s)
+
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
@@ -264,6 +314,24 @@ class TestRandomDist(TestCase):
[ 9, 9]])
np.testing.assert_array_equal(actual, desired)
+ # Test nbad = 0
+ actual = np.random.hypergeometric(5, 0, 3, size=4)
+ desired = np.array([3, 3, 3, 3])
+ np.testing.assert_array_equal(actual, desired)
+
+ actual = np.random.hypergeometric(15, 0, 12, size=4)
+ desired = np.array([12, 12, 12, 12])
+ np.testing.assert_array_equal(actual, desired)
+
+ # Test ngood = 0
+ actual = np.random.hypergeometric(0, 5, 3, size=4)
+ desired = np.array([0, 0, 0, 0])
+ np.testing.assert_array_equal(actual, desired)
+
+ actual = np.random.hypergeometric(0, 15, 12, size=4)
+ desired = np.array([0, 0, 0, 0])
+ np.testing.assert_array_equal(actual, desired)
+
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index 4168ef9a4..1bba5d91d 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -1,27 +1,28 @@
-from numpy.testing import TestCase, run_module_suite, assert_,\
- assert_array_equal
+from __future__ import division, absolute_import, print_function
+
+from numpy.testing import (TestCase, run_module_suite, assert_,
+ assert_array_equal)
from numpy import random
+from numpy.compat import long
import numpy as np
class TestRegression(TestCase):
def test_VonMises_range(self):
- """Make sure generated random variables are in [-pi, pi].
-
- Regression test for ticket #986.
- """
+ # Make sure generated random variables are in [-pi, pi].
+ # Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
- r = random.mtrand.vonmises(mu,1,50)
+ r = random.mtrand.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
- def test_hypergeometric_range(self) :
- """Test for ticket #921"""
+ def test_hypergeometric_range(self):
+ # Test for ticket #921
assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
- def test_logseries_convergence(self) :
- """Test for ticket #923"""
+ def test_logseries_convergence(self):
+ # Test for ticket #923
N = 1000
np.random.seed(0)
rvsn = np.random.logseries(0.8, size=N)
@@ -40,11 +41,11 @@ class TestRegression(TestCase):
np.random.seed(1234)
a = np.random.permutation(12)
np.random.seed(1234)
- b = np.random.permutation(12L)
+ b = np.random.permutation(long(12))
assert_array_equal(a, b)
- def test_hypergeometric_range(self) :
- """Test for ticket #1690"""
+ def test_randint_range(self):
+ # Test for ticket #1690
lmax = np.iinfo('l').max
lmin = np.iinfo('l').min
try:
@@ -53,7 +54,7 @@ class TestRegression(TestCase):
raise AssertionError
def test_shuffle_mixed_dimension(self):
- """Test for trac ticket #2074"""
+ # Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
@@ -63,5 +64,23 @@ class TestRegression(TestCase):
random.shuffle(shuffled)
assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
+ def test_call_within_randomstate(self):
+ # Check that custom RandomState does not call into global state
+ m = np.random.RandomState()
+ res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
+ for i in range(3):
+ np.random.seed(i)
+ m.seed(4321)
+ # If m.state is not honored, the result will change
+ assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
+
+ def test_multivariate_normal_size_types(self):
+ # Test for multivariate_normal issue with 'size' argument.
+ # Check that the multivariate_normal size argument can be a
+ # numpy integer.
+ np.random.multivariate_normal([0], [[0]], size=1)
+ np.random.multivariate_normal([0], [[0]], size=np.int_(1))
+ np.random.multivariate_normal([0], [[0]], size=np.int64(1))
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/setup.py b/numpy/setup.py
index c55c85a25..a2f089234 100644
--- a/numpy/setup.py
+++ b/numpy/setup.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, print_function
+
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/setupscons.py b/numpy/setupscons.py
deleted file mode 100644
index 59fa57a4d..000000000
--- a/numpy/setupscons.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-from os.path import join as pjoin
-
-def configuration(parent_package='', top_path=None):
- from numpy.distutils.misc_util import Configuration
- from numpy.distutils.misc_util import scons_generate_config_py
-
- pkgname = 'numpy'
- config = Configuration(pkgname, parent_package, top_path,
- setup_name = 'setupscons.py')
- config.add_subpackage('distutils')
- config.add_subpackage('testing')
- config.add_subpackage('f2py')
- config.add_subpackage('core')
- config.add_subpackage('lib')
- config.add_subpackage('oldnumeric')
- config.add_subpackage('numarray')
- config.add_subpackage('fft')
- config.add_subpackage('linalg')
- config.add_subpackage('random')
- config.add_subpackage('ma')
- config.add_subpackage('matrixlib')
- config.add_subpackage('compat')
- config.add_subpackage('polynomial')
- config.add_subpackage('doc')
- config.add_data_dir('doc')
- config.add_data_dir('tests')
-
- def add_config(*args, **kw):
- # Generate __config__, handle inplace issues.
- if kw['scons_cmd'].inplace:
- target = pjoin(kw['pkg_name'], '__config__.py')
- else:
- target = pjoin(kw['scons_cmd'].build_lib, kw['pkg_name'],
- '__config__.py')
- scons_generate_config_py(target)
- config.add_sconscript(None, post_hook = add_config)
-
- return config
-
-if __name__ == '__main__':
- print 'This is the wrong setup.py file to run'
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index f391c8053..aad448c96 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -3,13 +3,15 @@
This single module should provide all the common functionality for numpy tests
in a single location, so that test scripts can just import it and work right
away.
+
"""
+from __future__ import division, absolute_import, print_function
from unittest import TestCase
-import decorators as dec
-from utils import *
-from numpytest import *
-from nosetester import NoseTester as Tester
-from nosetester import run_module_suite
+from . import decorators as dec
+from .utils import *
+from .numpytest import importall # remove for numpy 1.9.0
+from .nosetester import NoseTester as Tester
+from .nosetester import run_module_suite
test = Tester().test
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
index 053b99211..91659bb8d 100644
--- a/numpy/testing/decorators.py
+++ b/numpy/testing/decorators.py
@@ -13,11 +13,11 @@ function name, setup and teardown functions and so on - see
``nose.tools`` for more information.
"""
+from __future__ import division, absolute_import, print_function
+
import warnings
-import sys
+import collections
-from numpy.testing.utils import \
- WarningManager, WarningMessage
def slow(t):
"""
@@ -122,7 +122,7 @@ def skipif(skip_condition, msg=None):
import nose
# Allow for both boolean or callable skip conditions.
- if callable(skip_condition):
+ if isinstance(skip_condition, collections.Callable):
skip_val = lambda : skip_condition()
else:
skip_val = lambda : skip_condition
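
isinstance(..., collections.Callable) replaces callable() because that builtin was absent in Python 3.0-3.1; on the interpreter versions this patch targets the two spellings agree (collections.Callable later moved to collections.abc):

    import collections
    f = lambda: None
    assert isinstance(f, collections.Callable)        # true for any callable
    assert not isinstance(0, collections.Callable)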
@@ -132,9 +132,9 @@ def skipif(skip_condition, msg=None):
if msg is None:
out = 'Test skipped due to test condition'
else:
- out = '\n'+msg
+ out = msg
- return "Skipping test: %s%s" % (func.__name__,out)
+ return "Skipping test: %s: %s" % (func.__name__, out)
# We need to define *two* skippers because Python doesn't allow both
# return with value and yield inside the same function.
@@ -198,7 +198,7 @@ def knownfailureif(fail_condition, msg=None):
msg = 'Test skipped due to known failure'
# Allow for both boolean or callable known failure conditions.
- if callable(fail_condition):
+ if isinstance(fail_condition, collections.Callable):
fail_val = lambda : fail_condition()
else:
fail_val = lambda : fail_condition
@@ -207,7 +207,7 @@ def knownfailureif(fail_condition, msg=None):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
- from noseclasses import KnownFailureTest
+ from .noseclasses import KnownFailureTest
def knownfailer(*args, **kwargs):
if fail_val():
raise KnownFailureTest(msg)
@@ -246,14 +246,12 @@ def deprecated(conditional=True):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
- from noseclasses import KnownFailureTest
+ from .noseclasses import KnownFailureTest
def _deprecated_imp(*args, **kwargs):
# Poor man's replacement for the with statement
- ctx = WarningManager(record=True)
- l = ctx.__enter__()
- warnings.simplefilter('always')
- try:
+ with warnings.catch_warnings(record=True) as l:
+ warnings.simplefilter('always')
f(*args, **kwargs)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
@@ -261,10 +259,8 @@ def deprecated(conditional=True):
if not l[0].category is DeprecationWarning:
raise AssertionError("First warning for %s is not a " \
"DeprecationWarning( is %s)" % (f.__name__, l[0]))
- finally:
- ctx.__exit__()
- if callable(conditional):
+ if isinstance(conditional, collections.Callable):
cond = conditional()
else:
cond = conditional
diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py
index 2fec800ac..5497bd9ce 100644
--- a/numpy/testing/noseclasses.py
+++ b/numpy/testing/noseclasses.py
@@ -4,6 +4,7 @@
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
+from __future__ import division, absolute_import, print_function
import os
import doctest
@@ -14,7 +15,7 @@ from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.plugins.base import Plugin
from nose.util import src
import numpy
-from nosetester import get_package_name
+from .nosetester import get_package_name
import inspect
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
@@ -35,7 +36,7 @@ class NumpyDocTestFinder(doctest.DocTestFinder):
return True
elif inspect.isfunction(object):
#print '_fm C2' # dbg
- return module.__dict__ is object.func_globals
+ return module.__dict__ is object.__globals__
elif inspect.isbuiltin(object):
#print '_fm C2-1' # dbg
return module.__name__ == object.__module__
@@ -48,7 +49,7 @@ class NumpyDocTestFinder(doctest.DocTestFinder):
# to make by extension code writers, having this safety in place
# isn't such a bad idea
#print '_fm C3-1' # dbg
- return module.__name__ == object.im_class.__module__
+ return module.__name__ == object.__self__.__class__.__module__
elif inspect.getmodule(object) is not None:
#print '_fm C4' # dbg
#print 'C4 mod',module,'obj',object # dbg
@@ -100,7 +101,7 @@ class NumpyDocTestFinder(doctest.DocTestFinder):
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
- val = getattr(obj, valname).im_func
+ val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((isfunction(val) or isclass(val) or
@@ -165,8 +166,6 @@ class NumpyDoctest(npd.Doctest):
# files that should be ignored for doctests
doctest_ignore = ['generate_numpy_api.py',
- 'scons_support.py',
- 'setupscons.py',
'setup.py']
# Custom classes; class variables to allow subclassing
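
With absolute_import in effect (and unconditionally on Python 3), a bare
"from nosetester import ..." no longer finds a sibling module, hence the
leading dot. A runnable sketch using a throwaway package; every name
below is made up for illustration:

    import os, sys, tempfile, textwrap

    tmp = tempfile.mkdtemp()
    pkg = os.path.join(tmp, 'demopkg')
    os.mkdir(pkg)
    open(os.path.join(pkg, '__init__.py'), 'w').close()
    with open(os.path.join(pkg, 'helpers.py'), 'w') as f:
        f.write('NAME = "helpers"\n')
    with open(os.path.join(pkg, 'user.py'), 'w') as f:
        f.write(textwrap.dedent('''\
            from __future__ import absolute_import
            # "from helpers import NAME" would fail here on Python 3;
            # the relative form names the sibling explicitly.
            from .helpers import NAME
            '''))

    sys.path.insert(0, tmp)
    from demopkg.user import NAME
    print(NAME)   # -> helpers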
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index 60fec35d6..79ae79765 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -4,10 +4,14 @@ Nose test running.
This module implements ``test()`` and ``bench()`` functions for NumPy modules.
"""
+from __future__ import division, absolute_import, print_function
+
import os
import sys
import warnings
-import numpy.testing.utils
+from numpy.compat import basestring
+from numpy import ModuleDeprecationWarning
+
def get_package_name(filepath):
"""
@@ -56,7 +60,6 @@ def import_nose():
minimum_nose_version = (0,10,0)
try:
import nose
- from nose.tools import raises
except ImportError:
fine_nose = False
else:
@@ -185,6 +188,14 @@ class NoseTester(object):
label = 'not slow'
argv += ['-A', label]
argv += ['--verbosity', str(verbose)]
+
+ # When installing with setuptools, and also in some other cases, the
+ # test_*.py files end up marked +x executable. Nose, by default, does
+ # not run files marked with +x as they might be scripts. However, in
+ # our case nose only looks for test_*.py files under the package
+ # directory, which should be safe.
+ argv += ['--exe']
+
if extra_argv:
argv += extra_argv
return argv
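
nose's default collector skips files carrying the executable bit on the
theory that they may be standalone scripts; '--exe' overrides that. A
hypothetical standalone invocation with the same flags (nose must be
installed, and the path is a placeholder):

    import nose

    argv = ['nosetests', '-A', 'not slow', '--verbosity', '1', '--exe',
            '/path/to/package']   # placeholder test directory
    # nose.run(argv=argv)         # uncomment with a real path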
@@ -193,19 +204,19 @@ class NoseTester(object):
nose = import_nose()
import numpy
- print "NumPy version %s" % numpy.__version__
+ print("NumPy version %s" % numpy.__version__)
npdir = os.path.dirname(numpy.__file__)
- print "NumPy is installed in %s" % npdir
+ print("NumPy is installed in %s" % npdir)
if 'scipy' in self.package_name:
import scipy
- print "SciPy version %s" % scipy.__version__
+ print("SciPy version %s" % scipy.__version__)
spdir = os.path.dirname(scipy.__file__)
- print "SciPy is installed in %s" % spdir
+ print("SciPy is installed in %s" % spdir)
pyversion = sys.version.replace('\n','')
- print "Python version %s" % pyversion
- print "nose version %d.%d.%d" % nose.__versioninfo__
+ print("Python version %s" % pyversion)
+ print("nose version %d.%d.%d" % nose.__versioninfo__)
def _get_custom_doctester(self):
""" Return instantiated plugin for doctests
@@ -214,7 +225,7 @@ class NoseTester(object):
A return value of None means use the nose builtin doctest plugin
"""
- from noseclasses import NumpyDoctest
+ from .noseclasses import NumpyDoctest
return NumpyDoctest()
def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
@@ -243,7 +254,7 @@ class NoseTester(object):
'--cover-tests', '--cover-erase']
# construct list of plugins
import nose.plugins.builtin
- from noseclasses import KnownFailure, Unplugger
+ from .noseclasses import KnownFailure, Unplugger
plugins = [KnownFailure()]
plugins += [p() for p in nose.plugins.builtin.plugins]
# add doctesting if required
@@ -329,13 +340,13 @@ class NoseTester(object):
# cap verbosity at 3 because nose becomes *very* verbose beyond that
verbose = min(verbose, 3)
- import utils
+ from . import utils
utils.verbose = verbose
if doctests:
- print "Running unit tests and doctests for %s" % self.package_name
+ print("Running unit tests and doctests for %s" % self.package_name)
else:
- print "Running unit tests for %s" % self.package_name
+ print("Running unit tests for %s" % self.package_name)
self._show_system_info()
@@ -351,29 +362,28 @@ class NoseTester(object):
if raise_warnings in _warn_opts.keys():
raise_warnings = _warn_opts[raise_warnings]
- # Preserve the state of the warning filters
- warn_ctx = numpy.testing.utils.WarningManager()
- warn_ctx.__enter__()
- # Reset the warning filters to the default state,
- # so that running the tests is more repeatable.
- warnings.resetwarnings()
- # If deprecation warnings are not set to 'error' below,
- # at least set them to 'warn'.
- warnings.filterwarnings('always', category=DeprecationWarning)
- # Force the requested warnings to raise
- for warningtype in raise_warnings:
- warnings.filterwarnings('error', category=warningtype)
- # Filter out annoying import messages.
- warnings.filterwarnings('ignore', message='Not importing directory')
-
- try:
- from noseclasses import NumpyTestProgram
-
- argv, plugins = self.prepare_test_args(label,
- verbose, extra_argv, doctests, coverage)
+ with warnings.catch_warnings():
+ # Reset the warning filters to the default state,
+ # so that running the tests is more repeatable.
+ warnings.resetwarnings()
+ # If deprecation warnings are not set to 'error' below,
+ # at least set them to 'warn'.
+ warnings.filterwarnings('always', category=DeprecationWarning)
+ # Force the requested warnings to raise
+ for warningtype in raise_warnings:
+ warnings.filterwarnings('error', category=warningtype)
+ # Filter out annoying import messages.
+ warnings.filterwarnings('ignore', message='Not importing directory')
+ warnings.filterwarnings("ignore", message="numpy.dtype size changed")
+ warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
+ warnings.filterwarnings("ignore", category=ModuleDeprecationWarning)
+ warnings.filterwarnings("ignore", category=FutureWarning)
+
+ from .noseclasses import NumpyTestProgram
+
+ argv, plugins = self.prepare_test_args(
+ label, verbose, extra_argv, doctests, coverage)
t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
- finally:
- warn_ctx.__exit__()
return t.result
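
warnings.catch_warnings() snapshots the filter state on entry and
restores it on exit, which is exactly what the removed WarningManager
plus try/finally did by hand. A sketch of the behaviour being relied on:

    import warnings

    with warnings.catch_warnings():
        warnings.resetwarnings()
        warnings.filterwarnings('error', category=DeprecationWarning)
        try:
            warnings.warn('old API', DeprecationWarning)
        except DeprecationWarning as e:
            print('raised inside the block:', e)

    # the previous filters are back in force out here
    warnings.warn('old API', DeprecationWarning)   # warns, does not raise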
@@ -433,7 +443,7 @@ class NoseTester(object):
"""
- print "Running benchmarks for %s" % self.package_name
+ print("Running benchmarks for %s" % self.package_name)
self._show_system_info()
argv = self._test_argv(label, verbose, extra_argv)
@@ -443,7 +453,7 @@ class NoseTester(object):
nose = import_nose()
# get plugin to disable doctests
- from noseclasses import Unplugger
+ from .noseclasses import Unplugger
add_plugins = [Unplugger('doctest')]
return nose.run(argv=argv, addplugins=add_plugins)
diff --git a/numpy/testing/nulltester.py b/numpy/testing/nulltester.py
deleted file mode 100644
index 8cee43495..000000000
--- a/numpy/testing/nulltester.py
+++ /dev/null
@@ -1,15 +0,0 @@
-''' Null tester to signal nose tests disabled
-
-Merely returns error reporting lack of nose package or version number
-below requirements.
-
-See pkgtester, nosetester modules
-
-'''
-
-class NullTester(object):
- def test(self, labels=None, *args, **kwargs):
- raise ImportError(
- 'Need nose >=0.10 for tests - see %s' %
- 'http://somethingaboutorange.com/mrl/projects/nose')
- bench = test
diff --git a/numpy/testing/numpytest.py b/numpy/testing/numpytest.py
index 683df7a01..2120975df 100644
--- a/numpy/testing/numpytest.py
+++ b/numpy/testing/numpytest.py
@@ -1,34 +1,20 @@
-import os
-import sys
-import traceback
-
-__all__ = ['IgnoreException', 'importall',]
+from __future__ import division, absolute_import, print_function
-DEBUG=0
-get_frame = sys._getframe
+import os
+import warnings
-class IgnoreException(Exception):
- "Ignoring this exception due to disabled feature"
+__all__ = ['importall']
-def output_exception(printstream = sys.stdout):
- try:
- type, value, tb = sys.exc_info()
- info = traceback.extract_tb(tb)
- #this is more verbose
- #traceback.print_exc()
- filename, lineno, function, text = info[-1] # last line only
- msg = "%s:%d: %s: %s (in %s)\n" % (
- filename, lineno, type.__name__, str(value), function)
- printstream.write(msg)
- finally:
- type = value = tb = None # clean up
- return
-
def importall(package):
"""
+ `importall` is DEPRECATED and will be removed in numpy 1.9.0
+
Try recursively to import all subpackages under package.
"""
+ warnings.warn("`importall` is deprecated, and will be removed in numpy 1.9.0",
+ DeprecationWarning)
+
if isinstance(package,str):
package = __import__(package)
@@ -42,9 +28,9 @@ def importall(package):
continue
name = package_name+'.'+subpackage_name
try:
- exec 'import %s as m' % (name)
- except Exception, msg:
- print 'Failed importing %s: %s' %(name, msg)
+ exec('import %s as m' % (name))
+ except Exception as msg:
+ print('Failed importing %s: %s' %(name, msg))
continue
importall(m)
return
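
Two Python 3 syntax changes land together in this hunk: exec is now a
function rather than a statement, and except clauses use the "as"
spelling. Both in a runnable sketch; the missing module name is made up:

    from __future__ import print_function

    ns = {}
    exec('import math as m', ns)     # was: exec 'import math as m' in ns
    print(ns['m'].pi)

    try:
        exec('import no_such_module_xyz', ns)
    except Exception as msg:         # was: except Exception, msg
        print('Failed importing no_such_module_xyz: %s' % msg)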
diff --git a/numpy/testing/print_coercion_tables.py b/numpy/testing/print_coercion_tables.py
index d87544987..06c968cf4 100755
--- a/numpy/testing/print_coercion_tables.py
+++ b/numpy/testing/print_coercion_tables.py
@@ -1,5 +1,8 @@
#!/usr/bin/env python
-"""Prints type-coercion tables for the built-in NumPy types"""
+"""Prints type-coercion tables for the built-in NumPy types
+
+"""
+from __future__ import division, absolute_import, print_function
import numpy as np
@@ -17,26 +20,26 @@ class GenericObject(object):
dtype = np.dtype('O')
def print_cancast_table(ntypes):
- print 'X',
- for char in ntypes: print char,
- print
+ print('X', end=' ')
+ for char in ntypes: print(char, end=' ')
+ print()
for row in ntypes:
- print row,
+ print(row, end=' ')
for col in ntypes:
- print int(np.can_cast(row, col)),
- print
+ print(int(np.can_cast(row, col)), end=' ')
+ print()
def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False):
- print '+',
- for char in ntypes: print char,
- print
+ print('+', end=' ')
+ for char in ntypes: print(char, end=' ')
+ print()
for row in ntypes:
if row == 'O':
rowtype = GenericObject
else:
rowtype = np.obj2sctype(row)
- print row,
+ print(row, end=' ')
for col in ntypes:
if col == 'O':
coltype = GenericObject
@@ -62,25 +65,25 @@ def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray,
char = '@'
except TypeError:
char = '#'
- print char,
- print
+ print(char, end=' ')
+ print()
-print "can cast"
+print("can cast")
print_cancast_table(np.typecodes['All'])
-print
-print "In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'"
-print
-print "scalar + scalar"
+print()
+print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
+print()
+print("scalar + scalar")
print_coercion_table(np.typecodes['All'], 0, 0, False)
-print
-print "scalar + neg scalar"
+print()
+print("scalar + neg scalar")
print_coercion_table(np.typecodes['All'], 0, -1, False)
-print
-print "array + scalar"
+print()
+print("array + scalar")
print_coercion_table(np.typecodes['All'], 0, 0, True)
-print
-print "array + neg scalar"
+print()
+print("array + neg scalar")
print_coercion_table(np.typecodes['All'], 0, -1, True)
-print
-print "promote_types"
+print()
+print("promote_types")
print_coercion_table(np.typecodes['All'], 0, 0, False, True)
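
The mechanical rule applied throughout this file: a Python 2 print with
a trailing comma suppressed the newline and emitted a space, which the
function form reproduces with end=' '; a bare print becomes print().
Sketch:

    from __future__ import print_function

    for char in 'bhilq':
        print(char, end=' ')   # was: print char,
    print()                    # was: print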
diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py
index 6d8fc85c5..0e3a9627e 100755
--- a/numpy/testing/setup.py
+++ b/numpy/testing/setup.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+from __future__ import division, print_function
+
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
diff --git a/numpy/testing/setupscons.py b/numpy/testing/setupscons.py
deleted file mode 100755
index ad248d27f..000000000
--- a/numpy/testing/setupscons.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-
-def configuration(parent_package='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('testing',parent_package,top_path)
- return config
-
-if __name__ == '__main__':
- from numpy.distutils.core import setup
- setup(maintainer = "NumPy Developers",
- maintainer_email = "numpy-dev@numpy.org",
- description = "NumPy test module",
- url = "http://www.numpy.org",
- license = "NumPy License (BSD Style)",
- configuration = configuration,
- )
diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py
index a28e24ac3..f3e142d54 100644
--- a/numpy/testing/tests/test_decorators.py
+++ b/numpy/testing/tests/test_decorators.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.testing import *
from numpy.testing.noseclasses import KnownFailureTest
@@ -86,7 +88,7 @@ def test_skip_functions_callable():
def test_skip_generators_hardcoded():
@dec.knownfailureif(True, "This test is known to fail")
def g1(x):
- for i in xrange(x):
+ for i in range(x):
yield i
try:
@@ -100,7 +102,7 @@ def test_skip_generators_hardcoded():
@dec.knownfailureif(False, "This test is NOT known to fail")
def g2(x):
- for i in xrange(x):
+ for i in range(x):
yield i
raise DidntSkipException('FAIL')
@@ -119,7 +121,7 @@ def test_skip_generators_callable():
@dec.knownfailureif(skip_tester, "This test is known to fail")
def g1(x):
- for i in xrange(x):
+ for i in range(x):
yield i
try:
@@ -134,7 +136,7 @@ def test_skip_generators_callable():
@dec.knownfailureif(skip_tester, "This test is NOT known to fail")
def g2(x):
- for i in xrange(x):
+ for i in range(x):
yield i
raise DidntSkipException('FAIL')
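
Python 3 drops xrange; its range is already lazy, so the renamed loops
in these test generators keep the same memory behaviour. Sketch:

    def g1(x):
        for i in range(x):   # was: xrange(x)
            yield i

    print(list(g1(3)))       # [0, 1, 2]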
diff --git a/numpy/testing/tests/test_doctesting.py b/numpy/testing/tests/test_doctesting.py
index a34071128..43f9fb6ce 100644
--- a/numpy/testing/tests/test_doctesting.py
+++ b/numpy/testing/tests/test_doctesting.py
@@ -1,5 +1,8 @@
""" Doctests for NumPy-specific nose/doctest modifications
+
"""
+from __future__ import division, absolute_import, print_function
+
# try the #random directive on the output line
def check_random_directive():
'''
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 23b2f8e7b..cbc8cd23e 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import warnings
import sys
@@ -376,7 +378,7 @@ class TestArrayAlmostEqualNulp(unittest.TestCase):
@dec.knownfailureif(True, "Github issue #347")
def test_simple(self):
np.random.seed(12345)
- for i in xrange(100):
+ for i in range(100):
dev = np.random.randn(10)
x = np.ones(10)
y = x + dev * np.finfo(np.float64).eps
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 16ed0f803..235b9e0ba 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -1,14 +1,21 @@
"""
Utility function to facilitate testing.
+
"""
+from __future__ import division, absolute_import, print_function
import os
import sys
import re
import operator
-import types
import warnings
-from nosetester import import_nose
+from .nosetester import import_nose
+from numpy.core import float32, empty, arange
+
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
__all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal',
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
@@ -17,10 +24,12 @@ __all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal',
'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
- 'assert_allclose']
+ 'assert_allclose', 'IgnoreException']
+
verbose = 0
+
def assert_(val, msg='') :
"""
Assert that works in release mode.
@@ -47,7 +56,7 @@ def gisnan(x):
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isnan
st = isnan(x)
- if isinstance(st, types.NotImplementedType):
+ if isinstance(st, type(NotImplemented)):
raise TypeError("isnan not supported for this type")
return st
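
types.NotImplementedType does not exist on Python 3; taking type() of
the singleton is the portable spelling used here. Sketch:

    st = (1).__eq__('x')   # cross-type comparison returns NotImplemented
    if isinstance(st, type(NotImplemented)):
        print('comparison not supported for these operands')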
@@ -62,14 +71,11 @@ def gisfinite(x):
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isfinite, seterr
- err = seterr(invalid='ignore')
- try:
+ from numpy.core import isfinite, errstate
+ with errstate(invalid='ignore'):
st = isfinite(x)
- if isinstance(st, types.NotImplementedType):
+ if isinstance(st, type(NotImplemented)):
raise TypeError("isfinite not supported for this type")
- finally:
- seterr(**err)
return st
def gisinf(x):
@@ -83,14 +89,11 @@ def gisinf(x):
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isinf, seterr
- err = seterr(invalid='ignore')
- try:
+ from numpy.core import isinf, errstate
+ with errstate(invalid='ignore'):
st = isinf(x)
- if isinstance(st, types.NotImplementedType):
+ if isinstance(st, type(NotImplemented)):
raise TypeError("isinf not supported for this type")
- finally:
- seterr(**err)
return st
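
np.errstate scopes a floating-point error policy and restores the prior
settings on exit, replacing the seterr save, try/finally, seterr restore
dance in gisfinite and gisinf. A minimal sketch (requires numpy):

    import numpy as np

    a = np.array([-1.0, 4.0])
    with np.errstate(invalid='ignore'):
        r = np.sqrt(a)     # would otherwise emit an invalid-value warning
    print(r)               # [nan  2.]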
def rand(*args):
@@ -344,8 +347,7 @@ def print_assert_equal(test_string,actual,desired):
import pprint
if not (actual == desired):
- import cStringIO
- msg = cStringIO.StringIO()
+ msg = StringIO()
msg.write(test_string)
msg.write(' failed\nACTUAL: \n')
pprint.pprint(actual,msg)
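
cStringIO is gone on Python 3; the version guard added at the top of
this file provides a single StringIO name on both lines. The shim in
isolation:

    import sys

    if sys.version_info[0] >= 3:
        from io import StringIO
    else:
        from StringIO import StringIO

    msg = StringIO()
    msg.write('test_spam failed\n')   # hypothetical test name
    print(msg.getvalue())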
@@ -525,18 +527,15 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
"""
import numpy as np
- actual, desired = map(float, (actual, desired))
+
+ (actual, desired) = map(float, (actual, desired))
if desired==actual:
return
# Normalized the numbers to be in range (-10.0,10.0)
# scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
- err = np.seterr(invalid='ignore')
- try:
+ with np.errstate(invalid='ignore'):
scale = 0.5*(np.abs(desired) + np.abs(actual))
scale = np.power(10,np.floor(np.log10(scale)))
- finally:
- np.seterr(**err)
-
try:
sc_desired = desired/scale
except ZeroDivisionError:
@@ -643,7 +642,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
names=('x', 'y'))
if not cond :
raise AssertionError(msg)
- except ValueError, e:
+ except ValueError as e:
import traceback
efmt = traceback.format_exc()
header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
@@ -881,7 +880,7 @@ def assert_array_less(x, y, err_msg='', verbose=True):
header='Arrays are not less-ordered')
def runstring(astr, dict):
- exec astr in dict
+ exec(astr, dict)
def assert_string_equal(actual, desired):
"""
@@ -913,10 +912,12 @@ def assert_string_equal(actual, desired):
import difflib
if not isinstance(actual, str) :
- raise AssertionError(`type(actual)`)
+ raise AssertionError(repr(type(actual)))
if not isinstance(desired, str):
- raise AssertionError(`type(desired)`)
- if re.match(r'\A'+desired+r'\Z', actual, re.M): return
+ raise AssertionError(repr(type(desired)))
+ if re.match(r'\A'+desired+r'\Z', actual, re.M):
+ return
+
diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
diff_list = []
while diff:
@@ -930,7 +931,7 @@ def assert_string_equal(actual, desired):
l.append(d2)
d2 = diff.pop(0)
if not d2.startswith('+ ') :
- raise AssertionError(`d2`)
+ raise AssertionError(repr(d2))
l.append(d2)
d3 = diff.pop(0)
if d3.startswith('? '):
@@ -941,7 +942,7 @@ def assert_string_equal(actual, desired):
continue
diff_list.extend(l)
continue
- raise AssertionError(`d1`)
+ raise AssertionError(repr(d1))
if not diff_list:
return
msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
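
The backquote syntax `obj` was removed in Python 3; repr() is the
portable equivalent substituted throughout this function. Sketch:

    d2 = '- expected line'   # stand-in for a difflib entry
    print(repr(d2))          # was spelled `d2` on Python 2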
@@ -1050,7 +1051,7 @@ def decorate_methods(cls, decorator, testmatch=None):
# delayed import to reduce startup time
from inspect import isfunction
- methods = filter(isfunction, cls_attr.values())
+ methods = [_m for _m in cls_attr.values() if isfunction(_m)]
for function in methods:
try:
if hasattr(function, 'compat_func_name'):
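
On Python 3, filter() returns a one-shot iterator, so call sites that
need a real sequence to index or iterate repeatedly are rewritten as
list comprehensions. Sketch:

    from inspect import isfunction

    def f():
        pass

    cls_attr = {'f': f, 'x': 1, 'name': 'demo'}
    methods = [_m for _m in cls_attr.values() if isfunction(_m)]
    print(len(methods), methods[0] is f)   # 1 True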
@@ -1108,7 +1109,7 @@ def measure(code_str,times=1,label=None):
elapsed = jiffies()
while i < times:
i += 1
- exec code in globs,locs
+ exec(code, globs,locs)
elapsed = jiffies() - elapsed
return 0.01*elapsed
@@ -1173,6 +1174,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0,
import numpy as np
def compare(x, y):
return np.allclose(x, y, rtol=rtol, atol=atol)
+
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
@@ -1296,7 +1298,7 @@ def nulp_diff(x, y, dtype=None):
Returns
-------
- nulp: array_like
+ nulp : array_like
number of representable floating point numbers between each item in x
and y.
@@ -1366,6 +1368,8 @@ class WarningMessage(object):
"""
Holds the result of a single showwarning() call.
+ Deprecated in 1.8.0
+
Notes
-----
`WarningMessage` is copied from the Python 2.6 warnings module,
@@ -1406,6 +1410,8 @@ class WarningManager(object):
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
+ Deprecated in 1.8.0
+
Notes
-----
`WarningManager` is a copy of the ``catch_warnings`` context manager
@@ -1468,13 +1474,8 @@ def assert_warns(warning_class, func, *args, **kw):
The value returned by `func`.
"""
-
- # XXX: once we may depend on python >= 2.6, this can be replaced by the
- # warnings module context manager.
- ctx = WarningManager(record=True)
- l = ctx.__enter__()
- warnings.simplefilter('always')
- try:
+ with warnings.catch_warnings(record=True) as l:
+ warnings.simplefilter('always')
result = func(*args, **kw)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
@@ -1482,8 +1483,6 @@ def assert_warns(warning_class, func, *args, **kw):
if not l[0].category is warning_class:
raise AssertionError("First warning for %s is not a " \
"%s( is %s)" % (func.__name__, warning_class, l[0]))
- finally:
- ctx.__exit__()
return result
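
A usage sketch for the rewritten helper; the function names below are
illustrative, and assert_warns passes through whatever the wrapped
function returns:

    import warnings
    from numpy.testing import assert_warns

    def legacy(x):
        warnings.warn('use new_api() instead', DeprecationWarning)
        return 2 * x

    print(assert_warns(DeprecationWarning, legacy, 21))   # 42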
def assert_no_warnings(func, *args, **kw):
@@ -1498,22 +1497,87 @@ def assert_no_warnings(func, *args, **kw):
Arguments passed to `func`.
\\*\\*kwargs : Kwargs
Keyword arguments passed to `func`.
-
+
Returns
-------
The value returned by `func`.
"""
- # XXX: once we may depend on python >= 2.6, this can be replaced by the
- # warnings module context manager.
- ctx = WarningManager(record=True)
- l = ctx.__enter__()
- warnings.simplefilter('always')
- try:
+ with warnings.catch_warnings(record=True) as l:
+ warnings.simplefilter('always')
result = func(*args, **kw)
if len(l) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, l))
- finally:
- ctx.__exit__()
return result
+
+
+def gen_alignment_data(dtype=float32, type='binary', max_size=24):
+ """
+ generator producing data with different alignment and offsets
+ to test simd vectorization
+
+ Parameters
+ ----------
+ dtype : dtype
+ data type to produce
+ type : string
+ 'unary': create data for unary operations, creates one input
+ and output array
+ 'binary': create data for binary operations, creates two input
+ and output array
+ max_size : integer
+ maximum size of data to produce
+
+ Returns
+ -------
+ if type is 'unary', yields one output array, one input array and a message
+ containing information on the data
+ if type is 'binary', yields one output array, two input arrays and a message
+ containing information on the data
+
+ """
+ ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
+ bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
+ for o in range(3):
+ for s in range(o + 2, max(o + 3, max_size)):
+ if type == 'unary':
+ inp = lambda : arange(s, dtype=dtype)[o:]
+ out = empty((s,), dtype=dtype)[o:]
+ yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
+ yield inp(), inp(), ufmt % (o, o, s, dtype, 'in place')
+ yield out[1:], inp()[:-1], ufmt % \
+ (o + 1, o, s - 1, dtype, 'out of place')
+ yield out[:-1], inp()[1:], ufmt % \
+ (o, o + 1, s - 1, dtype, 'out of place')
+ yield inp()[:-1], inp()[1:], ufmt % \
+ (o, o + 1, s - 1, dtype, 'aliased')
+ yield inp()[1:], inp()[:-1], ufmt % \
+ (o + 1, o, s - 1, dtype, 'aliased')
+ if type == 'binary':
+ inp1 = lambda :arange(s, dtype=dtype)[o:]
+ inp2 = lambda :arange(s, dtype=dtype)[o:]
+ out = empty((s,), dtype=dtype)[o:]
+ yield out, inp1(), inp2(), bfmt % \
+ (o, o, o, s, dtype, 'out of place')
+ yield inp1(), inp1(), inp2(), bfmt % \
+ (o, o, o, s, dtype, 'in place1')
+ yield inp2(), inp1(), inp2(), bfmt % \
+ (o, o, o, s, dtype, 'in place2')
+ yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+ (o + 1, o, o, s - 1, dtype, 'out of place')
+ yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+ (o, o + 1, o, s - 1, dtype, 'out of place')
+ yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+ (o, o, o + 1, s - 1, dtype, 'out of place')
+ yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+ (o + 1, o, o, s - 1, dtype, 'aliased')
+ yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+ (o, o + 1, o, s - 1, dtype, 'aliased')
+ yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+ (o, o, o + 1, s - 1, dtype, 'aliased')
+
+
+class IgnoreException(Exception):
+ "Ignoring this exception due to disabled feature"
+
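
A usage sketch for the new generator, assuming this build's public
numpy.testing.utils location: each yield supplies differently offset
array views plus a description string, to exercise aligned, unaligned
and aliased SIMD code paths:

    from numpy.testing.utils import gen_alignment_data

    for out, inp, msg in gen_alignment_data(type='unary', max_size=8):
        out[...] = -inp    # stand-in for the unary ufunc under test
        print(msg)
        break              # one case is enough for the sketch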
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index ac351191a..ee36e0a9b 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import sys
import numpy as np
@@ -18,10 +20,10 @@ class TestLoadLibrary(TestCase):
try:
cdll = load_library('multiarray',
np.core.multiarray.__file__)
- except ImportError, e:
+ except ImportError as e:
msg = "ctypes is not available on this python: skipping the test" \
" (import error was: %s)" % str(e)
- print msg
+ print(msg)
@dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation")
@dec.knownfailureif(sys.platform=='cygwin', "This test is known to fail on cygwin")
@@ -34,11 +36,11 @@ class TestLoadLibrary(TestCase):
cdll = load_library('multiarray%s' % so,
np.core.multiarray.__file__)
except ImportError:
- print "No distutils available, skipping test."
- except ImportError, e:
+ print("No distutils available, skipping test.")
+ except ImportError as e:
msg = "ctypes is not available on this python: skipping the test" \
" (import error was: %s)" % str(e)
- print msg
+ print(msg)
class TestNdpointer(TestCase):
def test_dtype(self):
diff --git a/numpy/tests/test_matlib.py b/numpy/tests/test_matlib.py
index 076676495..814c24b0c 100644
--- a/numpy/tests/test_matlib.py
+++ b/numpy/tests/test_matlib.py
@@ -1,6 +1,8 @@
+from __future__ import division, absolute_import, print_function
+
import numpy as np
import numpy.matlib
-from numpy.testing import assert_array_equal, assert_
+from numpy.testing import assert_array_equal, assert_, run_module_suite
def test_empty():
x = np.matlib.empty((2,))